blob: 5a69e3288ab7091b8c8129842084ef56e5c37405 [file] [log] [blame]
Bart Van Asschebec9e8a2017-08-17 13:12:47 -07001/*
2 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
3 * was acquired by Western Digital in 2012.
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004 *
Bart Van Asschebec9e8a2017-08-17 13:12:47 -07005 * Copyright 2012 sTec, Inc.
6 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
7 *
8 * This file is part of the Linux kernel, and is made available under
9 * the terms of the GNU General Public License version 2.
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060010 */
11
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/init.h>
15#include <linux/pci.h>
16#include <linux/slab.h>
17#include <linux/spinlock.h>
18#include <linux/blkdev.h>
19#include <linux/sched.h>
20#include <linux/interrupt.h>
21#include <linux/compiler.h>
22#include <linux/workqueue.h>
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060023#include <linux/delay.h>
24#include <linux/time.h>
25#include <linux/hdreg.h>
26#include <linux/dma-mapping.h>
27#include <linux/completion.h>
28#include <linux/scatterlist.h>
29#include <linux/version.h>
30#include <linux/err.h>
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060031#include <linux/aer.h>
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060032#include <linux/wait.h>
33#include <linux/uio.h>
34#include <scsi/scsi.h>
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060035#include <scsi/sg.h>
36#include <linux/io.h>
37#include <linux/uaccess.h>
Bartlomiej Zolnierkiewicz4ca90b52013-11-05 12:37:04 +010038#include <asm/unaligned.h>
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060039
40#include "skd_s1120.h"
41
42static int skd_dbg_level;
43static int skd_isr_comp_limit = 4;
44
45enum {
46 STEC_LINK_2_5GTS = 0,
47 STEC_LINK_5GTS = 1,
48 STEC_LINK_8GTS = 2,
49 STEC_LINK_UNKNOWN = 0xFF
50};
51
52enum {
53 SKD_FLUSH_INITIALIZER,
54 SKD_FLUSH_ZERO_SIZE_FIRST,
55 SKD_FLUSH_DATA_SECOND,
56};
57
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060058#define SKD_ASSERT(expr) \
59 do { \
60 if (unlikely(!(expr))) { \
61 pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
62 # expr, __FILE__, __func__, __LINE__); \
63 } \
64 } while (0)
65
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060066#define DRV_NAME "skd"
67#define DRV_VERSION "2.2.1"
68#define DRV_BUILD_ID "0260"
69#define PFX DRV_NAME ": "
70#define DRV_BIN_VERSION 0x100
71#define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
72
Bart Van Asschebec9e8a2017-08-17 13:12:47 -070073MODULE_LICENSE("GPL");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060074
Mike Snitzer38d4a1b2013-11-01 15:05:10 -040075MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -060076MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
77
78#define PCI_VENDOR_ID_STEC 0x1B39
79#define PCI_DEVICE_ID_S1120 0x0001
80
81#define SKD_FUA_NV (1 << 1)
82#define SKD_MINORS_PER_DEVICE 16
83
84#define SKD_MAX_QUEUE_DEPTH 200u
85
86#define SKD_PAUSE_TIMEOUT (5 * 1000)
87
88#define SKD_N_FITMSG_BYTES (512u)
89
90#define SKD_N_SPECIAL_CONTEXT 32u
91#define SKD_N_SPECIAL_FITMSG_BYTES (128u)
92
93/* SG elements are 32 bytes, so we can make this 4096 and still be under the
94 * 128KB limit. That allows 4096*4K = 16M xfer size
95 */
96#define SKD_N_SG_PER_REQ_DEFAULT 256u
97#define SKD_N_SG_PER_SPECIAL 256u
98
99#define SKD_N_COMPLETION_ENTRY 256u
100#define SKD_N_READ_CAP_BYTES (8u)
101
102#define SKD_N_INTERNAL_BYTES (512u)
103
104/* 5 bits of uniqifier, 0xF800 */
105#define SKD_ID_INCR (0x400)
106#define SKD_ID_TABLE_MASK (3u << 8u)
107#define SKD_ID_RW_REQUEST (0u << 8u)
108#define SKD_ID_INTERNAL (1u << 8u)
109#define SKD_ID_SPECIAL_REQUEST (2u << 8u)
110#define SKD_ID_FIT_MSG (3u << 8u)
111#define SKD_ID_SLOT_MASK 0x00FFu
112#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
113
114#define SKD_N_TIMEOUT_SLOT 4u
115#define SKD_TIMEOUT_SLOT_MASK 3u
116
117#define SKD_N_MAX_SECTORS 2048u
118
119#define SKD_MAX_RETRIES 2u
120
121#define SKD_TIMER_SECONDS(seconds) (seconds)
122#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
123
124#define INQ_STD_NBYTES 36
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600125
126enum skd_drvr_state {
127 SKD_DRVR_STATE_LOAD,
128 SKD_DRVR_STATE_IDLE,
129 SKD_DRVR_STATE_BUSY,
130 SKD_DRVR_STATE_STARTING,
131 SKD_DRVR_STATE_ONLINE,
132 SKD_DRVR_STATE_PAUSING,
133 SKD_DRVR_STATE_PAUSED,
134 SKD_DRVR_STATE_DRAINING_TIMEOUT,
135 SKD_DRVR_STATE_RESTARTING,
136 SKD_DRVR_STATE_RESUMING,
137 SKD_DRVR_STATE_STOPPING,
138 SKD_DRVR_STATE_FAULT,
139 SKD_DRVR_STATE_DISAPPEARED,
140 SKD_DRVR_STATE_PROTOCOL_MISMATCH,
141 SKD_DRVR_STATE_BUSY_ERASE,
142 SKD_DRVR_STATE_BUSY_SANITIZE,
143 SKD_DRVR_STATE_BUSY_IMMINENT,
144 SKD_DRVR_STATE_WAIT_BOOT,
145 SKD_DRVR_STATE_SYNCING,
146};
147
148#define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
149#define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
150#define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
151#define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
152#define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
153#define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
154#define SKD_START_WAIT_SECONDS 90u
155
156enum skd_req_state {
157 SKD_REQ_STATE_IDLE,
158 SKD_REQ_STATE_SETUP,
159 SKD_REQ_STATE_BUSY,
160 SKD_REQ_STATE_COMPLETED,
161 SKD_REQ_STATE_TIMEOUT,
162 SKD_REQ_STATE_ABORTED,
163};
164
165enum skd_fit_msg_state {
166 SKD_MSG_STATE_IDLE,
167 SKD_MSG_STATE_BUSY,
168};
169
170enum skd_check_status_action {
171 SKD_CHECK_STATUS_REPORT_GOOD,
172 SKD_CHECK_STATUS_REPORT_SMART_ALERT,
173 SKD_CHECK_STATUS_REQUEUE_REQUEST,
174 SKD_CHECK_STATUS_REPORT_ERROR,
175 SKD_CHECK_STATUS_BUSY_IMMINENT,
176};
177
178struct skd_fitmsg_context {
179 enum skd_fit_msg_state state;
180
181 struct skd_fitmsg_context *next;
182
183 u32 id;
184 u16 outstanding;
185
186 u32 length;
187 u32 offset;
188
189 u8 *msg_buf;
190 dma_addr_t mb_dma_address;
191};
192
193struct skd_request_context {
194 enum skd_req_state state;
195
196 struct skd_request_context *next;
197
198 u16 id;
199 u32 fitmsg_id;
200
201 struct request *req;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600202 u8 flush_cmd;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600203
204 u32 timeout_stamp;
205 u8 sg_data_dir;
206 struct scatterlist *sg;
207 u32 n_sg;
208 u32 sg_byte_count;
209
210 struct fit_sg_descriptor *sksg_list;
211 dma_addr_t sksg_dma_address;
212
213 struct fit_completion_entry_v1 completion;
214
215 struct fit_comp_error_info err_info;
216
217};
218#define SKD_DATA_DIR_HOST_TO_CARD 1
219#define SKD_DATA_DIR_CARD_TO_HOST 2
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600220
221struct skd_special_context {
222 struct skd_request_context req;
223
224 u8 orphaned;
225
226 void *data_buf;
227 dma_addr_t db_dma_address;
228
229 u8 *msg_buf;
230 dma_addr_t mb_dma_address;
231};
232
233struct skd_sg_io {
234 fmode_t mode;
235 void __user *argp;
236
237 struct sg_io_hdr sg;
238
239 u8 cdb[16];
240
241 u32 dxfer_len;
242 u32 iovcnt;
243 struct sg_iovec *iov;
244 struct sg_iovec no_iov_iov;
245
246 struct skd_special_context *skspcl;
247};
248
249typedef enum skd_irq_type {
250 SKD_IRQ_LEGACY,
251 SKD_IRQ_MSI,
252 SKD_IRQ_MSIX
253} skd_irq_type_t;
254
255#define SKD_MAX_BARS 2
256
257struct skd_device {
258 volatile void __iomem *mem_map[SKD_MAX_BARS];
259 resource_size_t mem_phys[SKD_MAX_BARS];
260 u32 mem_size[SKD_MAX_BARS];
261
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600262 struct skd_msix_entry *msix_entries;
263
264 struct pci_dev *pdev;
265 int pcie_error_reporting_is_enabled;
266
267 spinlock_t lock;
268 struct gendisk *disk;
269 struct request_queue *queue;
270 struct device *class_dev;
271 int gendisk_on;
272 int sync_done;
273
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600274 u32 devno;
275 u32 major;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600276 char isr_name[30];
277
278 enum skd_drvr_state state;
279 u32 drive_state;
280
281 u32 in_flight;
282 u32 cur_max_queue_depth;
283 u32 queue_low_water_mark;
284 u32 dev_max_queue_depth;
285
286 u32 num_fitmsg_context;
287 u32 num_req_context;
288
289 u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
290 u32 timeout_stamp;
291 struct skd_fitmsg_context *skmsg_free_list;
292 struct skd_fitmsg_context *skmsg_table;
293
294 struct skd_request_context *skreq_free_list;
295 struct skd_request_context *skreq_table;
296
297 struct skd_special_context *skspcl_free_list;
298 struct skd_special_context *skspcl_table;
299
300 struct skd_special_context internal_skspcl;
301 u32 read_cap_blocksize;
302 u32 read_cap_last_lba;
303 int read_cap_is_valid;
304 int inquiry_is_valid;
305 u8 inq_serial_num[13]; /*12 chars plus null term */
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600306
307 u8 skcomp_cycle;
308 u32 skcomp_ix;
309 struct fit_completion_entry_v1 *skcomp_table;
310 struct fit_comp_error_info *skerr_table;
311 dma_addr_t cq_dma_address;
312
313 wait_queue_head_t waitq;
314
315 struct timer_list timer;
316 u32 timer_countdown;
317 u32 timer_substate;
318
319 int n_special;
320 int sgs_per_request;
321 u32 last_mtd;
322
323 u32 proto_ver;
324
325 int dbg_level;
326 u32 connect_time_stamp;
327 int connect_retries;
328#define SKD_MAX_CONNECT_RETRIES 16
329 u32 drive_jiffies;
330
331 u32 timo_slot;
332
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600333 struct work_struct completion_worker;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600334};
335
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600336#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
337#define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
338#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
339
340static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
341{
Bart Van Assche14262a42017-08-17 13:12:57 -0700342 u32 val = readl(skdev->mem_map[1] + offset);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600343
Bart Van Assche14262a42017-08-17 13:12:57 -0700344 if (unlikely(skdev->dbg_level >= 2))
Bart Van Asschef98806d2017-08-17 13:12:58 -0700345 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
Bart Van Assche14262a42017-08-17 13:12:57 -0700346 return val;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600347}
348
349static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
350 u32 offset)
351{
Bart Van Assche14262a42017-08-17 13:12:57 -0700352 writel(val, skdev->mem_map[1] + offset);
353 if (unlikely(skdev->dbg_level >= 2))
Bart Van Asschef98806d2017-08-17 13:12:58 -0700354 dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600355}
356
357static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
358 u32 offset)
359{
Bart Van Assche14262a42017-08-17 13:12:57 -0700360 writeq(val, skdev->mem_map[1] + offset);
361 if (unlikely(skdev->dbg_level >= 2))
Bart Van Asschef98806d2017-08-17 13:12:58 -0700362 dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
363 val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600364}
365
366
367#define SKD_IRQ_DEFAULT SKD_IRQ_MSI
368static int skd_isr_type = SKD_IRQ_DEFAULT;
369
370module_param(skd_isr_type, int, 0444);
371MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
372 " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
373
374#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
375static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
376
377module_param(skd_max_req_per_msg, int, 0444);
378MODULE_PARM_DESC(skd_max_req_per_msg,
379 "Maximum SCSI requests packed in a single message."
380 " (1-14, default==1)");
381
382#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
383#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
384static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
385
386module_param(skd_max_queue_depth, int, 0444);
387MODULE_PARM_DESC(skd_max_queue_depth,
388 "Maximum SCSI requests issued to s1120."
389 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
390
391static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
392module_param(skd_sgs_per_request, int, 0444);
393MODULE_PARM_DESC(skd_sgs_per_request,
394 "Maximum SG elements per block request."
395 " (1-4096, default==256)");
396
397static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
398module_param(skd_max_pass_thru, int, 0444);
399MODULE_PARM_DESC(skd_max_pass_thru,
400 "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
401
402module_param(skd_dbg_level, int, 0444);
403MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
404
405module_param(skd_isr_comp_limit, int, 0444);
406MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
407
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600408/* Major device number dynamically assigned. */
409static u32 skd_major;
410
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600411static void skd_destruct(struct skd_device *skdev);
412static const struct block_device_operations skd_blockdev_ops;
413static void skd_send_fitmsg(struct skd_device *skdev,
414 struct skd_fitmsg_context *skmsg);
415static void skd_send_special_fitmsg(struct skd_device *skdev,
416 struct skd_special_context *skspcl);
417static void skd_request_fn(struct request_queue *rq);
418static void skd_end_request(struct skd_device *skdev,
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200419 struct skd_request_context *skreq, blk_status_t status);
420static bool skd_preop_sg_list(struct skd_device *skdev,
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600421 struct skd_request_context *skreq);
422static void skd_postop_sg_list(struct skd_device *skdev,
423 struct skd_request_context *skreq);
424
425static void skd_restart_device(struct skd_device *skdev);
426static int skd_quiesce_dev(struct skd_device *skdev);
427static int skd_unquiesce_dev(struct skd_device *skdev);
428static void skd_release_special(struct skd_device *skdev,
429 struct skd_special_context *skspcl);
430static void skd_disable_interrupts(struct skd_device *skdev);
431static void skd_isr_fwstate(struct skd_device *skdev);
432static void skd_recover_requests(struct skd_device *skdev, int requeue);
433static void skd_soft_reset(struct skd_device *skdev);
434
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600435const char *skd_drive_state_to_str(int state);
436const char *skd_skdev_state_to_str(enum skd_drvr_state state);
437static void skd_log_skdev(struct skd_device *skdev, const char *event);
438static void skd_log_skmsg(struct skd_device *skdev,
439 struct skd_fitmsg_context *skmsg, const char *event);
440static void skd_log_skreq(struct skd_device *skdev,
441 struct skd_request_context *skreq, const char *event);
442
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600443/*
444 *****************************************************************************
445 * READ/WRITE REQUESTS
446 *****************************************************************************
447 */
Jens Axboefcd37eb2013-11-01 10:14:56 -0600448static void skd_fail_all_pending(struct skd_device *skdev)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600449{
450 struct request_queue *q = skdev->queue;
451 struct request *req;
452
453 for (;; ) {
454 req = blk_peek_request(q);
455 if (req == NULL)
456 break;
457 blk_start_request(req);
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200458 __blk_end_request_all(req, BLK_STS_IOERR);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600459 }
460}
461
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600462static void
463skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
464 int data_dir, unsigned lba,
465 unsigned count)
466{
467 if (data_dir == READ)
468 scsi_req->cdb[0] = 0x28;
469 else
470 scsi_req->cdb[0] = 0x2a;
471
472 scsi_req->cdb[1] = 0;
473 scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
474 scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
475 scsi_req->cdb[4] = (lba & 0xff00) >> 8;
476 scsi_req->cdb[5] = (lba & 0xff);
477 scsi_req->cdb[6] = 0;
478 scsi_req->cdb[7] = (count & 0xff00) >> 8;
479 scsi_req->cdb[8] = count & 0xff;
480 scsi_req->cdb[9] = 0;
481}
482
483static void
484skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
Mike Snitzer38d4a1b2013-11-01 15:05:10 -0400485 struct skd_request_context *skreq)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600486{
487 skreq->flush_cmd = 1;
488
489 scsi_req->cdb[0] = 0x35;
490 scsi_req->cdb[1] = 0;
491 scsi_req->cdb[2] = 0;
492 scsi_req->cdb[3] = 0;
493 scsi_req->cdb[4] = 0;
494 scsi_req->cdb[5] = 0;
495 scsi_req->cdb[6] = 0;
496 scsi_req->cdb[7] = 0;
497 scsi_req->cdb[8] = 0;
498 scsi_req->cdb[9] = 0;
499}
500
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600501static void skd_request_fn_not_online(struct request_queue *q);
502
503static void skd_request_fn(struct request_queue *q)
504{
505 struct skd_device *skdev = q->queuedata;
506 struct skd_fitmsg_context *skmsg = NULL;
507 struct fit_msg_hdr *fmh = NULL;
508 struct skd_request_context *skreq;
509 struct request *req = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600510 struct skd_scsi_request *scsi_req;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600511 unsigned long io_flags;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600512 u32 lba;
513 u32 count;
514 int data_dir;
Bart Van Assche4854afe2017-08-17 13:12:59 -0700515 __be64 be_dmaa;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600516 u64 cmdctxt;
517 u32 timo_slot;
518 void *cmd_ptr;
519 int flush, fua;
520
521 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
522 skd_request_fn_not_online(q);
523 return;
524 }
525
Jens Axboe6a5ec652013-11-01 10:38:45 -0600526 if (blk_queue_stopped(skdev->queue)) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600527 if (skdev->skmsg_free_list == NULL ||
528 skdev->skreq_free_list == NULL ||
529 skdev->in_flight >= skdev->queue_low_water_mark)
530 /* There is still some kind of shortage */
531 return;
532
Jens Axboe6a5ec652013-11-01 10:38:45 -0600533 queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600534 }
535
536 /*
537 * Stop conditions:
538 * - There are no more native requests
539 * - There are already the maximum number of requests in progress
540 * - There are no more skd_request_context entries
541 * - There are no more FIT msg buffers
542 */
543 for (;; ) {
544
545 flush = fua = 0;
546
Jens Axboefcd37eb2013-11-01 10:14:56 -0600547 req = blk_peek_request(q);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600548
Jens Axboefcd37eb2013-11-01 10:14:56 -0600549 /* Are there any native requests to start? */
550 if (req == NULL)
551 break;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600552
Jens Axboefcd37eb2013-11-01 10:14:56 -0600553 lba = (u32)blk_rq_pos(req);
554 count = blk_rq_sectors(req);
555 data_dir = rq_data_dir(req);
556 io_flags = req->cmd_flags;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600557
Mike Christie3a5e02c2016-06-05 14:32:23 -0500558 if (req_op(req) == REQ_OP_FLUSH)
Jens Axboefcd37eb2013-11-01 10:14:56 -0600559 flush++;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600560
Jens Axboefcd37eb2013-11-01 10:14:56 -0600561 if (io_flags & REQ_FUA)
562 fua++;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600563
Bart Van Asschef98806d2017-08-17 13:12:58 -0700564 dev_dbg(&skdev->pdev->dev,
565 "new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
566 req, lba, lba, count, count, data_dir);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600567
Mike Snitzer38d4a1b2013-11-01 15:05:10 -0400568 /* At this point we know there is a request */
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600569
570 /* Are too many requets already in progress? */
571 if (skdev->in_flight >= skdev->cur_max_queue_depth) {
Bart Van Asschef98806d2017-08-17 13:12:58 -0700572 dev_dbg(&skdev->pdev->dev, "qdepth %d, limit %d\n",
573 skdev->in_flight, skdev->cur_max_queue_depth);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600574 break;
575 }
576
577 /* Is a skd_request_context available? */
578 skreq = skdev->skreq_free_list;
579 if (skreq == NULL) {
Bart Van Asschef98806d2017-08-17 13:12:58 -0700580 dev_dbg(&skdev->pdev->dev, "Out of req=%p\n", q);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600581 break;
582 }
583 SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
584 SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
585
586 /* Now we check to see if we can get a fit msg */
587 if (skmsg == NULL) {
588 if (skdev->skmsg_free_list == NULL) {
Bart Van Asschef98806d2017-08-17 13:12:58 -0700589 dev_dbg(&skdev->pdev->dev, "Out of msg\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600590 break;
591 }
592 }
593
594 skreq->flush_cmd = 0;
595 skreq->n_sg = 0;
596 skreq->sg_byte_count = 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600597
598 /*
Mike Snitzer38d4a1b2013-11-01 15:05:10 -0400599 * OK to now dequeue request from q.
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600600 *
601 * At this point we are comitted to either start or reject
602 * the native request. Note that skd_request_context is
603 * available but is still at the head of the free list.
604 */
Jens Axboefcd37eb2013-11-01 10:14:56 -0600605 blk_start_request(req);
606 skreq->req = req;
607 skreq->fitmsg_id = 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600608
609 /* Either a FIT msg is in progress or we have to start one. */
610 if (skmsg == NULL) {
611 /* Are there any FIT msg buffers available? */
612 skmsg = skdev->skmsg_free_list;
613 if (skmsg == NULL) {
Bart Van Asschef98806d2017-08-17 13:12:58 -0700614 dev_dbg(&skdev->pdev->dev,
615 "Out of msg skdev=%p\n",
616 skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600617 break;
618 }
619 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
620 SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
621
622 skdev->skmsg_free_list = skmsg->next;
623
624 skmsg->state = SKD_MSG_STATE_BUSY;
625 skmsg->id += SKD_ID_INCR;
626
627 /* Initialize the FIT msg header */
628 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
629 memset(fmh, 0, sizeof(*fmh));
630 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
631 skmsg->length = sizeof(*fmh);
632 }
633
634 skreq->fitmsg_id = skmsg->id;
635
636 /*
637 * Note that a FIT msg may have just been started
638 * but contains no SoFIT requests yet.
639 */
640
641 /*
642 * Transcode the request, checking as we go. The outcome of
643 * the transcoding is represented by the error variable.
644 */
645 cmd_ptr = &skmsg->msg_buf[skmsg->length];
646 memset(cmd_ptr, 0, 32);
647
Bart Van Assche4854afe2017-08-17 13:12:59 -0700648 be_dmaa = cpu_to_be64(skreq->sksg_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600649 cmdctxt = skreq->id + SKD_ID_INCR;
650
651 scsi_req = cmd_ptr;
652 scsi_req->hdr.tag = cmdctxt;
653 scsi_req->hdr.sg_list_dma_address = be_dmaa;
654
655 if (data_dir == READ)
656 skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
657 else
658 skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
659
Jeff Moyer49bdedb2016-04-25 19:12:38 -0600660 if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600661 skd_prep_zerosize_flush_cdb(scsi_req, skreq);
662 SKD_ASSERT(skreq->flush_cmd == 1);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600663 } else {
664 skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
665 }
666
667 if (fua)
668 scsi_req->cdb[1] |= SKD_FUA_NV;
669
Jens Axboefcd37eb2013-11-01 10:14:56 -0600670 if (!req->bio)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600671 goto skip_sg;
672
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200673 if (!skd_preop_sg_list(skdev, skreq)) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600674 /*
675 * Complete the native request with error.
676 * Note that the request context is still at the
677 * head of the free list, and that the SoFIT request
678 * was encoded into the FIT msg buffer but the FIT
679 * msg length has not been updated. In short, the
680 * only resource that has been allocated but might
681 * not be used is that the FIT msg could be empty.
682 */
Bart Van Asschef98806d2017-08-17 13:12:58 -0700683 dev_dbg(&skdev->pdev->dev, "error Out\n");
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200684 skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600685 continue;
686 }
687
688skip_sg:
689 scsi_req->hdr.sg_list_len_bytes =
690 cpu_to_be32(skreq->sg_byte_count);
691
692 /* Complete resource allocations. */
693 skdev->skreq_free_list = skreq->next;
694 skreq->state = SKD_REQ_STATE_BUSY;
695 skreq->id += SKD_ID_INCR;
696
697 skmsg->length += sizeof(struct skd_scsi_request);
698 fmh->num_protocol_cmds_coalesced++;
699
700 /*
701 * Update the active request counts.
702 * Capture the timeout timestamp.
703 */
704 skreq->timeout_stamp = skdev->timeout_stamp;
705 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
706 skdev->timeout_slot[timo_slot]++;
707 skdev->in_flight++;
Bart Van Asschef98806d2017-08-17 13:12:58 -0700708 dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
709 skdev->in_flight);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600710
711 /*
712 * If the FIT msg buffer is full send it.
713 */
714 if (skmsg->length >= SKD_N_FITMSG_BYTES ||
715 fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
716 skd_send_fitmsg(skdev, skmsg);
717 skmsg = NULL;
718 fmh = NULL;
719 }
720 }
721
722 /*
723 * Is a FIT msg in progress? If it is empty put the buffer back
724 * on the free list. If it is non-empty send what we got.
725 * This minimizes latency when there are fewer requests than
726 * what fits in a FIT msg.
727 */
728 if (skmsg != NULL) {
729 /* Bigger than just a FIT msg header? */
730 if (skmsg->length > sizeof(struct fit_msg_hdr)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -0700731 dev_dbg(&skdev->pdev->dev, "sending msg=%p, len %d\n",
732 skmsg, skmsg->length);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600733 skd_send_fitmsg(skdev, skmsg);
734 } else {
735 /*
736 * The FIT msg is empty. It means we got started
737 * on the msg, but the requests were rejected.
738 */
739 skmsg->state = SKD_MSG_STATE_IDLE;
740 skmsg->id += SKD_ID_INCR;
741 skmsg->next = skdev->skmsg_free_list;
742 skdev->skmsg_free_list = skmsg;
743 }
744 skmsg = NULL;
745 fmh = NULL;
746 }
747
748 /*
749 * If req is non-NULL it means there is something to do but
750 * we are out of a resource.
751 */
Jens Axboefcd37eb2013-11-01 10:14:56 -0600752 if (req)
Jens Axboe6a5ec652013-11-01 10:38:45 -0600753 blk_stop_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600754}
755
Mike Snitzer38d4a1b2013-11-01 15:05:10 -0400756static void skd_end_request(struct skd_device *skdev,
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200757 struct skd_request_context *skreq, blk_status_t error)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600758{
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600759 if (unlikely(error)) {
760 struct request *req = skreq->req;
761 char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
762 u32 lba = (u32)blk_rq_pos(req);
763 u32 count = blk_rq_sectors(req);
764
Bart Van Asschef98806d2017-08-17 13:12:58 -0700765 dev_err(&skdev->pdev->dev,
766 "Error cmd=%s sect=%u count=%u id=0x%x\n", cmd, lba,
767 count, skreq->id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600768 } else
Bart Van Asschef98806d2017-08-17 13:12:58 -0700769 dev_dbg(&skdev->pdev->dev, "id=0x%x error=%d\n", skreq->id,
770 error);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600771
772 __blk_end_request_all(skreq->req, error);
773}
774
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200775static bool skd_preop_sg_list(struct skd_device *skdev,
Mike Snitzer38d4a1b2013-11-01 15:05:10 -0400776 struct skd_request_context *skreq)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600777{
778 struct request *req = skreq->req;
779 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
780 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
781 struct scatterlist *sg = &skreq->sg[0];
782 int n_sg;
783 int i;
784
785 skreq->sg_byte_count = 0;
786
787 /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
788 skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
789
790 n_sg = blk_rq_map_sg(skdev->queue, req, sg);
791 if (n_sg <= 0)
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200792 return false;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600793
794 /*
795 * Map scatterlist to PCI bus addresses.
796 * Note PCI might change the number of entries.
797 */
798 n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
799 if (n_sg <= 0)
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200800 return false;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600801
802 SKD_ASSERT(n_sg <= skdev->sgs_per_request);
803
804 skreq->n_sg = n_sg;
805
806 for (i = 0; i < n_sg; i++) {
807 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
808 u32 cnt = sg_dma_len(&sg[i]);
809 uint64_t dma_addr = sg_dma_address(&sg[i]);
810
811 sgd->control = FIT_SGD_CONTROL_NOT_LAST;
812 sgd->byte_count = cnt;
813 skreq->sg_byte_count += cnt;
814 sgd->host_side_addr = dma_addr;
815 sgd->dev_side_addr = 0;
816 }
817
818 skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
819 skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
820
821 if (unlikely(skdev->dbg_level > 1)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -0700822 dev_dbg(&skdev->pdev->dev,
823 "skreq=%x sksg_list=%p sksg_dma=%llx\n",
824 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600825 for (i = 0; i < n_sg; i++) {
826 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
Bart Van Asschef98806d2017-08-17 13:12:58 -0700827
828 dev_dbg(&skdev->pdev->dev,
829 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
830 i, sgd->byte_count, sgd->control,
831 sgd->host_side_addr, sgd->next_desc_ptr);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600832 }
833 }
834
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200835 return true;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600836}
837
Jens Axboefcd37eb2013-11-01 10:14:56 -0600838static void skd_postop_sg_list(struct skd_device *skdev,
Mike Snitzer38d4a1b2013-11-01 15:05:10 -0400839 struct skd_request_context *skreq)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600840{
841 int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
842 int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
843
844 /*
845 * restore the next ptr for next IO request so we
846 * don't have to set it every time.
847 */
848 skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
849 skreq->sksg_dma_address +
850 ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
851 pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
852}
853
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600854static void skd_request_fn_not_online(struct request_queue *q)
855{
856 struct skd_device *skdev = q->queuedata;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600857
858 SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
859
860 skd_log_skdev(skdev, "req_not_online");
861 switch (skdev->state) {
862 case SKD_DRVR_STATE_PAUSING:
863 case SKD_DRVR_STATE_PAUSED:
864 case SKD_DRVR_STATE_STARTING:
865 case SKD_DRVR_STATE_RESTARTING:
866 case SKD_DRVR_STATE_WAIT_BOOT:
867 /* In case of starting, we haven't started the queue,
868 * so we can't get here... but requests are
869 * possibly hanging out waiting for us because we
870 * reported the dev/skd0 already. They'll wait
871 * forever if connect doesn't complete.
872 * What to do??? delay dev/skd0 ??
873 */
874 case SKD_DRVR_STATE_BUSY:
875 case SKD_DRVR_STATE_BUSY_IMMINENT:
876 case SKD_DRVR_STATE_BUSY_ERASE:
877 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
878 return;
879
880 case SKD_DRVR_STATE_BUSY_SANITIZE:
881 case SKD_DRVR_STATE_STOPPING:
882 case SKD_DRVR_STATE_SYNCING:
883 case SKD_DRVR_STATE_FAULT:
884 case SKD_DRVR_STATE_DISAPPEARED:
885 default:
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600886 break;
887 }
888
889 /* If we get here, terminate all pending block requeusts
890 * with EIO and any scsi pass thru with appropriate sense
891 */
892
893 skd_fail_all_pending(skdev);
894}
895
896/*
897 *****************************************************************************
898 * TIMER
899 *****************************************************************************
900 */
901
902static void skd_timer_tick_not_online(struct skd_device *skdev);
903
904static void skd_timer_tick(ulong arg)
905{
906 struct skd_device *skdev = (struct skd_device *)arg;
907
908 u32 timo_slot;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600909 unsigned long reqflags;
910 u32 state;
911
912 if (skdev->state == SKD_DRVR_STATE_FAULT)
913 /* The driver has declared fault, and we want it to
914 * stay that way until driver is reloaded.
915 */
916 return;
917
918 spin_lock_irqsave(&skdev->lock, reqflags);
919
920 state = SKD_READL(skdev, FIT_STATUS);
921 state &= FIT_SR_DRIVE_STATE_MASK;
922 if (state != skdev->drive_state)
923 skd_isr_fwstate(skdev);
924
925 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
926 skd_timer_tick_not_online(skdev);
927 goto timer_func_out;
928 }
929 skdev->timeout_stamp++;
930 timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
931
932 /*
933 * All requests that happened during the previous use of
934 * this slot should be done by now. The previous use was
935 * over 7 seconds ago.
936 */
937 if (skdev->timeout_slot[timo_slot] == 0)
938 goto timer_func_out;
939
940 /* Something is overdue */
Bart Van Asschef98806d2017-08-17 13:12:58 -0700941 dev_dbg(&skdev->pdev->dev, "found %d timeouts, draining busy=%d\n",
942 skdev->timeout_slot[timo_slot], skdev->in_flight);
943 dev_err(&skdev->pdev->dev, "Overdue IOs (%d), busy %d\n",
944 skdev->timeout_slot[timo_slot], skdev->in_flight);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600945
946 skdev->timer_countdown = SKD_DRAINING_TIMO;
947 skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
948 skdev->timo_slot = timo_slot;
Jens Axboe6a5ec652013-11-01 10:38:45 -0600949 blk_stop_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600950
951timer_func_out:
952 mod_timer(&skdev->timer, (jiffies + HZ));
953
954 spin_unlock_irqrestore(&skdev->lock, reqflags);
955}
956
957static void skd_timer_tick_not_online(struct skd_device *skdev)
958{
959 switch (skdev->state) {
960 case SKD_DRVR_STATE_IDLE:
961 case SKD_DRVR_STATE_LOAD:
962 break;
963 case SKD_DRVR_STATE_BUSY_SANITIZE:
Bart Van Asschef98806d2017-08-17 13:12:58 -0700964 dev_dbg(&skdev->pdev->dev,
965 "drive busy sanitize[%x], driver[%x]\n",
966 skdev->drive_state, skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600967 /* If we've been in sanitize for 3 seconds, we figure we're not
968 * going to get anymore completions, so recover requests now
969 */
970 if (skdev->timer_countdown > 0) {
971 skdev->timer_countdown--;
972 return;
973 }
974 skd_recover_requests(skdev, 0);
975 break;
976
977 case SKD_DRVR_STATE_BUSY:
978 case SKD_DRVR_STATE_BUSY_IMMINENT:
979 case SKD_DRVR_STATE_BUSY_ERASE:
Bart Van Asschef98806d2017-08-17 13:12:58 -0700980 dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
981 skdev->state, skdev->timer_countdown);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600982 if (skdev->timer_countdown > 0) {
983 skdev->timer_countdown--;
984 return;
985 }
Bart Van Asschef98806d2017-08-17 13:12:58 -0700986 dev_dbg(&skdev->pdev->dev,
987 "busy[%x], timedout=%d, restarting device.",
988 skdev->state, skdev->timer_countdown);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600989 skd_restart_device(skdev);
990 break;
991
992 case SKD_DRVR_STATE_WAIT_BOOT:
993 case SKD_DRVR_STATE_STARTING:
994 if (skdev->timer_countdown > 0) {
995 skdev->timer_countdown--;
996 return;
997 }
998 /* For now, we fault the drive. Could attempt resets to
999 * revcover at some point. */
1000 skdev->state = SKD_DRVR_STATE_FAULT;
1001
Bart Van Asschef98806d2017-08-17 13:12:58 -07001002 dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
1003 skdev->drive_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001004
1005 /*start the queue so we can respond with error to requests */
1006 /* wakeup anyone waiting for startup complete */
Jens Axboe6a5ec652013-11-01 10:38:45 -06001007 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001008 skdev->gendisk_on = -1;
1009 wake_up_interruptible(&skdev->waitq);
1010 break;
1011
1012 case SKD_DRVR_STATE_ONLINE:
1013 /* shouldn't get here. */
1014 break;
1015
1016 case SKD_DRVR_STATE_PAUSING:
1017 case SKD_DRVR_STATE_PAUSED:
1018 break;
1019
1020 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
Bart Van Asschef98806d2017-08-17 13:12:58 -07001021 dev_dbg(&skdev->pdev->dev,
1022 "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
1023 skdev->timo_slot, skdev->timer_countdown,
1024 skdev->in_flight,
1025 skdev->timeout_slot[skdev->timo_slot]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001026 /* if the slot has cleared we can let the I/O continue */
1027 if (skdev->timeout_slot[skdev->timo_slot] == 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001028 dev_dbg(&skdev->pdev->dev,
1029 "Slot drained, starting queue.\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001030 skdev->state = SKD_DRVR_STATE_ONLINE;
Jens Axboe6a5ec652013-11-01 10:38:45 -06001031 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001032 return;
1033 }
1034 if (skdev->timer_countdown > 0) {
1035 skdev->timer_countdown--;
1036 return;
1037 }
1038 skd_restart_device(skdev);
1039 break;
1040
1041 case SKD_DRVR_STATE_RESTARTING:
1042 if (skdev->timer_countdown > 0) {
1043 skdev->timer_countdown--;
1044 return;
1045 }
1046 /* For now, we fault the drive. Could attempt resets to
1047 * revcover at some point. */
1048 skdev->state = SKD_DRVR_STATE_FAULT;
Bart Van Asschef98806d2017-08-17 13:12:58 -07001049 dev_err(&skdev->pdev->dev,
1050 "DriveFault Reconnect Timeout (%x)\n",
1051 skdev->drive_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001052
1053 /*
1054 * Recovering does two things:
1055 * 1. completes IO with error
1056 * 2. reclaims dma resources
1057 * When is it safe to recover requests?
1058 * - if the drive state is faulted
1059 * - if the state is still soft reset after out timeout
1060 * - if the drive registers are dead (state = FF)
1061 * If it is "unsafe", we still need to recover, so we will
1062 * disable pci bus mastering and disable our interrupts.
1063 */
1064
1065 if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
1066 (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
1067 (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
1068 /* It never came out of soft reset. Try to
1069 * recover the requests and then let them
1070 * fail. This is to mitigate hung processes. */
1071 skd_recover_requests(skdev, 0);
1072 else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001073 dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
1074 skdev->drive_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001075 pci_disable_device(skdev->pdev);
1076 skd_disable_interrupts(skdev);
1077 skd_recover_requests(skdev, 0);
1078 }
1079
1080 /*start the queue so we can respond with error to requests */
1081 /* wakeup anyone waiting for startup complete */
Jens Axboe6a5ec652013-11-01 10:38:45 -06001082 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001083 skdev->gendisk_on = -1;
1084 wake_up_interruptible(&skdev->waitq);
1085 break;
1086
1087 case SKD_DRVR_STATE_RESUMING:
1088 case SKD_DRVR_STATE_STOPPING:
1089 case SKD_DRVR_STATE_SYNCING:
1090 case SKD_DRVR_STATE_FAULT:
1091 case SKD_DRVR_STATE_DISAPPEARED:
1092 default:
1093 break;
1094 }
1095}
1096
1097static int skd_start_timer(struct skd_device *skdev)
1098{
1099 int rc;
1100
1101 init_timer(&skdev->timer);
1102 setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
1103
1104 rc = mod_timer(&skdev->timer, (jiffies + HZ));
1105 if (rc)
Bart Van Asschef98806d2017-08-17 13:12:58 -07001106 dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001107 return rc;
1108}
1109
1110static void skd_kill_timer(struct skd_device *skdev)
1111{
1112 del_timer_sync(&skdev->timer);
1113}
1114
1115/*
1116 *****************************************************************************
1117 * IOCTL
1118 *****************************************************************************
1119 */
1120static int skd_ioctl_sg_io(struct skd_device *skdev,
1121 fmode_t mode, void __user *argp);
1122static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1123 struct skd_sg_io *sksgio);
1124static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1125 struct skd_sg_io *sksgio);
1126static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1127 struct skd_sg_io *sksgio);
1128static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1129 struct skd_sg_io *sksgio, int dxfer_dir);
1130static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1131 struct skd_sg_io *sksgio);
1132static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
1133static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1134 struct skd_sg_io *sksgio);
1135static int skd_sg_io_put_status(struct skd_device *skdev,
1136 struct skd_sg_io *sksgio);
1137
1138static void skd_complete_special(struct skd_device *skdev,
1139 volatile struct fit_completion_entry_v1
1140 *skcomp,
1141 volatile struct fit_comp_error_info *skerr,
1142 struct skd_special_context *skspcl);
1143
1144static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
1145 uint cmd_in, ulong arg)
1146{
Christoph Hellwig3719fa82017-01-28 09:32:50 +01001147 static const int sg_version_num = 30527;
1148 int rc = 0, timeout;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001149 struct gendisk *disk = bdev->bd_disk;
1150 struct skd_device *skdev = disk->private_data;
Christoph Hellwig3719fa82017-01-28 09:32:50 +01001151 int __user *p = (int __user *)arg;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001152
Bart Van Asschef98806d2017-08-17 13:12:58 -07001153 dev_dbg(&skdev->pdev->dev,
1154 "%s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
1155 disk->disk_name, current->comm, mode, cmd_in, arg);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001156
1157 if (!capable(CAP_SYS_ADMIN))
1158 return -EPERM;
1159
1160 switch (cmd_in) {
1161 case SG_SET_TIMEOUT:
Christoph Hellwig3719fa82017-01-28 09:32:50 +01001162 rc = get_user(timeout, p);
1163 if (!rc)
1164 disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
1165 break;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001166 case SG_GET_TIMEOUT:
Christoph Hellwig3719fa82017-01-28 09:32:50 +01001167 rc = jiffies_to_clock_t(disk->queue->sg_timeout);
1168 break;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001169 case SG_GET_VERSION_NUM:
Christoph Hellwig3719fa82017-01-28 09:32:50 +01001170 rc = put_user(sg_version_num, p);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001171 break;
1172 case SG_IO:
Christoph Hellwig3719fa82017-01-28 09:32:50 +01001173 rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001174 break;
1175
1176 default:
1177 rc = -ENOTTY;
1178 break;
1179 }
1180
Bart Van Asschef98806d2017-08-17 13:12:58 -07001181 dev_dbg(&skdev->pdev->dev, "%s: completion rc %d\n", disk->disk_name,
1182 rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001183 return rc;
1184}
1185
1186static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
1187 void __user *argp)
1188{
1189 int rc;
1190 struct skd_sg_io sksgio;
1191
1192 memset(&sksgio, 0, sizeof(sksgio));
1193 sksgio.mode = mode;
1194 sksgio.argp = argp;
1195 sksgio.iov = &sksgio.no_iov_iov;
1196
1197 switch (skdev->state) {
1198 case SKD_DRVR_STATE_ONLINE:
1199 case SKD_DRVR_STATE_BUSY_IMMINENT:
1200 break;
1201
1202 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07001203 dev_dbg(&skdev->pdev->dev, "drive not online\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001204 rc = -ENXIO;
1205 goto out;
1206 }
1207
Akhil Bhansalif721bb02013-10-23 13:00:08 +01001208 rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
1209 if (rc)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001210 goto out;
1211
Akhil Bhansalif721bb02013-10-23 13:00:08 +01001212 rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
1213 if (rc)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001214 goto out;
1215
Akhil Bhansalif721bb02013-10-23 13:00:08 +01001216 rc = skd_sg_io_prep_buffering(skdev, &sksgio);
1217 if (rc)
1218 goto out;
1219
1220 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
1221 if (rc)
1222 goto out;
1223
1224 rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
1225 if (rc)
1226 goto out;
1227
1228 rc = skd_sg_io_await(skdev, &sksgio);
1229 if (rc)
1230 goto out;
1231
1232 rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
1233 if (rc)
1234 goto out;
1235
1236 rc = skd_sg_io_put_status(skdev, &sksgio);
1237 if (rc)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001238 goto out;
1239
1240 rc = 0;
1241
1242out:
1243 skd_sg_io_release_skspcl(skdev, &sksgio);
1244
1245 if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
1246 kfree(sksgio.iov);
1247 return rc;
1248}
1249
1250static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
1251 struct skd_sg_io *sksgio)
1252{
1253 struct sg_io_hdr *sgp = &sksgio->sg;
Bart Van Assche95895e12017-08-17 13:12:55 -07001254 int i, __maybe_unused acc;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001255
1256 if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001257 dev_dbg(&skdev->pdev->dev, "access sg failed %p\n",
1258 sksgio->argp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001259 return -EFAULT;
1260 }
1261
1262 if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001263 dev_dbg(&skdev->pdev->dev, "copy_from_user sg failed %p\n",
1264 sksgio->argp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001265 return -EFAULT;
1266 }
1267
1268 if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001269 dev_dbg(&skdev->pdev->dev, "interface_id invalid 0x%x\n",
1270 sgp->interface_id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001271 return -EINVAL;
1272 }
1273
1274 if (sgp->cmd_len > sizeof(sksgio->cdb)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001275 dev_dbg(&skdev->pdev->dev, "cmd_len invalid %d\n",
1276 sgp->cmd_len);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001277 return -EINVAL;
1278 }
1279
1280 if (sgp->iovec_count > 256) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001281 dev_dbg(&skdev->pdev->dev, "iovec_count invalid %d\n",
1282 sgp->iovec_count);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001283 return -EINVAL;
1284 }
1285
1286 if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001287 dev_dbg(&skdev->pdev->dev, "dxfer_len invalid %d\n",
1288 sgp->dxfer_len);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001289 return -EINVAL;
1290 }
1291
1292 switch (sgp->dxfer_direction) {
1293 case SG_DXFER_NONE:
1294 acc = -1;
1295 break;
1296
1297 case SG_DXFER_TO_DEV:
1298 acc = VERIFY_READ;
1299 break;
1300
1301 case SG_DXFER_FROM_DEV:
1302 case SG_DXFER_TO_FROM_DEV:
1303 acc = VERIFY_WRITE;
1304 break;
1305
1306 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07001307 dev_dbg(&skdev->pdev->dev, "dxfer_dir invalid %d\n",
1308 sgp->dxfer_direction);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001309 return -EINVAL;
1310 }
1311
1312 if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001313 dev_dbg(&skdev->pdev->dev, "copy_from_user cmdp failed %p\n",
1314 sgp->cmdp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001315 return -EFAULT;
1316 }
1317
1318 if (sgp->mx_sb_len != 0) {
1319 if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001320 dev_dbg(&skdev->pdev->dev, "access sbp failed %p\n",
1321 sgp->sbp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001322 return -EFAULT;
1323 }
1324 }
1325
1326 if (sgp->iovec_count == 0) {
1327 sksgio->iov[0].iov_base = sgp->dxferp;
1328 sksgio->iov[0].iov_len = sgp->dxfer_len;
1329 sksgio->iovcnt = 1;
1330 sksgio->dxfer_len = sgp->dxfer_len;
1331 } else {
1332 struct sg_iovec *iov;
1333 uint nbytes = sizeof(*iov) * sgp->iovec_count;
1334 size_t iov_data_len;
1335
1336 iov = kmalloc(nbytes, GFP_KERNEL);
1337 if (iov == NULL) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001338 dev_dbg(&skdev->pdev->dev, "alloc iovec failed %d\n",
1339 sgp->iovec_count);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001340 return -ENOMEM;
1341 }
1342 sksgio->iov = iov;
1343 sksgio->iovcnt = sgp->iovec_count;
1344
1345 if (copy_from_user(iov, sgp->dxferp, nbytes)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001346 dev_dbg(&skdev->pdev->dev,
1347 "copy_from_user iovec failed %p\n",
1348 sgp->dxferp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001349 return -EFAULT;
1350 }
1351
1352 /*
1353 * Sum up the vecs, making sure they don't overflow
1354 */
1355 iov_data_len = 0;
1356 for (i = 0; i < sgp->iovec_count; i++) {
1357 if (iov_data_len + iov[i].iov_len < iov_data_len)
1358 return -EINVAL;
1359 iov_data_len += iov[i].iov_len;
1360 }
1361
1362 /* SG_IO howto says that the shorter of the two wins */
1363 if (sgp->dxfer_len < iov_data_len) {
1364 sksgio->iovcnt = iov_shorten((struct iovec *)iov,
1365 sgp->iovec_count,
1366 sgp->dxfer_len);
1367 sksgio->dxfer_len = sgp->dxfer_len;
1368 } else
1369 sksgio->dxfer_len = iov_data_len;
1370 }
1371
1372 if (sgp->dxfer_direction != SG_DXFER_NONE) {
1373 struct sg_iovec *iov = sksgio->iov;
1374 for (i = 0; i < sksgio->iovcnt; i++, iov++) {
1375 if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001376 dev_dbg(&skdev->pdev->dev,
1377 "access data failed %p/%zd\n",
1378 iov->iov_base, iov->iov_len);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001379 return -EFAULT;
1380 }
1381 }
1382 }
1383
1384 return 0;
1385}
1386
1387static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
1388 struct skd_sg_io *sksgio)
1389{
1390 struct skd_special_context *skspcl = NULL;
1391 int rc;
1392
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04001393 for (;;) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001394 ulong flags;
1395
1396 spin_lock_irqsave(&skdev->lock, flags);
1397 skspcl = skdev->skspcl_free_list;
1398 if (skspcl != NULL) {
1399 skdev->skspcl_free_list =
1400 (struct skd_special_context *)skspcl->req.next;
1401 skspcl->req.id += SKD_ID_INCR;
1402 skspcl->req.state = SKD_REQ_STATE_SETUP;
1403 skspcl->orphaned = 0;
1404 skspcl->req.n_sg = 0;
1405 }
1406 spin_unlock_irqrestore(&skdev->lock, flags);
1407
1408 if (skspcl != NULL) {
1409 rc = 0;
1410 break;
1411 }
1412
Bart Van Asschef98806d2017-08-17 13:12:58 -07001413 dev_dbg(&skdev->pdev->dev, "blocking\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001414
1415 rc = wait_event_interruptible_timeout(
1416 skdev->waitq,
1417 (skdev->skspcl_free_list != NULL),
1418 msecs_to_jiffies(sksgio->sg.timeout));
1419
Bart Van Asschef98806d2017-08-17 13:12:58 -07001420 dev_dbg(&skdev->pdev->dev, "unblocking, rc=%d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001421
1422 if (rc <= 0) {
1423 if (rc == 0)
1424 rc = -ETIMEDOUT;
1425 else
1426 rc = -EINTR;
1427 break;
1428 }
1429 /*
1430 * If we get here rc > 0 meaning the timeout to
1431 * wait_event_interruptible_timeout() had time left, hence the
1432 * sought event -- non-empty free list -- happened.
1433 * Retry the allocation.
1434 */
1435 }
1436 sksgio->skspcl = skspcl;
1437
1438 return rc;
1439}
1440
1441static int skd_skreq_prep_buffering(struct skd_device *skdev,
1442 struct skd_request_context *skreq,
1443 u32 dxfer_len)
1444{
1445 u32 resid = dxfer_len;
1446
1447 /*
1448 * The DMA engine must have aligned addresses and byte counts.
1449 */
1450 resid += (-resid) & 3;
1451 skreq->sg_byte_count = resid;
1452
1453 skreq->n_sg = 0;
1454
1455 while (resid > 0) {
1456 u32 nbytes = PAGE_SIZE;
1457 u32 ix = skreq->n_sg;
1458 struct scatterlist *sg = &skreq->sg[ix];
1459 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1460 struct page *page;
1461
1462 if (nbytes > resid)
1463 nbytes = resid;
1464
1465 page = alloc_page(GFP_KERNEL);
1466 if (page == NULL)
1467 return -ENOMEM;
1468
1469 sg_set_page(sg, page, nbytes, 0);
1470
1471 /* TODO: This should be going through a pci_???()
1472 * routine to do proper mapping. */
1473 sksg->control = FIT_SGD_CONTROL_NOT_LAST;
1474 sksg->byte_count = nbytes;
1475
1476 sksg->host_side_addr = sg_phys(sg);
1477
1478 sksg->dev_side_addr = 0;
1479 sksg->next_desc_ptr = skreq->sksg_dma_address +
1480 (ix + 1) * sizeof(*sksg);
1481
1482 skreq->n_sg++;
1483 resid -= nbytes;
1484 }
1485
1486 if (skreq->n_sg > 0) {
1487 u32 ix = skreq->n_sg - 1;
1488 struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
1489
1490 sksg->control = FIT_SGD_CONTROL_LAST;
1491 sksg->next_desc_ptr = 0;
1492 }
1493
1494 if (unlikely(skdev->dbg_level > 1)) {
1495 u32 i;
1496
Bart Van Asschef98806d2017-08-17 13:12:58 -07001497 dev_dbg(&skdev->pdev->dev,
1498 "skreq=%x sksg_list=%p sksg_dma=%llx\n",
1499 skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001500 for (i = 0; i < skreq->n_sg; i++) {
1501 struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
1502
Bart Van Asschef98806d2017-08-17 13:12:58 -07001503 dev_dbg(&skdev->pdev->dev,
1504 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
1505 i, sgd->byte_count, sgd->control,
1506 sgd->host_side_addr, sgd->next_desc_ptr);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001507 }
1508 }
1509
1510 return 0;
1511}
1512
1513static int skd_sg_io_prep_buffering(struct skd_device *skdev,
1514 struct skd_sg_io *sksgio)
1515{
1516 struct skd_special_context *skspcl = sksgio->skspcl;
1517 struct skd_request_context *skreq = &skspcl->req;
1518 u32 dxfer_len = sksgio->dxfer_len;
1519 int rc;
1520
1521 rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
1522 /*
1523 * Eventually, errors or not, skd_release_special() is called
1524 * to recover allocations including partial allocations.
1525 */
1526 return rc;
1527}
1528
1529static int skd_sg_io_copy_buffer(struct skd_device *skdev,
1530 struct skd_sg_io *sksgio, int dxfer_dir)
1531{
1532 struct skd_special_context *skspcl = sksgio->skspcl;
1533 u32 iov_ix = 0;
1534 struct sg_iovec curiov;
1535 u32 sksg_ix = 0;
1536 u8 *bufp = NULL;
1537 u32 buf_len = 0;
1538 u32 resid = sksgio->dxfer_len;
1539 int rc;
1540
1541 curiov.iov_len = 0;
1542 curiov.iov_base = NULL;
1543
1544 if (dxfer_dir != sksgio->sg.dxfer_direction) {
1545 if (dxfer_dir != SG_DXFER_TO_DEV ||
1546 sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
1547 return 0;
1548 }
1549
1550 while (resid > 0) {
1551 u32 nbytes = PAGE_SIZE;
1552
1553 if (curiov.iov_len == 0) {
1554 curiov = sksgio->iov[iov_ix++];
1555 continue;
1556 }
1557
1558 if (buf_len == 0) {
1559 struct page *page;
1560 page = sg_page(&skspcl->req.sg[sksg_ix++]);
1561 bufp = page_address(page);
1562 buf_len = PAGE_SIZE;
1563 }
1564
1565 nbytes = min_t(u32, nbytes, resid);
1566 nbytes = min_t(u32, nbytes, curiov.iov_len);
1567 nbytes = min_t(u32, nbytes, buf_len);
1568
1569 if (dxfer_dir == SG_DXFER_TO_DEV)
1570 rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
1571 else
1572 rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
1573
1574 if (rc)
1575 return -EFAULT;
1576
1577 resid -= nbytes;
1578 curiov.iov_len -= nbytes;
1579 curiov.iov_base += nbytes;
1580 buf_len -= nbytes;
1581 }
1582
1583 return 0;
1584}
1585
1586static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
1587 struct skd_sg_io *sksgio)
1588{
1589 struct skd_special_context *skspcl = sksgio->skspcl;
1590 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
1591 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
1592
1593 memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
1594
1595 /* Initialize the FIT msg header */
1596 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1597 fmh->num_protocol_cmds_coalesced = 1;
1598
1599 /* Initialize the SCSI request */
1600 if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
1601 scsi_req->hdr.sg_list_dma_address =
1602 cpu_to_be64(skspcl->req.sksg_dma_address);
1603 scsi_req->hdr.tag = skspcl->req.id;
1604 scsi_req->hdr.sg_list_len_bytes =
1605 cpu_to_be32(skspcl->req.sg_byte_count);
1606 memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
1607
1608 skspcl->req.state = SKD_REQ_STATE_BUSY;
1609 skd_send_special_fitmsg(skdev, skspcl);
1610
1611 return 0;
1612}
1613
1614static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
1615{
1616 unsigned long flags;
1617 int rc;
1618
1619 rc = wait_event_interruptible_timeout(skdev->waitq,
1620 (sksgio->skspcl->req.state !=
1621 SKD_REQ_STATE_BUSY),
1622 msecs_to_jiffies(sksgio->sg.
1623 timeout));
1624
1625 spin_lock_irqsave(&skdev->lock, flags);
1626
1627 if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001628 dev_dbg(&skdev->pdev->dev, "skspcl %p aborted\n",
1629 sksgio->skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001630
1631 /* Build check cond, sense and let command finish. */
1632 /* For a timeout, we must fabricate completion and sense
1633 * data to complete the command */
1634 sksgio->skspcl->req.completion.status =
1635 SAM_STAT_CHECK_CONDITION;
1636
1637 memset(&sksgio->skspcl->req.err_info, 0,
1638 sizeof(sksgio->skspcl->req.err_info));
1639 sksgio->skspcl->req.err_info.type = 0x70;
1640 sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
1641 sksgio->skspcl->req.err_info.code = 0x44;
1642 sksgio->skspcl->req.err_info.qual = 0;
1643 rc = 0;
1644 } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
1645 /* No longer on the adapter. We finish. */
1646 rc = 0;
1647 else {
1648 /* Something's gone wrong. Still busy. Timeout or
1649 * user interrupted (control-C). Mark as an orphan
1650		 * so it will be disposed of when completed. */
1651 sksgio->skspcl->orphaned = 1;
1652 sksgio->skspcl = NULL;
1653 if (rc == 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001654 dev_dbg(&skdev->pdev->dev, "timed out %p (%u ms)\n",
1655 sksgio, sksgio->sg.timeout);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001656 rc = -ETIMEDOUT;
1657 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001658 dev_dbg(&skdev->pdev->dev, "cntlc %p\n", sksgio);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001659 rc = -EINTR;
1660 }
1661 }
1662
1663 spin_unlock_irqrestore(&skdev->lock, flags);
1664
1665 return rc;
1666}
1667
1668static int skd_sg_io_put_status(struct skd_device *skdev,
1669 struct skd_sg_io *sksgio)
1670{
1671 struct sg_io_hdr *sgp = &sksgio->sg;
1672 struct skd_special_context *skspcl = sksgio->skspcl;
1673 int resid = 0;
1674
1675 u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
1676
1677 sgp->status = skspcl->req.completion.status;
1678 resid = sksgio->dxfer_len - nb;
1679
1680 sgp->masked_status = sgp->status & STATUS_MASK;
1681 sgp->msg_status = 0;
1682 sgp->host_status = 0;
1683 sgp->driver_status = 0;
1684 sgp->resid = resid;
1685 if (sgp->masked_status || sgp->host_status || sgp->driver_status)
1686 sgp->info |= SG_INFO_CHECK;
1687
Bart Van Asschef98806d2017-08-17 13:12:58 -07001688 dev_dbg(&skdev->pdev->dev, "status %x masked %x resid 0x%x\n",
1689 sgp->status, sgp->masked_status, sgp->resid);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001690
1691 if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
1692 if (sgp->mx_sb_len > 0) {
1693 struct fit_comp_error_info *ei = &skspcl->req.err_info;
1694 u32 nbytes = sizeof(*ei);
1695
1696 nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
1697
1698 sgp->sb_len_wr = nbytes;
1699
1700 if (__copy_to_user(sgp->sbp, ei, nbytes)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001701 dev_dbg(&skdev->pdev->dev,
1702 "copy_to_user sense failed %p\n",
1703 sgp->sbp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001704 return -EFAULT;
1705 }
1706 }
1707 }
1708
1709 if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001710 dev_dbg(&skdev->pdev->dev, "copy_to_user sg failed %p\n",
1711 sksgio->argp);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001712 return -EFAULT;
1713 }
1714
1715 return 0;
1716}
1717
1718static int skd_sg_io_release_skspcl(struct skd_device *skdev,
1719 struct skd_sg_io *sksgio)
1720{
1721 struct skd_special_context *skspcl = sksgio->skspcl;
1722
1723 if (skspcl != NULL) {
1724 ulong flags;
1725
1726 sksgio->skspcl = NULL;
1727
1728 spin_lock_irqsave(&skdev->lock, flags);
1729 skd_release_special(skdev, skspcl);
1730 spin_unlock_irqrestore(&skdev->lock, flags);
1731 }
1732
1733 return 0;
1734}
1735
1736/*
1737 *****************************************************************************
1738 * INTERNAL REQUESTS -- generated by driver itself
1739 *****************************************************************************
1740 */
1741
1742static int skd_format_internal_skspcl(struct skd_device *skdev)
1743{
1744 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1745 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1746 struct fit_msg_hdr *fmh;
1747 uint64_t dma_address;
1748 struct skd_scsi_request *scsi;
1749
1750 fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
1751 fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
1752 fmh->num_protocol_cmds_coalesced = 1;
1753
1754 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1755 memset(scsi, 0, sizeof(*scsi));
1756 dma_address = skspcl->req.sksg_dma_address;
1757 scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
1758 sgd->control = FIT_SGD_CONTROL_LAST;
1759 sgd->byte_count = 0;
1760 sgd->host_side_addr = skspcl->db_dma_address;
1761 sgd->dev_side_addr = 0;
1762 sgd->next_desc_ptr = 0LL;
1763
1764 return 1;
1765}
1766
1767#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
1768
1769static void skd_send_internal_skspcl(struct skd_device *skdev,
1770 struct skd_special_context *skspcl,
1771 u8 opcode)
1772{
1773 struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
1774 struct skd_scsi_request *scsi;
1775 unsigned char *buf = skspcl->data_buf;
1776 int i;
1777
1778 if (skspcl->req.state != SKD_REQ_STATE_IDLE)
1779 /*
1780 * A refresh is already in progress.
1781 * Just wait for it to finish.
1782 */
1783 return;
1784
1785 SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
1786 skspcl->req.state = SKD_REQ_STATE_BUSY;
1787 skspcl->req.id += SKD_ID_INCR;
1788
1789 scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
1790 scsi->hdr.tag = skspcl->req.id;
1791
1792 memset(scsi->cdb, 0, sizeof(scsi->cdb));
1793
1794 switch (opcode) {
1795 case TEST_UNIT_READY:
1796 scsi->cdb[0] = TEST_UNIT_READY;
1797 sgd->byte_count = 0;
1798 scsi->hdr.sg_list_len_bytes = 0;
1799 break;
1800
1801 case READ_CAPACITY:
1802 scsi->cdb[0] = READ_CAPACITY;
1803 sgd->byte_count = SKD_N_READ_CAP_BYTES;
1804 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1805 break;
1806
1807 case INQUIRY:
1808 scsi->cdb[0] = INQUIRY;
1809 scsi->cdb[1] = 0x01; /* evpd */
1810 scsi->cdb[2] = 0x80; /* serial number page */
1811 scsi->cdb[4] = 0x10;
1812 sgd->byte_count = 16;
1813 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1814 break;
1815
1816 case SYNCHRONIZE_CACHE:
1817 scsi->cdb[0] = SYNCHRONIZE_CACHE;
1818 sgd->byte_count = 0;
1819 scsi->hdr.sg_list_len_bytes = 0;
1820 break;
1821
1822 case WRITE_BUFFER:
1823 scsi->cdb[0] = WRITE_BUFFER;
1824 scsi->cdb[1] = 0x02;
1825 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1826 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1827 sgd->byte_count = WR_BUF_SIZE;
1828 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1829 /* fill incrementing byte pattern */
1830 for (i = 0; i < sgd->byte_count; i++)
1831 buf[i] = i & 0xFF;
1832 break;
1833
1834 case READ_BUFFER:
1835 scsi->cdb[0] = READ_BUFFER;
1836 scsi->cdb[1] = 0x02;
1837 scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
1838 scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
1839 sgd->byte_count = WR_BUF_SIZE;
1840 scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
1841 memset(skspcl->data_buf, 0, sgd->byte_count);
1842 break;
1843
1844 default:
1845 SKD_ASSERT("Don't know what to send");
1846 return;
1847
1848 }
1849 skd_send_special_fitmsg(skdev, skspcl);
1850}
1851
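/*
 * Summary added for clarity (derived from skd_complete_internal() below):
 * the internal refresh runs as a chain of SSDI commands -- TEST_UNIT_READY,
 * then WRITE_BUFFER/READ_BUFFER to verify the data path, then READ_CAPACITY
 * and INQUIRY -- after which the device is brought online.
 */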
1852static void skd_refresh_device_data(struct skd_device *skdev)
1853{
1854 struct skd_special_context *skspcl = &skdev->internal_skspcl;
1855
1856 skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
1857}
1858
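/*
 * Added note: verifies that READ_BUFFER returned the incrementing byte
 * pattern written by the preceding WRITE_BUFFER; returns 0 on a match
 * and 1 on a mismatch.
 */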
1859static int skd_chk_read_buf(struct skd_device *skdev,
1860 struct skd_special_context *skspcl)
1861{
1862 unsigned char *buf = skspcl->data_buf;
1863 int i;
1864
1865 /* check for incrementing byte pattern */
1866 for (i = 0; i < WR_BUF_SIZE; i++)
1867 if (buf[i] != (i & 0xFF))
1868 return 1;
1869
1870 return 0;
1871}
1872
1873static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
1874 u8 code, u8 qual, u8 fruc)
1875{
1876 /* If the check condition is of special interest, log a message */
1877 if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
1878 && (code == 0x04) && (qual == 0x06)) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001879 dev_err(&skdev->pdev->dev,
1880 "*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
1881 key, code, qual, fruc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001882 }
1883}
1884
1885static void skd_complete_internal(struct skd_device *skdev,
1886 volatile struct fit_completion_entry_v1
1887 *skcomp,
1888 volatile struct fit_comp_error_info *skerr,
1889 struct skd_special_context *skspcl)
1890{
1891 u8 *buf = skspcl->data_buf;
1892 u8 status;
1893 int i;
1894 struct skd_scsi_request *scsi =
1895 (struct skd_scsi_request *)&skspcl->msg_buf[64];
1896
1897 SKD_ASSERT(skspcl == &skdev->internal_skspcl);
1898
Bart Van Asschef98806d2017-08-17 13:12:58 -07001899 dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001900
1901 skspcl->req.completion = *skcomp;
1902 skspcl->req.state = SKD_REQ_STATE_IDLE;
1903 skspcl->req.id += SKD_ID_INCR;
1904
1905 status = skspcl->req.completion.status;
1906
1907 skd_log_check_status(skdev, status, skerr->key, skerr->code,
1908 skerr->qual, skerr->fruc);
1909
1910 switch (scsi->cdb[0]) {
1911 case TEST_UNIT_READY:
1912 if (status == SAM_STAT_GOOD)
1913 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1914 else if ((status == SAM_STAT_CHECK_CONDITION) &&
1915 (skerr->key == MEDIUM_ERROR))
1916 skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
1917 else {
1918 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001919 dev_dbg(&skdev->pdev->dev,
1920 "TUR failed, don't send anymore state 0x%x\n",
1921 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001922 return;
1923 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07001924 dev_dbg(&skdev->pdev->dev,
1925 "**** TUR failed, retry skerr\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001926 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1927 }
1928 break;
1929
1930 case WRITE_BUFFER:
1931 if (status == SAM_STAT_GOOD)
1932 skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
1933 else {
1934 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001935 dev_dbg(&skdev->pdev->dev,
1936 "write buffer failed, don't send anymore state 0x%x\n",
1937 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001938 return;
1939 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07001940 dev_dbg(&skdev->pdev->dev,
1941 "**** write buffer failed, retry skerr\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001942 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1943 }
1944 break;
1945
1946 case READ_BUFFER:
1947 if (status == SAM_STAT_GOOD) {
1948 if (skd_chk_read_buf(skdev, skspcl) == 0)
1949 skd_send_internal_skspcl(skdev, skspcl,
1950 READ_CAPACITY);
1951 else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001952 dev_err(&skdev->pdev->dev,
1953 "*** W/R Buffer mismatch %d ***\n",
1954 skdev->connect_retries);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001955 if (skdev->connect_retries <
1956 SKD_MAX_CONNECT_RETRIES) {
1957 skdev->connect_retries++;
1958 skd_soft_reset(skdev);
1959 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001960 dev_err(&skdev->pdev->dev,
1961 "W/R Buffer Connect Error\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001962 return;
1963 }
1964 }
1965
1966 } else {
1967 if (skdev->state == SKD_DRVR_STATE_STOPPING) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001968 dev_dbg(&skdev->pdev->dev,
1969 "read buffer failed, don't send anymore state 0x%x\n",
1970 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001971 return;
1972 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07001973 dev_dbg(&skdev->pdev->dev,
1974 "**** read buffer failed, retry skerr\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001975 skd_send_internal_skspcl(skdev, skspcl, 0x00);
1976 }
1977 break;
1978
1979 case READ_CAPACITY:
1980 skdev->read_cap_is_valid = 0;
1981 if (status == SAM_STAT_GOOD) {
1982 skdev->read_cap_last_lba =
1983 (buf[0] << 24) | (buf[1] << 16) |
1984 (buf[2] << 8) | buf[3];
1985 skdev->read_cap_blocksize =
1986 (buf[4] << 24) | (buf[5] << 16) |
1987 (buf[6] << 8) | buf[7];
1988
Bart Van Asschef98806d2017-08-17 13:12:58 -07001989 dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
1990 skdev->read_cap_last_lba,
1991 skdev->read_cap_blocksize);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001992
1993 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
1994
1995 skdev->read_cap_is_valid = 1;
1996
1997 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
1998 } else if ((status == SAM_STAT_CHECK_CONDITION) &&
1999 (skerr->key == MEDIUM_ERROR)) {
2000 skdev->read_cap_last_lba = ~0;
2001 set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002002 dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002003 skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
2004 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002005 dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002006 skd_send_internal_skspcl(skdev, skspcl,
2007 TEST_UNIT_READY);
2008 }
2009 break;
2010
2011 case INQUIRY:
2012 skdev->inquiry_is_valid = 0;
2013 if (status == SAM_STAT_GOOD) {
2014 skdev->inquiry_is_valid = 1;
2015
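			/*
			 * Added note: this is the unit serial number VPD
			 * page (0x80) requested in skd_send_internal_skspcl();
			 * the serial number begins at byte 4 of the response.
			 */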
2016 for (i = 0; i < 12; i++)
2017 skdev->inq_serial_num[i] = buf[i + 4];
2018 skdev->inq_serial_num[12] = 0;
2019 }
2020
2021 if (skd_unquiesce_dev(skdev) < 0)
Bart Van Asschef98806d2017-08-17 13:12:58 -07002022			dev_dbg(&skdev->pdev->dev, "**** failed to ONLINE device\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002023 /* connection is complete */
2024 skdev->connect_retries = 0;
2025 break;
2026
2027 case SYNCHRONIZE_CACHE:
2028 if (status == SAM_STAT_GOOD)
2029 skdev->sync_done = 1;
2030 else
2031 skdev->sync_done = -1;
2032 wake_up_interruptible(&skdev->waitq);
2033 break;
2034
2035 default:
2036 SKD_ASSERT("we didn't send this");
2037 }
2038}
2039
2040/*
2041 *****************************************************************************
2042 * FIT MESSAGES
2043 *****************************************************************************
2044 */
2045
2046static void skd_send_fitmsg(struct skd_device *skdev,
2047 struct skd_fitmsg_context *skmsg)
2048{
2049 u64 qcmd;
2050 struct fit_msg_hdr *fmh;
2051
Bart Van Asschef98806d2017-08-17 13:12:58 -07002052 dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
2053 skmsg->mb_dma_address, skdev->in_flight);
2054 dev_dbg(&skdev->pdev->dev, "msg_buf 0x%p, offset %x\n", skmsg->msg_buf,
2055 skmsg->offset);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002056
2057 qcmd = skmsg->mb_dma_address;
2058 qcmd |= FIT_QCMD_QID_NORMAL;
2059
2060 fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
2061 skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
2062
2063 if (unlikely(skdev->dbg_level > 1)) {
2064 u8 *bp = (u8 *)skmsg->msg_buf;
2065 int i;
2066 for (i = 0; i < skmsg->length; i += 8) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002067 dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
2068 &bp[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002069 if (i == 0)
2070 i = 64 - 8;
2071 }
2072 }
2073
2074 if (skmsg->length > 256)
2075 qcmd |= FIT_QCMD_MSGSIZE_512;
2076 else if (skmsg->length > 128)
2077 qcmd |= FIT_QCMD_MSGSIZE_256;
2078 else if (skmsg->length > 64)
2079 qcmd |= FIT_QCMD_MSGSIZE_128;
2080 else
2081 /*
2082 * This makes no sense because the FIT msg header is
2083 * 64 bytes. If the msg is only 64 bytes long it has
2084 * no payload.
2085 */
2086 qcmd |= FIT_QCMD_MSGSIZE_64;
2087
Bart Van Assche5fbd5452017-08-17 13:12:46 -07002088 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2089 smp_wmb();
2090
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002091 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002092}
2093
2094static void skd_send_special_fitmsg(struct skd_device *skdev,
2095 struct skd_special_context *skspcl)
2096{
2097 u64 qcmd;
2098
2099 if (unlikely(skdev->dbg_level > 1)) {
2100 u8 *bp = (u8 *)skspcl->msg_buf;
2101 int i;
2102
2103 for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002104 dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
2105 &bp[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002106 if (i == 0)
2107 i = 64 - 8;
2108 }
2109
Bart Van Asschef98806d2017-08-17 13:12:58 -07002110 dev_dbg(&skdev->pdev->dev,
2111 "skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
2112 skspcl, skspcl->req.id, skspcl->req.sksg_list,
2113 skspcl->req.sksg_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002114 for (i = 0; i < skspcl->req.n_sg; i++) {
2115 struct fit_sg_descriptor *sgd =
2116 &skspcl->req.sksg_list[i];
2117
Bart Van Asschef98806d2017-08-17 13:12:58 -07002118 dev_dbg(&skdev->pdev->dev,
2119 " sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
2120 i, sgd->byte_count, sgd->control,
2121 sgd->host_side_addr, sgd->next_desc_ptr);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002122 }
2123 }
2124
2125 /*
2126 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
2127 * and one 64-byte SSDI command.
2128 */
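	/*
	 * Added note: the doorbell word packs the message's DMA address
	 * together with the queue id and the 128-byte size flag; see
	 * skd_send_fitmsg() for the size encoding of normal messages.
	 */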
2129 qcmd = skspcl->mb_dma_address;
2130 qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
2131
Bart Van Assche5fbd5452017-08-17 13:12:46 -07002132 /* Make sure skd_msg_buf is written before the doorbell is triggered. */
2133 smp_wmb();
2134
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002135 SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
2136}
2137
2138/*
2139 *****************************************************************************
2140 * COMPLETION QUEUE
2141 *****************************************************************************
2142 */
2143
2144static void skd_complete_other(struct skd_device *skdev,
2145 volatile struct fit_completion_entry_v1 *skcomp,
2146 volatile struct fit_comp_error_info *skerr);
2147
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002148struct sns_info {
2149 u8 type;
2150 u8 stat;
2151 u8 key;
2152 u8 asc;
2153 u8 ascq;
2154 u8 mask;
2155 enum skd_check_status_action action;
2156};
2157
2158static struct sns_info skd_chkstat_table[] = {
2159 /* Good */
2160 { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
2161 SKD_CHECK_STATUS_REPORT_GOOD },
2162
2163 /* Smart alerts */
2164 { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
2165 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2166 { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
2167 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2168 { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
2169 SKD_CHECK_STATUS_REPORT_SMART_ALERT },
2170
2171 /* Retry (with limits) */
2172 { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
2173 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2174 { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
2175 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2176 { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
2177 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2178 { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
2179 SKD_CHECK_STATUS_REQUEUE_REQUEST },
2180
2181 /* Busy (or about to be) */
2182 { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
2183 SKD_CHECK_STATUS_BUSY_IMMINENT },
2184};
2185
2186/*
2187 * Look up status and sense data to decide how to handle the error
2188 * from the device.
2189 * mask says which fields must match e.g., mask=0x18 means check
2190 * type and stat, ignore key, asc, ascq.
2191 */
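/*
 * Worked example (added, matching the field checks in skd_check_status()
 * below): bit 0x10 selects type, 0x08 stat, 0x04 key, 0x02 asc and
 * 0x01 ascq.  The DMA-error entry above therefore uses mask 0x1C --
 * type, stat and key must match while asc/ascq are ignored.
 */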
2192
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04002193static enum skd_check_status_action
2194skd_check_status(struct skd_device *skdev,
2195 u8 cmp_status, volatile struct fit_comp_error_info *skerr)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002196{
2197 int i, n;
2198
Bart Van Asschef98806d2017-08-17 13:12:58 -07002199 dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
2200 skerr->key, skerr->code, skerr->qual, skerr->fruc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002201
Bart Van Asschef98806d2017-08-17 13:12:58 -07002202 dev_dbg(&skdev->pdev->dev,
2203 "stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
2204 skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
2205 skerr->fruc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002206
2207	/* Does the info match an entry in the lookup table? */
2208 n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
2209 for (i = 0; i < n; i++) {
2210 struct sns_info *sns = &skd_chkstat_table[i];
2211
2212 if (sns->mask & 0x10)
2213 if (skerr->type != sns->type)
2214 continue;
2215
2216 if (sns->mask & 0x08)
2217 if (cmp_status != sns->stat)
2218 continue;
2219
2220 if (sns->mask & 0x04)
2221 if (skerr->key != sns->key)
2222 continue;
2223
2224 if (sns->mask & 0x02)
2225 if (skerr->code != sns->asc)
2226 continue;
2227
2228 if (sns->mask & 0x01)
2229 if (skerr->qual != sns->ascq)
2230 continue;
2231
2232 if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002233 dev_err(&skdev->pdev->dev,
2234 "SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
2235 skerr->key, skerr->code, skerr->qual);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002236 }
2237 return sns->action;
2238 }
2239
2240 /* No other match, so nonzero status means error,
2241 * zero status means good
2242 */
2243 if (cmp_status) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002244 dev_dbg(&skdev->pdev->dev, "status check: error\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002245 return SKD_CHECK_STATUS_REPORT_ERROR;
2246 }
2247
Bart Van Asschef98806d2017-08-17 13:12:58 -07002248 dev_dbg(&skdev->pdev->dev, "status check good default\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002249 return SKD_CHECK_STATUS_REPORT_GOOD;
2250}
2251
2252static void skd_resolve_req_exception(struct skd_device *skdev,
2253 struct skd_request_context *skreq)
2254{
2255 u8 cmp_status = skreq->completion.status;
2256
2257 switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
2258 case SKD_CHECK_STATUS_REPORT_GOOD:
2259 case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02002260 skd_end_request(skdev, skreq, BLK_STS_OK);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002261 break;
2262
2263 case SKD_CHECK_STATUS_BUSY_IMMINENT:
2264 skd_log_skreq(skdev, skreq, "retry(busy)");
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04002265 blk_requeue_request(skdev->queue, skreq->req);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002266 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002267 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
2268 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
2269 skd_quiesce_dev(skdev);
2270 break;
2271
2272 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
Jens Axboefcd37eb2013-11-01 10:14:56 -06002273 if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
2274 skd_log_skreq(skdev, skreq, "retry");
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04002275 blk_requeue_request(skdev->queue, skreq->req);
Jens Axboefcd37eb2013-11-01 10:14:56 -06002276 break;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002277 }
Bart Van Asschece6882b2017-08-17 13:12:52 -07002278 /* fall through */
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002279
2280 case SKD_CHECK_STATUS_REPORT_ERROR:
2281 default:
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02002282 skd_end_request(skdev, skreq, BLK_STS_IOERR);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002283 break;
2284 }
2285}
2286
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002287/* assume spinlock is already held */
2288static void skd_release_skreq(struct skd_device *skdev,
2289 struct skd_request_context *skreq)
2290{
2291 u32 msg_slot;
2292 struct skd_fitmsg_context *skmsg;
2293
2294 u32 timo_slot;
2295
2296 /*
2297 * Reclaim the FIT msg buffer if this is
2298	 * the last of the requests it carried to
2299 * be completed. The FIT msg buffer used to
2300 * send this request cannot be reused until
2301 * we are sure the s1120 card has copied
2302 * it to its memory. The FIT msg might have
2303 * contained several requests. As soon as
2304 * any of them are completed we know that
2305 * the entire FIT msg was transferred.
2306	 * Each completion whose fitmsg_id still
2307	 * matches the buffer id decrements the
2308	 * outstanding count. When the count hits
2309	 * zero the buffer id is bumped and the
2310	 * message returns to the free list, so
2311	 * stale ids are rejected quite cheaply.
2312 */
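	/*
	 * Illustration (added): a FIT msg that coalesced three commands
	 * is sent with outstanding == 3; each matching completion
	 * decrements the count and the buffer is only recycled when the
	 * count reaches zero.
	 */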
2313 msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
2314 SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
2315
2316 skmsg = &skdev->skmsg_table[msg_slot];
2317 if (skmsg->id == skreq->fitmsg_id) {
2318 SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
2319 SKD_ASSERT(skmsg->outstanding > 0);
2320 skmsg->outstanding--;
2321 if (skmsg->outstanding == 0) {
2322 skmsg->state = SKD_MSG_STATE_IDLE;
2323 skmsg->id += SKD_ID_INCR;
2324 skmsg->next = skdev->skmsg_free_list;
2325 skdev->skmsg_free_list = skmsg;
2326 }
2327 }
2328
2329 /*
2330 * Decrease the number of active requests.
2331 * Also decrements the count in the timeout slot.
2332 */
2333 SKD_ASSERT(skdev->in_flight > 0);
2334 skdev->in_flight -= 1;
2335
2336 timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
2337 SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
2338 skdev->timeout_slot[timo_slot] -= 1;
2339
2340 /*
2341 * Reset backpointer
2342 */
Jens Axboefcd37eb2013-11-01 10:14:56 -06002343 skreq->req = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002344
2345 /*
2346 * Reclaim the skd_request_context
2347 */
2348 skreq->state = SKD_REQ_STATE_IDLE;
2349 skreq->id += SKD_ID_INCR;
2350 skreq->next = skdev->skreq_free_list;
2351 skdev->skreq_free_list = skreq;
2352}
2353
2354#define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
2355
2356static void skd_do_inq_page_00(struct skd_device *skdev,
2357 volatile struct fit_completion_entry_v1 *skcomp,
2358 volatile struct fit_comp_error_info *skerr,
2359 uint8_t *cdb, uint8_t *buf)
2360{
2361 uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
2362
2363 /* Caller requested "supported pages". The driver needs to insert
2364 * its page.
2365 */
Bart Van Asschef98806d2017-08-17 13:12:58 -07002366 dev_dbg(&skdev->pdev->dev,
2367 "skd_do_driver_inquiry: modify supported pages.\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002368
2369 /* If the device rejected the request because the CDB was
2370 * improperly formed, then just leave.
2371 */
2372 if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
2373 skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
2374 return;
2375
2376 /* Get the amount of space the caller allocated */
2377 max_bytes = (cdb[3] << 8) | cdb[4];
2378
2379 /* Get the number of pages actually returned by the device */
2380 drive_pages = (buf[2] << 8) | buf[3];
2381 drive_bytes = drive_pages + 4;
2382 new_size = drive_pages + 1;
2383
2384 /* Supported pages must be in numerical order, so find where
2385 * the driver page needs to be inserted into the list of
2386 * pages returned by the device.
2387 */
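	/* For example (added): if the device returned pages 0x00, 0x80 and
	 * 0x83, the driver page 0xDA sorts after all of them and ends up
	 * appended at the end of the list.
	 */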
2388 for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
2389 if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2390			return; /* Device already uses this page code; abort. */
2391 else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
2392 break;
2393 }
2394
2395 if (insert_pt < max_bytes) {
2396 uint16_t u;
2397
2398 /* Shift everything up one byte to make room. */
2399 for (u = new_size + 3; u > insert_pt; u--)
2400 buf[u] = buf[u - 1];
2401 buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2402
2403		/* Increment num_returned_bytes by 1, keeping SCSI (big-endian) byte order */
2404 skcomp->num_returned_bytes =
Bart Van Assche4854afe2017-08-17 13:12:59 -07002405 cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002406 }
2407
2408 /* update page length field to reflect the driver's page too */
2409 buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
2410 buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
2411}
2412
2413static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
2414{
2415 int pcie_reg;
2416 u16 pci_bus_speed;
2417 u8 pci_lanes;
2418
2419 pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
2420 if (pcie_reg) {
2421 u16 linksta;
2422 pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
2423
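		/*
		 * Added note: in PCI_EXP_LNKSTA, bits 3:0 hold the negotiated
		 * link speed (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s) and
		 * bits 9:4 the negotiated link width.
		 */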
2424 pci_bus_speed = linksta & 0xF;
2425 pci_lanes = (linksta & 0x3F0) >> 4;
2426 } else {
2427 *speed = STEC_LINK_UNKNOWN;
2428 *width = 0xFF;
2429 return;
2430 }
2431
2432 switch (pci_bus_speed) {
2433 case 1:
2434 *speed = STEC_LINK_2_5GTS;
2435 break;
2436 case 2:
2437 *speed = STEC_LINK_5GTS;
2438 break;
2439 case 3:
2440 *speed = STEC_LINK_8GTS;
2441 break;
2442 default:
2443 *speed = STEC_LINK_UNKNOWN;
2444 break;
2445 }
2446
2447 if (pci_lanes <= 0x20)
2448 *width = pci_lanes;
2449 else
2450 *width = 0xFF;
2451}
2452
2453static void skd_do_inq_page_da(struct skd_device *skdev,
2454 volatile struct fit_completion_entry_v1 *skcomp,
2455 volatile struct fit_comp_error_info *skerr,
2456 uint8_t *cdb, uint8_t *buf)
2457{
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002458 struct pci_dev *pdev = skdev->pdev;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002459 unsigned max_bytes;
2460 struct driver_inquiry_data inq;
2461 u16 val;
2462
Bart Van Asschef98806d2017-08-17 13:12:58 -07002463 dev_dbg(&skdev->pdev->dev, "skd_do_driver_inquiry: return driver page\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002464
2465 memset(&inq, 0, sizeof(inq));
2466
2467 inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
2468
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002469 skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
2470 inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
2471 inq.pcie_device_number = PCI_SLOT(pdev->devfn);
2472 inq.pcie_function_number = PCI_FUNC(pdev->devfn);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002473
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002474 pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
2475 inq.pcie_vendor_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002476
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002477 pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
2478 inq.pcie_device_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002479
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002480 pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
2481 inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002482
Bartlomiej Zolnierkiewiczfec23f62013-11-05 12:37:07 +01002483 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
2484 inq.pcie_subsystem_device_id = cpu_to_be16(val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002485
2486	/* Driver version, fixed length, padded with spaces on the right */
2487 inq.driver_version_length = sizeof(inq.driver_version);
2488 memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
2489 memcpy(inq.driver_version, DRV_VER_COMPL,
2490 min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
2491
2492 inq.page_length = cpu_to_be16((sizeof(inq) - 4));
2493
2494 /* Clear the error set by the device */
2495 skcomp->status = SAM_STAT_GOOD;
2496 memset((void *)skerr, 0, sizeof(*skerr));
2497
2498 /* copy response into output buffer */
2499 max_bytes = (cdb[3] << 8) | cdb[4];
2500 memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
2501
2502 skcomp->num_returned_bytes =
Bart Van Assche4854afe2017-08-17 13:12:59 -07002503 cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002504}
2505
2506static void skd_do_driver_inq(struct skd_device *skdev,
2507 volatile struct fit_completion_entry_v1 *skcomp,
2508 volatile struct fit_comp_error_info *skerr,
2509 uint8_t *cdb, uint8_t *buf)
2510{
2511 if (!buf)
2512 return;
2513 else if (cdb[0] != INQUIRY)
2514 return; /* Not an INQUIRY */
2515 else if ((cdb[1] & 1) == 0)
2516 return; /* EVPD not set */
2517 else if (cdb[2] == 0)
2518 /* Need to add driver's page to supported pages list */
2519 skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
2520 else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
2521 /* Caller requested driver's page */
2522 skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
2523}
2524
2525static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
2526{
2527 if (!sg)
2528 return NULL;
2529 if (!sg_page(sg))
2530 return NULL;
2531 return sg_virt(sg);
2532}
2533
2534static void skd_process_scsi_inq(struct skd_device *skdev,
2535 volatile struct fit_completion_entry_v1
2536 *skcomp,
2537 volatile struct fit_comp_error_info *skerr,
2538 struct skd_special_context *skspcl)
2539{
2540 uint8_t *buf;
2541 struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
2542 struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
2543
2544 dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
2545 skspcl->req.sg_data_dir);
2546 buf = skd_sg_1st_page_ptr(skspcl->req.sg);
2547
2548 if (buf)
2549 skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
2550}
2551
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002552static int skd_isr_completion_posted(struct skd_device *skdev,
2553 int limit, int *enqueued)
2554{
2555 volatile struct fit_completion_entry_v1 *skcmp = NULL;
2556 volatile struct fit_comp_error_info *skerr;
2557 u16 req_id;
2558 u32 req_slot;
2559 struct skd_request_context *skreq;
2560 u16 cmp_cntxt = 0;
2561 u8 cmp_status = 0;
2562 u8 cmp_cycle = 0;
2563 u32 cmp_bytes = 0;
2564 int rc = 0;
2565 int processed = 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002566
2567 for (;; ) {
2568 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
2569
2570 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
2571 cmp_cycle = skcmp->cycle;
2572 cmp_cntxt = skcmp->tag;
2573 cmp_status = skcmp->status;
2574 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
2575
2576 skerr = &skdev->skerr_table[skdev->skcomp_ix];
2577
Bart Van Asschef98806d2017-08-17 13:12:58 -07002578 dev_dbg(&skdev->pdev->dev,
2579 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
2580 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
2581 cmp_cntxt, cmp_status, skdev->in_flight, cmp_bytes,
2582 skdev->proto_ver);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002583
2584 if (cmp_cycle != skdev->skcomp_cycle) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002585 dev_dbg(&skdev->pdev->dev, "end of completions\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002586 break;
2587 }
2588 /*
2589 * Update the completion queue head index and possibly
2590 * the completion cycle count. 8-bit wrap-around.
2591 */
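		/*
		 * Illustration (added): with SKD_N_COMPLETION_ENTRY == 256,
		 * index 255 wraps back to 0 and the expected cycle value
		 * advances, so entries written during the previous pass fail
		 * the cycle check above and are not processed again.
		 */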
2592 skdev->skcomp_ix++;
2593 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
2594 skdev->skcomp_ix = 0;
2595 skdev->skcomp_cycle++;
2596 }
2597
2598 /*
2599 * The command context is a unique 32-bit ID. The low order
2600 * bits help locate the request. The request is usually a
2601 * r/w request (see skd_start() above) or a special request.
2602 */
2603 req_id = cmp_cntxt;
2604 req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
2605
2606 /* Is this other than a r/w request? */
2607 if (req_slot >= skdev->num_req_context) {
2608 /*
2609 * This is not a completion for a r/w request.
2610 */
2611 skd_complete_other(skdev, skcmp, skerr);
2612 continue;
2613 }
2614
2615 skreq = &skdev->skreq_table[req_slot];
2616
2617 /*
2618 * Make sure the request ID for the slot matches.
2619 */
2620 if (skreq->id != req_id) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002621 dev_dbg(&skdev->pdev->dev,
2622 "mismatch comp_id=0x%x req_id=0x%x\n", req_id,
2623 skreq->id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002624 {
2625 u16 new_id = cmp_cntxt;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002626 dev_err(&skdev->pdev->dev,
2627 "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
2628 req_id, skreq->id, new_id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002629
2630 continue;
2631 }
2632 }
2633
2634 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
2635
2636 if (skreq->state == SKD_REQ_STATE_ABORTED) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002637 dev_dbg(&skdev->pdev->dev, "reclaim req %p id=%04x\n",
2638 skreq, skreq->id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002639 /* a previously timed out command can
2640 * now be cleaned up */
2641 skd_release_skreq(skdev, skreq);
2642 continue;
2643 }
2644
2645 skreq->completion = *skcmp;
2646 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
2647 skreq->err_info = *skerr;
2648 skd_log_check_status(skdev, cmp_status, skerr->key,
2649 skerr->code, skerr->qual,
2650 skerr->fruc);
2651 }
2652 /* Release DMA resources for the request. */
2653 if (skreq->n_sg > 0)
2654 skd_postop_sg_list(skdev, skreq);
2655
Jens Axboefcd37eb2013-11-01 10:14:56 -06002656 if (!skreq->req) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002657 dev_dbg(&skdev->pdev->dev,
2658 "NULL backptr skdreq %p, req=0x%x req_id=0x%x\n",
2659 skreq, skreq->id, req_id);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002660 } else {
2661 /*
2662 * Capture the outcome and post it back to the
2663 * native request.
2664 */
Jens Axboefcd37eb2013-11-01 10:14:56 -06002665 if (likely(cmp_status == SAM_STAT_GOOD))
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02002666 skd_end_request(skdev, skreq, BLK_STS_OK);
Jens Axboefcd37eb2013-11-01 10:14:56 -06002667 else
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002668 skd_resolve_req_exception(skdev, skreq);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002669 }
2670
2671 /*
2672 * Release the skreq, its FIT msg (if one), timeout slot,
2673 * and queue depth.
2674 */
2675 skd_release_skreq(skdev, skreq);
2676
2677 /* skd_isr_comp_limit equal zero means no limit */
2678 if (limit) {
2679 if (++processed >= limit) {
2680 rc = 1;
2681 break;
2682 }
2683 }
2684 }
2685
2686 if ((skdev->state == SKD_DRVR_STATE_PAUSING)
2687 && (skdev->in_flight) == 0) {
2688 skdev->state = SKD_DRVR_STATE_PAUSED;
2689 wake_up_interruptible(&skdev->waitq);
2690 }
2691
2692 return rc;
2693}
2694
2695static void skd_complete_other(struct skd_device *skdev,
2696 volatile struct fit_completion_entry_v1 *skcomp,
2697 volatile struct fit_comp_error_info *skerr)
2698{
2699 u32 req_id = 0;
2700 u32 req_table;
2701 u32 req_slot;
2702 struct skd_special_context *skspcl;
2703
2704 req_id = skcomp->tag;
2705 req_table = req_id & SKD_ID_TABLE_MASK;
2706 req_slot = req_id & SKD_ID_SLOT_MASK;
2707
Bart Van Asschef98806d2017-08-17 13:12:58 -07002708 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
2709 req_id, req_slot);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002710
2711 /*
2712 * Based on the request id, determine how to dispatch this completion.
2713	 * This switch/case is finding the good cases and forwarding the
2714 * completion entry. Errors are reported below the switch.
2715 */
2716 switch (req_table) {
2717 case SKD_ID_RW_REQUEST:
2718 /*
Bart Van Asschee1d06f22017-08-17 13:12:54 -07002719 * The caller, skd_isr_completion_posted() above,
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002720 * handles r/w requests. The only way we get here
2721 * is if the req_slot is out of bounds.
2722 */
2723 break;
2724
2725 case SKD_ID_SPECIAL_REQUEST:
2726 /*
2727 * Make sure the req_slot is in bounds and that the id
2728 * matches.
2729 */
2730 if (req_slot < skdev->n_special) {
2731 skspcl = &skdev->skspcl_table[req_slot];
2732 if (skspcl->req.id == req_id &&
2733 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2734 skd_complete_special(skdev,
2735 skcomp, skerr, skspcl);
2736 return;
2737 }
2738 }
2739 break;
2740
2741 case SKD_ID_INTERNAL:
2742 if (req_slot == 0) {
2743 skspcl = &skdev->internal_skspcl;
2744 if (skspcl->req.id == req_id &&
2745 skspcl->req.state == SKD_REQ_STATE_BUSY) {
2746 skd_complete_internal(skdev,
2747 skcomp, skerr, skspcl);
2748 return;
2749 }
2750 }
2751 break;
2752
2753 case SKD_ID_FIT_MSG:
2754 /*
2755	 * These IDs should never appear in a completion record.
2756 */
2757 break;
2758
2759 default:
2760 /*
2761	 * These IDs should never appear anywhere.
2762 */
2763 break;
2764 }
2765
2766 /*
2767 * If we get here it is a bad or stale id.
2768 */
2769}
2770
2771static void skd_complete_special(struct skd_device *skdev,
2772 volatile struct fit_completion_entry_v1
2773 *skcomp,
2774 volatile struct fit_comp_error_info *skerr,
2775 struct skd_special_context *skspcl)
2776{
Bart Van Asschef98806d2017-08-17 13:12:58 -07002777 dev_dbg(&skdev->pdev->dev, " completing special request %p\n", skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002778 if (skspcl->orphaned) {
2779 /* Discard orphaned request */
2780 /* ?: Can this release directly or does it need
2781 * to use a worker? */
Bart Van Asschef98806d2017-08-17 13:12:58 -07002782 dev_dbg(&skdev->pdev->dev, "release orphaned %p\n", skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002783 skd_release_special(skdev, skspcl);
2784 return;
2785 }
2786
2787 skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
2788
2789 skspcl->req.state = SKD_REQ_STATE_COMPLETED;
2790 skspcl->req.completion = *skcomp;
2791 skspcl->req.err_info = *skerr;
2792
2793 skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
2794 skerr->code, skerr->qual, skerr->fruc);
2795
2796 wake_up_interruptible(&skdev->waitq);
2797}
2798
2799/* assume spinlock is already held */
2800static void skd_release_special(struct skd_device *skdev,
2801 struct skd_special_context *skspcl)
2802{
2803 int i, was_depleted;
2804
2805 for (i = 0; i < skspcl->req.n_sg; i++) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002806 struct page *page = sg_page(&skspcl->req.sg[i]);
2807 __free_page(page);
2808 }
2809
2810 was_depleted = (skdev->skspcl_free_list == NULL);
2811
2812 skspcl->req.state = SKD_REQ_STATE_IDLE;
2813 skspcl->req.id += SKD_ID_INCR;
2814 skspcl->req.next =
2815 (struct skd_request_context *)skdev->skspcl_free_list;
2816 skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
2817
2818 if (was_depleted) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002819 dev_dbg(&skdev->pdev->dev, "skspcl was depleted\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002820		/* Free list was depleted. There might be waiters. */
2821 wake_up_interruptible(&skdev->waitq);
2822 }
2823}
2824
2825static void skd_reset_skcomp(struct skd_device *skdev)
2826{
2827 u32 nbytes;
2828 struct fit_completion_entry_v1 *skcomp;
2829
2830 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
2831 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
2832
2833 memset(skdev->skcomp_table, 0, nbytes);
2834
2835 skdev->skcomp_ix = 0;
2836 skdev->skcomp_cycle = 1;
2837}
2838
2839/*
2840 *****************************************************************************
2841 * INTERRUPTS
2842 *****************************************************************************
2843 */
2844static void skd_completion_worker(struct work_struct *work)
2845{
2846 struct skd_device *skdev =
2847 container_of(work, struct skd_device, completion_worker);
2848 unsigned long flags;
2849 int flush_enqueued = 0;
2850
2851 spin_lock_irqsave(&skdev->lock, flags);
2852
2853 /*
2854	 * pass in limit=0, which means no limit;
2855 * process everything in compq
2856 */
2857 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
2858 skd_request_fn(skdev->queue);
2859
2860 spin_unlock_irqrestore(&skdev->lock, flags);
2861}
2862
2863static void skd_isr_msg_from_dev(struct skd_device *skdev);
2864
Arnd Bergmann41c94992016-11-09 13:55:35 +01002865static irqreturn_t
2866skd_isr(int irq, void *ptr)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002867{
2868 struct skd_device *skdev;
2869 u32 intstat;
2870 u32 ack;
2871 int rc = 0;
2872 int deferred = 0;
2873 int flush_enqueued = 0;
2874
2875 skdev = (struct skd_device *)ptr;
2876 spin_lock(&skdev->lock);
2877
2878 for (;; ) {
2879 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
2880
2881 ack = FIT_INT_DEF_MASK;
2882 ack &= intstat;
2883
Bart Van Asschef98806d2017-08-17 13:12:58 -07002884 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
2885 ack);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002886
2887 /* As long as there is an int pending on device, keep
2888 * running loop. When none, get out, but if we've never
2889 * done any processing, call completion handler?
2890 */
2891 if (ack == 0) {
2892 /* No interrupts on device, but run the completion
2893 * processor anyway?
2894 */
2895 if (rc == 0)
2896 if (likely (skdev->state
2897 == SKD_DRVR_STATE_ONLINE))
2898 deferred = 1;
2899 break;
2900 }
2901
2902 rc = IRQ_HANDLED;
2903
2904 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
2905
2906 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
2907 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
2908 if (intstat & FIT_ISH_COMPLETION_POSTED) {
2909 /*
2910 * If we have already deferred completion
2911 * processing, don't bother running it again
2912 */
2913 if (deferred == 0)
2914 deferred =
2915 skd_isr_completion_posted(skdev,
2916 skd_isr_comp_limit, &flush_enqueued);
2917 }
2918
2919 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
2920 skd_isr_fwstate(skdev);
2921 if (skdev->state == SKD_DRVR_STATE_FAULT ||
2922 skdev->state ==
2923 SKD_DRVR_STATE_DISAPPEARED) {
2924 spin_unlock(&skdev->lock);
2925 return rc;
2926 }
2927 }
2928
2929 if (intstat & FIT_ISH_MSG_FROM_DEV)
2930 skd_isr_msg_from_dev(skdev);
2931 }
2932 }
2933
2934 if (unlikely(flush_enqueued))
2935 skd_request_fn(skdev->queue);
2936
2937 if (deferred)
2938 schedule_work(&skdev->completion_worker);
2939 else if (!flush_enqueued)
2940 skd_request_fn(skdev->queue);
2941
2942 spin_unlock(&skdev->lock);
2943
2944 return rc;
2945}
2946
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002947static void skd_drive_fault(struct skd_device *skdev)
2948{
2949 skdev->state = SKD_DRVR_STATE_FAULT;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002950 dev_err(&skdev->pdev->dev, "Drive FAULT\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002951}
2952
2953static void skd_drive_disappeared(struct skd_device *skdev)
2954{
2955 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002956 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002957}
2958
2959static void skd_isr_fwstate(struct skd_device *skdev)
2960{
2961 u32 sense;
2962 u32 state;
2963 u32 mtd;
2964 int prev_driver_state = skdev->state;
2965
2966 sense = SKD_READL(skdev, FIT_STATUS);
2967 state = sense & FIT_SR_DRIVE_STATE_MASK;
2968
Bart Van Asschef98806d2017-08-17 13:12:58 -07002969 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
2970 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
2971 skd_drive_state_to_str(state), state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002972
2973 skdev->drive_state = state;
2974
2975 switch (skdev->drive_state) {
2976 case FIT_SR_DRIVE_INIT:
2977 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
2978 skd_disable_interrupts(skdev);
2979 break;
2980 }
2981 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
2982 skd_recover_requests(skdev, 0);
2983 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
2984 skdev->timer_countdown = SKD_STARTING_TIMO;
2985 skdev->state = SKD_DRVR_STATE_STARTING;
2986 skd_soft_reset(skdev);
2987 break;
2988 }
2989 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
2990 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
2991 skdev->last_mtd = mtd;
2992 break;
2993
2994 case FIT_SR_DRIVE_ONLINE:
2995 skdev->cur_max_queue_depth = skd_max_queue_depth;
2996 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
2997 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
2998
2999 skdev->queue_low_water_mark =
3000 skdev->cur_max_queue_depth * 2 / 3 + 1;
3001 if (skdev->queue_low_water_mark < 1)
3002 skdev->queue_low_water_mark = 1;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003003 dev_info(&skdev->pdev->dev,
3004 "Queue depth limit=%d dev=%d lowat=%d\n",
3005 skdev->cur_max_queue_depth,
3006 skdev->dev_max_queue_depth,
3007 skdev->queue_low_water_mark);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003008
3009 skd_refresh_device_data(skdev);
3010 break;
3011
3012 case FIT_SR_DRIVE_BUSY:
3013 skdev->state = SKD_DRVR_STATE_BUSY;
3014 skdev->timer_countdown = SKD_BUSY_TIMO;
3015 skd_quiesce_dev(skdev);
3016 break;
3017 case FIT_SR_DRIVE_BUSY_SANITIZE:
3018		/* set timer for 3 seconds; we'll abort any unfinished
3019		 * commands after it expires
3020 */
3021 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3022 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
Jens Axboe6a5ec652013-11-01 10:38:45 -06003023 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003024 break;
3025 case FIT_SR_DRIVE_BUSY_ERASE:
3026 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3027 skdev->timer_countdown = SKD_BUSY_TIMO;
3028 break;
3029 case FIT_SR_DRIVE_OFFLINE:
3030 skdev->state = SKD_DRVR_STATE_IDLE;
3031 break;
3032 case FIT_SR_DRIVE_SOFT_RESET:
3033 switch (skdev->state) {
3034 case SKD_DRVR_STATE_STARTING:
3035 case SKD_DRVR_STATE_RESTARTING:
3036 /* Expected by a caller of skd_soft_reset() */
3037 break;
3038 default:
3039 skdev->state = SKD_DRVR_STATE_RESTARTING;
3040 break;
3041 }
3042 break;
3043 case FIT_SR_DRIVE_FW_BOOTING:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003044 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003045 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3046 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3047 break;
3048
3049 case FIT_SR_DRIVE_DEGRADED:
3050 case FIT_SR_PCIE_LINK_DOWN:
3051 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
3052 break;
3053
3054 case FIT_SR_DRIVE_FAULT:
3055 skd_drive_fault(skdev);
3056 skd_recover_requests(skdev, 0);
Jens Axboe6a5ec652013-11-01 10:38:45 -06003057 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003058 break;
3059
3060 /* PCIe bus returned all Fs? */
3061 case 0xFF:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003062 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
3063 sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003064 skd_drive_disappeared(skdev);
3065 skd_recover_requests(skdev, 0);
Jens Axboe6a5ec652013-11-01 10:38:45 -06003066 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003067 break;
3068 default:
3069 /*
3070		 * Unknown FW State. Wait for a state we recognize.
3071 */
3072 break;
3073 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07003074 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3075 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
3076 skd_skdev_state_to_str(skdev->state), skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003077}
3078
3079static void skd_recover_requests(struct skd_device *skdev, int requeue)
3080{
3081 int i;
3082
3083 for (i = 0; i < skdev->num_req_context; i++) {
3084 struct skd_request_context *skreq = &skdev->skreq_table[i];
3085
3086 if (skreq->state == SKD_REQ_STATE_BUSY) {
3087 skd_log_skreq(skdev, skreq, "recover");
3088
3089 SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
Jens Axboefcd37eb2013-11-01 10:14:56 -06003090 SKD_ASSERT(skreq->req != NULL);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003091
3092 /* Release DMA resources for the request. */
3093 if (skreq->n_sg > 0)
3094 skd_postop_sg_list(skdev, skreq);
3095
Jens Axboefcd37eb2013-11-01 10:14:56 -06003096 if (requeue &&
3097 (unsigned long) ++skreq->req->special <
3098 SKD_MAX_RETRIES)
Mike Snitzer38d4a1b2013-11-01 15:05:10 -04003099 blk_requeue_request(skdev->queue, skreq->req);
Jens Axboefcd37eb2013-11-01 10:14:56 -06003100 else
Christoph Hellwig2a842ac2017-06-03 09:38:04 +02003101 skd_end_request(skdev, skreq, BLK_STS_IOERR);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003102
Jens Axboefcd37eb2013-11-01 10:14:56 -06003103 skreq->req = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003104
3105 skreq->state = SKD_REQ_STATE_IDLE;
3106 skreq->id += SKD_ID_INCR;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003107 }
3108 if (i > 0)
3109 skreq[-1].next = skreq;
3110 skreq->next = NULL;
3111 }
3112 skdev->skreq_free_list = skdev->skreq_table;
3113
3114 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3115 struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
3116
3117 if (skmsg->state == SKD_MSG_STATE_BUSY) {
3118 skd_log_skmsg(skdev, skmsg, "salvaged");
3119 SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
3120 skmsg->state = SKD_MSG_STATE_IDLE;
3121 skmsg->id += SKD_ID_INCR;
3122 }
3123 if (i > 0)
3124 skmsg[-1].next = skmsg;
3125 skmsg->next = NULL;
3126 }
3127 skdev->skmsg_free_list = skdev->skmsg_table;
3128
3129 for (i = 0; i < skdev->n_special; i++) {
3130 struct skd_special_context *skspcl = &skdev->skspcl_table[i];
3131
3132 /* If orphaned, reclaim it because it has already been reported
3133 * to the process as an error (it was just waiting for
3134 * a completion that didn't come, and now it will never come)
3135 * If busy, change to a state that will cause it to error
3136 * out in the wait routine and let it do the normal
3137 * reporting and reclaiming
3138 */
3139 if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
3140 if (skspcl->orphaned) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003141 dev_dbg(&skdev->pdev->dev, "orphaned %p\n",
3142 skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003143 skd_release_special(skdev, skspcl);
3144 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003145 dev_dbg(&skdev->pdev->dev, "not orphaned %p\n",
3146 skspcl);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003147 skspcl->req.state = SKD_REQ_STATE_ABORTED;
3148 }
3149 }
3150 }
3151 skdev->skspcl_free_list = skdev->skspcl_table;
3152
3153 for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
3154 skdev->timeout_slot[i] = 0;
3155
3156 skdev->in_flight = 0;
3157}
3158
3159static void skd_isr_msg_from_dev(struct skd_device *skdev)
3160{
3161 u32 mfd;
3162 u32 mtd;
3163 u32 data;
3164
3165 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
3166
Bart Van Asschef98806d2017-08-17 13:12:58 -07003167 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
3168 skdev->last_mtd);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003169
3170 /* ignore any mtd that is an ack for something we didn't send */
3171 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
3172 return;
3173
3174 switch (FIT_MXD_TYPE(mfd)) {
3175 case FIT_MTD_FITFW_INIT:
3176 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
3177
3178 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003179 dev_err(&skdev->pdev->dev, "protocol mismatch\n");
3180 dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
3181 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
3182 dev_err(&skdev->pdev->dev, " please upgrade driver\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003183 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
3184 skd_soft_reset(skdev);
3185 break;
3186 }
3187 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
3188 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3189 skdev->last_mtd = mtd;
3190 break;
3191
3192 case FIT_MTD_GET_CMDQ_DEPTH:
3193 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
3194 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
3195 SKD_N_COMPLETION_ENTRY);
3196 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3197 skdev->last_mtd = mtd;
3198 break;
3199
3200 case FIT_MTD_SET_COMPQ_DEPTH:
3201 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
3202 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
3203 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3204 skdev->last_mtd = mtd;
3205 break;
3206
3207 case FIT_MTD_SET_COMPQ_ADDR:
3208 skd_reset_skcomp(skdev);
3209 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
3210 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3211 skdev->last_mtd = mtd;
3212 break;
3213
3214 case FIT_MTD_CMD_LOG_HOST_ID:
3215 skdev->connect_time_stamp = get_seconds();
3216 data = skdev->connect_time_stamp & 0xFFFF;
3217 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
3218 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3219 skdev->last_mtd = mtd;
3220 break;
3221
3222 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
3223 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
3224 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
3225 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
3226 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3227 skdev->last_mtd = mtd;
3228 break;
3229
3230 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
3231 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
3232 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
3233 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
3234 skdev->last_mtd = mtd;
3235
Bart Van Asschef98806d2017-08-17 13:12:58 -07003236 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
3237 skdev->connect_time_stamp, skdev->drive_jiffies);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003238 break;
3239
3240 case FIT_MTD_ARM_QUEUE:
3241 skdev->last_mtd = 0;
3242 /*
3243 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
3244 */
3245 break;
3246
3247 default:
3248 break;
3249 }
3250}
3251
3252static void skd_disable_interrupts(struct skd_device *skdev)
3253{
3254 u32 sense;
3255
3256 sense = SKD_READL(skdev, FIT_CONTROL);
3257 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
3258 SKD_WRITEL(skdev, sense, FIT_CONTROL);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003259 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003260
3261	/* Note that all 1s are written. A 1-bit means
3262	 * disable, a 0-bit means enable.
3263 */
3264 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
3265}
3266
3267static void skd_enable_interrupts(struct skd_device *skdev)
3268{
3269 u32 val;
3270
3271 /* unmask interrupts first */
3272 val = FIT_ISH_FW_STATE_CHANGE +
3273 FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
3274
3275	/* Note that the complement of the mask is written. A 1-bit means
3276	 * disable, a 0-bit means enable. */
3277 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003278 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003279
3280 val = SKD_READL(skdev, FIT_CONTROL);
3281 val |= FIT_CR_ENABLE_INTERRUPTS;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003282 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003283 SKD_WRITEL(skdev, val, FIT_CONTROL);
3284}
3285
3286/*
3287 *****************************************************************************
3288 * START, STOP, RESTART, QUIESCE, UNQUIESCE
3289 *****************************************************************************
3290 */
3291
3292static void skd_soft_reset(struct skd_device *skdev)
3293{
3294 u32 val;
3295
3296 val = SKD_READL(skdev, FIT_CONTROL);
3297 val |= (FIT_CR_SOFT_RESET);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003298 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003299 SKD_WRITEL(skdev, val, FIT_CONTROL);
3300}
3301
3302static void skd_start_device(struct skd_device *skdev)
3303{
3304 unsigned long flags;
3305 u32 sense;
3306 u32 state;
3307
3308 spin_lock_irqsave(&skdev->lock, flags);
3309
3310 /* ack all ghost interrupts */
3311 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3312
3313 sense = SKD_READL(skdev, FIT_STATUS);
3314
Bart Van Asschef98806d2017-08-17 13:12:58 -07003315 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003316
3317 state = sense & FIT_SR_DRIVE_STATE_MASK;
3318 skdev->drive_state = state;
3319 skdev->last_mtd = 0;
3320
3321 skdev->state = SKD_DRVR_STATE_STARTING;
3322 skdev->timer_countdown = SKD_STARTING_TIMO;
3323
3324 skd_enable_interrupts(skdev);
3325
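	/*
	 * Dispatch on the drive state reported in FIT_STATUS; each state
	 * selects a driver state and timer countdown, or kicks off a
	 * soft reset.
	 */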
3326 switch (skdev->drive_state) {
3327 case FIT_SR_DRIVE_OFFLINE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003328 dev_err(&skdev->pdev->dev, "Drive offline...\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003329 break;
3330
3331 case FIT_SR_DRIVE_FW_BOOTING:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003332 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003333 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
3334 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
3335 break;
3336
3337 case FIT_SR_DRIVE_BUSY_SANITIZE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003338 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003339 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
3340 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3341 break;
3342
3343 case FIT_SR_DRIVE_BUSY_ERASE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003344 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003345 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
3346 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3347 break;
3348
3349 case FIT_SR_DRIVE_INIT:
3350 case FIT_SR_DRIVE_ONLINE:
3351 skd_soft_reset(skdev);
3352 break;
3353
3354 case FIT_SR_DRIVE_BUSY:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003355 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003356 skdev->state = SKD_DRVR_STATE_BUSY;
3357 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
3358 break;
3359
3360 case FIT_SR_DRIVE_SOFT_RESET:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003361 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003362 break;
3363
3364 case FIT_SR_DRIVE_FAULT:
3365		/* The fault state cannot be cleared by a soft reset.
3366		 * A hard reset might work, but it is unclear whether the
3367		 * device supports one. For now, just fault so the system
3368		 * doesn't hang.
3368 */
3369 skd_drive_fault(skdev);
3370		/* start the queue so we can respond to requests with an error */
Bart Van Asschef98806d2017-08-17 13:12:58 -07003371 dev_dbg(&skdev->pdev->dev, "starting queue\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003372 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003373 skdev->gendisk_on = -1;
3374 wake_up_interruptible(&skdev->waitq);
3375 break;
3376
3377 case 0xFF:
3378 /* Most likely the device isn't there or isn't responding
3379 * to the BAR1 addresses. */
3380 skd_drive_disappeared(skdev);
3381		/* start the queue so we can respond to requests with an error */
Bart Van Asschef98806d2017-08-17 13:12:58 -07003382 dev_dbg(&skdev->pdev->dev,
3383 "starting queue to error-out reqs\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003384 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003385 skdev->gendisk_on = -1;
3386 wake_up_interruptible(&skdev->waitq);
3387 break;
3388
3389 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003390 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
3391 skdev->drive_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003392 break;
3393 }
3394
3395 state = SKD_READL(skdev, FIT_CONTROL);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003396 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003397
3398 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003399 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003400
3401 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003402 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003403
3404 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003405 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003406
3407 state = SKD_READL(skdev, FIT_HW_VERSION);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003408 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003409
3410 spin_unlock_irqrestore(&skdev->lock, flags);
3411}
3412
3413static void skd_stop_device(struct skd_device *skdev)
3414{
3415 unsigned long flags;
3416 struct skd_special_context *skspcl = &skdev->internal_skspcl;
3417 u32 dev_state;
3418 int i;
3419
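	/*
	 * Shutdown sequence: flush the write cache with SYNCHRONIZE CACHE
	 * (waiting up to 10 seconds for completion), then disable
	 * interrupts, soft reset the device, and poll for up to 1 second
	 * for it to return to the FIT_SR_DRIVE_INIT state.
	 */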
3420 spin_lock_irqsave(&skdev->lock, flags);
3421
3422 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003423 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003424 goto stop_out;
3425 }
3426
3427 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003428 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003429 goto stop_out;
3430 }
3431
3432 skdev->state = SKD_DRVR_STATE_SYNCING;
3433 skdev->sync_done = 0;
3434
3435 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
3436
3437 spin_unlock_irqrestore(&skdev->lock, flags);
3438
3439 wait_event_interruptible_timeout(skdev->waitq,
3440 (skdev->sync_done), (10 * HZ));
3441
3442 spin_lock_irqsave(&skdev->lock, flags);
3443
3444 switch (skdev->sync_done) {
3445 case 0:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003446 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003447 break;
3448 case 1:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003449 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003450 break;
3451 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003452 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003453 }
3454
3455stop_out:
3456 skdev->state = SKD_DRVR_STATE_STOPPING;
3457 spin_unlock_irqrestore(&skdev->lock, flags);
3458
3459 skd_kill_timer(skdev);
3460
3461 spin_lock_irqsave(&skdev->lock, flags);
3462 skd_disable_interrupts(skdev);
3463
3464 /* ensure all ints on device are cleared */
3465 /* soft reset the device to unload with a clean slate */
3466 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3467 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
3468
3469 spin_unlock_irqrestore(&skdev->lock, flags);
3470
3471 /* poll every 100ms, 1 second timeout */
3472 for (i = 0; i < 10; i++) {
3473 dev_state =
3474 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
3475 if (dev_state == FIT_SR_DRIVE_INIT)
3476 break;
3477 set_current_state(TASK_INTERRUPTIBLE);
3478 schedule_timeout(msecs_to_jiffies(100));
3479 }
3480
3481 if (dev_state != FIT_SR_DRIVE_INIT)
Bart Van Asschef98806d2017-08-17 13:12:58 -07003482 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
3483 dev_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003484}
3485
3486/* assume spinlock is held */
3487static void skd_restart_device(struct skd_device *skdev)
3488{
3489 u32 state;
3490
3491 /* ack all ghost interrupts */
3492 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
3493
3494 state = SKD_READL(skdev, FIT_STATUS);
3495
Bart Van Asschef98806d2017-08-17 13:12:58 -07003496 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003497
3498 state &= FIT_SR_DRIVE_STATE_MASK;
3499 skdev->drive_state = state;
3500 skdev->last_mtd = 0;
3501
3502 skdev->state = SKD_DRVR_STATE_RESTARTING;
3503 skdev->timer_countdown = SKD_RESTARTING_TIMO;
3504
3505 skd_soft_reset(skdev);
3506}
3507
3508/* assume spinlock is held */
3509static int skd_quiesce_dev(struct skd_device *skdev)
3510{
3511 int rc = 0;
3512
3513 switch (skdev->state) {
3514 case SKD_DRVR_STATE_BUSY:
3515 case SKD_DRVR_STATE_BUSY_IMMINENT:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003516 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003517 blk_stop_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003518 break;
3519 case SKD_DRVR_STATE_ONLINE:
3520 case SKD_DRVR_STATE_STOPPING:
3521 case SKD_DRVR_STATE_SYNCING:
3522 case SKD_DRVR_STATE_PAUSING:
3523 case SKD_DRVR_STATE_PAUSED:
3524 case SKD_DRVR_STATE_STARTING:
3525 case SKD_DRVR_STATE_RESTARTING:
3526 case SKD_DRVR_STATE_RESUMING:
3527 default:
3528 rc = -EINVAL;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003529 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
3530 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003531 }
3532 return rc;
3533}
3534
3535/* assume spinlock is held */
3536static int skd_unquiesce_dev(struct skd_device *skdev)
3537{
3538 int prev_driver_state = skdev->state;
3539
3540 skd_log_skdev(skdev, "unquiesce");
3541 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003542 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003543 return 0;
3544 }
3545 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
3546 /*
3547	 * If there has been a state change to anything other than
3548	 * ONLINE, we will rely on a controller state change
3549	 * to come back online and restart the queue.
3550	 * The BUSY state means that the driver is ready to
3551	 * continue normal processing but is waiting for the
3552	 * controller to become available.
3553 */
3554 skdev->state = SKD_DRVR_STATE_BUSY;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003555 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003556 return 0;
3557 }
3558
3559 /*
3560	 * The drive has just come online; the driver is either in startup,
3561	 * paused performing a task, or busy waiting for hardware.
3562 */
3563 switch (skdev->state) {
3564 case SKD_DRVR_STATE_PAUSED:
3565 case SKD_DRVR_STATE_BUSY:
3566 case SKD_DRVR_STATE_BUSY_IMMINENT:
3567 case SKD_DRVR_STATE_BUSY_ERASE:
3568 case SKD_DRVR_STATE_STARTING:
3569 case SKD_DRVR_STATE_RESTARTING:
3570 case SKD_DRVR_STATE_FAULT:
3571 case SKD_DRVR_STATE_IDLE:
3572 case SKD_DRVR_STATE_LOAD:
3573 skdev->state = SKD_DRVR_STATE_ONLINE;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003574 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
3575 skd_skdev_state_to_str(prev_driver_state),
3576 prev_driver_state, skd_skdev_state_to_str(skdev->state),
3577 skdev->state);
3578 dev_dbg(&skdev->pdev->dev,
3579 "**** device ONLINE...starting block queue\n");
3580 dev_dbg(&skdev->pdev->dev, "starting queue\n");
3581 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06003582 blk_start_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003583 skdev->gendisk_on = 1;
3584 wake_up_interruptible(&skdev->waitq);
3585 break;
3586
3587 case SKD_DRVR_STATE_DISAPPEARED:
3588 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07003589 dev_dbg(&skdev->pdev->dev,
3590 "**** driver state %d, not implemented\n",
3591 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003592 return -EBUSY;
3593 }
3594 return 0;
3595}
3596
3597/*
3598 *****************************************************************************
3599 * PCIe MSI/MSI-X INTERRUPT HANDLERS
3600 *****************************************************************************
3601 */
3602
3603static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
3604{
3605 struct skd_device *skdev = skd_host_data;
3606 unsigned long flags;
3607
3608 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003609 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3610 SKD_READL(skdev, FIT_INT_STATUS_HOST));
3611 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
3612 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003613 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
3614 spin_unlock_irqrestore(&skdev->lock, flags);
3615 return IRQ_HANDLED;
3616}
3617
3618static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
3619{
3620 struct skd_device *skdev = skd_host_data;
3621 unsigned long flags;
3622
3623 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003624 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3625 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003626 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
3627 skd_isr_fwstate(skdev);
3628 spin_unlock_irqrestore(&skdev->lock, flags);
3629 return IRQ_HANDLED;
3630}
3631
3632static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
3633{
3634 struct skd_device *skdev = skd_host_data;
3635 unsigned long flags;
3636 int flush_enqueued = 0;
3637 int deferred;
3638
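	/*
	 * Drain up to skd_isr_comp_limit completions here; anything left
	 * over is deferred to the completion worker. Run the request_fn
	 * whenever a flush was enqueued or nothing was deferred so queued
	 * requests keep making progress.
	 */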
3639 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003640 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3641 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003642 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
3643 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
3644 &flush_enqueued);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003645 if (flush_enqueued)
3646 skd_request_fn(skdev->queue);
3647
3648 if (deferred)
3649 schedule_work(&skdev->completion_worker);
3650 else if (!flush_enqueued)
3651 skd_request_fn(skdev->queue);
3652
3653 spin_unlock_irqrestore(&skdev->lock, flags);
3654
3655 return IRQ_HANDLED;
3656}
3657
3658static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
3659{
3660 struct skd_device *skdev = skd_host_data;
3661 unsigned long flags;
3662
3663 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003664 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3665 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003666 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
3667 skd_isr_msg_from_dev(skdev);
3668 spin_unlock_irqrestore(&skdev->lock, flags);
3669 return IRQ_HANDLED;
3670}
3671
3672static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
3673{
3674 struct skd_device *skdev = skd_host_data;
3675 unsigned long flags;
3676
3677 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003678 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
3679 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003680 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
3681 spin_unlock_irqrestore(&skdev->lock, flags);
3682 return IRQ_HANDLED;
3683}
3684
3685/*
3686 *****************************************************************************
3687 * PCIe MSI/MSI-X SETUP
3688 *****************************************************************************
3689 */
3690
3691struct skd_msix_entry {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003692 char isr_name[30];
3693};
3694
3695struct skd_init_msix_entry {
3696 const char *name;
3697 irq_handler_t handler;
3698};
3699
3700#define SKD_MAX_MSIX_COUNT 13
3701#define SKD_MIN_MSIX_COUNT 7
3702#define SKD_BASE_MSIX_IRQ 4
3703
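/*
 * One handler per MSI-X vector: the four DMA vectors and the two
 * reserved slots share the catch-all skd_reserved_isr, while the
 * remaining vectors service state changes, the completion queue,
 * messages from the device, and the four queue-full interrupts.
 */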
3704static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
3705 { "(DMA 0)", skd_reserved_isr },
3706 { "(DMA 1)", skd_reserved_isr },
3707 { "(DMA 2)", skd_reserved_isr },
3708 { "(DMA 3)", skd_reserved_isr },
3709 { "(State Change)", skd_statec_isr },
3710 { "(COMPL_Q)", skd_comp_q },
3711 { "(MSG)", skd_msg_isr },
3712 { "(Reserved)", skd_reserved_isr },
3713 { "(Reserved)", skd_reserved_isr },
3714 { "(Queue Full 0)", skd_qfull_isr },
3715 { "(Queue Full 1)", skd_qfull_isr },
3716 { "(Queue Full 2)", skd_qfull_isr },
3717 { "(Queue Full 3)", skd_qfull_isr },
3718};
3719
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003720static int skd_acquire_msix(struct skd_device *skdev)
3721{
Alexander Gordeeva9df8622014-02-19 09:58:21 +01003722 int i, rc;
Alexander Gordeev46817762014-02-19 09:58:19 +01003723 struct pci_dev *pdev = skdev->pdev;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003724
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003725 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
3726 PCI_IRQ_MSIX);
3727 if (rc < 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003728 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
Arnd Bergmann3bc84922016-11-09 13:55:34 +01003729 goto out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003730 }
Alexander Gordeev46817762014-02-19 09:58:19 +01003731
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003732 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
3733 sizeof(struct skd_msix_entry), GFP_KERNEL);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003734 if (!skdev->msix_entries) {
3735 rc = -ENOMEM;
Bart Van Asschef98806d2017-08-17 13:12:58 -07003736 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
Arnd Bergmann3bc84922016-11-09 13:55:34 +01003737 goto out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003738 }
3739
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003740 /* Enable MSI-X vectors for the base queue */
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003741 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3742 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
3743
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003744 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
3745 "%s%d-msix %s", DRV_NAME, skdev->devno,
3746 msix_entries[i].name);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003747
3748 rc = devm_request_irq(&skdev->pdev->dev,
3749 pci_irq_vector(skdev->pdev, i),
3750 msix_entries[i].handler, 0,
3751 qentry->isr_name, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003752 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003753 dev_err(&skdev->pdev->dev,
3754 "Unable to register(%d) MSI-X handler %d: %s\n",
3755 rc, i, qentry->isr_name);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003756 goto msix_out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003757 }
3758 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003759
Bart Van Asschef98806d2017-08-17 13:12:58 -07003760 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
3761 SKD_MAX_MSIX_COUNT);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003762 return 0;
3763
3764msix_out:
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003765 while (--i >= 0)
3766 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
Arnd Bergmann3bc84922016-11-09 13:55:34 +01003767out:
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003768 kfree(skdev->msix_entries);
3769 skdev->msix_entries = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003770 return rc;
3771}
3772
3773static int skd_acquire_irq(struct skd_device *skdev)
3774{
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003775 struct pci_dev *pdev = skdev->pdev;
3776 unsigned int irq_flag = PCI_IRQ_LEGACY;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003777 int rc;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003778
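	/*
	 * Interrupt setup falls back gracefully: try MSI-X first (when
	 * requested), then a single MSI vector, and finally a legacy
	 * shared INTx line.
	 */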
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003779 if (skd_isr_type == SKD_IRQ_MSIX) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003780 rc = skd_acquire_msix(skdev);
3781 if (!rc)
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003782 return 0;
3783
Bart Van Asschef98806d2017-08-17 13:12:58 -07003784 dev_err(&skdev->pdev->dev,
3785 "failed to enable MSI-X, re-trying with MSI %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003786 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003787
3788 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
3789 skdev->devno);
3790
3791 if (skd_isr_type != SKD_IRQ_LEGACY)
3792 irq_flag |= PCI_IRQ_MSI;
3793 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
3794 if (rc < 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07003795 dev_err(&skdev->pdev->dev,
3796 "failed to allocate the MSI interrupt %d\n", rc);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003797 return rc;
3798 }
3799
3800 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
3801 pdev->msi_enabled ? 0 : IRQF_SHARED,
3802 skdev->isr_name, skdev);
3803 if (rc) {
3804 pci_free_irq_vectors(pdev);
Bart Van Asschef98806d2017-08-17 13:12:58 -07003805 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
3806 rc);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003807 return rc;
3808 }
3809
3810 return 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003811}
3812
3813static void skd_release_irq(struct skd_device *skdev)
3814{
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003815 struct pci_dev *pdev = skdev->pdev;
3816
3817 if (skdev->msix_entries) {
3818 int i;
3819
3820 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
3821 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
3822 skdev);
3823 }
3824
3825 kfree(skdev->msix_entries);
3826 skdev->msix_entries = NULL;
3827 } else {
3828 devm_free_irq(&pdev->dev, pdev->irq, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003829 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08003830
3831 pci_free_irq_vectors(pdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003832}
3833
3834/*
3835 *****************************************************************************
3836 * CONSTRUCT
3837 *****************************************************************************
3838 */
3839
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003840static int skd_cons_skcomp(struct skd_device *skdev)
3841{
3842 int rc = 0;
3843 struct fit_completion_entry_v1 *skcomp;
3844 u32 nbytes;
3845
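	/*
	 * A single coherent DMA allocation holds the completion ring
	 * (SKD_N_COMPLETION_ENTRY entries) immediately followed by the
	 * per-entry error-info table; skerr_table below points into the
	 * second half of the same buffer.
	 */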
3846 nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
3847 nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
3848
Bart Van Asschef98806d2017-08-17 13:12:58 -07003849 dev_dbg(&skdev->pdev->dev,
3850 "comp pci_alloc, total bytes %d entries %d\n",
3851 nbytes, SKD_N_COMPLETION_ENTRY);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003852
Joe Perchesa5bbf612014-08-08 14:24:12 -07003853 skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
3854 &skdev->cq_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003855
3856 if (skcomp == NULL) {
3857 rc = -ENOMEM;
3858 goto err_out;
3859 }
3860
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003861 skdev->skcomp_table = skcomp;
3862 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
3863 sizeof(*skcomp) *
3864 SKD_N_COMPLETION_ENTRY);
3865
3866err_out:
3867 return rc;
3868}
3869
3870static int skd_cons_skmsg(struct skd_device *skdev)
3871{
3872 int rc = 0;
3873 u32 i;
3874
Bart Van Asschef98806d2017-08-17 13:12:58 -07003875 dev_dbg(&skdev->pdev->dev,
3876 "skmsg_table kzalloc, struct %lu, count %u total %lu\n",
3877 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
3878 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003879
3880 skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
3881 *skdev->num_fitmsg_context, GFP_KERNEL);
3882 if (skdev->skmsg_table == NULL) {
3883 rc = -ENOMEM;
3884 goto err_out;
3885 }
3886
3887 for (i = 0; i < skdev->num_fitmsg_context; i++) {
3888 struct skd_fitmsg_context *skmsg;
3889
3890 skmsg = &skdev->skmsg_table[i];
3891
3892 skmsg->id = i + SKD_ID_FIT_MSG;
3893
3894 skmsg->state = SKD_MSG_STATE_IDLE;
3895 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
3896 SKD_N_FITMSG_BYTES + 64,
3897 &skmsg->mb_dma_address);
3898
3899 if (skmsg->msg_buf == NULL) {
3900 rc = -ENOMEM;
3901 goto err_out;
3902 }
3903
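		/*
		 * Round the message buffer (and its DMA address) up to the
		 * FIT_QCMD base-address alignment; the low-order bits of the
		 * FIT queue command are presumably reserved for flags, so
		 * only an aligned address may be written there. The discarded
		 * offset is remembered so the original allocation can be
		 * freed later.
		 */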
3904 skmsg->offset = (u32)((u64)skmsg->msg_buf &
3905 (~FIT_QCMD_BASE_ADDRESS_MASK));
3906 skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
3907 skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
3908 FIT_QCMD_BASE_ADDRESS_MASK);
3909 skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
3910 skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
3911 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
3912
3913 skmsg->next = &skmsg[1];
3914 }
3915
3916 /* Free list is in order starting with the 0th entry. */
3917 skdev->skmsg_table[i - 1].next = NULL;
3918 skdev->skmsg_free_list = skdev->skmsg_table;
3919
3920err_out:
3921 return rc;
3922}
3923
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01003924static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
3925 u32 n_sg,
3926 dma_addr_t *ret_dma_addr)
3927{
3928 struct fit_sg_descriptor *sg_list;
3929 u32 nbytes;
3930
3931 nbytes = sizeof(*sg_list) * n_sg;
3932
3933 sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
3934
3935 if (sg_list != NULL) {
3936 uint64_t dma_address = *ret_dma_addr;
3937 u32 i;
3938
3939 memset(sg_list, 0, nbytes);
3940
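		/*
		 * Chain the descriptors through their bus addresses so the
		 * device can follow next_desc_ptr from one element to the
		 * next; the final descriptor is terminated with a NULL
		 * pointer below.
		 */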
3941 for (i = 0; i < n_sg - 1; i++) {
3942 uint64_t ndp_off;
3943 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
3944
3945 sg_list[i].next_desc_ptr = dma_address + ndp_off;
3946 }
3947 sg_list[i].next_desc_ptr = 0LL;
3948 }
3949
3950 return sg_list;
3951}
3952
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003953static int skd_cons_skreq(struct skd_device *skdev)
3954{
3955 int rc = 0;
3956 u32 i;
3957
Bart Van Asschef98806d2017-08-17 13:12:58 -07003958 dev_dbg(&skdev->pdev->dev,
3959 "skreq_table kzalloc, struct %lu, count %u total %lu\n",
3960 sizeof(struct skd_request_context), skdev->num_req_context,
3961 sizeof(struct skd_request_context) * skdev->num_req_context);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003962
3963 skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
3964 * skdev->num_req_context, GFP_KERNEL);
3965 if (skdev->skreq_table == NULL) {
3966 rc = -ENOMEM;
3967 goto err_out;
3968 }
3969
Bart Van Asschef98806d2017-08-17 13:12:58 -07003970 dev_dbg(&skdev->pdev->dev, "alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
3971 skdev->sgs_per_request, sizeof(struct scatterlist),
3972 skdev->sgs_per_request * sizeof(struct scatterlist));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06003973
3974 for (i = 0; i < skdev->num_req_context; i++) {
3975 struct skd_request_context *skreq;
3976
3977 skreq = &skdev->skreq_table[i];
3978
3979 skreq->id = i + SKD_ID_RW_REQUEST;
3980 skreq->state = SKD_REQ_STATE_IDLE;
3981
3982 skreq->sg = kzalloc(sizeof(struct scatterlist) *
3983 skdev->sgs_per_request, GFP_KERNEL);
3984 if (skreq->sg == NULL) {
3985 rc = -ENOMEM;
3986 goto err_out;
3987 }
3988 sg_init_table(skreq->sg, skdev->sgs_per_request);
3989
3990 skreq->sksg_list = skd_cons_sg_list(skdev,
3991 skdev->sgs_per_request,
3992 &skreq->sksg_dma_address);
3993
3994 if (skreq->sksg_list == NULL) {
3995 rc = -ENOMEM;
3996 goto err_out;
3997 }
3998
3999 skreq->next = &skreq[1];
4000 }
4001
4002 /* Free list is in order starting with the 0th entry. */
4003 skdev->skreq_table[i - 1].next = NULL;
4004 skdev->skreq_free_list = skdev->skreq_table;
4005
4006err_out:
4007 return rc;
4008}
4009
4010static int skd_cons_skspcl(struct skd_device *skdev)
4011{
4012 int rc = 0;
4013 u32 i, nbytes;
4014
Bart Van Asschef98806d2017-08-17 13:12:58 -07004015 dev_dbg(&skdev->pdev->dev,
4016 "skspcl_table kzalloc, struct %lu, count %u total %lu\n",
4017 sizeof(struct skd_special_context), skdev->n_special,
4018 sizeof(struct skd_special_context) * skdev->n_special);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004019
4020 skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
4021 * skdev->n_special, GFP_KERNEL);
4022 if (skdev->skspcl_table == NULL) {
4023 rc = -ENOMEM;
4024 goto err_out;
4025 }
4026
4027 for (i = 0; i < skdev->n_special; i++) {
4028 struct skd_special_context *skspcl;
4029
4030 skspcl = &skdev->skspcl_table[i];
4031
4032 skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
4033 skspcl->req.state = SKD_REQ_STATE_IDLE;
4034
4035 skspcl->req.next = &skspcl[1].req;
4036
4037 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4038
Joe Perchesa5bbf612014-08-08 14:24:12 -07004039 skspcl->msg_buf =
4040 pci_zalloc_consistent(skdev->pdev, nbytes,
4041 &skspcl->mb_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004042 if (skspcl->msg_buf == NULL) {
4043 rc = -ENOMEM;
4044 goto err_out;
4045 }
4046
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004047 skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
4048 SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
4049 if (skspcl->req.sg == NULL) {
4050 rc = -ENOMEM;
4051 goto err_out;
4052 }
4053
4054 skspcl->req.sksg_list = skd_cons_sg_list(skdev,
4055 SKD_N_SG_PER_SPECIAL,
4056 &skspcl->req.
4057 sksg_dma_address);
4058 if (skspcl->req.sksg_list == NULL) {
4059 rc = -ENOMEM;
4060 goto err_out;
4061 }
4062 }
4063
4064 /* Free list is in order starting with the 0th entry. */
4065 skdev->skspcl_table[i - 1].req.next = NULL;
4066 skdev->skspcl_free_list = skdev->skspcl_table;
4067
4068 return rc;
4069
4070err_out:
4071 return rc;
4072}
4073
4074static int skd_cons_sksb(struct skd_device *skdev)
4075{
4076 int rc = 0;
4077 struct skd_special_context *skspcl;
4078 u32 nbytes;
4079
4080 skspcl = &skdev->internal_skspcl;
4081
4082 skspcl->req.id = 0 + SKD_ID_INTERNAL;
4083 skspcl->req.state = SKD_REQ_STATE_IDLE;
4084
4085 nbytes = SKD_N_INTERNAL_BYTES;
4086
Joe Perchesa5bbf612014-08-08 14:24:12 -07004087 skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4088 &skspcl->db_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004089 if (skspcl->data_buf == NULL) {
4090 rc = -ENOMEM;
4091 goto err_out;
4092 }
4093
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004094 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
Joe Perchesa5bbf612014-08-08 14:24:12 -07004095 skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
4096 &skspcl->mb_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004097 if (skspcl->msg_buf == NULL) {
4098 rc = -ENOMEM;
4099 goto err_out;
4100 }
4101
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004102 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
4103 &skspcl->req.sksg_dma_address);
4104 if (skspcl->req.sksg_list == NULL) {
4105 rc = -ENOMEM;
4106 goto err_out;
4107 }
4108
4109 if (!skd_format_internal_skspcl(skdev)) {
4110 rc = -EINVAL;
4111 goto err_out;
4112 }
4113
4114err_out:
4115 return rc;
4116}
4117
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004118static int skd_cons_disk(struct skd_device *skdev)
4119{
4120 int rc = 0;
4121 struct gendisk *disk;
4122 struct request_queue *q;
4123 unsigned long flags;
4124
4125 disk = alloc_disk(SKD_MINORS_PER_DEVICE);
4126 if (!disk) {
4127 rc = -ENOMEM;
4128 goto err_out;
4129 }
4130
4131 skdev->disk = disk;
4132 sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
4133
4134 disk->major = skdev->major;
4135 disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
4136 disk->fops = &skd_blockdev_ops;
4137 disk->private_data = skdev;
4138
Jens Axboefcd37eb2013-11-01 10:14:56 -06004139 q = blk_init_queue(skd_request_fn, &skdev->lock);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004140 if (!q) {
4141 rc = -ENOMEM;
4142 goto err_out;
4143 }
Christoph Hellwig8fc45042017-06-19 09:26:26 +02004144 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004145
4146 skdev->queue = q;
4147 disk->queue = q;
4148 q->queuedata = skdev;
4149
Jens Axboe6975f732016-03-30 10:11:42 -06004150 blk_queue_write_cache(q, true, true);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004151 blk_queue_max_segments(q, skdev->sgs_per_request);
4152 blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
4153
Bart Van Asschea5c5b392017-08-17 13:12:53 -07004154 /* set optimal I/O size to 8KB */
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004155 blk_queue_io_opt(q, 8192);
4156
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004157 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
Mike Snitzerb277da02014-10-04 10:55:32 -06004158 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004159
4160 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07004161 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
Jens Axboe6a5ec652013-11-01 10:38:45 -06004162 blk_stop_queue(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004163 spin_unlock_irqrestore(&skdev->lock, flags);
4164
4165err_out:
4166 return rc;
4167}
4168
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004169#define SKD_N_DEV_TABLE 16u
4170static u32 skd_next_devno;
4171
4172static struct skd_device *skd_construct(struct pci_dev *pdev)
4173{
4174 struct skd_device *skdev;
4175 int blk_major = skd_major;
4176 int rc;
4177
4178 skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
4179
4180 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004181 dev_err(&pdev->dev, "memory alloc failure\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004182 return NULL;
4183 }
4184
4185 skdev->state = SKD_DRVR_STATE_LOAD;
4186 skdev->pdev = pdev;
4187 skdev->devno = skd_next_devno++;
4188 skdev->major = blk_major;
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004189 skdev->dev_max_queue_depth = 0;
4190
4191 skdev->num_req_context = skd_max_queue_depth;
4192 skdev->num_fitmsg_context = skd_max_queue_depth;
4193 skdev->n_special = skd_max_pass_thru;
4194 skdev->cur_max_queue_depth = 1;
4195 skdev->queue_low_water_mark = 1;
4196 skdev->proto_ver = 99;
4197 skdev->sgs_per_request = skd_sgs_per_request;
4198 skdev->dbg_level = skd_dbg_level;
4199
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004200 spin_lock_init(&skdev->lock);
4201
4202 INIT_WORK(&skdev->completion_worker, skd_completion_worker);
4203
Bart Van Asschef98806d2017-08-17 13:12:58 -07004204 dev_dbg(&skdev->pdev->dev, "skcomp\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004205 rc = skd_cons_skcomp(skdev);
4206 if (rc < 0)
4207 goto err_out;
4208
Bart Van Asschef98806d2017-08-17 13:12:58 -07004209 dev_dbg(&skdev->pdev->dev, "skmsg\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004210 rc = skd_cons_skmsg(skdev);
4211 if (rc < 0)
4212 goto err_out;
4213
Bart Van Asschef98806d2017-08-17 13:12:58 -07004214 dev_dbg(&skdev->pdev->dev, "skreq\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004215 rc = skd_cons_skreq(skdev);
4216 if (rc < 0)
4217 goto err_out;
4218
Bart Van Asschef98806d2017-08-17 13:12:58 -07004219 dev_dbg(&skdev->pdev->dev, "skspcl\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004220 rc = skd_cons_skspcl(skdev);
4221 if (rc < 0)
4222 goto err_out;
4223
Bart Van Asschef98806d2017-08-17 13:12:58 -07004224 dev_dbg(&skdev->pdev->dev, "sksb\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004225 rc = skd_cons_sksb(skdev);
4226 if (rc < 0)
4227 goto err_out;
4228
Bart Van Asschef98806d2017-08-17 13:12:58 -07004229 dev_dbg(&skdev->pdev->dev, "disk\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004230 rc = skd_cons_disk(skdev);
4231 if (rc < 0)
4232 goto err_out;
4233
Bart Van Asschef98806d2017-08-17 13:12:58 -07004234 dev_dbg(&skdev->pdev->dev, "VICTORY\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004235 return skdev;
4236
4237err_out:
Bart Van Asschef98806d2017-08-17 13:12:58 -07004238 dev_dbg(&skdev->pdev->dev, "construct failed\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004239 skd_destruct(skdev);
4240 return NULL;
4241}
4242
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004243/*
4244 *****************************************************************************
4245 * DESTRUCT (FREE)
4246 *****************************************************************************
4247 */
4248
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004249static void skd_free_skcomp(struct skd_device *skdev)
4250{
4251 if (skdev->skcomp_table != NULL) {
4252 u32 nbytes;
4253
4254 nbytes = sizeof(skdev->skcomp_table[0]) *
4255 SKD_N_COMPLETION_ENTRY;
4256 pci_free_consistent(skdev->pdev, nbytes,
4257 skdev->skcomp_table, skdev->cq_dma_address);
4258 }
4259
4260 skdev->skcomp_table = NULL;
4261 skdev->cq_dma_address = 0;
4262}
4263
4264static void skd_free_skmsg(struct skd_device *skdev)
4265{
4266 u32 i;
4267
4268 if (skdev->skmsg_table == NULL)
4269 return;
4270
4271 for (i = 0; i < skdev->num_fitmsg_context; i++) {
4272 struct skd_fitmsg_context *skmsg;
4273
4274 skmsg = &skdev->skmsg_table[i];
4275
4276 if (skmsg->msg_buf != NULL) {
4277 skmsg->msg_buf += skmsg->offset;
4278 skmsg->mb_dma_address += skmsg->offset;
4279 pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
4280 skmsg->msg_buf,
4281 skmsg->mb_dma_address);
4282 }
4283 skmsg->msg_buf = NULL;
4284 skmsg->mb_dma_address = 0;
4285 }
4286
4287 kfree(skdev->skmsg_table);
4288 skdev->skmsg_table = NULL;
4289}
4290
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004291static void skd_free_sg_list(struct skd_device *skdev,
4292 struct fit_sg_descriptor *sg_list,
4293 u32 n_sg, dma_addr_t dma_addr)
4294{
4295 if (sg_list != NULL) {
4296 u32 nbytes;
4297
4298 nbytes = sizeof(*sg_list) * n_sg;
4299
4300 pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
4301 }
4302}
4303
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004304static void skd_free_skreq(struct skd_device *skdev)
4305{
4306 u32 i;
4307
4308 if (skdev->skreq_table == NULL)
4309 return;
4310
4311 for (i = 0; i < skdev->num_req_context; i++) {
4312 struct skd_request_context *skreq;
4313
4314 skreq = &skdev->skreq_table[i];
4315
4316 skd_free_sg_list(skdev, skreq->sksg_list,
4317 skdev->sgs_per_request,
4318 skreq->sksg_dma_address);
4319
4320 skreq->sksg_list = NULL;
4321 skreq->sksg_dma_address = 0;
4322
4323 kfree(skreq->sg);
4324 }
4325
4326 kfree(skdev->skreq_table);
4327 skdev->skreq_table = NULL;
4328}
4329
4330static void skd_free_skspcl(struct skd_device *skdev)
4331{
4332 u32 i;
4333 u32 nbytes;
4334
4335 if (skdev->skspcl_table == NULL)
4336 return;
4337
4338 for (i = 0; i < skdev->n_special; i++) {
4339 struct skd_special_context *skspcl;
4340
4341 skspcl = &skdev->skspcl_table[i];
4342
4343 if (skspcl->msg_buf != NULL) {
4344 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4345 pci_free_consistent(skdev->pdev, nbytes,
4346 skspcl->msg_buf,
4347 skspcl->mb_dma_address);
4348 }
4349
4350 skspcl->msg_buf = NULL;
4351 skspcl->mb_dma_address = 0;
4352
4353 skd_free_sg_list(skdev, skspcl->req.sksg_list,
4354 SKD_N_SG_PER_SPECIAL,
4355 skspcl->req.sksg_dma_address);
4356
4357 skspcl->req.sksg_list = NULL;
4358 skspcl->req.sksg_dma_address = 0;
4359
4360 kfree(skspcl->req.sg);
4361 }
4362
4363 kfree(skdev->skspcl_table);
4364 skdev->skspcl_table = NULL;
4365}
4366
4367static void skd_free_sksb(struct skd_device *skdev)
4368{
4369 struct skd_special_context *skspcl;
4370 u32 nbytes;
4371
4372 skspcl = &skdev->internal_skspcl;
4373
4374 if (skspcl->data_buf != NULL) {
4375 nbytes = SKD_N_INTERNAL_BYTES;
4376
4377 pci_free_consistent(skdev->pdev, nbytes,
4378 skspcl->data_buf, skspcl->db_dma_address);
4379 }
4380
4381 skspcl->data_buf = NULL;
4382 skspcl->db_dma_address = 0;
4383
4384 if (skspcl->msg_buf != NULL) {
4385 nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
4386 pci_free_consistent(skdev->pdev, nbytes,
4387 skspcl->msg_buf, skspcl->mb_dma_address);
4388 }
4389
4390 skspcl->msg_buf = NULL;
4391 skspcl->mb_dma_address = 0;
4392
4393 skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
4394 skspcl->req.sksg_dma_address);
4395
4396 skspcl->req.sksg_list = NULL;
4397 skspcl->req.sksg_dma_address = 0;
4398}
4399
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004400static void skd_free_disk(struct skd_device *skdev)
4401{
4402 struct gendisk *disk = skdev->disk;
4403
Bart Van Assche7277cc62017-08-17 13:12:45 -07004404 if (disk && (disk->flags & GENHD_FL_UP))
4405 del_gendisk(disk);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004406
Bart Van Assche7277cc62017-08-17 13:12:45 -07004407 if (skdev->queue) {
4408 blk_cleanup_queue(skdev->queue);
4409 skdev->queue = NULL;
4410 disk->queue = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004411 }
Bart Van Assche7277cc62017-08-17 13:12:45 -07004412
4413 put_disk(disk);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004414 skdev->disk = NULL;
4415}
4416
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004417static void skd_destruct(struct skd_device *skdev)
4418{
4419 if (skdev == NULL)
4420 return;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004421
Bart Van Asschef98806d2017-08-17 13:12:58 -07004422 dev_dbg(&skdev->pdev->dev, "disk\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004423 skd_free_disk(skdev);
4424
Bart Van Asschef98806d2017-08-17 13:12:58 -07004425 dev_dbg(&skdev->pdev->dev, "sksb\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004426 skd_free_sksb(skdev);
4427
Bart Van Asschef98806d2017-08-17 13:12:58 -07004428 dev_dbg(&skdev->pdev->dev, "skspcl\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004429 skd_free_skspcl(skdev);
4430
Bart Van Asschef98806d2017-08-17 13:12:58 -07004431 dev_dbg(&skdev->pdev->dev, "skreq\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004432 skd_free_skreq(skdev);
4433
Bart Van Asschef98806d2017-08-17 13:12:58 -07004434 dev_dbg(&skdev->pdev->dev, "skmsg\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004435 skd_free_skmsg(skdev);
4436
Bart Van Asschef98806d2017-08-17 13:12:58 -07004437 dev_dbg(&skdev->pdev->dev, "skcomp\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004438 skd_free_skcomp(skdev);
4439
Bart Van Asschef98806d2017-08-17 13:12:58 -07004440 dev_dbg(&skdev->pdev->dev, "skdev\n");
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01004441 kfree(skdev);
4442}
4443
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004444/*
4445 *****************************************************************************
4446 * BLOCK DEVICE (BDEV) GLUE
4447 *****************************************************************************
4448 */
4449
4450static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
4451{
4452 struct skd_device *skdev;
4453 u64 capacity;
4454
4455 skdev = bdev->bd_disk->private_data;
4456
Bart Van Asschef98806d2017-08-17 13:12:58 -07004457 dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
4458 bdev->bd_disk->disk_name, current->comm);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004459
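	/*
	 * Report a synthetic 64-head / 255-sector geometry and derive the
	 * cylinder count from the real capacity.
	 */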
4460 if (skdev->read_cap_is_valid) {
4461 capacity = get_capacity(skdev->disk);
4462 geo->heads = 64;
4463 geo->sectors = 255;
4464 geo->cylinders = (capacity) / (255 * 64);
4465
4466 return 0;
4467 }
4468 return -EIO;
4469}
4470
Dan Williams0d52c7562016-06-15 19:44:20 -07004471static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004472{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004473 dev_dbg(&skdev->pdev->dev, "add_disk\n");
Dan Williams0d52c7562016-06-15 19:44:20 -07004474 device_add_disk(parent, skdev->disk);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004475 return 0;
4476}
4477
4478static const struct block_device_operations skd_blockdev_ops = {
4479 .owner = THIS_MODULE,
4480 .ioctl = skd_bdev_ioctl,
4481 .getgeo = skd_bdev_getgeo,
4482};
4483
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004484/*
4485 *****************************************************************************
4486 * PCIe DRIVER GLUE
4487 *****************************************************************************
4488 */
4489
Benoit Taine9baa3c32014-08-08 15:56:03 +02004490static const struct pci_device_id skd_pci_tbl[] = {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004491 { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
4492 PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
4493 { 0 } /* terminate list */
4494};
4495
4496MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
4497
4498static char *skd_pci_info(struct skd_device *skdev, char *str)
4499{
4500 int pcie_reg;
4501
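	/*
	 * Build a human-readable link description from the PCIe link
	 * status, e.g. "PCIe (5.0GT/s 8X)" for a Gen2 x8 link.
	 */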
4502 strcpy(str, "PCIe (");
4503 pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
4504
4505 if (pcie_reg) {
4506
4507 char lwstr[6];
4508 uint16_t pcie_lstat, lspeed, lwidth;
4509
4510 pcie_reg += 0x12;
4511 pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
4512 lspeed = pcie_lstat & (0xF);
4513 lwidth = (pcie_lstat & 0x3F0) >> 4;
4514
4515 if (lspeed == 1)
4516 strcat(str, "2.5GT/s ");
4517 else if (lspeed == 2)
4518 strcat(str, "5.0GT/s ");
4519 else
4520 strcat(str, "<unknown> ");
4521 snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
4522 strcat(str, lwstr);
4523 }
4524 return str;
4525}
4526
4527static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4528{
4529 int i;
4530 int rc = 0;
4531 char pci_str[32];
4532 struct skd_device *skdev;
4533
Bart Van Asschef98806d2017-08-17 13:12:58 -07004534 dev_info(&pdev->dev, "STEC s1120 Driver(%s) version %s-b%s\n",
4535 DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
4536 dev_info(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
4537 pdev->device);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004538
4539 rc = pci_enable_device(pdev);
4540 if (rc)
4541 return rc;
4542 rc = pci_request_regions(pdev, DRV_NAME);
4543 if (rc)
4544 goto err_out;
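	/* Prefer 64-bit DMA; fall back to a 32-bit mask if that fails. */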
4545 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4546 if (!rc) {
4547 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004548 dev_err(&pdev->dev, "consistent DMA mask error %d\n",
4549 rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004550 }
4551 } else {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004552 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004553 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004554 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004555 goto err_out_regions;
4556 }
4557 }
4558
Bartlomiej Zolnierkiewiczb8df6642013-11-05 12:37:02 +01004559 if (!skd_major) {
4560 rc = register_blkdev(0, DRV_NAME);
4561 if (rc < 0)
4562 goto err_out_regions;
4563 BUG_ON(!rc);
4564 skd_major = rc;
4565 }
4566
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004567 skdev = skd_construct(pdev);
Wei Yongjun1762b572013-10-30 13:23:53 +08004568 if (skdev == NULL) {
4569 rc = -ENOMEM;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004570 goto err_out_regions;
Wei Yongjun1762b572013-10-30 13:23:53 +08004571 }
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004572
4573 skd_pci_info(skdev, pci_str);
Bart Van Asschef98806d2017-08-17 13:12:58 -07004574 dev_info(&pdev->dev, "%s 64bit\n", pci_str);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004575
4576 pci_set_master(pdev);
4577 rc = pci_enable_pcie_error_reporting(pdev);
4578 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004579 dev_err(&pdev->dev,
4580 "bad enable of PCIe error reporting rc=%d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004581 skdev->pcie_error_reporting_is_enabled = 0;
4582 } else
4583 skdev->pcie_error_reporting_is_enabled = 1;
4584
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004585 pci_set_drvdata(pdev, skdev);
Bartlomiej Zolnierkiewiczebedd162013-11-05 12:37:05 +01004586
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004587 for (i = 0; i < SKD_MAX_BARS; i++) {
4588 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4589 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4590 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4591 skdev->mem_size[i]);
4592 if (!skdev->mem_map[i]) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004593 dev_err(&pdev->dev,
4594 "Unable to map adapter memory!\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004595 rc = -ENODEV;
4596 goto err_out_iounmap;
4597 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07004598 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
4599 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
4600 skdev->mem_size[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004601 }
4602
4603 rc = skd_acquire_irq(skdev);
4604 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004605 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004606 goto err_out_iounmap;
4607 }
4608
4609 rc = skd_start_timer(skdev);
4610 if (rc)
4611 goto err_out_timer;
4612
4613 init_waitqueue_head(&skdev->waitq);
4614
4615 skd_start_device(skdev);
4616
4617 rc = wait_event_interruptible_timeout(skdev->waitq,
4618 (skdev->gendisk_on),
4619 (SKD_START_WAIT_SECONDS * HZ));
4620 if (skdev->gendisk_on > 0) {
4621 /* device came on-line after reset */
Dan Williams0d52c7562016-06-15 19:44:20 -07004622 skd_bdev_attach(&pdev->dev, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004623 rc = 0;
4624 } else {
4625 /* we timed out, something is wrong with the device,
4626 don't add the disk structure */
Bart Van Asschef98806d2017-08-17 13:12:58 -07004627 dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
4628 rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004629 /* in case of no error; we timeout with ENXIO */
4630 if (!rc)
4631 rc = -ENXIO;
4632 goto err_out_timer;
4633 }
4634
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004635 return rc;
4636
4637err_out_timer:
4638 skd_stop_device(skdev);
4639 skd_release_irq(skdev);
4640
4641err_out_iounmap:
4642 for (i = 0; i < SKD_MAX_BARS; i++)
4643 if (skdev->mem_map[i])
4644 iounmap(skdev->mem_map[i]);
4645
4646 if (skdev->pcie_error_reporting_is_enabled)
4647 pci_disable_pcie_error_reporting(pdev);
4648
4649 skd_destruct(skdev);
4650
4651err_out_regions:
4652 pci_release_regions(pdev);
4653
4654err_out:
4655 pci_disable_device(pdev);
4656 pci_set_drvdata(pdev, NULL);
4657 return rc;
4658}
4659
4660static void skd_pci_remove(struct pci_dev *pdev)
4661{
4662 int i;
4663 struct skd_device *skdev;
4664
4665 skdev = pci_get_drvdata(pdev);
4666 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004667 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004668 return;
4669 }
4670 skd_stop_device(skdev);
4671 skd_release_irq(skdev);
4672
4673 for (i = 0; i < SKD_MAX_BARS; i++)
4674 if (skdev->mem_map[i])
Bart Van Assche4854afe2017-08-17 13:12:59 -07004675 iounmap(skdev->mem_map[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004676
4677 if (skdev->pcie_error_reporting_is_enabled)
4678 pci_disable_pcie_error_reporting(pdev);
4679
4680 skd_destruct(skdev);
4681
4682 pci_release_regions(pdev);
4683 pci_disable_device(pdev);
4684 pci_set_drvdata(pdev, NULL);
4685
4686 return;
4687}
4688
4689static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
4690{
4691 int i;
4692 struct skd_device *skdev;
4693
4694 skdev = pci_get_drvdata(pdev);
4695 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004696 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004697 return -EIO;
4698 }
4699
4700 skd_stop_device(skdev);
4701
4702 skd_release_irq(skdev);
4703
4704 for (i = 0; i < SKD_MAX_BARS; i++)
4705 if (skdev->mem_map[i])
Bart Van Assche4854afe2017-08-17 13:12:59 -07004706 iounmap(skdev->mem_map[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004707
4708 if (skdev->pcie_error_reporting_is_enabled)
4709 pci_disable_pcie_error_reporting(pdev);
4710
4711 pci_release_regions(pdev);
4712 pci_save_state(pdev);
4713 pci_disable_device(pdev);
4714 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4715 return 0;
4716}
4717
4718static int skd_pci_resume(struct pci_dev *pdev)
4719{
4720 int i;
4721 int rc = 0;
4722 struct skd_device *skdev;
4723
4724 skdev = pci_get_drvdata(pdev);
4725 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004726 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004727 return -1;
4728 }
4729
4730 pci_set_power_state(pdev, PCI_D0);
4731 pci_enable_wake(pdev, PCI_D0, 0);
4732 pci_restore_state(pdev);
4733
4734 rc = pci_enable_device(pdev);
4735 if (rc)
4736 return rc;
4737 rc = pci_request_regions(pdev, DRV_NAME);
4738 if (rc)
4739 goto err_out;
4740 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
4741 if (!rc) {
4742 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
4743
Bart Van Asschef98806d2017-08-17 13:12:58 -07004744 dev_err(&pdev->dev, "consistent DMA mask error %d\n",
4745 rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004746 }
4747 } else {
4748 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4749 if (rc) {
4750
Bart Van Asschef98806d2017-08-17 13:12:58 -07004751 dev_err(&pdev->dev, "DMA mask error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004752 goto err_out_regions;
4753 }
4754 }
4755
4756 pci_set_master(pdev);
4757 rc = pci_enable_pcie_error_reporting(pdev);
4758 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004759 dev_err(&pdev->dev,
4760 "bad enable of PCIe error reporting rc=%d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004761 skdev->pcie_error_reporting_is_enabled = 0;
4762 } else
4763 skdev->pcie_error_reporting_is_enabled = 1;
4764
4765 for (i = 0; i < SKD_MAX_BARS; i++) {
4766
4767 skdev->mem_phys[i] = pci_resource_start(pdev, i);
4768 skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
4769 skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
4770 skdev->mem_size[i]);
4771 if (!skdev->mem_map[i]) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004772 dev_err(&pdev->dev, "Unable to map adapter memory!\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004773 rc = -ENODEV;
4774 goto err_out_iounmap;
4775 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07004776 dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
4777 skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
4778 skdev->mem_size[i]);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004779 }
4780 rc = skd_acquire_irq(skdev);
4781 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004782 dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004783 goto err_out_iounmap;
4784 }
4785
4786 rc = skd_start_timer(skdev);
4787 if (rc)
4788 goto err_out_timer;
4789
4790 init_waitqueue_head(&skdev->waitq);
4791
4792 skd_start_device(skdev);
4793
4794 return rc;
4795
4796err_out_timer:
4797 skd_stop_device(skdev);
4798 skd_release_irq(skdev);
4799
4800err_out_iounmap:
4801 for (i = 0; i < SKD_MAX_BARS; i++)
4802 if (skdev->mem_map[i])
4803 iounmap(skdev->mem_map[i]);
4804
4805 if (skdev->pcie_error_reporting_is_enabled)
4806 pci_disable_pcie_error_reporting(pdev);
4807
4808err_out_regions:
4809 pci_release_regions(pdev);
4810
4811err_out:
4812 pci_disable_device(pdev);
4813 return rc;
4814}
4815
4816static void skd_pci_shutdown(struct pci_dev *pdev)
4817{
4818 struct skd_device *skdev;
4819
Bart Van Asschef98806d2017-08-17 13:12:58 -07004820 dev_err(&pdev->dev, "%s called\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004821
4822 skdev = pci_get_drvdata(pdev);
4823 if (!skdev) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07004824 dev_err(&pdev->dev, "no device data for PCI\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004825 return;
4826 }
4827
Bart Van Asschef98806d2017-08-17 13:12:58 -07004828 dev_err(&pdev->dev, "calling stop\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004829 skd_stop_device(skdev);
4830}
4831
4832static struct pci_driver skd_driver = {
4833 .name = DRV_NAME,
4834 .id_table = skd_pci_tbl,
4835 .probe = skd_pci_probe,
4836 .remove = skd_pci_remove,
4837 .suspend = skd_pci_suspend,
4838 .resume = skd_pci_resume,
4839 .shutdown = skd_pci_shutdown,
4840};
4841
4842/*
4843 *****************************************************************************
4844 * LOGGING SUPPORT
4845 *****************************************************************************
4846 */
4847
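/*
 * Decoder helpers: translate the numeric FIT drive states, driver
 * states, FIT message states and request states into short strings.
 * They are used by the skd_log_*() debug routines below; the first two
 * are non-static, so they may also be referenced elsewhere in the
 * driver.
 */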
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004848const char *skd_drive_state_to_str(int state)
4849{
4850 switch (state) {
4851 case FIT_SR_DRIVE_OFFLINE:
4852 return "OFFLINE";
4853 case FIT_SR_DRIVE_INIT:
4854 return "INIT";
4855 case FIT_SR_DRIVE_ONLINE:
4856 return "ONLINE";
4857 case FIT_SR_DRIVE_BUSY:
4858 return "BUSY";
4859 case FIT_SR_DRIVE_FAULT:
4860 return "FAULT";
4861 case FIT_SR_DRIVE_DEGRADED:
4862 return "DEGRADED";
4863 case FIT_SR_PCIE_LINK_DOWN:
4864 return "INK_DOWN";
4865 case FIT_SR_DRIVE_SOFT_RESET:
4866 return "SOFT_RESET";
4867 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
4868 return "NEED_FW";
4869 case FIT_SR_DRIVE_INIT_FAULT:
4870 return "INIT_FAULT";
4871 case FIT_SR_DRIVE_BUSY_SANITIZE:
4872 return "BUSY_SANITIZE";
4873 case FIT_SR_DRIVE_BUSY_ERASE:
4874 return "BUSY_ERASE";
4875 case FIT_SR_DRIVE_FW_BOOTING:
4876 return "FW_BOOTING";
4877 default:
4878 return "???";
4879 }
4880}
4881
4882const char *skd_skdev_state_to_str(enum skd_drvr_state state)
4883{
4884 switch (state) {
4885 case SKD_DRVR_STATE_LOAD:
4886 return "LOAD";
4887 case SKD_DRVR_STATE_IDLE:
4888 return "IDLE";
4889 case SKD_DRVR_STATE_BUSY:
4890 return "BUSY";
4891 case SKD_DRVR_STATE_STARTING:
4892 return "STARTING";
4893 case SKD_DRVR_STATE_ONLINE:
4894 return "ONLINE";
4895 case SKD_DRVR_STATE_PAUSING:
4896 return "PAUSING";
4897 case SKD_DRVR_STATE_PAUSED:
4898 return "PAUSED";
4899 case SKD_DRVR_STATE_DRAINING_TIMEOUT:
4900 return "DRAINING_TIMEOUT";
4901 case SKD_DRVR_STATE_RESTARTING:
4902 return "RESTARTING";
4903 case SKD_DRVR_STATE_RESUMING:
4904 return "RESUMING";
4905 case SKD_DRVR_STATE_STOPPING:
4906 return "STOPPING";
4907 case SKD_DRVR_STATE_SYNCING:
4908 return "SYNCING";
4909 case SKD_DRVR_STATE_FAULT:
4910 return "FAULT";
4911 case SKD_DRVR_STATE_DISAPPEARED:
4912 return "DISAPPEARED";
4913 case SKD_DRVR_STATE_BUSY_ERASE:
4914 return "BUSY_ERASE";
4915 case SKD_DRVR_STATE_BUSY_SANITIZE:
4916 return "BUSY_SANITIZE";
4917 case SKD_DRVR_STATE_BUSY_IMMINENT:
4918 return "BUSY_IMMINENT";
4919 case SKD_DRVR_STATE_WAIT_BOOT:
4920 return "WAIT_BOOT";
4921
4922 default:
4923 return "???";
4924 }
4925}
4926
Rashika Kheriaa26ba7f2013-12-19 15:02:22 +05304927static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004928{
4929 switch (state) {
4930 case SKD_MSG_STATE_IDLE:
4931 return "IDLE";
4932 case SKD_MSG_STATE_BUSY:
4933 return "BUSY";
4934 default:
4935 return "???";
4936 }
4937}
4938
Rashika Kheriaa26ba7f2013-12-19 15:02:22 +05304939static const char *skd_skreq_state_to_str(enum skd_req_state state)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004940{
4941 switch (state) {
4942 case SKD_REQ_STATE_IDLE:
4943 return "IDLE";
4944 case SKD_REQ_STATE_SETUP:
4945 return "SETUP";
4946 case SKD_REQ_STATE_BUSY:
4947 return "BUSY";
4948 case SKD_REQ_STATE_COMPLETED:
4949 return "COMPLETED";
4950 case SKD_REQ_STATE_TIMEOUT:
4951 return "TIMEOUT";
4952 case SKD_REQ_STATE_ABORTED:
4953 return "ABORTED";
4954 default:
4955 return "???";
4956 }
4957}
4958
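/*
 * skd_log_skdev() / skd_log_skmsg() / skd_log_skreq() - dump the state
 * of a device, FIT message or request to the kernel log at dev_dbg()
 * level, typically around error handling and state transitions.
 */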
4959static void skd_log_skdev(struct skd_device *skdev, const char *event)
4960{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004961 dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
4962 dev_dbg(&skdev->pdev->dev, " drive_state=%s(%d) driver_state=%s(%d)\n",
4963 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
4964 skd_skdev_state_to_str(skdev->state), skdev->state);
4965 dev_dbg(&skdev->pdev->dev, " busy=%d limit=%d dev=%d lowat=%d\n",
4966 skdev->in_flight, skdev->cur_max_queue_depth,
4967 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
4968 dev_dbg(&skdev->pdev->dev, " timestamp=0x%x cycle=%d cycle_ix=%d\n",
4969 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004970}
4971
4972static void skd_log_skmsg(struct skd_device *skdev,
4973 struct skd_fitmsg_context *skmsg, const char *event)
4974{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004975 dev_dbg(&skdev->pdev->dev, "skmsg=%p event='%s'\n", skmsg, event);
4976 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x length=%d\n",
4977 skd_skmsg_state_to_str(skmsg->state), skmsg->state, skmsg->id,
4978 skmsg->length);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004979}
4980
4981static void skd_log_skreq(struct skd_device *skdev,
4982 struct skd_request_context *skreq, const char *event)
4983{
Bart Van Asschef98806d2017-08-17 13:12:58 -07004984 dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
4985 dev_dbg(&skdev->pdev->dev, " state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
4986 skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
4987 skreq->fitmsg_id);
4988 dev_dbg(&skdev->pdev->dev, " timo=0x%x sg_dir=%d n_sg=%d\n",
4989 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004990
Jens Axboefcd37eb2013-11-01 10:14:56 -06004991 if (skreq->req != NULL) {
4992 struct request *req = skreq->req;
4993 u32 lba = (u32)blk_rq_pos(req);
4994 u32 count = blk_rq_sectors(req);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06004995
Bart Van Asschef98806d2017-08-17 13:12:58 -07004996 dev_dbg(&skdev->pdev->dev,
4997 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req,
4998 lba, lba, count, count, (int)rq_data_dir(req));
Jens Axboefcd37eb2013-11-01 10:14:56 -06004999 } else
Bart Van Asschef98806d2017-08-17 13:12:58 -07005000 dev_dbg(&skdev->pdev->dev, "req=NULL\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005001}
5002
5003/*
5004 *****************************************************************************
5005 * MODULE GLUE
5006 *****************************************************************************
5007 */
5008
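/*
 * skd_init() - module entry point.
 *
 * Sanity-checks the load-time tunables, clamping anything out of range
 * back to its default, and then registers the PCI driver.  Assuming the
 * usual module_param() declarations earlier in this file expose these
 * variables, a hypothetical invocation would look like:
 *
 *   modprobe skd skd_isr_type=2 skd_max_queue_depth=64
 */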
5009static int __init skd_init(void)
5010{
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005011 pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);
5012
5013 switch (skd_isr_type) {
5014 case SKD_IRQ_LEGACY:
5015 case SKD_IRQ_MSI:
5016 case SKD_IRQ_MSIX:
5017 break;
5018 default:
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005019 pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005020 skd_isr_type, SKD_IRQ_DEFAULT);
5021 skd_isr_type = SKD_IRQ_DEFAULT;
5022 }
5023
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005024 if (skd_max_queue_depth < 1 ||
5025 skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
5026 pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005027 skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
5028 skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
5029 }
5030
5031 if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005032 pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005033 skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
5034 skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
5035 }
5036
5037 if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005038 pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005039 skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
5040 skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
5041 }
5042
5043 if (skd_dbg_level < 0 || skd_dbg_level > 2) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005044 pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005045 skd_dbg_level, 0);
5046 skd_dbg_level = 0;
5047 }
5048
5049 if (skd_isr_comp_limit < 0) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005050 pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005051 skd_isr_comp_limit, 0);
5052 skd_isr_comp_limit = 0;
5053 }
5054
5055 if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
Bartlomiej Zolnierkiewiczfbed1492013-11-05 12:37:01 +01005056 pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005057 skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
5058 skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
5059 }
5060
Bartlomiej Zolnierkiewiczb8df6642013-11-05 12:37:02 +01005061 return pci_register_driver(&skd_driver);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005062}
5063
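/*
 * skd_exit() - module exit point: unregister the PCI driver and, if a
 * block major number was obtained at init time, release it as well.
 */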
5064static void __exit skd_exit(void)
5065{
5066 pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);
5067
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005068 pci_unregister_driver(&skd_driver);
Bartlomiej Zolnierkiewiczb8df6642013-11-05 12:37:02 +01005069
5070 if (skd_major)
5071 unregister_blkdev(skd_major, DRV_NAME);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005072}
5073
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06005074module_init(skd_init);
5075module_exit(skd_exit);