/*
 * Driver for sTec s1120 PCIe SSDs. sTec was acquired in 2013 by HGST and HGST
 * was acquired by Western Digital in 2012.
 *
 * Copyright 2012 sTec, Inc.
 * Copyright (c) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is part of the Linux kernel, and is made available under
 * the terms of the GNU General Public License version 2.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/time.h>
#include <linux/hdreg.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/version.h>
#include <linux/err.h>
#include <linux/aer.h>
#include <linux/wait.h>
#include <linux/stringify.h>
#include <linux/slab_def.h>
#include <scsi/scsi.h>
#include <scsi/sg.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include "skd_s1120.h"

static int skd_dbg_level;
static int skd_isr_comp_limit = 4;

#define SKD_ASSERT(expr) \
	do { \
		if (unlikely(!(expr))) { \
			pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
			       # expr, __FILE__, __func__, __LINE__); \
		} \
	} while (0)

#define DRV_NAME "skd"
#define PFX DRV_NAME ": "

MODULE_LICENSE("GPL");

MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver");

#define PCI_VENDOR_ID_STEC	0x1B39
#define PCI_DEVICE_ID_S1120	0x0001

#define SKD_FUA_NV		(1 << 1)
#define SKD_MINORS_PER_DEVICE	16

#define SKD_MAX_QUEUE_DEPTH	200u

#define SKD_PAUSE_TIMEOUT	(5 * 1000)

#define SKD_N_FITMSG_BYTES	(512u)
#define SKD_MAX_REQ_PER_MSG	14

#define SKD_N_SPECIAL_FITMSG_BYTES	(128u)

/* SG elements are 32 bytes, so we can make this 4096 and still be under the
 * 128KB limit. That allows 4096*4K = 16M xfer size
 */
#define SKD_N_SG_PER_REQ_DEFAULT 256u
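/*
 * Worked example (editor's note): at the 256-descriptor default the SG
 * list per request occupies 256 * 32 = 8 KiB, and by the same
 * 4 KiB-per-element assumption used in the comment above a request tops
 * out at 256 * 4 KiB = 1 MiB; only at the 4096 maximum does the list
 * reach the 128 KiB limit and the transfer size reach 16 MiB.
 */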

#define SKD_N_COMPLETION_ENTRY	256u
#define SKD_N_READ_CAP_BYTES	(8u)

#define SKD_N_INTERNAL_BYTES	(512u)

#define SKD_SKCOMP_SIZE \
	((sizeof(struct fit_completion_entry_v1) + \
	  sizeof(struct fit_comp_error_info)) * SKD_N_COMPLETION_ENTRY)

/* 5 bits of uniquifier, 0xF800 */
#define SKD_ID_INCR		(0x400)
#define SKD_ID_TABLE_MASK	(3u << 8u)
#define SKD_ID_RW_REQUEST	(0u << 8u)
#define SKD_ID_INTERNAL		(1u << 8u)
#define SKD_ID_FIT_MSG		(3u << 8u)
#define SKD_ID_SLOT_MASK	0x00FFu
#define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
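/*
 * Decoding example (editor's note): for id == 0x0912,
 * id & SKD_ID_TABLE_MASK == 0x0100 (SKD_ID_INTERNAL) and
 * id & SKD_ID_SLOT_MASK == 0x12 (slot 18); the bits above
 * SKD_ID_SLOT_AND_TABLE_MASK hold the uniquifier, which the driver bumps
 * by SKD_ID_INCR each time a context is reused.
 */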

#define SKD_N_MAX_SECTORS	2048u

#define SKD_MAX_RETRIES		2u

#define SKD_TIMER_SECONDS(seconds) (seconds)
#define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))

#define INQ_STD_NBYTES 36

enum skd_drvr_state {
	SKD_DRVR_STATE_LOAD,
	SKD_DRVR_STATE_IDLE,
	SKD_DRVR_STATE_BUSY,
	SKD_DRVR_STATE_STARTING,
	SKD_DRVR_STATE_ONLINE,
	SKD_DRVR_STATE_PAUSING,
	SKD_DRVR_STATE_PAUSED,
	SKD_DRVR_STATE_RESTARTING,
	SKD_DRVR_STATE_RESUMING,
	SKD_DRVR_STATE_STOPPING,
	SKD_DRVR_STATE_FAULT,
	SKD_DRVR_STATE_DISAPPEARED,
	SKD_DRVR_STATE_PROTOCOL_MISMATCH,
	SKD_DRVR_STATE_BUSY_ERASE,
	SKD_DRVR_STATE_BUSY_SANITIZE,
	SKD_DRVR_STATE_BUSY_IMMINENT,
	SKD_DRVR_STATE_WAIT_BOOT,
	SKD_DRVR_STATE_SYNCING,
};

#define SKD_WAIT_BOOT_TIMO	SKD_TIMER_SECONDS(90u)
#define SKD_STARTING_TIMO	SKD_TIMER_SECONDS(8u)
#define SKD_RESTARTING_TIMO	SKD_TIMER_MINUTES(4u)
#define SKD_BUSY_TIMO		SKD_TIMER_MINUTES(20u)
#define SKD_STARTED_BUSY_TIMO	SKD_TIMER_SECONDS(60u)
#define SKD_START_WAIT_SECONDS	90u

enum skd_req_state {
	SKD_REQ_STATE_IDLE,
	SKD_REQ_STATE_SETUP,
	SKD_REQ_STATE_BUSY,
	SKD_REQ_STATE_COMPLETED,
	SKD_REQ_STATE_TIMEOUT,
};

enum skd_check_status_action {
	SKD_CHECK_STATUS_REPORT_GOOD,
	SKD_CHECK_STATUS_REPORT_SMART_ALERT,
	SKD_CHECK_STATUS_REQUEUE_REQUEST,
	SKD_CHECK_STATUS_REPORT_ERROR,
	SKD_CHECK_STATUS_BUSY_IMMINENT,
};

struct skd_msg_buf {
	struct fit_msg_hdr	fmh;
	struct skd_scsi_request	scsi[SKD_MAX_REQ_PER_MSG];
};

struct skd_fitmsg_context {
	u32 id;

	u32 length;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

struct skd_request_context {
	enum skd_req_state state;

	u16 id;
	u32 fitmsg_id;

	u8 flush_cmd;

	enum dma_data_direction data_dir;
	struct scatterlist *sg;
	u32 n_sg;
	u32 sg_byte_count;

	struct fit_sg_descriptor *sksg_list;
	dma_addr_t sksg_dma_address;

	struct fit_completion_entry_v1 completion;

	struct fit_comp_error_info err_info;

	blk_status_t status;
};

struct skd_special_context {
	struct skd_request_context req;

	void *data_buf;
	dma_addr_t db_dma_address;

	struct skd_msg_buf *msg_buf;
	dma_addr_t mb_dma_address;
};

typedef enum skd_irq_type {
	SKD_IRQ_LEGACY,
	SKD_IRQ_MSI,
	SKD_IRQ_MSIX
} skd_irq_type_t;

#define SKD_MAX_BARS	2

struct skd_device {
	void __iomem *mem_map[SKD_MAX_BARS];
	resource_size_t mem_phys[SKD_MAX_BARS];
	u32 mem_size[SKD_MAX_BARS];

	struct skd_msix_entry *msix_entries;

	struct pci_dev *pdev;
	int pcie_error_reporting_is_enabled;

	spinlock_t lock;
	struct gendisk *disk;
	struct blk_mq_tag_set tag_set;
	struct request_queue *queue;
	struct skd_fitmsg_context *skmsg;
	struct device *class_dev;
	int gendisk_on;
	int sync_done;

	u32 devno;
	u32 major;
	char isr_name[30];

	enum skd_drvr_state state;
	u32 drive_state;

	u32 cur_max_queue_depth;
	u32 queue_low_water_mark;
	u32 dev_max_queue_depth;

	u32 num_fitmsg_context;
	u32 num_req_context;

	struct skd_fitmsg_context *skmsg_table;

	struct skd_special_context internal_skspcl;
	u32 read_cap_blocksize;
	u32 read_cap_last_lba;
	int read_cap_is_valid;
	int inquiry_is_valid;
	u8 inq_serial_num[13]; /* 12 chars plus null term */

	u8 skcomp_cycle;
	u32 skcomp_ix;
	struct kmem_cache *msgbuf_cache;
	struct kmem_cache *sglist_cache;
	struct kmem_cache *databuf_cache;
	struct fit_completion_entry_v1 *skcomp_table;
	struct fit_comp_error_info *skerr_table;
	dma_addr_t cq_dma_address;

	wait_queue_head_t waitq;

	struct timer_list timer;
	u32 timer_countdown;
	u32 timer_substate;

	int sgs_per_request;
	u32 last_mtd;

	u32 proto_ver;

	int dbg_level;
	u32 connect_time_stamp;
	int connect_retries;
#define SKD_MAX_CONNECT_RETRIES 16
	u32 drive_jiffies;

	u32 timo_slot;

	struct work_struct start_queue;
	struct work_struct completion_worker;
};

#define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
#define SKD_READL(DEV, OFF)	  skd_reg_read32(DEV, OFF)
#define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)

static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
{
	u32 val = readl(skdev->mem_map[1] + offset);

	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
	return val;
}

static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
				   u32 offset)
{
	writel(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %x\n", offset, val);
}

static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
				   u32 offset)
{
	writeq(val, skdev->mem_map[1] + offset);
	if (unlikely(skdev->dbg_level >= 2))
		dev_dbg(&skdev->pdev->dev, "offset %x = %016llx\n", offset,
			val);
}
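/*
 * Usage note (editor's note): these accessors always target BAR 1
 * (mem_map[1]). Elsewhere in this file the driver polls the drive state
 * with SKD_READL(skdev, FIT_STATUS) and rings the command doorbell with
 * SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND).
 */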
311
312
Bart Van Assche744353b2017-08-23 10:56:33 -0700313#define SKD_IRQ_DEFAULT SKD_IRQ_MSIX
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600314static int skd_isr_type = SKD_IRQ_DEFAULT;
315
316module_param(skd_isr_type, int, 0444);
MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
		 " (0==legacy, 1==MSI, 2==MSI-X, default==2)");

#define SKD_MAX_REQ_PER_MSG_DEFAULT 1
static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;

module_param(skd_max_req_per_msg, int, 0444);
MODULE_PARM_DESC(skd_max_req_per_msg,
		 "Maximum SCSI requests packed in a single message."
		 " (1-" __stringify(SKD_MAX_REQ_PER_MSG) ", default==1)");

#define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
#define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;

module_param(skd_max_queue_depth, int, 0444);
MODULE_PARM_DESC(skd_max_queue_depth,
		 "Maximum SCSI requests issued to s1120."
		 " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");

static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
module_param(skd_sgs_per_request, int, 0444);
MODULE_PARM_DESC(skd_sgs_per_request,
		 "Maximum SG elements per block request."
		 " (1-4096, default==256)");

static int skd_max_pass_thru = 1;
module_param(skd_max_pass_thru, int, 0444);
MODULE_PARM_DESC(skd_max_pass_thru,
		 "Maximum SCSI pass-thru at a time. IGNORED");

module_param(skd_dbg_level, int, 0444);
MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");

module_param(skd_isr_comp_limit, int, 0444);
MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
353
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600354/* Major device number dynamically assigned. */
355static u32 skd_major;
356
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600357static void skd_destruct(struct skd_device *skdev);
358static const struct block_device_operations skd_blockdev_ops;
359static void skd_send_fitmsg(struct skd_device *skdev,
360 struct skd_fitmsg_context *skmsg);
361static void skd_send_special_fitmsg(struct skd_device *skdev,
362 struct skd_special_context *skspcl);
Christoph Hellwig2a842ac2017-06-03 09:38:04 +0200363static bool skd_preop_sg_list(struct skd_device *skdev,
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600364 struct skd_request_context *skreq);
365static void skd_postop_sg_list(struct skd_device *skdev,
366 struct skd_request_context *skreq);
367
368static void skd_restart_device(struct skd_device *skdev);
369static int skd_quiesce_dev(struct skd_device *skdev);
370static int skd_unquiesce_dev(struct skd_device *skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600371static void skd_disable_interrupts(struct skd_device *skdev);
372static void skd_isr_fwstate(struct skd_device *skdev);
Bart Van Assche79ce12a2017-08-17 13:13:14 -0700373static void skd_recover_requests(struct skd_device *skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600374static void skd_soft_reset(struct skd_device *skdev);
375
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600376const char *skd_drive_state_to_str(int state);
377const char *skd_skdev_state_to_str(enum skd_drvr_state state);
378static void skd_log_skdev(struct skd_device *skdev, const char *event);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600379static void skd_log_skreq(struct skd_device *skdev,
380 struct skd_request_context *skreq, const char *event);
381
Akhil Bhansalie67f86b2013-10-15 14:19:07 -0600382/*
383 *****************************************************************************
384 * READ/WRITE REQUESTS
385 *****************************************************************************
386 */
static void skd_inc_in_flight(struct request *rq, void *data, bool reserved)
{
	int *count = data;

	(*count)++;
}

static int skd_in_flight(struct skd_device *skdev)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_inc_in_flight, &count);

	return count;
}
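/*
 * Editor's note: skd_in_flight() recounts busy tags on every call via
 * blk_mq_tagset_busy_iter() instead of maintaining a counter; in the
 * code shown here it is only used in dev_dbg() paths.
 */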

static void
skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
		int data_dir, unsigned lba,
		unsigned count)
{
	if (data_dir == READ)
		scsi_req->cdb[0] = READ_10;
	else
		scsi_req->cdb[0] = WRITE_10;

	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
	scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
	scsi_req->cdb[4] = (lba & 0xff00) >> 8;
	scsi_req->cdb[5] = (lba & 0xff);
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = (count & 0xff00) >> 8;
	scsi_req->cdb[8] = count & 0xff;
	scsi_req->cdb[9] = 0;
}
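/*
 * Example (editor's note): a READ of 8 sectors at LBA 0x12345678 yields
 * the CDB 28 00 12 34 56 78 00 00 08 00 -- a standard READ_10 with the
 * LBA big-endian in bytes 2..5 and the transfer length big-endian in
 * bytes 7..8, exactly as assembled above.
 */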

static void
skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
			    struct skd_request_context *skreq)
{
	skreq->flush_cmd = 1;

	scsi_req->cdb[0] = SYNCHRONIZE_CACHE;
	scsi_req->cdb[1] = 0;
	scsi_req->cdb[2] = 0;
	scsi_req->cdb[3] = 0;
	scsi_req->cdb[4] = 0;
	scsi_req->cdb[5] = 0;
	scsi_req->cdb[6] = 0;
	scsi_req->cdb[7] = 0;
	scsi_req->cdb[8] = 0;
	scsi_req->cdb[9] = 0;
}

/*
 * Return true if and only if all pending requests should be failed.
 */
static bool skd_fail_all(struct request_queue *q)
{
	struct skd_device *skdev = q->queuedata;

	SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);

	skd_log_skdev(skdev, "req_not_online");
	switch (skdev->state) {
	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
	case SKD_DRVR_STATE_STARTING:
	case SKD_DRVR_STATE_RESTARTING:
	case SKD_DRVR_STATE_WAIT_BOOT:
	/* In case of starting, we haven't started the queue,
	 * so we can't get here... but requests are
	 * possibly hanging out waiting for us because we
	 * reported the dev/skd0 already. They'll wait
	 * forever if connect doesn't complete.
	 * What to do??? delay dev/skd0 ??
	 */
	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		return false;

	case SKD_DRVR_STATE_BUSY_SANITIZE:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		return true;
	}
}

static blk_status_t skd_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *mqd)
{
	struct request *const req = mqd->rq;
	struct request_queue *const q = req->q;
	struct skd_device *skdev = q->queuedata;
	struct skd_fitmsg_context *skmsg;
	struct fit_msg_hdr *fmh;
	const u32 tag = blk_mq_unique_tag(req);
	struct skd_request_context *const skreq = blk_mq_rq_to_pdu(req);
	struct skd_scsi_request *scsi_req;
	unsigned long flags = 0;
	const u32 lba = blk_rq_pos(req);
	const u32 count = blk_rq_sectors(req);
	const int data_dir = rq_data_dir(req);

	if (unlikely(skdev->state != SKD_DRVR_STATE_ONLINE))
		return skd_fail_all(q) ? BLK_STS_IOERR : BLK_STS_RESOURCE;

	blk_mq_start_request(req);

	WARN_ONCE(tag >= skd_max_queue_depth, "%#x > %#x (nr_requests = %lu)\n",
		  tag, skd_max_queue_depth, q->nr_requests);

	SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);

	dev_dbg(&skdev->pdev->dev,
		"new req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba,
		lba, count, count, data_dir);

	skreq->id = tag + SKD_ID_RW_REQUEST;
	skreq->flush_cmd = 0;
	skreq->n_sg = 0;
	skreq->sg_byte_count = 0;

	skreq->fitmsg_id = 0;

	skreq->data_dir = data_dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	if (req->bio && !skd_preop_sg_list(skdev, skreq)) {
		dev_dbg(&skdev->pdev->dev, "error Out\n");
		skreq->status = BLK_STS_RESOURCE;
		blk_mq_complete_request(req);
		return BLK_STS_OK;
	}

	dma_sync_single_for_device(&skdev->pdev->dev, skreq->sksg_dma_address,
				   skreq->n_sg *
				   sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);

	/* Either a FIT msg is in progress or we have to start one. */
	if (skd_max_req_per_msg == 1) {
		skmsg = NULL;
	} else {
		spin_lock_irqsave(&skdev->lock, flags);
		skmsg = skdev->skmsg;
	}
	if (!skmsg) {
		skmsg = &skdev->skmsg_table[tag];
		skdev->skmsg = skmsg;

		/* Initialize the FIT msg header */
		fmh = &skmsg->msg_buf->fmh;
		memset(fmh, 0, sizeof(*fmh));
		fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
		skmsg->length = sizeof(*fmh);
	} else {
		fmh = &skmsg->msg_buf->fmh;
	}

	skreq->fitmsg_id = skmsg->id;

	scsi_req = &skmsg->msg_buf->scsi[fmh->num_protocol_cmds_coalesced];
	memset(scsi_req, 0, sizeof(*scsi_req));

	scsi_req->hdr.tag = skreq->id;
	scsi_req->hdr.sg_list_dma_address =
		cpu_to_be64(skreq->sksg_dma_address);

	if (req_op(req) == REQ_OP_FLUSH) {
		skd_prep_zerosize_flush_cdb(scsi_req, skreq);
		SKD_ASSERT(skreq->flush_cmd == 1);
	} else {
		skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
	}

	if (req->cmd_flags & REQ_FUA)
		scsi_req->cdb[1] |= SKD_FUA_NV;

	scsi_req->hdr.sg_list_len_bytes = cpu_to_be32(skreq->sg_byte_count);

	/* Complete resource allocations. */
	skreq->state = SKD_REQ_STATE_BUSY;

	skmsg->length += sizeof(struct skd_scsi_request);
	fmh->num_protocol_cmds_coalesced++;

	dev_dbg(&skdev->pdev->dev, "req=0x%x busy=%d\n", skreq->id,
		skd_in_flight(skdev));

	/*
	 * If the FIT msg buffer is full send it.
	 */
	if (skd_max_req_per_msg == 1) {
		skd_send_fitmsg(skdev, skmsg);
	} else {
		if (mqd->last ||
		    fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
			skd_send_fitmsg(skdev, skmsg);
			skdev->skmsg = NULL;
		}
		spin_unlock_irqrestore(&skdev->lock, flags);
	}

	return BLK_STS_OK;
}

static enum blk_eh_timer_return skd_timed_out(struct request *req,
					      bool reserved)
{
	struct skd_device *skdev = req->q->queuedata;

	dev_err(&skdev->pdev->dev, "request with tag %#x timed out\n",
		blk_mq_unique_tag(req));

	return BLK_EH_RESET_TIMER;
}

static void skd_complete_rq(struct request *req)
{
	struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, skreq->status);
}

static bool skd_preop_sg_list(struct skd_device *skdev,
			      struct skd_request_context *skreq)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	struct scatterlist *sgl = &skreq->sg[0], *sg;
	int n_sg;
	int i;

	skreq->sg_byte_count = 0;

	WARN_ON_ONCE(skreq->data_dir != DMA_TO_DEVICE &&
		     skreq->data_dir != DMA_FROM_DEVICE);

	n_sg = blk_rq_map_sg(skdev->queue, req, sgl);
	if (n_sg <= 0)
		return false;

	/*
	 * Map scatterlist to PCI bus addresses.
	 * Note PCI might change the number of entries.
	 */
	n_sg = pci_map_sg(skdev->pdev, sgl, n_sg, skreq->data_dir);
	if (n_sg <= 0)
		return false;

	SKD_ASSERT(n_sg <= skdev->sgs_per_request);

	skreq->n_sg = n_sg;

	for_each_sg(sgl, sg, n_sg, i) {
		struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
		u32 cnt = sg_dma_len(sg);
		uint64_t dma_addr = sg_dma_address(sg);

		sgd->control = FIT_SGD_CONTROL_NOT_LAST;
		sgd->byte_count = cnt;
		skreq->sg_byte_count += cnt;
		sgd->host_side_addr = dma_addr;
		sgd->dev_side_addr = 0;
	}

	skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
	skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;

	if (unlikely(skdev->dbg_level > 1)) {
		dev_dbg(&skdev->pdev->dev,
			"skreq=%x sksg_list=%p sksg_dma=%llx\n",
			skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
		for (i = 0; i < n_sg; i++) {
			struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	return true;
}

static void skd_postop_sg_list(struct skd_device *skdev,
			       struct skd_request_context *skreq)
{
	/*
	 * restore the next ptr for next IO request so we
	 * don't have to set it every time.
	 */
	skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
		skreq->sksg_dma_address +
		((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
	pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, skreq->data_dir);
}

/*
 *****************************************************************************
 * TIMER
 *****************************************************************************
 */

static void skd_timer_tick_not_online(struct skd_device *skdev);

static void skd_start_queue(struct work_struct *work)
{
	struct skd_device *skdev = container_of(work, typeof(*skdev),
						start_queue);

	/*
	 * Although it is safe to call blk_start_queue() from interrupt
	 * context, blk_mq_start_hw_queues() must not be called from
	 * interrupt context.
	 */
	blk_mq_start_hw_queues(skdev->queue);
}

static void skd_timer_tick(ulong arg)
{
	struct skd_device *skdev = (struct skd_device *)arg;
	unsigned long reqflags;
	u32 state;

	if (skdev->state == SKD_DRVR_STATE_FAULT)
		/* The driver has declared fault, and we want it to
		 * stay that way until driver is reloaded.
		 */
		return;

	spin_lock_irqsave(&skdev->lock, reqflags);

	state = SKD_READL(skdev, FIT_STATUS);
	state &= FIT_SR_DRIVE_STATE_MASK;
	if (state != skdev->drive_state)
		skd_isr_fwstate(skdev);

	if (skdev->state != SKD_DRVR_STATE_ONLINE)
		skd_timer_tick_not_online(skdev);

	mod_timer(&skdev->timer, (jiffies + HZ));

	spin_unlock_irqrestore(&skdev->lock, reqflags);
}

static void skd_timer_tick_not_online(struct skd_device *skdev)
{
	switch (skdev->state) {
	case SKD_DRVR_STATE_IDLE:
	case SKD_DRVR_STATE_LOAD:
		break;
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		dev_dbg(&skdev->pdev->dev,
			"drive busy sanitize[%x], driver[%x]\n",
			skdev->drive_state, skdev->state);
		/* If we've been in sanitize for 3 seconds, we figure we're not
		 * going to get any more completions, so recover requests now
		 */
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		skd_recover_requests(skdev);
		break;

	case SKD_DRVR_STATE_BUSY:
	case SKD_DRVR_STATE_BUSY_IMMINENT:
	case SKD_DRVR_STATE_BUSY_ERASE:
		dev_dbg(&skdev->pdev->dev, "busy[%x], countdown=%d\n",
			skdev->state, skdev->timer_countdown);
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		dev_dbg(&skdev->pdev->dev,
			"busy[%x], timedout=%d, restarting device.",
			skdev->state, skdev->timer_countdown);
		skd_restart_device(skdev);
		break;

	case SKD_DRVR_STATE_WAIT_BOOT:
	case SKD_DRVR_STATE_STARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;

		dev_err(&skdev->pdev->dev, "DriveFault Connect Timeout (%x)\n",
			skdev->drive_state);

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_ONLINE:
		/* shouldn't get here. */
		break;

	case SKD_DRVR_STATE_PAUSING:
	case SKD_DRVR_STATE_PAUSED:
		break;

	case SKD_DRVR_STATE_RESTARTING:
		if (skdev->timer_countdown > 0) {
			skdev->timer_countdown--;
			return;
		}
		/* For now, we fault the drive. Could attempt resets to
		 * recover at some point. */
		skdev->state = SKD_DRVR_STATE_FAULT;
		dev_err(&skdev->pdev->dev,
			"DriveFault Reconnect Timeout (%x)\n",
			skdev->drive_state);

		/*
		 * Recovering does two things:
		 * 1. completes IO with error
		 * 2. reclaims dma resources
		 * When is it safe to recover requests?
		 * - if the drive state is faulted
		 * - if the state is still soft reset after out timeout
		 * - if the drive registers are dead (state = FF)
		 * If it is "unsafe", we still need to recover, so we will
		 * disable pci bus mastering and disable our interrupts.
		 */

		if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
		    (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
		    (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
			/* It never came out of soft reset. Try to
			 * recover the requests and then let them
			 * fail. This is to mitigate hung processes. */
			skd_recover_requests(skdev);
		else {
			dev_err(&skdev->pdev->dev, "Disable BusMaster (%x)\n",
				skdev->drive_state);
			pci_disable_device(skdev->pdev);
			skd_disable_interrupts(skdev);
			skd_recover_requests(skdev);
		}

		/* start the queue so we can respond with error to requests */
		/* wakeup anyone waiting for startup complete */
		schedule_work(&skdev->start_queue);
		skdev->gendisk_on = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	case SKD_DRVR_STATE_RESUMING:
	case SKD_DRVR_STATE_STOPPING:
	case SKD_DRVR_STATE_SYNCING:
	case SKD_DRVR_STATE_FAULT:
	case SKD_DRVR_STATE_DISAPPEARED:
	default:
		break;
	}
}

static int skd_start_timer(struct skd_device *skdev)
{
	int rc;

	setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);

	rc = mod_timer(&skdev->timer, (jiffies + HZ));
	if (rc)
		dev_err(&skdev->pdev->dev, "failed to start timer %d\n", rc);
	return rc;
}

static void skd_kill_timer(struct skd_device *skdev)
{
	del_timer_sync(&skdev->timer);
}

/*
 *****************************************************************************
 * INTERNAL REQUESTS -- generated by driver itself
 *****************************************************************************
 */

static int skd_format_internal_skspcl(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct fit_msg_hdr *fmh;
	uint64_t dma_address;
	struct skd_scsi_request *scsi;

	fmh = &skspcl->msg_buf->fmh;
	fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
	fmh->num_protocol_cmds_coalesced = 1;

	scsi = &skspcl->msg_buf->scsi[0];
	memset(scsi, 0, sizeof(*scsi));
	dma_address = skspcl->req.sksg_dma_address;
	scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
	skspcl->req.n_sg = 1;
	sgd->control = FIT_SGD_CONTROL_LAST;
	sgd->byte_count = 0;
	sgd->host_side_addr = skspcl->db_dma_address;
	sgd->dev_side_addr = 0;
	sgd->next_desc_ptr = 0LL;

	return 1;
}

#define WR_BUF_SIZE SKD_N_INTERNAL_BYTES

static void skd_send_internal_skspcl(struct skd_device *skdev,
				     struct skd_special_context *skspcl,
				     u8 opcode)
{
	struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
	struct skd_scsi_request *scsi;
	unsigned char *buf = skspcl->data_buf;
	int i;

	if (skspcl->req.state != SKD_REQ_STATE_IDLE)
		/*
		 * A refresh is already in progress.
		 * Just wait for it to finish.
		 */
		return;

	SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
	skspcl->req.state = SKD_REQ_STATE_BUSY;
	skspcl->req.id += SKD_ID_INCR;

	scsi = &skspcl->msg_buf->scsi[0];
	scsi->hdr.tag = skspcl->req.id;

	memset(scsi->cdb, 0, sizeof(scsi->cdb));

	switch (opcode) {
	case TEST_UNIT_READY:
		scsi->cdb[0] = TEST_UNIT_READY;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case READ_CAPACITY:
		scsi->cdb[0] = READ_CAPACITY;
		sgd->byte_count = SKD_N_READ_CAP_BYTES;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case INQUIRY:
		scsi->cdb[0] = INQUIRY;
		scsi->cdb[1] = 0x01;	/* evpd */
		scsi->cdb[2] = 0x80;	/* serial number page */
		scsi->cdb[4] = 0x10;
		sgd->byte_count = 16;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		break;

	case SYNCHRONIZE_CACHE:
		scsi->cdb[0] = SYNCHRONIZE_CACHE;
		sgd->byte_count = 0;
		scsi->hdr.sg_list_len_bytes = 0;
		break;

	case WRITE_BUFFER:
		scsi->cdb[0] = WRITE_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		/* fill incrementing byte pattern */
		for (i = 0; i < sgd->byte_count; i++)
			buf[i] = i & 0xFF;
		break;

	case READ_BUFFER:
		scsi->cdb[0] = READ_BUFFER;
		scsi->cdb[1] = 0x02;
		scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
		scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
		sgd->byte_count = WR_BUF_SIZE;
		scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
		memset(skspcl->data_buf, 0, sgd->byte_count);
		break;

	default:
		SKD_ASSERT("Don't know what to send");
		return;

	}
	skd_send_special_fitmsg(skdev, skspcl);
}

static void skd_refresh_device_data(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
}

static int skd_chk_read_buf(struct skd_device *skdev,
			    struct skd_special_context *skspcl)
{
	unsigned char *buf = skspcl->data_buf;
	int i;

	/* check for incrementing byte pattern */
	for (i = 0; i < WR_BUF_SIZE; i++)
		if (buf[i] != (i & 0xFF))
			return 1;

	return 0;
}

static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
				 u8 code, u8 qual, u8 fruc)
{
	/* If the check condition is of special interest, log a message */
	if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
	    && (code == 0x04) && (qual == 0x06)) {
		dev_err(&skdev->pdev->dev,
			"*** LOST_WRITE_DATA ERROR *** key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
			key, code, qual, fruc);
	}
}

static void skd_complete_internal(struct skd_device *skdev,
				  struct fit_completion_entry_v1 *skcomp,
				  struct fit_comp_error_info *skerr,
				  struct skd_special_context *skspcl)
{
	u8 *buf = skspcl->data_buf;
	u8 status;
	int i;
	struct skd_scsi_request *scsi = &skspcl->msg_buf->scsi[0];

	lockdep_assert_held(&skdev->lock);

	SKD_ASSERT(skspcl == &skdev->internal_skspcl);

	dev_dbg(&skdev->pdev->dev, "complete internal %x\n", scsi->cdb[0]);

	dma_sync_single_for_cpu(&skdev->pdev->dev,
				skspcl->db_dma_address,
				skspcl->req.sksg_list[0].byte_count,
				DMA_BIDIRECTIONAL);

	skspcl->req.completion = *skcomp;
	skspcl->req.state = SKD_REQ_STATE_IDLE;
	skspcl->req.id += SKD_ID_INCR;

	status = skspcl->req.completion.status;

	skd_log_check_status(skdev, status, skerr->key, skerr->code,
			     skerr->qual, skerr->fruc);

	switch (scsi->cdb[0]) {
	case TEST_UNIT_READY:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else if ((status == SAM_STAT_CHECK_CONDITION) &&
			 (skerr->key == MEDIUM_ERROR))
			skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"TUR failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** TUR failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case WRITE_BUFFER:
		if (status == SAM_STAT_GOOD)
			skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
		else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"write buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** write buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_BUFFER:
		if (status == SAM_STAT_GOOD) {
			if (skd_chk_read_buf(skdev, skspcl) == 0)
				skd_send_internal_skspcl(skdev, skspcl,
							 READ_CAPACITY);
			else {
				dev_err(&skdev->pdev->dev,
					"*** W/R Buffer mismatch %d ***\n",
					skdev->connect_retries);
				if (skdev->connect_retries <
				    SKD_MAX_CONNECT_RETRIES) {
					skdev->connect_retries++;
					skd_soft_reset(skdev);
				} else {
					dev_err(&skdev->pdev->dev,
						"W/R Buffer Connect Error\n");
					return;
				}
			}

		} else {
			if (skdev->state == SKD_DRVR_STATE_STOPPING) {
				dev_dbg(&skdev->pdev->dev,
					"read buffer failed, don't send anymore state 0x%x\n",
					skdev->state);
				return;
			}
			dev_dbg(&skdev->pdev->dev,
				"**** read buffer failed, retry skerr\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case READ_CAPACITY:
		skdev->read_cap_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->read_cap_last_lba =
				(buf[0] << 24) | (buf[1] << 16) |
				(buf[2] << 8) | buf[3];
			skdev->read_cap_blocksize =
				(buf[4] << 24) | (buf[5] << 16) |
				(buf[6] << 8) | buf[7];

			dev_dbg(&skdev->pdev->dev, "last lba %d, bs %d\n",
				skdev->read_cap_last_lba,
				skdev->read_cap_blocksize);

			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);

			skdev->read_cap_is_valid = 1;

			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else if ((status == SAM_STAT_CHECK_CONDITION) &&
			   (skerr->key == MEDIUM_ERROR)) {
			skdev->read_cap_last_lba = ~0;
			set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
			dev_dbg(&skdev->pdev->dev, "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n");
			skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
		} else {
			dev_dbg(&skdev->pdev->dev, "**** READCAP failed, retry TUR\n");
			skd_send_internal_skspcl(skdev, skspcl,
						 TEST_UNIT_READY);
		}
		break;

	case INQUIRY:
		skdev->inquiry_is_valid = 0;
		if (status == SAM_STAT_GOOD) {
			skdev->inquiry_is_valid = 1;

			for (i = 0; i < 12; i++)
				skdev->inq_serial_num[i] = buf[i + 4];
			skdev->inq_serial_num[12] = 0;
		}

		if (skd_unquiesce_dev(skdev) < 0)
			dev_dbg(&skdev->pdev->dev, "**** failed, to ONLINE device\n");
		/* connection is complete */
		skdev->connect_retries = 0;
		break;

	case SYNCHRONIZE_CACHE:
		if (status == SAM_STAT_GOOD)
			skdev->sync_done = 1;
		else
			skdev->sync_done = -1;
		wake_up_interruptible(&skdev->waitq);
		break;

	default:
		SKD_ASSERT("we didn't send this");
	}
}

/*
 *****************************************************************************
 * FIT MESSAGES
 *****************************************************************************
 */

static void skd_send_fitmsg(struct skd_device *skdev,
			    struct skd_fitmsg_context *skmsg)
{
	u64 qcmd;

	dev_dbg(&skdev->pdev->dev, "dma address 0x%llx, busy=%d\n",
		skmsg->mb_dma_address, skd_in_flight(skdev));
	dev_dbg(&skdev->pdev->dev, "msg_buf %p\n", skmsg->msg_buf);

	qcmd = skmsg->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL;

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skmsg->msg_buf;
		int i;
		for (i = 0; i < skmsg->length; i += 8) {
			dev_dbg(&skdev->pdev->dev, "msg[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}
	}

	if (skmsg->length > 256)
		qcmd |= FIT_QCMD_MSGSIZE_512;
	else if (skmsg->length > 128)
		qcmd |= FIT_QCMD_MSGSIZE_256;
	else if (skmsg->length > 64)
		qcmd |= FIT_QCMD_MSGSIZE_128;
	else
		/*
		 * This makes no sense because the FIT msg header is
		 * 64 bytes. If the msg is only 64 bytes long it has
		 * no payload.
		 */
		qcmd |= FIT_QCMD_MSGSIZE_64;

	dma_sync_single_for_device(&skdev->pdev->dev, skmsg->mb_dma_address,
				   skmsg->length, DMA_TO_DEVICE);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

static void skd_send_special_fitmsg(struct skd_device *skdev,
				    struct skd_special_context *skspcl)
{
	u64 qcmd;

	WARN_ON_ONCE(skspcl->req.n_sg != 1);

	if (unlikely(skdev->dbg_level > 1)) {
		u8 *bp = (u8 *)skspcl->msg_buf;
		int i;

		for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
			dev_dbg(&skdev->pdev->dev, " spcl[%2d] %8ph\n", i,
				&bp[i]);
			if (i == 0)
				i = 64 - 8;
		}

		dev_dbg(&skdev->pdev->dev,
			"skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
			skspcl, skspcl->req.id, skspcl->req.sksg_list,
			skspcl->req.sksg_dma_address);
		for (i = 0; i < skspcl->req.n_sg; i++) {
			struct fit_sg_descriptor *sgd =
				&skspcl->req.sksg_list[i];

			dev_dbg(&skdev->pdev->dev,
				"  sg[%d] count=%u ctrl=0x%x addr=0x%llx next=0x%llx\n",
				i, sgd->byte_count, sgd->control,
				sgd->host_side_addr, sgd->next_desc_ptr);
		}
	}

	/*
	 * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
	 * and one 64-byte SSDI command.
	 */
	qcmd = skspcl->mb_dma_address;
	qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;

	dma_sync_single_for_device(&skdev->pdev->dev, skspcl->mb_dma_address,
				   SKD_N_SPECIAL_FITMSG_BYTES, DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->req.sksg_dma_address,
				   1 * sizeof(struct fit_sg_descriptor),
				   DMA_TO_DEVICE);
	dma_sync_single_for_device(&skdev->pdev->dev,
				   skspcl->db_dma_address,
				   skspcl->req.sksg_list[0].byte_count,
				   DMA_BIDIRECTIONAL);

	/* Make sure skd_msg_buf is written before the doorbell is triggered. */
	smp_wmb();

	SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
}

/*
 *****************************************************************************
 * COMPLETION QUEUE
 *****************************************************************************
 */

static void skd_complete_other(struct skd_device *skdev,
			       struct fit_completion_entry_v1 *skcomp,
			       struct fit_comp_error_info *skerr);

struct sns_info {
	u8 type;
	u8 stat;
	u8 key;
	u8 asc;
	u8 ascq;
	u8 mask;
	enum skd_check_status_action action;
};

static struct sns_info skd_chkstat_table[] = {
	/* Good */
	{ 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
	  SKD_CHECK_STATUS_REPORT_GOOD },

	/* Smart alerts */
	{ 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },
	{ 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
	  SKD_CHECK_STATUS_REPORT_SMART_ALERT },

	/* Retry (with limits) */
	{ 0x70, 0x02, 0x0B, 0, 0, 0x1C,		/* This one is for DMA ERROR */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E,	/* warnings */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E,	/* thresholds */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },
	{ 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F,	/* backup power */
	  SKD_CHECK_STATUS_REQUEUE_REQUEST },

	/* Busy (or about to be) */
	{ 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F,	/* fw changed */
	  SKD_CHECK_STATUS_BUSY_IMMINENT },
};

/*
 * Look up status and sense data to decide how to handle the error
 * from the device.
 * mask says which fields must match e.g., mask=0x18 means check
 * type and stat, ignore key, asc, ascq.
 */
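/*
 * Worked example (editor's note): the "backup power" entry above uses
 * mask 0x1F, so all five fields must match: type 0x70, stat 0x02
 * (check condition), key 0x06, asc 0x80, ascq 0x30. The DMA ERROR entry
 * uses mask 0x1C, so only type, stat and key are compared and asc/ascq
 * are ignored.
 */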

static enum skd_check_status_action
skd_check_status(struct skd_device *skdev,
		 u8 cmp_status, struct fit_comp_error_info *skerr)
{
	int i;

	dev_err(&skdev->pdev->dev, "key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
		skerr->key, skerr->code, skerr->qual, skerr->fruc);

	dev_dbg(&skdev->pdev->dev,
		"stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
		skerr->type, cmp_status, skerr->key, skerr->code, skerr->qual,
		skerr->fruc);

	/* Does the info match an entry in the good category? */
	for (i = 0; i < ARRAY_SIZE(skd_chkstat_table); i++) {
		struct sns_info *sns = &skd_chkstat_table[i];

		if (sns->mask & 0x10)
			if (skerr->type != sns->type)
				continue;

		if (sns->mask & 0x08)
			if (cmp_status != sns->stat)
				continue;

		if (sns->mask & 0x04)
			if (skerr->key != sns->key)
				continue;

		if (sns->mask & 0x02)
			if (skerr->code != sns->asc)
				continue;

		if (sns->mask & 0x01)
			if (skerr->qual != sns->ascq)
				continue;

		if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
			dev_err(&skdev->pdev->dev,
				"SMART Alert: sense key/asc/ascq %02x/%02x/%02x\n",
				skerr->key, skerr->code, skerr->qual);
		}
		return sns->action;
	}

	/* No other match, so nonzero status means error,
	 * zero status means good
	 */
	if (cmp_status) {
		dev_dbg(&skdev->pdev->dev, "status check: error\n");
		return SKD_CHECK_STATUS_REPORT_ERROR;
	}

	dev_dbg(&skdev->pdev->dev, "status check good default\n");
	return SKD_CHECK_STATUS_REPORT_GOOD;
}

static void skd_resolve_req_exception(struct skd_device *skdev,
				      struct skd_request_context *skreq,
				      struct request *req)
{
	u8 cmp_status = skreq->completion.status;

	switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
	case SKD_CHECK_STATUS_REPORT_GOOD:
	case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
		skreq->status = BLK_STS_OK;
		blk_mq_complete_request(req);
		break;

	case SKD_CHECK_STATUS_BUSY_IMMINENT:
		skd_log_skreq(skdev, skreq, "retry(busy)");
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001424		blk_mq_requeue_request(req, true);
Bart Van Asschef98806d2017-08-17 13:12:58 -07001425 dev_info(&skdev->pdev->dev, "drive BUSY imminent\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001426 skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
1427 skdev->timer_countdown = SKD_TIMER_MINUTES(20);
1428 skd_quiesce_dev(skdev);
1429 break;
1430
1431 case SKD_CHECK_STATUS_REQUEUE_REQUEST:
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001432		if (++skreq->retries < SKD_MAX_RETRIES) {
Jens Axboefcd37eb2013-11-01 10:14:56 -06001433 skd_log_skreq(skdev, skreq, "retry");
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001434			blk_mq_requeue_request(req, true);
Jens Axboefcd37eb2013-11-01 10:14:56 -06001435 break;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001436 }
Bart Van Asschece6882b2017-08-17 13:12:52 -07001437 /* fall through */
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001438
1439 case SKD_CHECK_STATUS_REPORT_ERROR:
1440 default:
Bart Van Assche795bc1b2017-08-25 14:24:12 -07001441 skreq->status = BLK_STS_IOERR;
1442 blk_mq_complete_request(req);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001443 break;
1444 }
1445}
1446
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001447static void skd_release_skreq(struct skd_device *skdev,
1448 struct skd_request_context *skreq)
1449{
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001450 /*
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001451 * Reclaim the skd_request_context
1452 */
1453 skreq->state = SKD_REQ_STATE_IDLE;
1454 skreq->id += SKD_ID_INCR;
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001455}
1456
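/*
 * The id bump above is what catches stale completions. Assuming
 * SKD_ID_INCR is the lowest bit above SKD_ID_SLOT_AND_TABLE_MASK (as its
 * name suggests), the slot bits stay fixed while the generation bits
 * advance. A sketch with hypothetical values: a request retires with id
 * 0x0405 and the slot is reissued as id 0x0605; if the device later
 * replays the old tag 0x0405, the skreq->id == req_id check in
 * skd_isr_completion_posted() rejects it instead of completing the
 * recycled request.
 */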
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001457static int skd_isr_completion_posted(struct skd_device *skdev,
1458 int limit, int *enqueued)
1459{
Bart Van Assche85e34112017-08-17 13:13:17 -07001460 struct fit_completion_entry_v1 *skcmp;
1461 struct fit_comp_error_info *skerr;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001462 u16 req_id;
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001463 u32 tag;
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001464 u16 hwq = 0;
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001465 struct request *rq;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001466 struct skd_request_context *skreq;
Bart Van Asschec830da82017-08-17 13:13:13 -07001467 u16 cmp_cntxt;
1468 u8 cmp_status;
1469 u8 cmp_cycle;
1470 u32 cmp_bytes;
Dan Carpenterc0b3dda2017-08-23 13:44:20 +03001471 int rc = 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001472 int processed = 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001473
Bart Van Assche760b48c2017-08-17 13:13:00 -07001474 lockdep_assert_held(&skdev->lock);
1475
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001476	for (;;) {
1477 SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
1478
1479 skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
1480 cmp_cycle = skcmp->cycle;
1481 cmp_cntxt = skcmp->tag;
1482 cmp_status = skcmp->status;
1483 cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
1484
1485 skerr = &skdev->skerr_table[skdev->skcomp_ix];
1486
Bart Van Asschef98806d2017-08-17 13:12:58 -07001487 dev_dbg(&skdev->pdev->dev,
1488 "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d busy=%d rbytes=0x%x proto=%d\n",
1489 skdev->skcomp_cycle, skdev->skcomp_ix, cmp_cycle,
Bart Van Assched4d0f5f2017-08-17 13:13:34 -07001490 cmp_cntxt, cmp_status, skd_in_flight(skdev),
Bart Van Assche6fbb2de2017-08-17 13:13:27 -07001491 cmp_bytes, skdev->proto_ver);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001492
1493 if (cmp_cycle != skdev->skcomp_cycle) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001494 dev_dbg(&skdev->pdev->dev, "end of completions\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001495 break;
1496 }
1497 /*
1498 * Update the completion queue head index and possibly
1499 * the completion cycle count. 8-bit wrap-around.
1500 */
1501 skdev->skcomp_ix++;
1502 if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
1503 skdev->skcomp_ix = 0;
1504 skdev->skcomp_cycle++;
1505 }
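		/*
		 * A sketch of the cycle handshake implied by the checks
		 * above: the device stamps every entry it writes with the
		 * cycle value the consumer expects on the current pass.
		 * After the index wraps, entries left over from the previous
		 * pass still carry the old cycle value, so the
		 * cmp_cycle != skdev->skcomp_cycle test fires and ends the
		 * scan without a head/tail register read. For example
		 * (values hypothetical):
		 *
		 *	pass 1: consumer expects cycle 1, device writes 1
		 *	pass 2: consumer expects cycle 2; a stale pass-1
		 *	        entry still reads 1 and terminates the loop
		 */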
1506
1507 /*
1508 * The command context is a unique 32-bit ID. The low order
1509 * bits help locate the request. The request is usually a
1510		 * r/w request (see skd_mq_queue_rq()) or a special request.
1511 */
1512 req_id = cmp_cntxt;
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001513 tag = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001514
1515 /* Is this other than a r/w request? */
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001516 if (tag >= skdev->num_req_context) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001517 /*
1518 * This is not a completion for a r/w request.
1519 */
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001520 WARN_ON_ONCE(blk_mq_tag_to_rq(skdev->tag_set.tags[hwq],
1521 tag));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001522 skd_complete_other(skdev, skcmp, skerr);
1523 continue;
1524 }
1525
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001526 rq = blk_mq_tag_to_rq(skdev->tag_set.tags[hwq], tag);
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001527 if (WARN(!rq, "No request for tag %#x -> %#x\n", cmp_cntxt,
1528 tag))
1529 continue;
Bart Van Asschee7278a82017-08-17 13:13:31 -07001530 skreq = blk_mq_rq_to_pdu(rq);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001531
1532 /*
1533 * Make sure the request ID for the slot matches.
1534 */
1535 if (skreq->id != req_id) {
Bart Van Assche49f16e22017-08-23 10:56:30 -07001536 dev_err(&skdev->pdev->dev,
1537 "Completion mismatch comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
1538 req_id, skreq->id, cmp_cntxt);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001539
Bart Van Assche49f16e22017-08-23 10:56:30 -07001540 continue;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001541 }
1542
1543 SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
1544
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001545 skreq->completion = *skcmp;
1546 if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
1547 skreq->err_info = *skerr;
1548 skd_log_check_status(skdev, cmp_status, skerr->key,
1549 skerr->code, skerr->qual,
1550 skerr->fruc);
1551 }
1552 /* Release DMA resources for the request. */
1553 if (skreq->n_sg > 0)
1554 skd_postop_sg_list(skdev, skreq);
1555
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001556 skd_release_skreq(skdev, skreq);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001557
1558 /*
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001559 * Capture the outcome and post it back to the native request.
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001560 */
Bart Van Assche795bc1b2017-08-25 14:24:12 -07001561 if (likely(cmp_status == SAM_STAT_GOOD)) {
1562 skreq->status = BLK_STS_OK;
1563 blk_mq_complete_request(rq);
1564 } else {
Bart Van Asschef18c17c2017-08-17 13:13:26 -07001565 skd_resolve_req_exception(skdev, skreq, rq);
Bart Van Assche795bc1b2017-08-25 14:24:12 -07001566 }
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001567
1568		/* skd_isr_comp_limit equal to zero means no limit */
1569 if (limit) {
1570 if (++processed >= limit) {
1571 rc = 1;
1572 break;
1573 }
1574 }
1575 }
1576
Bart Van Assche6fbb2de2017-08-17 13:13:27 -07001577 if (skdev->state == SKD_DRVR_STATE_PAUSING &&
Bart Van Assched4d0f5f2017-08-17 13:13:34 -07001578 skd_in_flight(skdev) == 0) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001579 skdev->state = SKD_DRVR_STATE_PAUSED;
1580 wake_up_interruptible(&skdev->waitq);
1581 }
1582
1583 return rc;
1584}
1585
1586static void skd_complete_other(struct skd_device *skdev,
Bart Van Assche85e34112017-08-17 13:13:17 -07001587 struct fit_completion_entry_v1 *skcomp,
1588 struct fit_comp_error_info *skerr)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001589{
1590 u32 req_id = 0;
1591 u32 req_table;
1592 u32 req_slot;
1593 struct skd_special_context *skspcl;
1594
Bart Van Assche760b48c2017-08-17 13:13:00 -07001595 lockdep_assert_held(&skdev->lock);
1596
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001597 req_id = skcomp->tag;
1598 req_table = req_id & SKD_ID_TABLE_MASK;
1599 req_slot = req_id & SKD_ID_SLOT_MASK;
1600
Bart Van Asschef98806d2017-08-17 13:12:58 -07001601 dev_dbg(&skdev->pdev->dev, "table=0x%x id=0x%x slot=%d\n", req_table,
1602 req_id, req_slot);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001603
1604 /*
1605 * Based on the request id, determine how to dispatch this completion.
1606	 * The switch/case finds the good cases and forwards the
1607	 * completion entry. Errors are reported below the switch.
1608 */
1609 switch (req_table) {
1610 case SKD_ID_RW_REQUEST:
1611 /*
Bart Van Asschee1d06f22017-08-17 13:12:54 -07001612 * The caller, skd_isr_completion_posted() above,
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001613 * handles r/w requests. The only way we get here
1614 * is if the req_slot is out of bounds.
1615 */
1616 break;
1617
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001618 case SKD_ID_INTERNAL:
1619 if (req_slot == 0) {
1620 skspcl = &skdev->internal_skspcl;
1621 if (skspcl->req.id == req_id &&
1622 skspcl->req.state == SKD_REQ_STATE_BUSY) {
1623 skd_complete_internal(skdev,
1624 skcomp, skerr, skspcl);
1625 return;
1626 }
1627 }
1628 break;
1629
1630 case SKD_ID_FIT_MSG:
1631 /*
1632		 * These ids should never appear in a completion record.
1633 */
1634 break;
1635
1636 default:
1637 /*
1638		 * These ids should never appear anywhere.
1639 */
1640 break;
1641 }
1642
1643 /*
1644 * If we get here it is a bad or stale id.
1645 */
1646}
1647
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001648static void skd_reset_skcomp(struct skd_device *skdev)
1649{
Bart Van Assche6f7c7672017-08-17 13:13:02 -07001650 memset(skdev->skcomp_table, 0, SKD_SKCOMP_SIZE);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001651
1652 skdev->skcomp_ix = 0;
1653 skdev->skcomp_cycle = 1;
1654}
1655
1656/*
1657 *****************************************************************************
1658 * INTERRUPTS
1659 *****************************************************************************
1660 */
1661static void skd_completion_worker(struct work_struct *work)
1662{
1663 struct skd_device *skdev =
1664 container_of(work, struct skd_device, completion_worker);
1665 unsigned long flags;
1666 int flush_enqueued = 0;
1667
1668 spin_lock_irqsave(&skdev->lock, flags);
1669
1670 /*
1671 * pass in limit=0, which means no limit..
1672 * process everything in compq
1673 */
1674 skd_isr_completion_posted(skdev, 0, &flush_enqueued);
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001675 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001676
1677 spin_unlock_irqrestore(&skdev->lock, flags);
1678}
1679
1680static void skd_isr_msg_from_dev(struct skd_device *skdev);
1681
Arnd Bergmann41c94992016-11-09 13:55:35 +01001682static irqreturn_t
1683skd_isr(int irq, void *ptr)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001684{
Bart Van Assche1cd3c1a2017-08-17 13:13:10 -07001685 struct skd_device *skdev = ptr;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001686 u32 intstat;
1687 u32 ack;
1688 int rc = 0;
1689 int deferred = 0;
1690 int flush_enqueued = 0;
1691
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001692 spin_lock(&skdev->lock);
1693
1694 for (;; ) {
1695 intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
1696
1697 ack = FIT_INT_DEF_MASK;
1698 ack &= intstat;
1699
Bart Van Asschef98806d2017-08-17 13:12:58 -07001700 dev_dbg(&skdev->pdev->dev, "intstat=0x%x ack=0x%x\n", intstat,
1701 ack);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001702
1703		/* As long as there is an interrupt pending on the device,
1704		 * keep looping. When none remain, get out; if no processing
1705		 * was done at all, defer one pass to the completion handler.
1706		 */
1707 if (ack == 0) {
1708			/* No interrupts pending on the device; run the
1709			 * completion processor once more anyway.
1710 */
1711 if (rc == 0)
1712 if (likely (skdev->state
1713 == SKD_DRVR_STATE_ONLINE))
1714 deferred = 1;
1715 break;
1716 }
1717
1718 rc = IRQ_HANDLED;
1719
1720 SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
1721
1722 if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
1723 (skdev->state != SKD_DRVR_STATE_STOPPING))) {
1724 if (intstat & FIT_ISH_COMPLETION_POSTED) {
1725 /*
1726 * If we have already deferred completion
1727 * processing, don't bother running it again
1728 */
1729 if (deferred == 0)
1730 deferred =
1731 skd_isr_completion_posted(skdev,
1732 skd_isr_comp_limit, &flush_enqueued);
1733 }
1734
1735 if (intstat & FIT_ISH_FW_STATE_CHANGE) {
1736 skd_isr_fwstate(skdev);
1737 if (skdev->state == SKD_DRVR_STATE_FAULT ||
1738 skdev->state ==
1739 SKD_DRVR_STATE_DISAPPEARED) {
1740 spin_unlock(&skdev->lock);
1741 return rc;
1742 }
1743 }
1744
1745 if (intstat & FIT_ISH_MSG_FROM_DEV)
1746 skd_isr_msg_from_dev(skdev);
1747 }
1748 }
1749
1750 if (unlikely(flush_enqueued))
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001751 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001752
1753 if (deferred)
1754 schedule_work(&skdev->completion_worker);
1755 else if (!flush_enqueued)
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001756 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001757
1758 spin_unlock(&skdev->lock);
1759
1760 return rc;
1761}
1762
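/*
 * How the two flags above appear to interact: "deferred" is set either
 * because skd_isr_completion_posted() stopped at skd_isr_comp_limit with
 * entries still in the ring, or because no interrupt bits were pending at
 * all while online; both cases hand the remaining completion work to
 * skd_completion_worker() in process context. "flush_enqueued" is never
 * written by skd_isr_completion_posted() in this version, so only the
 * schedule_work(&skdev->start_queue) restart paths remain in play.
 */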
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001763static void skd_drive_fault(struct skd_device *skdev)
1764{
1765 skdev->state = SKD_DRVR_STATE_FAULT;
Bart Van Asschef98806d2017-08-17 13:12:58 -07001766 dev_err(&skdev->pdev->dev, "Drive FAULT\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001767}
1768
1769static void skd_drive_disappeared(struct skd_device *skdev)
1770{
1771 skdev->state = SKD_DRVR_STATE_DISAPPEARED;
Bart Van Asschef98806d2017-08-17 13:12:58 -07001772 dev_err(&skdev->pdev->dev, "Drive DISAPPEARED\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001773}
1774
1775static void skd_isr_fwstate(struct skd_device *skdev)
1776{
1777 u32 sense;
1778 u32 state;
1779 u32 mtd;
1780 int prev_driver_state = skdev->state;
1781
1782 sense = SKD_READL(skdev, FIT_STATUS);
1783 state = sense & FIT_SR_DRIVE_STATE_MASK;
1784
Bart Van Asschef98806d2017-08-17 13:12:58 -07001785 dev_err(&skdev->pdev->dev, "s1120 state %s(%d)=>%s(%d)\n",
1786 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
1787 skd_drive_state_to_str(state), state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001788
1789 skdev->drive_state = state;
1790
1791 switch (skdev->drive_state) {
1792 case FIT_SR_DRIVE_INIT:
1793 if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
1794 skd_disable_interrupts(skdev);
1795 break;
1796 }
1797 if (skdev->state == SKD_DRVR_STATE_RESTARTING)
Bart Van Assche79ce12a2017-08-17 13:13:14 -07001798 skd_recover_requests(skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001799 if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
1800 skdev->timer_countdown = SKD_STARTING_TIMO;
1801 skdev->state = SKD_DRVR_STATE_STARTING;
1802 skd_soft_reset(skdev);
1803 break;
1804 }
1805 mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
1806 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1807 skdev->last_mtd = mtd;
1808 break;
1809
1810 case FIT_SR_DRIVE_ONLINE:
1811 skdev->cur_max_queue_depth = skd_max_queue_depth;
1812 if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
1813 skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
1814
1815 skdev->queue_low_water_mark =
1816 skdev->cur_max_queue_depth * 2 / 3 + 1;
1817 if (skdev->queue_low_water_mark < 1)
1818 skdev->queue_low_water_mark = 1;
Bart Van Asschef98806d2017-08-17 13:12:58 -07001819 dev_info(&skdev->pdev->dev,
1820 "Queue depth limit=%d dev=%d lowat=%d\n",
1821 skdev->cur_max_queue_depth,
1822 skdev->dev_max_queue_depth,
1823 skdev->queue_low_water_mark);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001824
1825 skd_refresh_device_data(skdev);
1826 break;
1827
1828 case FIT_SR_DRIVE_BUSY:
1829 skdev->state = SKD_DRVR_STATE_BUSY;
1830 skdev->timer_countdown = SKD_BUSY_TIMO;
1831 skd_quiesce_dev(skdev);
1832 break;
1833 case FIT_SR_DRIVE_BUSY_SANITIZE:
1834 /* set timer for 3 seconds, we'll abort any unfinished
1835 * commands after that expires
1836 */
1837 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
1838 skdev->timer_countdown = SKD_TIMER_SECONDS(3);
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001839 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001840 break;
1841 case FIT_SR_DRIVE_BUSY_ERASE:
1842 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
1843 skdev->timer_countdown = SKD_BUSY_TIMO;
1844 break;
1845 case FIT_SR_DRIVE_OFFLINE:
1846 skdev->state = SKD_DRVR_STATE_IDLE;
1847 break;
1848 case FIT_SR_DRIVE_SOFT_RESET:
1849 switch (skdev->state) {
1850 case SKD_DRVR_STATE_STARTING:
1851 case SKD_DRVR_STATE_RESTARTING:
1852 /* Expected by a caller of skd_soft_reset() */
1853 break;
1854 default:
1855 skdev->state = SKD_DRVR_STATE_RESTARTING;
1856 break;
1857 }
1858 break;
1859 case FIT_SR_DRIVE_FW_BOOTING:
Bart Van Asschef98806d2017-08-17 13:12:58 -07001860 dev_dbg(&skdev->pdev->dev, "ISR FIT_SR_DRIVE_FW_BOOTING\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001861 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
1862 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
1863 break;
1864
1865 case FIT_SR_DRIVE_DEGRADED:
1866 case FIT_SR_PCIE_LINK_DOWN:
1867 case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
1868 break;
1869
1870 case FIT_SR_DRIVE_FAULT:
1871 skd_drive_fault(skdev);
Bart Van Assche79ce12a2017-08-17 13:13:14 -07001872 skd_recover_requests(skdev);
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001873 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001874 break;
1875
1876 /* PCIe bus returned all Fs? */
1877 case 0xFF:
Bart Van Asschef98806d2017-08-17 13:12:58 -07001878 dev_info(&skdev->pdev->dev, "state=0x%x sense=0x%x\n", state,
1879 sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001880 skd_drive_disappeared(skdev);
Bart Van Assche79ce12a2017-08-17 13:13:14 -07001881 skd_recover_requests(skdev);
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001882 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001883 break;
1884 default:
1885 /*
1886		 * Unknown FW state. Wait for a state we recognize.
1887 */
1888 break;
1889 }
Bart Van Asschef98806d2017-08-17 13:12:58 -07001890 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
1891 skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
1892 skd_skdev_state_to_str(skdev->state), skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001893}
1894
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001895static void skd_recover_request(struct request *req, void *data, bool reserved)
Bart Van Assche4e54b842017-08-17 13:13:29 -07001896{
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001897 struct skd_device *const skdev = data;
1898 struct skd_request_context *skreq = blk_mq_rq_to_pdu(req);
Bart Van Assche4e54b842017-08-17 13:13:29 -07001899
1900 if (skreq->state != SKD_REQ_STATE_BUSY)
1901 return;
1902
1903 skd_log_skreq(skdev, skreq, "recover");
1904
Bart Van Assche4e54b842017-08-17 13:13:29 -07001905 /* Release DMA resources for the request. */
1906 if (skreq->n_sg > 0)
1907 skd_postop_sg_list(skdev, skreq);
1908
Bart Van Assche4e54b842017-08-17 13:13:29 -07001909 skreq->state = SKD_REQ_STATE_IDLE;
Bart Van Assche795bc1b2017-08-25 14:24:12 -07001910 skreq->status = BLK_STS_IOERR;
1911 blk_mq_complete_request(req);
Bart Van Assche4e54b842017-08-17 13:13:29 -07001912}
1913
Bart Van Assche79ce12a2017-08-17 13:13:14 -07001914static void skd_recover_requests(struct skd_device *skdev)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001915{
Bart Van Asscheca33dd92017-08-17 13:13:32 -07001916 blk_mq_tagset_busy_iter(&skdev->tag_set, skd_recover_request, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001917}
1918
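/*
 * blk_mq_tagset_busy_iter() only visits requests the block layer has
 * started, which is why skd_recover_request() still re-checks
 * SKD_REQ_STATE_BUSY before touching anything. A minimal sketch of the
 * same pattern for a hypothetical in-flight counter:
 */
#if 0	/* illustration only; skd_count_busy() is not part of the driver */
static void skd_count_busy(struct request *req, void *data, bool reserved)
{
	unsigned int *busy = data;

	(*busy)++;
}

static unsigned int skd_busy_count(struct skd_device *skdev)
{
	unsigned int busy = 0;

	blk_mq_tagset_busy_iter(&skdev->tag_set, skd_count_busy, &busy);
	return busy;
}
#endif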
1919static void skd_isr_msg_from_dev(struct skd_device *skdev)
1920{
1921 u32 mfd;
1922 u32 mtd;
1923 u32 data;
1924
1925 mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
1926
Bart Van Asschef98806d2017-08-17 13:12:58 -07001927 dev_dbg(&skdev->pdev->dev, "mfd=0x%x last_mtd=0x%x\n", mfd,
1928 skdev->last_mtd);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001929
1930 /* ignore any mtd that is an ack for something we didn't send */
1931 if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
1932 return;
1933
1934 switch (FIT_MXD_TYPE(mfd)) {
1935 case FIT_MTD_FITFW_INIT:
1936 skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
1937
1938 if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07001939 dev_err(&skdev->pdev->dev, "protocol mismatch\n");
1940 dev_err(&skdev->pdev->dev, " got=%d support=%d\n",
1941 skdev->proto_ver, FIT_PROTOCOL_VERSION_1);
1942 dev_err(&skdev->pdev->dev, " please upgrade driver\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001943 skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
1944 skd_soft_reset(skdev);
1945 break;
1946 }
1947 mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
1948 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1949 skdev->last_mtd = mtd;
1950 break;
1951
1952 case FIT_MTD_GET_CMDQ_DEPTH:
1953 skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
1954 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
1955 SKD_N_COMPLETION_ENTRY);
1956 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1957 skdev->last_mtd = mtd;
1958 break;
1959
1960 case FIT_MTD_SET_COMPQ_DEPTH:
1961 SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
1962 mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
1963 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1964 skdev->last_mtd = mtd;
1965 break;
1966
1967 case FIT_MTD_SET_COMPQ_ADDR:
1968 skd_reset_skcomp(skdev);
1969 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
1970 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1971 skdev->last_mtd = mtd;
1972 break;
1973
1974 case FIT_MTD_CMD_LOG_HOST_ID:
1975 skdev->connect_time_stamp = get_seconds();
1976 data = skdev->connect_time_stamp & 0xFFFF;
1977 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
1978 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1979 skdev->last_mtd = mtd;
1980 break;
1981
1982 case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
1983 skdev->drive_jiffies = FIT_MXD_DATA(mfd);
1984 data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
1985 mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
1986 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1987 skdev->last_mtd = mtd;
1988 break;
1989
1990 case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
1991 skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
1992 mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
1993 SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
1994 skdev->last_mtd = mtd;
1995
Bart Van Asschef98806d2017-08-17 13:12:58 -07001996 dev_err(&skdev->pdev->dev, "Time sync driver=0x%x device=0x%x\n",
1997 skdev->connect_time_stamp, skdev->drive_jiffies);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06001998 break;
1999
2000 case FIT_MTD_ARM_QUEUE:
2001 skdev->last_mtd = 0;
2002 /*
2003 * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
2004 */
2005 break;
2006
2007 default:
2008 break;
2009 }
2010}
2011
2012static void skd_disable_interrupts(struct skd_device *skdev)
2013{
2014 u32 sense;
2015
2016 sense = SKD_READL(skdev, FIT_CONTROL);
2017 sense &= ~FIT_CR_ENABLE_INTERRUPTS;
2018 SKD_WRITEL(skdev, sense, FIT_CONTROL);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002019 dev_dbg(&skdev->pdev->dev, "sense 0x%x\n", sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002020
2021	/* Note that all 1s are written. A 1-bit means
2022 * disable, a 0 means enable.
2023 */
2024 SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
2025}
2026
2027static void skd_enable_interrupts(struct skd_device *skdev)
2028{
2029 u32 val;
2030
2031 /* unmask interrupts first */
2032	val = FIT_ISH_FW_STATE_CHANGE |
2033	      FIT_ISH_COMPLETION_POSTED | FIT_ISH_MSG_FROM_DEV;
2034
2035	/* Note that the complement of the mask is written. A 1-bit means
2036	 * disable, a 0 means enable. */
2037 SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002038 dev_dbg(&skdev->pdev->dev, "interrupt mask=0x%x\n", ~val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002039
2040 val = SKD_READL(skdev, FIT_CONTROL);
2041 val |= FIT_CR_ENABLE_INTERRUPTS;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002042 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002043 SKD_WRITEL(skdev, val, FIT_CONTROL);
2044}
2045
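/*
 * Worked example of the polarity: with the three sources above unmasked,
 * the value written to FIT_INT_MASK_HOST is
 *
 *	~(FIT_ISH_FW_STATE_CHANGE | FIT_ISH_COMPLETION_POSTED |
 *	  FIT_ISH_MSG_FROM_DEV)
 *
 * i.e. 1 (disabled) everywhere except the three enabled sources, while
 * skd_disable_interrupts() writes ~0 to mask everything at once.
 */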
2046/*
2047 *****************************************************************************
2048 * START, STOP, RESTART, QUIESCE, UNQUIESCE
2049 *****************************************************************************
2050 */
2051
2052static void skd_soft_reset(struct skd_device *skdev)
2053{
2054 u32 val;
2055
2056 val = SKD_READL(skdev, FIT_CONTROL);
2057 val |= (FIT_CR_SOFT_RESET);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002058 dev_dbg(&skdev->pdev->dev, "control=0x%x\n", val);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002059 SKD_WRITEL(skdev, val, FIT_CONTROL);
2060}
2061
2062static void skd_start_device(struct skd_device *skdev)
2063{
2064 unsigned long flags;
2065 u32 sense;
2066 u32 state;
2067
2068 spin_lock_irqsave(&skdev->lock, flags);
2069
2070 /* ack all ghost interrupts */
2071 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2072
2073 sense = SKD_READL(skdev, FIT_STATUS);
2074
Bart Van Asschef98806d2017-08-17 13:12:58 -07002075 dev_dbg(&skdev->pdev->dev, "initial status=0x%x\n", sense);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002076
2077 state = sense & FIT_SR_DRIVE_STATE_MASK;
2078 skdev->drive_state = state;
2079 skdev->last_mtd = 0;
2080
2081 skdev->state = SKD_DRVR_STATE_STARTING;
2082 skdev->timer_countdown = SKD_STARTING_TIMO;
2083
2084 skd_enable_interrupts(skdev);
2085
2086 switch (skdev->drive_state) {
2087 case FIT_SR_DRIVE_OFFLINE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002088 dev_err(&skdev->pdev->dev, "Drive offline...\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002089 break;
2090
2091 case FIT_SR_DRIVE_FW_BOOTING:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002092 dev_dbg(&skdev->pdev->dev, "FIT_SR_DRIVE_FW_BOOTING\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002093 skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
2094 skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
2095 break;
2096
2097 case FIT_SR_DRIVE_BUSY_SANITIZE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002098 dev_info(&skdev->pdev->dev, "Start: BUSY_SANITIZE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002099 skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
2100 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2101 break;
2102
2103 case FIT_SR_DRIVE_BUSY_ERASE:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002104 dev_info(&skdev->pdev->dev, "Start: BUSY_ERASE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002105 skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
2106 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2107 break;
2108
2109 case FIT_SR_DRIVE_INIT:
2110 case FIT_SR_DRIVE_ONLINE:
2111 skd_soft_reset(skdev);
2112 break;
2113
2114 case FIT_SR_DRIVE_BUSY:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002115 dev_err(&skdev->pdev->dev, "Drive Busy...\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002116 skdev->state = SKD_DRVR_STATE_BUSY;
2117 skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
2118 break;
2119
2120 case FIT_SR_DRIVE_SOFT_RESET:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002121 dev_err(&skdev->pdev->dev, "drive soft reset in prog\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002122 break;
2123
2124 case FIT_SR_DRIVE_FAULT:
2125 /* Fault state is bad...soft reset won't do it...
2126 * Hard reset, maybe, but does it work on device?
2127 * For now, just fault so the system doesn't hang.
2128 */
2129 skd_drive_fault(skdev);
2130		/* start the queue so we can respond with error to requests */
Bart Van Asschef98806d2017-08-17 13:12:58 -07002131 dev_dbg(&skdev->pdev->dev, "starting queue\n");
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002132 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002133 skdev->gendisk_on = -1;
2134 wake_up_interruptible(&skdev->waitq);
2135 break;
2136
2137 case 0xFF:
2138 /* Most likely the device isn't there or isn't responding
2139 * to the BAR1 addresses. */
2140 skd_drive_disappeared(skdev);
2141		/* start the queue so we can respond with error to requests */
Bart Van Asschef98806d2017-08-17 13:12:58 -07002142 dev_dbg(&skdev->pdev->dev,
2143 "starting queue to error-out reqs\n");
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002144 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002145 skdev->gendisk_on = -1;
2146 wake_up_interruptible(&skdev->waitq);
2147 break;
2148
2149 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002150 dev_err(&skdev->pdev->dev, "Start: unknown state %x\n",
2151 skdev->drive_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002152 break;
2153 }
2154
2155 state = SKD_READL(skdev, FIT_CONTROL);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002156 dev_dbg(&skdev->pdev->dev, "FIT Control Status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002157
2158 state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002159 dev_dbg(&skdev->pdev->dev, "Intr Status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002160
2161 state = SKD_READL(skdev, FIT_INT_MASK_HOST);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002162 dev_dbg(&skdev->pdev->dev, "Intr Mask=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002163
2164 state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002165 dev_dbg(&skdev->pdev->dev, "Msg from Dev=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002166
2167 state = SKD_READL(skdev, FIT_HW_VERSION);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002168 dev_dbg(&skdev->pdev->dev, "HW version=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002169
2170 spin_unlock_irqrestore(&skdev->lock, flags);
2171}
2172
2173static void skd_stop_device(struct skd_device *skdev)
2174{
2175 unsigned long flags;
2176 struct skd_special_context *skspcl = &skdev->internal_skspcl;
2177 u32 dev_state;
2178 int i;
2179
2180 spin_lock_irqsave(&skdev->lock, flags);
2181
2182 if (skdev->state != SKD_DRVR_STATE_ONLINE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002183 dev_err(&skdev->pdev->dev, "%s not online no sync\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002184 goto stop_out;
2185 }
2186
2187 if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002188 dev_err(&skdev->pdev->dev, "%s no special\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002189 goto stop_out;
2190 }
2191
2192 skdev->state = SKD_DRVR_STATE_SYNCING;
2193 skdev->sync_done = 0;
2194
2195 skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
2196
2197 spin_unlock_irqrestore(&skdev->lock, flags);
2198
2199 wait_event_interruptible_timeout(skdev->waitq,
2200 (skdev->sync_done), (10 * HZ));
2201
2202 spin_lock_irqsave(&skdev->lock, flags);
2203
2204 switch (skdev->sync_done) {
2205 case 0:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002206 dev_err(&skdev->pdev->dev, "%s no sync\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002207 break;
2208 case 1:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002209 dev_err(&skdev->pdev->dev, "%s sync done\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002210 break;
2211 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002212 dev_err(&skdev->pdev->dev, "%s sync error\n", __func__);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002213 }
2214
2215stop_out:
2216 skdev->state = SKD_DRVR_STATE_STOPPING;
2217 spin_unlock_irqrestore(&skdev->lock, flags);
2218
2219 skd_kill_timer(skdev);
2220
2221 spin_lock_irqsave(&skdev->lock, flags);
2222 skd_disable_interrupts(skdev);
2223
2224 /* ensure all ints on device are cleared */
2225 /* soft reset the device to unload with a clean slate */
2226 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2227 SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
2228
2229 spin_unlock_irqrestore(&skdev->lock, flags);
2230
2231 /* poll every 100ms, 1 second timeout */
2232 for (i = 0; i < 10; i++) {
2233 dev_state =
2234 SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
2235 if (dev_state == FIT_SR_DRIVE_INIT)
2236 break;
2237 set_current_state(TASK_INTERRUPTIBLE);
2238 schedule_timeout(msecs_to_jiffies(100));
2239 }
2240
2241 if (dev_state != FIT_SR_DRIVE_INIT)
Bart Van Asschef98806d2017-08-17 13:12:58 -07002242 dev_err(&skdev->pdev->dev, "%s state error 0x%02x\n", __func__,
2243 dev_state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002244}
2245
2246/* assume spinlock is held */
2247static void skd_restart_device(struct skd_device *skdev)
2248{
2249 u32 state;
2250
2251 /* ack all ghost interrupts */
2252 SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
2253
2254 state = SKD_READL(skdev, FIT_STATUS);
2255
Bart Van Asschef98806d2017-08-17 13:12:58 -07002256 dev_dbg(&skdev->pdev->dev, "drive status=0x%x\n", state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002257
2258 state &= FIT_SR_DRIVE_STATE_MASK;
2259 skdev->drive_state = state;
2260 skdev->last_mtd = 0;
2261
2262 skdev->state = SKD_DRVR_STATE_RESTARTING;
2263 skdev->timer_countdown = SKD_RESTARTING_TIMO;
2264
2265 skd_soft_reset(skdev);
2266}
2267
2268/* assume spinlock is held */
2269static int skd_quiesce_dev(struct skd_device *skdev)
2270{
2271 int rc = 0;
2272
2273 switch (skdev->state) {
2274 case SKD_DRVR_STATE_BUSY:
2275 case SKD_DRVR_STATE_BUSY_IMMINENT:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002276 dev_dbg(&skdev->pdev->dev, "stopping queue\n");
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002277 blk_mq_stop_hw_queues(skdev->queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002278 break;
2279 case SKD_DRVR_STATE_ONLINE:
2280 case SKD_DRVR_STATE_STOPPING:
2281 case SKD_DRVR_STATE_SYNCING:
2282 case SKD_DRVR_STATE_PAUSING:
2283 case SKD_DRVR_STATE_PAUSED:
2284 case SKD_DRVR_STATE_STARTING:
2285 case SKD_DRVR_STATE_RESTARTING:
2286 case SKD_DRVR_STATE_RESUMING:
2287 default:
2288 rc = -EINVAL;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002289 dev_dbg(&skdev->pdev->dev, "state [%d] not implemented\n",
2290 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002291 }
2292 return rc;
2293}
2294
2295/* assume spinlock is held */
2296static int skd_unquiesce_dev(struct skd_device *skdev)
2297{
2298 int prev_driver_state = skdev->state;
2299
2300 skd_log_skdev(skdev, "unquiesce");
2301 if (skdev->state == SKD_DRVR_STATE_ONLINE) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002302 dev_dbg(&skdev->pdev->dev, "**** device already ONLINE\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002303 return 0;
2304 }
2305 if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
2306 /*
2307		 * If there has been a state change to other than
2308 * ONLINE, we will rely on controller state change
2309 * to come back online and restart the queue.
2310 * The BUSY state means that driver is ready to
2311 * continue normal processing but waiting for controller
2312 * to become available.
2313 */
2314 skdev->state = SKD_DRVR_STATE_BUSY;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002315 dev_dbg(&skdev->pdev->dev, "drive BUSY state\n");
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002316 return 0;
2317 }
2318
2319 /*
2320 * Drive has just come online, driver is either in startup,
2321	 * paused performing a task, or busy waiting for hardware.
2322 */
2323 switch (skdev->state) {
2324 case SKD_DRVR_STATE_PAUSED:
2325 case SKD_DRVR_STATE_BUSY:
2326 case SKD_DRVR_STATE_BUSY_IMMINENT:
2327 case SKD_DRVR_STATE_BUSY_ERASE:
2328 case SKD_DRVR_STATE_STARTING:
2329 case SKD_DRVR_STATE_RESTARTING:
2330 case SKD_DRVR_STATE_FAULT:
2331 case SKD_DRVR_STATE_IDLE:
2332 case SKD_DRVR_STATE_LOAD:
2333 skdev->state = SKD_DRVR_STATE_ONLINE;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002334 dev_err(&skdev->pdev->dev, "Driver state %s(%d)=>%s(%d)\n",
2335 skd_skdev_state_to_str(prev_driver_state),
2336 prev_driver_state, skd_skdev_state_to_str(skdev->state),
2337 skdev->state);
2338 dev_dbg(&skdev->pdev->dev,
2339 "**** device ONLINE...starting block queue\n");
2340 dev_dbg(&skdev->pdev->dev, "starting queue\n");
2341 dev_info(&skdev->pdev->dev, "STEC s1120 ONLINE\n");
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002342 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002343 skdev->gendisk_on = 1;
2344 wake_up_interruptible(&skdev->waitq);
2345 break;
2346
2347 case SKD_DRVR_STATE_DISAPPEARED:
2348 default:
Bart Van Asschef98806d2017-08-17 13:12:58 -07002349 dev_dbg(&skdev->pdev->dev,
2350 "**** driver state %d, not implemented\n",
2351 skdev->state);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002352 return -EBUSY;
2353 }
2354 return 0;
2355}
2356
2357/*
2358 *****************************************************************************
2359 * PCIe MSI/MSI-X INTERRUPT HANDLERS
2360 *****************************************************************************
2361 */
2362
2363static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
2364{
2365 struct skd_device *skdev = skd_host_data;
2366 unsigned long flags;
2367
2368 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002369 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2370 SKD_READL(skdev, FIT_INT_STATUS_HOST));
2371 dev_err(&skdev->pdev->dev, "MSIX reserved irq %d = 0x%x\n", irq,
2372 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002373 SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
2374 spin_unlock_irqrestore(&skdev->lock, flags);
2375 return IRQ_HANDLED;
2376}
2377
2378static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
2379{
2380 struct skd_device *skdev = skd_host_data;
2381 unsigned long flags;
2382
2383 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002384 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2385 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002386 SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
2387 skd_isr_fwstate(skdev);
2388 spin_unlock_irqrestore(&skdev->lock, flags);
2389 return IRQ_HANDLED;
2390}
2391
2392static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
2393{
2394 struct skd_device *skdev = skd_host_data;
2395 unsigned long flags;
2396 int flush_enqueued = 0;
2397 int deferred;
2398
2399 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002400 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2401 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002402 SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
2403 deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
2404 &flush_enqueued);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002405 if (flush_enqueued)
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002406 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002407
2408 if (deferred)
2409 schedule_work(&skdev->completion_worker);
2410 else if (!flush_enqueued)
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002411 schedule_work(&skdev->start_queue);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002412
2413 spin_unlock_irqrestore(&skdev->lock, flags);
2414
2415 return IRQ_HANDLED;
2416}
2417
2418static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
2419{
2420 struct skd_device *skdev = skd_host_data;
2421 unsigned long flags;
2422
2423 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002424 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2425 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002426 SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
2427 skd_isr_msg_from_dev(skdev);
2428 spin_unlock_irqrestore(&skdev->lock, flags);
2429 return IRQ_HANDLED;
2430}
2431
2432static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
2433{
2434 struct skd_device *skdev = skd_host_data;
2435 unsigned long flags;
2436
2437 spin_lock_irqsave(&skdev->lock, flags);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002438 dev_dbg(&skdev->pdev->dev, "MSIX = 0x%x\n",
2439 SKD_READL(skdev, FIT_INT_STATUS_HOST));
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002440 SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
2441 spin_unlock_irqrestore(&skdev->lock, flags);
2442 return IRQ_HANDLED;
2443}
2444
2445/*
2446 *****************************************************************************
2447 * PCIe MSI/MSI-X SETUP
2448 *****************************************************************************
2449 */
2450
2451struct skd_msix_entry {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002452 char isr_name[30];
2453};
2454
2455struct skd_init_msix_entry {
2456 const char *name;
2457 irq_handler_t handler;
2458};
2459
2460#define SKD_MAX_MSIX_COUNT 13
2461#define SKD_MIN_MSIX_COUNT 7
2462#define SKD_BASE_MSIX_IRQ 4
2463
2464static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
2465 { "(DMA 0)", skd_reserved_isr },
2466 { "(DMA 1)", skd_reserved_isr },
2467 { "(DMA 2)", skd_reserved_isr },
2468 { "(DMA 3)", skd_reserved_isr },
2469 { "(State Change)", skd_statec_isr },
2470 { "(COMPL_Q)", skd_comp_q },
2471 { "(MSG)", skd_msg_isr },
2472 { "(Reserved)", skd_reserved_isr },
2473 { "(Reserved)", skd_reserved_isr },
2474 { "(Queue Full 0)", skd_qfull_isr },
2475 { "(Queue Full 1)", skd_qfull_isr },
2476 { "(Queue Full 2)", skd_qfull_isr },
2477 { "(Queue Full 3)", skd_qfull_isr },
2478};
2479
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002480static int skd_acquire_msix(struct skd_device *skdev)
2481{
Alexander Gordeeva9df8622014-02-19 09:58:21 +01002482 int i, rc;
Alexander Gordeev46817762014-02-19 09:58:19 +01002483 struct pci_dev *pdev = skdev->pdev;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002484
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002485 rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
2486 PCI_IRQ_MSIX);
2487 if (rc < 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002488 dev_err(&skdev->pdev->dev, "failed to enable MSI-X %d\n", rc);
Arnd Bergmann3bc84922016-11-09 13:55:34 +01002489 goto out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002490 }
Alexander Gordeev46817762014-02-19 09:58:19 +01002491
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002492 skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
2493 sizeof(struct skd_msix_entry), GFP_KERNEL);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002494 if (!skdev->msix_entries) {
2495 rc = -ENOMEM;
Bart Van Asschef98806d2017-08-17 13:12:58 -07002496 dev_err(&skdev->pdev->dev, "msix table allocation error\n");
Arnd Bergmann3bc84922016-11-09 13:55:34 +01002497 goto out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002498 }
2499
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002500 /* Enable MSI-X vectors for the base queue */
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002501 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2502 struct skd_msix_entry *qentry = &skdev->msix_entries[i];
2503
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002504 snprintf(qentry->isr_name, sizeof(qentry->isr_name),
2505 "%s%d-msix %s", DRV_NAME, skdev->devno,
2506 msix_entries[i].name);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002507
2508 rc = devm_request_irq(&skdev->pdev->dev,
2509 pci_irq_vector(skdev->pdev, i),
2510 msix_entries[i].handler, 0,
2511 qentry->isr_name, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002512 if (rc) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002513 dev_err(&skdev->pdev->dev,
2514 "Unable to register(%d) MSI-X handler %d: %s\n",
2515 rc, i, qentry->isr_name);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002516 goto msix_out;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002517 }
2518 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002519
Bart Van Asschef98806d2017-08-17 13:12:58 -07002520 dev_dbg(&skdev->pdev->dev, "%d msix irq(s) enabled\n",
2521 SKD_MAX_MSIX_COUNT);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002522 return 0;
2523
2524msix_out:
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002525 while (--i >= 0)
2526 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
Arnd Bergmann3bc84922016-11-09 13:55:34 +01002527out:
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002528 kfree(skdev->msix_entries);
2529 skdev->msix_entries = NULL;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002530 return rc;
2531}
2532
2533static int skd_acquire_irq(struct skd_device *skdev)
2534{
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002535 struct pci_dev *pdev = skdev->pdev;
2536 unsigned int irq_flag = PCI_IRQ_LEGACY;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002537 int rc;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002538
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002539 if (skd_isr_type == SKD_IRQ_MSIX) {
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002540 rc = skd_acquire_msix(skdev);
2541 if (!rc)
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002542 return 0;
2543
Bart Van Asschef98806d2017-08-17 13:12:58 -07002544 dev_err(&skdev->pdev->dev,
2545 "failed to enable MSI-X, re-trying with MSI %d\n", rc);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002546 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002547
2548 snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
2549 skdev->devno);
2550
2551 if (skd_isr_type != SKD_IRQ_LEGACY)
2552 irq_flag |= PCI_IRQ_MSI;
2553 rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
2554 if (rc < 0) {
Bart Van Asschef98806d2017-08-17 13:12:58 -07002555 dev_err(&skdev->pdev->dev,
2556 "failed to allocate the MSI interrupt %d\n", rc);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002557 return rc;
2558 }
2559
2560 rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
2561 pdev->msi_enabled ? 0 : IRQF_SHARED,
2562 skdev->isr_name, skdev);
2563 if (rc) {
2564 pci_free_irq_vectors(pdev);
Bart Van Asschef98806d2017-08-17 13:12:58 -07002565 dev_err(&skdev->pdev->dev, "failed to allocate interrupt %d\n",
2566 rc);
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002567 return rc;
2568 }
2569
2570 return 0;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002571}
2572
2573static void skd_release_irq(struct skd_device *skdev)
2574{
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002575 struct pci_dev *pdev = skdev->pdev;
2576
2577 if (skdev->msix_entries) {
2578 int i;
2579
2580 for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
2581 devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
2582 skdev);
2583 }
2584
2585 kfree(skdev->msix_entries);
2586 skdev->msix_entries = NULL;
2587 } else {
2588 devm_free_irq(&pdev->dev, pdev->irq, skdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002589 }
Christoph Hellwig180b0ae2016-11-07 11:14:07 -08002590
2591 pci_free_irq_vectors(pdev);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002592}
2593
2594/*
2595 *****************************************************************************
2596 * CONSTRUCT
2597 *****************************************************************************
2598 */
2599
Bart Van Asschea3db1022017-08-17 13:13:35 -07002600static void *skd_alloc_dma(struct skd_device *skdev, struct kmem_cache *s,
2601 dma_addr_t *dma_handle, gfp_t gfp,
2602 enum dma_data_direction dir)
2603{
2604 struct device *dev = &skdev->pdev->dev;
2605 void *buf;
2606
2607 buf = kmem_cache_alloc(s, gfp);
2608 if (!buf)
2609 return NULL;
2610 *dma_handle = dma_map_single(dev, buf, s->size, dir);
2611 if (dma_mapping_error(dev, *dma_handle)) {
2612		kmem_cache_free(s, buf);
2613 buf = NULL;
2614 }
2615 return buf;
2616}
2617
2618static void skd_free_dma(struct skd_device *skdev, struct kmem_cache *s,
2619 void *vaddr, dma_addr_t dma_handle,
2620 enum dma_data_direction dir)
2621{
2622 if (!vaddr)
2623 return;
2624
2625 dma_unmap_single(&skdev->pdev->dev, dma_handle, s->size, dir);
2626 kmem_cache_free(s, vaddr);
2627}
2628
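/*
 * The two helpers above are meant to be used as a pair; a minimal sketch
 * of a caller (hypothetical, reusing the existing databuf_cache), which
 * must unmap with the same direction it mapped with:
 */
#if 0	/* illustration only */
	dma_addr_t dma;
	void *buf = skd_alloc_dma(skdev, skdev->databuf_cache, &dma,
				  GFP_KERNEL, DMA_FROM_DEVICE);

	if (buf) {
		/* ... hand "dma" to the device, read the result via "buf" ... */
		skd_free_dma(skdev, skdev->databuf_cache, buf, dma,
			     DMA_FROM_DEVICE);
	}
#endif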
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002629static int skd_cons_skcomp(struct skd_device *skdev)
2630{
2631 int rc = 0;
2632 struct fit_completion_entry_v1 *skcomp;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002633
Bart Van Asschef98806d2017-08-17 13:12:58 -07002634 dev_dbg(&skdev->pdev->dev,
Bart Van Assche6f7c7672017-08-17 13:13:02 -07002635 "comp pci_alloc, total bytes %zd entries %d\n",
2636 SKD_SKCOMP_SIZE, SKD_N_COMPLETION_ENTRY);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002637
Bart Van Assche6f7c7672017-08-17 13:13:02 -07002638 skcomp = pci_zalloc_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
Joe Perchesa5bbf612014-08-08 14:24:12 -07002639 &skdev->cq_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002640
2641 if (skcomp == NULL) {
2642 rc = -ENOMEM;
2643 goto err_out;
2644 }
2645
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002646 skdev->skcomp_table = skcomp;
2647 skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
2648 sizeof(*skcomp) *
2649 SKD_N_COMPLETION_ENTRY);
2650
2651err_out:
2652 return rc;
2653}
2654
2655static int skd_cons_skmsg(struct skd_device *skdev)
2656{
2657 int rc = 0;
2658 u32 i;
2659
Bart Van Asschef98806d2017-08-17 13:12:58 -07002660 dev_dbg(&skdev->pdev->dev,
Bart Van Assche01433d02017-08-17 13:13:18 -07002661 "skmsg_table kcalloc, struct %lu, count %u total %lu\n",
Bart Van Asschef98806d2017-08-17 13:12:58 -07002662 sizeof(struct skd_fitmsg_context), skdev->num_fitmsg_context,
2663 sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002664
Bart Van Assche01433d02017-08-17 13:13:18 -07002665 skdev->skmsg_table = kcalloc(skdev->num_fitmsg_context,
2666 sizeof(struct skd_fitmsg_context),
2667 GFP_KERNEL);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002668 if (skdev->skmsg_table == NULL) {
2669 rc = -ENOMEM;
2670 goto err_out;
2671 }
2672
2673 for (i = 0; i < skdev->num_fitmsg_context; i++) {
2674 struct skd_fitmsg_context *skmsg;
2675
2676 skmsg = &skdev->skmsg_table[i];
2677
2678 skmsg->id = i + SKD_ID_FIT_MSG;
2679
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002680 skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
Bart Van Assche6507f432017-08-17 13:13:06 -07002681 SKD_N_FITMSG_BYTES,
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002682 &skmsg->mb_dma_address);
2683
2684 if (skmsg->msg_buf == NULL) {
2685 rc = -ENOMEM;
2686 goto err_out;
2687 }
2688
Bart Van Assche6507f432017-08-17 13:13:06 -07002689 WARN(((uintptr_t)skmsg->msg_buf | skmsg->mb_dma_address) &
2690 (FIT_QCMD_ALIGN - 1),
2691 "not aligned: msg_buf %p mb_dma_address %#llx\n",
2692 skmsg->msg_buf, skmsg->mb_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002693 memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002694 }
2695
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002696err_out:
2697 return rc;
2698}
2699
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01002700static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
2701 u32 n_sg,
2702 dma_addr_t *ret_dma_addr)
2703{
2704 struct fit_sg_descriptor *sg_list;
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01002705
Bart Van Asschea3db1022017-08-17 13:13:35 -07002706 sg_list = skd_alloc_dma(skdev, skdev->sglist_cache, ret_dma_addr,
2707 GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01002708
2709 if (sg_list != NULL) {
2710 uint64_t dma_address = *ret_dma_addr;
2711 u32 i;
2712
Bartlomiej Zolnierkiewicz542d7b02013-11-05 12:37:08 +01002713 for (i = 0; i < n_sg - 1; i++) {
2714 uint64_t ndp_off;
2715 ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
2716
2717 sg_list[i].next_desc_ptr = dma_address + ndp_off;
2718 }
2719 sg_list[i].next_desc_ptr = 0LL;
2720 }
2721
2722 return sg_list;
2723}
2724
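/*
 * The loop above links the descriptors into one device-visible chain; for
 * n_sg == 3 the layout comes out as (addresses relative to *ret_dma_addr):
 *
 *	sg_list[0].next_desc_ptr = dma + 1 * sizeof(struct fit_sg_descriptor)
 *	sg_list[1].next_desc_ptr = dma + 2 * sizeof(struct fit_sg_descriptor)
 *	sg_list[2].next_desc_ptr = 0	(end of chain)
 */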
Bart Van Assche5d003242017-08-17 13:13:30 -07002725static void skd_free_sg_list(struct skd_device *skdev,
Bart Van Asschea3db1022017-08-17 13:13:35 -07002726 struct fit_sg_descriptor *sg_list,
Bart Van Assche5d003242017-08-17 13:13:30 -07002727 dma_addr_t dma_addr)
2728{
Bart Van Assche5d003242017-08-17 13:13:30 -07002729 if (WARN_ON_ONCE(!sg_list))
2730 return;
2731
Bart Van Asschea3db1022017-08-17 13:13:35 -07002732 skd_free_dma(skdev, skdev->sglist_cache, sg_list, dma_addr,
2733 DMA_TO_DEVICE);
Bart Van Assche5d003242017-08-17 13:13:30 -07002734}
2735
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002736static int skd_init_request(struct blk_mq_tag_set *set, struct request *rq,
2737 unsigned int hctx_idx, unsigned int numa_node)
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002738{
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002739 struct skd_device *skdev = set->driver_data;
Bart Van Asschee7278a82017-08-17 13:13:31 -07002740 struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002741
Bart Van Asschee7278a82017-08-17 13:13:31 -07002742 skreq->state = SKD_REQ_STATE_IDLE;
2743 skreq->sg = (void *)(skreq + 1);
2744 sg_init_table(skreq->sg, skd_sgs_per_request);
2745 skreq->sksg_list = skd_cons_sg_list(skdev, skd_sgs_per_request,
2746 &skreq->sksg_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002747
Bart Van Asschee7278a82017-08-17 13:13:31 -07002748 return skreq->sksg_list ? 0 : -ENOMEM;
2749}
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002750
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002751static void skd_exit_request(struct blk_mq_tag_set *set, struct request *rq,
2752 unsigned int hctx_idx)
Bart Van Asschee7278a82017-08-17 13:13:31 -07002753{
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002754 struct skd_device *skdev = set->driver_data;
Bart Van Asschee7278a82017-08-17 13:13:31 -07002755 struct skd_request_context *skreq = blk_mq_rq_to_pdu(rq);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002756
Bart Van Asschea3db1022017-08-17 13:13:35 -07002757 skd_free_sg_list(skdev, skreq->sksg_list, skreq->sksg_dma_address);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002758}
2759
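/*
 * Per-request memory layout assumed by skd_init_request() above: the tag
 * set's cmd_size (set up in skd_cons_disk() below) reserves the request
 * context plus sgs_per_request scatterlist entries in one allocation,
 * which is why skreq->sg can point at (skreq + 1) with no extra kmalloc:
 *
 *	| struct request | skd_request_context | scatterlist[sgs_per_request] |
 *	                 ^ blk_mq_rq_to_pdu(rq) ^ (skreq + 1)
 */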
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002760static int skd_cons_sksb(struct skd_device *skdev)
2761{
2762 int rc = 0;
2763 struct skd_special_context *skspcl;
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002764
2765 skspcl = &skdev->internal_skspcl;
2766
2767 skspcl->req.id = 0 + SKD_ID_INTERNAL;
2768 skspcl->req.state = SKD_REQ_STATE_IDLE;
2769
Bart Van Asschea3db1022017-08-17 13:13:35 -07002770 skspcl->data_buf = skd_alloc_dma(skdev, skdev->databuf_cache,
2771 &skspcl->db_dma_address,
2772 GFP_DMA | __GFP_ZERO,
2773 DMA_BIDIRECTIONAL);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002774 if (skspcl->data_buf == NULL) {
2775 rc = -ENOMEM;
2776 goto err_out;
2777 }
2778
Bart Van Asschea3db1022017-08-17 13:13:35 -07002779 skspcl->msg_buf = skd_alloc_dma(skdev, skdev->msgbuf_cache,
2780 &skspcl->mb_dma_address,
2781 GFP_DMA | __GFP_ZERO, DMA_TO_DEVICE);
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002782 if (skspcl->msg_buf == NULL) {
2783 rc = -ENOMEM;
2784 goto err_out;
2785 }
2786
Akhil Bhansalie67f86b2013-10-15 14:19:07 -06002787 skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
2788 &skspcl->req.sksg_dma_address);
2789 if (skspcl->req.sksg_list == NULL) {
2790 rc = -ENOMEM;
2791 goto err_out;
2792 }
2793
2794 if (!skd_format_internal_skspcl(skdev)) {
2795 rc = -EINVAL;
2796 goto err_out;
2797 }
2798
2799err_out:
2800 return rc;
2801}
2802
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002803static const struct blk_mq_ops skd_mq_ops = {
2804 .queue_rq = skd_mq_queue_rq,
Bart Van Assche296cb942017-08-25 14:24:11 -07002805 .complete = skd_complete_rq,
Bart Van Asschef2fe4452017-08-23 10:56:32 -07002806 .timeout = skd_timed_out,
Bart Van Asscheca33dd92017-08-17 13:13:32 -07002807 .init_request = skd_init_request,
2808 .exit_request = skd_exit_request,
2809};
2810
static int skd_cons_disk(struct skd_device *skdev)
{
	int rc = 0;
	struct gendisk *disk;
	struct request_queue *q;
	unsigned long flags;

	disk = alloc_disk(SKD_MINORS_PER_DEVICE);
	if (!disk) {
		rc = -ENOMEM;
		goto err_out;
	}

	skdev->disk = disk;
	sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);

	disk->major = skdev->major;
	disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
	disk->fops = &skd_blockdev_ops;
	disk->private_data = skdev;

	memset(&skdev->tag_set, 0, sizeof(skdev->tag_set));
	skdev->tag_set.ops = &skd_mq_ops;
	skdev->tag_set.nr_hw_queues = 1;
	skdev->tag_set.queue_depth = skd_max_queue_depth;
	skdev->tag_set.cmd_size = sizeof(struct skd_request_context) +
		skdev->sgs_per_request * sizeof(struct scatterlist);
	skdev->tag_set.numa_node = NUMA_NO_NODE;
	skdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE |
		BLK_MQ_F_SG_MERGE |
		BLK_ALLOC_POLICY_TO_MQ_FLAG(BLK_TAG_ALLOC_FIFO);
	skdev->tag_set.driver_data = skdev;
	rc = blk_mq_alloc_tag_set(&skdev->tag_set);
	if (rc)
		goto err_out;
	q = blk_mq_init_queue(&skdev->tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&skdev->tag_set);
		rc = PTR_ERR(q);
		goto err_out;
	}
	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
	q->queuedata = skdev;
	q->nr_requests = skd_max_queue_depth / 2;

	skdev->queue = q;
	disk->queue = q;

	blk_queue_write_cache(q, true, true);
	blk_queue_max_segments(q, skdev->sgs_per_request);
	blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);

	/* set optimal I/O size to 8KB */
	blk_queue_io_opt(q, 8192);

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);

	blk_queue_rq_timeout(q, 8 * HZ);

	spin_lock_irqsave(&skdev->lock, flags);
	dev_dbg(&skdev->pdev->dev, "stopping queue\n");
	blk_mq_stop_hw_queues(skdev->queue);
	spin_unlock_irqrestore(&skdev->lock, flags);

err_out:
	return rc;
}

#define SKD_N_DEV_TABLE		16u
static u32 skd_next_devno;

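/*
 * Construct a skd_device: slab caches for message, sglist and data
 * buffers, then the completion queue, FIT message contexts, the
 * internal special context, and finally the disk. On any failure,
 * skd_destruct() unwinds whatever had already been set up.
 */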
static struct skd_device *skd_construct(struct pci_dev *pdev)
{
	struct skd_device *skdev;
	int blk_major = skd_major;
	size_t size;
	int rc;

	skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);

	if (!skdev) {
		dev_err(&pdev->dev, "memory alloc failure\n");
		return NULL;
	}

	skdev->state = SKD_DRVR_STATE_LOAD;
	skdev->pdev = pdev;
	skdev->devno = skd_next_devno++;
	skdev->major = blk_major;
	skdev->dev_max_queue_depth = 0;

	skdev->num_req_context = skd_max_queue_depth;
	skdev->num_fitmsg_context = skd_max_queue_depth;
	skdev->cur_max_queue_depth = 1;
	skdev->queue_low_water_mark = 1;
	skdev->proto_ver = 99;
	skdev->sgs_per_request = skd_sgs_per_request;
	skdev->dbg_level = skd_dbg_level;

	spin_lock_init(&skdev->lock);

	INIT_WORK(&skdev->start_queue, skd_start_queue);
	INIT_WORK(&skdev->completion_worker, skd_completion_worker);

	size = max(SKD_N_FITMSG_BYTES, SKD_N_SPECIAL_FITMSG_BYTES);
	skdev->msgbuf_cache = kmem_cache_create("skd-msgbuf", size, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->msgbuf_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->msgbuf_cache) < size,
		  "skd-msgbuf: %d < %zd\n",
		  kmem_cache_size(skdev->msgbuf_cache), size);
	size = skd_sgs_per_request * sizeof(struct fit_sg_descriptor);
	skdev->sglist_cache = kmem_cache_create("skd-sglist", size, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->sglist_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->sglist_cache) < size,
		  "skd-sglist: %d < %zd\n",
		  kmem_cache_size(skdev->sglist_cache), size);
	size = SKD_N_INTERNAL_BYTES;
	skdev->databuf_cache = kmem_cache_create("skd-databuf", size, 0,
						 SLAB_HWCACHE_ALIGN, NULL);
	if (!skdev->databuf_cache)
		goto err_out;
	WARN_ONCE(kmem_cache_size(skdev->databuf_cache) < size,
		  "skd-databuf: %d < %zd\n",
		  kmem_cache_size(skdev->databuf_cache), size);

	dev_dbg(&skdev->pdev->dev, "skcomp\n");
	rc = skd_cons_skcomp(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "skmsg\n");
	rc = skd_cons_skmsg(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "sksb\n");
	rc = skd_cons_sksb(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "disk\n");
	rc = skd_cons_disk(skdev);
	if (rc < 0)
		goto err_out;

	dev_dbg(&skdev->pdev->dev, "VICTORY\n");
	return skdev;

err_out:
	dev_dbg(&skdev->pdev->dev, "construct failed\n");
	skd_destruct(skdev);
	return NULL;
}

/*
 *****************************************************************************
 * DESTRUCT (FREE)
 *****************************************************************************
 */

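/*
 * Teardown mirrors construction in reverse. Because skd_destruct() also
 * serves as the error path of skd_construct(), each helper below has to
 * cope with a partially constructed device.
 */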
static void skd_free_skcomp(struct skd_device *skdev)
{
	if (skdev->skcomp_table)
		pci_free_consistent(skdev->pdev, SKD_SKCOMP_SIZE,
				    skdev->skcomp_table, skdev->cq_dma_address);

	skdev->skcomp_table = NULL;
	skdev->cq_dma_address = 0;
}

static void skd_free_skmsg(struct skd_device *skdev)
{
	u32 i;

	if (skdev->skmsg_table == NULL)
		return;

	for (i = 0; i < skdev->num_fitmsg_context; i++) {
		struct skd_fitmsg_context *skmsg;

		skmsg = &skdev->skmsg_table[i];

		if (skmsg->msg_buf != NULL) {
			pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
					    skmsg->msg_buf,
					    skmsg->mb_dma_address);
		}
		skmsg->msg_buf = NULL;
		skmsg->mb_dma_address = 0;
	}

	kfree(skdev->skmsg_table);
	skdev->skmsg_table = NULL;
}

static void skd_free_sksb(struct skd_device *skdev)
{
	struct skd_special_context *skspcl = &skdev->internal_skspcl;

	skd_free_dma(skdev, skdev->databuf_cache, skspcl->data_buf,
		     skspcl->db_dma_address, DMA_BIDIRECTIONAL);

	skspcl->data_buf = NULL;
	skspcl->db_dma_address = 0;

	skd_free_dma(skdev, skdev->msgbuf_cache, skspcl->msg_buf,
		     skspcl->mb_dma_address, DMA_TO_DEVICE);

	skspcl->msg_buf = NULL;
	skspcl->mb_dma_address = 0;

	skd_free_sg_list(skdev, skspcl->req.sksg_list,
			 skspcl->req.sksg_dma_address);

	skspcl->req.sksg_list = NULL;
	skspcl->req.sksg_dma_address = 0;
}

static void skd_free_disk(struct skd_device *skdev)
{
	struct gendisk *disk = skdev->disk;

	if (disk && (disk->flags & GENHD_FL_UP))
		del_gendisk(disk);

	if (skdev->queue) {
		blk_cleanup_queue(skdev->queue);
		skdev->queue = NULL;
		disk->queue = NULL;
	}

	if (skdev->tag_set.tags)
		blk_mq_free_tag_set(&skdev->tag_set);

	put_disk(disk);
	skdev->disk = NULL;
}

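/*
 * Full teardown, in the reverse order of skd_construct(): cancel the
 * queue-start work first so nothing restarts I/O mid-teardown, free the
 * disk and the DMA resources, then destroy the slab caches and the
 * device structure itself.
 */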
static void skd_destruct(struct skd_device *skdev)
{
	if (skdev == NULL)
		return;

	cancel_work_sync(&skdev->start_queue);

	dev_dbg(&skdev->pdev->dev, "disk\n");
	skd_free_disk(skdev);

	dev_dbg(&skdev->pdev->dev, "sksb\n");
	skd_free_sksb(skdev);

	dev_dbg(&skdev->pdev->dev, "skmsg\n");
	skd_free_skmsg(skdev);

	dev_dbg(&skdev->pdev->dev, "skcomp\n");
	skd_free_skcomp(skdev);

	kmem_cache_destroy(skdev->databuf_cache);
	kmem_cache_destroy(skdev->sglist_cache);
	kmem_cache_destroy(skdev->msgbuf_cache);

	dev_dbg(&skdev->pdev->dev, "skdev\n");
	kfree(skdev);
}

/*
 *****************************************************************************
 * BLOCK DEVICE (BDEV) GLUE
 *****************************************************************************
 */

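/*
 * Report a synthetic CHS geometry (64 heads, 255 sectors per track)
 * derived from the real capacity; legacy partitioning tools still
 * request this via the HDIO_GETGEO ioctl.
 */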
static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct skd_device *skdev;
	u64 capacity;

	skdev = bdev->bd_disk->private_data;

	dev_dbg(&skdev->pdev->dev, "%s: CMD[%s] getgeo device\n",
		bdev->bd_disk->disk_name, current->comm);

	if (skdev->read_cap_is_valid) {
		capacity = get_capacity(skdev->disk);
		geo->heads = 64;
		geo->sectors = 255;
		geo->cylinders = capacity / (255 * 64);

		return 0;
	}
	return -EIO;
}

static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
{
	dev_dbg(&skdev->pdev->dev, "add_disk\n");
	device_add_disk(parent, skdev->disk);
	return 0;
}

static const struct block_device_operations skd_blockdev_ops = {
	.owner		= THIS_MODULE,
	.getgeo		= skd_bdev_getgeo,
};

/*
 *****************************************************************************
 * PCIe DRIVER GLUE
 *****************************************************************************
 */

static const struct pci_device_id skd_pci_tbl[] = {
	{ PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
	{ 0 }			/* terminate list */
};

MODULE_DEVICE_TABLE(pci, skd_pci_tbl);

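/*
 * Format a human-readable description of the PCIe link. The hard-coded
 * 0x12 offset into the PCI Express capability is the Link Status
 * register (PCI_EXP_LNKSTA); pcie_capability_read_word() would be the
 * more idiomatic way to read it.
 */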
static char *skd_pci_info(struct skd_device *skdev, char *str)
{
	int pcie_reg;

	strcpy(str, "PCIe (");
	pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);

	if (pcie_reg) {
		char lwstr[6];
		uint16_t pcie_lstat, lspeed, lwidth;

		pcie_reg += 0x12;
		pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
		lspeed = pcie_lstat & (0xF);
		lwidth = (pcie_lstat & 0x3F0) >> 4;

		if (lspeed == 1)
			strcat(str, "2.5GT/s ");
		else if (lspeed == 2)
			strcat(str, "5.0GT/s ");
		else
			strcat(str, "<unknown> ");
		snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
		strcat(str, lwstr);
	}
	return str;
}

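/*
 * Bring-up sequence: enable the PCI device, configure 64-bit (or fall
 * back to 32-bit) DMA masks, construct the skd_device, map the BARs,
 * hook up interrupts and start the device. The gendisk is only attached
 * once the firmware reports the drive on-line; otherwise the probe
 * fails with -ENXIO.
 */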
static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i;
	int rc = 0;
	char pci_str[32];
	struct skd_device *skdev;

	dev_dbg(&pdev->dev, "vendor=%04X device=%04x\n", pdev->vendor,
		pdev->device);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			dev_err(&pdev->dev, "consistent DMA mask error\n");
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	if (!skd_major) {
		rc = register_blkdev(0, DRV_NAME);
		if (rc < 0)
			goto err_out_regions;
		BUG_ON(!rc);
		skd_major = rc;
	}

	skdev = skd_construct(pdev);
	if (skdev == NULL) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	skd_pci_info(skdev, pci_str);
	dev_info(&pdev->dev, "%s 64bit\n", pci_str);

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	pci_set_drvdata(pdev, skdev);

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev,
				"Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	rc = wait_event_interruptible_timeout(skdev->waitq,
					      (skdev->gendisk_on),
					      (SKD_START_WAIT_SECONDS * HZ));
	if (skdev->gendisk_on > 0) {
		/* device came on-line after reset */
		skd_bdev_attach(&pdev->dev, skdev);
		rc = 0;
	} else {
		/*
		 * We timed out; something is wrong with the device.
		 * Don't add the disk structure.
		 */
		dev_err(&pdev->dev, "error: waiting for s1120 timed out %d!\n",
			rc);
		/* if no other error was recorded, report the timeout as ENXIO */
		if (!rc)
			rc = -ENXIO;
		goto err_out_timer;
	}

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return rc;
}

static void skd_pci_remove(struct pci_dev *pdev)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}
	skd_stop_device(skdev);
	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	skd_destruct(skdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
	int i;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	skd_stop_device(skdev);

	skd_release_irq(skdev);

	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

	pci_release_regions(pdev);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));
	return 0;
}

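/*
 * Legacy PCI power-management callback. Resume replays most of the
 * probe sequence (DMA masks, BAR mapping, IRQ setup) against the
 * already constructed skd_device, then restarts the device.
 */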
static int skd_pci_resume(struct pci_dev *pdev)
{
	int i;
	int rc = 0;
	struct skd_device *skdev;

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return -EIO;
	}

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_out;
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!rc) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
			dev_err(&pdev->dev, "consistent DMA mask error\n");
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pdev->dev, "DMA mask error %d\n", rc);
			goto err_out_regions;
		}
	}

	pci_set_master(pdev);
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"bad enable of PCIe error reporting rc=%d\n", rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			dev_err(&pdev->dev, "Unable to map adapter memory!\n");
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		dev_dbg(&pdev->dev, "mem_map=%p, phyd=%016llx, size=%d\n",
			skdev->mem_map[i], (uint64_t)skdev->mem_phys[i],
			skdev->mem_size[i]);
	}
	rc = skd_acquire_irq(skdev);
	if (rc) {
		dev_err(&pdev->dev, "interrupt resource error %d\n", rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}

static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	dev_err(&pdev->dev, "%s called\n", __func__);

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		dev_err(&pdev->dev, "no device data for PCI\n");
		return;
	}

	dev_err(&pdev->dev, "calling stop\n");
	skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */

const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	default:
		return "???";
	}
}

static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	dev_dbg(&skdev->pdev->dev, "skdev=%p event='%s'\n", skdev, event);
	dev_dbg(&skdev->pdev->dev, "  drive_state=%s(%d) driver_state=%s(%d)\n",
		skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		skd_skdev_state_to_str(skdev->state), skdev->state);
	dev_dbg(&skdev->pdev->dev, "  busy=%d limit=%d dev=%d lowat=%d\n",
		skd_in_flight(skdev), skdev->cur_max_queue_depth,
		skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	dev_dbg(&skdev->pdev->dev, "  cycle=%d cycle_ix=%d\n",
		skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	struct request *req = blk_mq_rq_from_pdu(skreq);
	u32 lba = blk_rq_pos(req);
	u32 count = blk_rq_sectors(req);

	dev_dbg(&skdev->pdev->dev, "skreq=%p event='%s'\n", skreq, event);
	dev_dbg(&skdev->pdev->dev, "  state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		skd_skreq_state_to_str(skreq->state), skreq->state, skreq->id,
		skreq->fitmsg_id);
	dev_dbg(&skdev->pdev->dev, "  sg_dir=%d n_sg=%d\n",
		skreq->data_dir, skreq->n_sg);

	dev_dbg(&skdev->pdev->dev,
		"req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n", req, lba, lba,
		count, count, (int)rq_data_dir(req));
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */

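/*
 * The BUILD_BUG_ON() checks pin down the layout of the FIT protocol
 * structures shared with the firmware, so an accidental size or offset
 * change fails the build instead of silently corrupting the device
 * interface. Out-of-range module parameters are then reset to their
 * defaults before the PCI driver is registered.
 */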
static int __init skd_init(void)
{
	BUILD_BUG_ON(sizeof(struct fit_completion_entry_v1) != 8);
	BUILD_BUG_ON(sizeof(struct fit_comp_error_info) != 32);
	BUILD_BUG_ON(sizeof(struct skd_command_header) != 16);
	BUILD_BUG_ON(sizeof(struct skd_scsi_request) != 32);
	BUILD_BUG_ON(sizeof(struct driver_inquiry_data) != 44);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, fmh) != 0);
	BUILD_BUG_ON(offsetof(struct skd_msg_buf, scsi) != 64);
	BUILD_BUG_ON(sizeof(struct skd_msg_buf) != SKD_N_FITMSG_BYTES);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 ||
	    skd_max_req_per_msg > SKD_MAX_REQ_PER_MSG) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	return pci_register_driver(&skd_driver);
}

static void __exit skd_exit(void)
{
	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);