/*
 *    Disk Array driver for HP Smart Array SAS controllers
 *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    You should have received a copy of the GNU General Public License
 *    along with this program; if not, write to the Free Software
 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.0-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

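/*
 * Usage note (illustrative): both parameters can be set at load time,
 * e.g. "modprobe hpsa hpsa_simple_mode=1 hpsa_allow_any=1", and since
 * they are declared S_IRUGO|S_IWUSR they also appear under
 * /sys/module/hpsa/parameters/.  They are consulted during controller
 * initialization, so runtime changes typically affect only controllers
 * probed afterwards.
 */
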
/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type);

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

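/*
 * The per-host private area handed out by the SCSI midlayer holds a
 * single pointer back to our ctlr_info, so the two helpers below are
 * just one level of indirection through shost_priv().
 */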
static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

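/*
 * Check the command's sense data for a unit attention condition and log
 * the specific cause (byte 12 of the sense data) when one is found.
 * Returns 1 if a unit attention was detected, 0 otherwise.
 */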
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
			"cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* Smart Array P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

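/*
 * The top two bits of byte 3 of the 8-byte CISS LUN address encode the
 * address mode; the check below treats mode 01b (0x40) as a logical
 * volume address, distinguishing logical drives from physical devices.
 */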
static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = HPSA,
	.proc_name = HPSA,
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_abort_handler = hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

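/*
 * next_command() pulls the next completed command tag off a reply queue.
 * Each reply queue is a ring buffer; the controller flips the low
 * "wraparound" bit of every entry on each pass around the ring, which is
 * how the consumer below tells fresh entries from stale ones without a
 * separate index register.
 */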
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */

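/*
 * Worked example (illustrative, not from the original source): a normal
 * performant-mode command whose SG count maps to block fetch table
 * entry 3 gets busaddr |= 1 | (3 << 1), i.e. low bits 0000111b --
 * performant bit set, fetch entry 3, command type 0.
 */
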
/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 * - performant mode bit (bit 0)
	 * - pull count (bits 1-3)
	 * - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
	struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 * - performant mode bit not used in ioaccel mode 2
	 * - pull count (bits 0-3)
	 * - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
	struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
	struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

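/*
 * Common submission path: stamp the command's tag for whichever
 * transport it uses (ioaccel1, ioaccel2, or plain performant mode),
 * relax the lockup detector if this is a firmware flash, then queue the
 * command and kick start_io().
 */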
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device,
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit number, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
			return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
	struct hpsa_scsi_dev_t *sd[], int nsds)
{
	/* sd contains scsi3 addresses and devtypes, and inquiry
	 * data.  This function takes what's in sd to be the current
	 * reality and updates h->dev[] to reflect that reality.
	 */
	int i, entry, device_change, changes = 0;
	struct hpsa_scsi_dev_t *csd;
	unsigned long flags;
	struct hpsa_scsi_dev_t **added, **removed;
	int nadded, nremoved;
	struct Scsi_Host *sh = NULL;

	added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
	removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);

	if (!added || !removed) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"adjust_hpsa_scsi_table\n");
		goto free_and_out;
	}

	spin_lock_irqsave(&h->devlock, flags);

	/* find any devices in h->dev[] that are not in
	 * sd[] and remove them from h->dev[], and for any
	 * devices which have changed, remove the old device
	 * info and add the new device info.
	 * If minor device attributes change, just update
	 * the existing device structure.
	 */
	i = 0;
	nremoved = 0;
	nadded = 0;
	while (i < h->ndevices) {
		csd = h->dev[i];
		device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			hpsa_scsi_remove_entry(h, hostno, i,
				removed, &nremoved);
			continue; /* remove ^^^, hence i not incremented */
		} else if (device_change == DEVICE_CHANGED) {
			changes++;
			hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
				added, &nadded, removed, &nremoved);
			/* Set it to NULL to prevent it from being freed
			 * at the bottom of hpsa_update_scsi_devices()
			 */
			sd[entry] = NULL;
		} else if (device_change == DEVICE_UPDATED) {
			hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
		}
		i++;
	}

	/* Now, make sure every device listed in sd[] is also
	 * listed in h->dev[], adding them if they aren't found
	 */

	for (i = 0; i < nsds; i++) {
		if (!sd[i]) /* if already added above. */
			continue;
		device_change = hpsa_scsi_find_entry(sd[i], h->dev,
					h->ndevices, &entry);
		if (device_change == DEVICE_NOT_FOUND) {
			changes++;
			if (hpsa_scsi_add_entry(h, hostno, sd[i],
				added, &nadded) != 0)
				break;
			sd[i] = NULL; /* prevent from being freed later. */
		} else if (device_change == DEVICE_CHANGED) {
			/* should never happen... */
			changes++;
			dev_warn(&h->pdev->dev,
				"device unexpectedly changed.\n");
			/* but if it does happen, we just ignore that device */
		}
	}
	spin_unlock_irqrestore(&h->devlock, flags);

	/* Don't notify scsi mid layer of any changes the first time through
	 * (or if there are no changes) scsi_scan_host will do it later the
	 * first time through.
	 */
	if (hostno == -1 || !changes)
		goto free_and_out;

	sh = h->scsi_host;
	/* Notify scsi mid layer of any removed devices */
	for (i = 0; i < nremoved; i++) {
		struct scsi_device *sdev =
			scsi_device_lookup(sh, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		if (sdev != NULL) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else {
			/* We don't expect to get here.
			 * future cmds to this device will get selection
			 * timeout as if the device was gone.
			 */
			dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
				" for removal.", hostno, removed[i]->bus,
				removed[i]->target, removed[i]->lun);
		}
		kfree(removed[i]);
		removed[i] = NULL;
	}

	/* Notify scsi mid layer of any added devices */
	for (i = 0; i < nadded; i++) {
		if (scsi_add_device(sh, added[i]->bus,
			added[i]->target, added[i]->lun) == 0)
			continue;
		dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
			"device not added.\n", hostno, added[i]->bus,
			added[i]->target, added[i]->lun);
		/* now we have to remove it from h->dev,
		 * since it didn't get added to scsi mid layer
		 */
		fixup_botched_add(h, added[i]);
	}

free_and_out:
	kfree(added);
	kfree(removed);
}

/*
 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
 * Assumes h->devlock is held.
 */
static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
	int bus, int target, int lun)
{
	int i;
	struct hpsa_scsi_dev_t *sd;

	for (i = 0; i < h->ndevices; i++) {
		sd = h->dev[i];
		if (sd->bus == bus && sd->target == target && sd->lun == lun)
			return sd;
	}
	return NULL;
}

/* link sdev->hostdata to our per-device structure. */
static int hpsa_slave_alloc(struct scsi_device *sdev)
{
	struct hpsa_scsi_dev_t *sd;
	unsigned long flags;
	struct ctlr_info *h;

	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->devlock, flags);
	sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
		sdev_id(sdev), sdev->lun);
	if (sd != NULL)
		sdev->hostdata = sd;
	spin_unlock_irqrestore(&h->devlock, flags);
	return 0;
}

static void hpsa_slave_destroy(struct scsi_device *sdev)
{
	/* nothing to do. */
}

static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (!h->cmd_sg_list)
		return;
	for (i = 0; i < h->nr_cmds; i++) {
		kfree(h->cmd_sg_list[i]);
		h->cmd_sg_list[i] = NULL;
	}
	kfree(h->cmd_sg_list);
	h->cmd_sg_list = NULL;
}

static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
{
	int i;

	if (h->chainsize <= 0)
		return 0;

	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
				GFP_KERNEL);
	if (!h->cmd_sg_list)
		return -ENOMEM;
	for (i = 0; i < h->nr_cmds; i++) {
		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
						h->chainsize, GFP_KERNEL);
		if (!h->cmd_sg_list[i])
			goto clean;
	}
	return 0;

clean:
	hpsa_free_sg_chain_blocks(h);
	return -ENOMEM;
}

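/*
 * SG chaining: when a request needs more scatter-gather entries than fit
 * in the command itself, the last embedded SG descriptor (flagged with
 * HPSA_SG_CHAIN) is repointed at one of the preallocated chain blocks
 * above, holding the remaining entries.  That block must be DMA-mapped
 * before submission and unmapped on completion.
 */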
static int hpsa_map_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg, *chain_block;
	u64 temp64;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	chain_block = h->cmd_sg_list[c->cmdindex];
	chain_sg->Ext = HPSA_SG_CHAIN;
	chain_sg->Len = sizeof(*chain_sg) *
		(c->Header.SGTotal - h->max_cmd_sg_entries);
	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
				PCI_DMA_TODEVICE);
	if (dma_mapping_error(&h->pdev->dev, temp64)) {
		/* prevent subsequent unmapping */
		chain_sg->Addr.lower = 0;
		chain_sg->Addr.upper = 0;
		return -1;
	}
	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
	return 0;
}

static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
	struct CommandList *c)
{
	struct SGDescriptor *chain_sg;
	union u64bit temp64;

	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
		return;

	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
	temp64.val32.lower = chain_sg->Addr.lower;
	temp64.val32.upper = chain_sg->Addr.upper;
	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
}

static void handle_ioaccel_mode2_error(struct ctlr_info *h,
	struct CommandList *c,
	struct scsi_cmnd *cmd,
	struct io_accel2_cmd *c2)
{
	int data_len;

	switch (c2->error_data.serv_response) {
	case IOACCEL2_SERV_RESPONSE_COMPLETE:
		switch (c2->error_data.status) {
		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
			dev_warn(&h->pdev->dev,
				"%s: task complete with check condition.\n",
				"HP SSD Smart Path");
			if (c2->error_data.data_present !=
					IOACCEL2_SENSE_DATA_PRESENT)
				break;
			/* copy the sense data */
			data_len = c2->error_data.sense_data_len;
			if (data_len > SCSI_SENSE_BUFFERSIZE)
				data_len = SCSI_SENSE_BUFFERSIZE;
			if (data_len > sizeof(c2->error_data.sense_data_buff))
				data_len =
					sizeof(c2->error_data.sense_data_buff);
			memcpy(cmd->sense_buffer,
				c2->error_data.sense_data_buff, data_len);
			cmd->result |= SAM_STAT_CHECK_CONDITION;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
			dev_warn(&h->pdev->dev,
				"%s: task complete with BUSY status.\n",
				"HP SSD Smart Path");
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
			dev_warn(&h->pdev->dev,
				"%s: task complete with reservation conflict.\n",
				"HP SSD Smart Path");
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
			/* Make scsi midlayer do unlimited retries */
			cmd->result = DID_IMM_RETRY << 16;
			break;
		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
			dev_warn(&h->pdev->dev,
				"%s: task complete with aborted status.\n",
				"HP SSD Smart Path");
			break;
		default:
			dev_warn(&h->pdev->dev,
				"%s: task complete with unrecognized status: 0x%02x\n",
				"HP SSD Smart Path", c2->error_data.status);
			break;
		}
		break;
	case IOACCEL2_SERV_RESPONSE_FAILURE:
		/* don't expect to get here. */
		dev_warn(&h->pdev->dev,
			"unexpected delivery or target failure, status = 0x%02x\n",
			c2->error_data.status);
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
		dev_warn(&h->pdev->dev, "task management function rejected.\n");
		break;
	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
		dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
		break;
	default:
		dev_warn(&h->pdev->dev,
			"%s: Unrecognized server response: 0x%02x\n",
			"HP SSD Smart Path", c2->error_data.serv_response);
		break;
	}
}

1387static void process_ioaccel2_completion(struct ctlr_info *h,
1388 struct CommandList *c, struct scsi_cmnd *cmd,
1389 struct hpsa_scsi_dev_t *dev)
1390{
1391 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
1392
1393 /* check for good status */
1394 if (likely(c2->error_data.serv_response == 0 &&
1395 c2->error_data.status == 0)) {
1396 cmd_free(h, c);
1397 cmd->scsi_done(cmd);
1398 return;
1399 }
1400
1401 /* Any RAID offload error results in retry which will use
1402 * the normal I/O path so the controller can handle whatever's
1403 * wrong.
1404 */
1405 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1406 c2->error_data.serv_response ==
1407 IOACCEL2_SERV_RESPONSE_FAILURE) {
1408 if (c2->error_data.status !=
1409 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
1410 dev_warn(&h->pdev->dev,
1411 "%s: Error 0x%02x, Retrying on standard path.\n",
1412 "HP SSD Smart Path", c2->error_data.status);
1413 dev->offload_enabled = 0;
1414 cmd->result = DID_SOFT_ERROR << 16;
1415 cmd_free(h, c);
1416 cmd->scsi_done(cmd);
1417 return;
1418 }
1419 handle_ioaccel_mode2_error(h, c, cmd, c2);
1420 cmd_free(h, c);
1421 cmd->scsi_done(cmd);
1422}
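
/*
 * Note the recovery policy above: any ioaccel2 failure on a logical
 * volume turns off offload for that device (dev->offload_enabled = 0)
 * and returns DID_SOFT_ERROR, so the midlayer retries the I/O and the
 * retry is queued down the normal RAID path instead.
 */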
1423
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05001424static void complete_scsi_command(struct CommandList *cp)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001425{
1426 struct scsi_cmnd *cmd;
1427 struct ctlr_info *h;
1428 struct ErrorInfo *ei;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001429 struct hpsa_scsi_dev_t *dev;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001430
1431 unsigned char sense_key;
1432 unsigned char asc; /* additional sense code */
1433 unsigned char ascq; /* additional sense code qualifier */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05001434 unsigned long sense_data_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001435
1436 ei = cp->err_info;
1437 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1438 h = cp->h;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001439 dev = cmd->device->hostdata;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001440
1441 scsi_dma_unmap(cmd); /* undo the DMA mappings */
Matt Gatese1f7de02014-02-18 13:55:17 -06001442 if ((cp->cmd_type == CMD_SCSI) &&
1443 (cp->Header.SGTotal > h->max_cmd_sg_entries))
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001444 hpsa_unmap_sg_chain_block(h, cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001445
1446 cmd->result = (DID_OK << 16); /* host byte */
1447 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
Scott Teelc3497752014-02-18 13:56:34 -06001448
1449 if (cp->cmd_type == CMD_IOACCEL2)
1450 return process_ioaccel2_completion(h, cp, cmd, dev);
1451
Stephen M. Cameron55126722010-02-25 14:03:01 -06001452 cmd->result |= ei->ScsiStatus;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001453
1454 /* copy the sense data whether we need to or not. */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05001455 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1456 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1457 else
1458 sense_data_size = sizeof(ei->SenseInfo);
1459 if (ei->SenseLen < sense_data_size)
1460 sense_data_size = ei->SenseLen;
1461
1462 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001463 scsi_set_resid(cmd, ei->ResidualCnt);
1464
1465 if (ei->CommandStatus == 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001466 cmd_free(h, cp);
Tomas Henzl2cc5bfa2013-08-01 15:14:00 +02001467 cmd->scsi_done(cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001468 return;
1469 }
1470
Matt Gatese1f7de02014-02-18 13:55:17 -06001471 /* For I/O accelerator commands, copy over some fields to the normal
1472 * CISS header used below for error handling.
1473 */
1474 if (cp->cmd_type == CMD_IOACCEL1) {
1475 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1476 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1477 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1478 cp->Header.Tag.lower = c->Tag.lower;
1479 cp->Header.Tag.upper = c->Tag.upper;
1480 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1481 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001482
1483 /* Any RAID offload error results in retry which will use
1484 * the normal I/O path so the controller can handle whatever's
1485 * wrong.
1486 */
1487 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1488 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1489 dev->offload_enabled = 0;
1490 cmd->result = DID_SOFT_ERROR << 16;
1491 cmd_free(h, cp);
1492 cmd->scsi_done(cmd);
1493 return;
1494 }
Matt Gatese1f7de02014-02-18 13:55:17 -06001495 }
1496
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001497 /* an error has occurred */
1498 switch (ei->CommandStatus) {
1499
1500 case CMD_TARGET_STATUS:
1501 if (ei->ScsiStatus) {
1502 /* Get sense key */
1503 sense_key = 0xf & ei->SenseInfo[2];
1504 /* Get additional sense code */
1505 asc = ei->SenseInfo[12];
1506 /* Get additional sense code qualifier */
1507 ascq = ei->SenseInfo[13];
1508 }
1509
1510 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
Matt Gates3ce438d2013-12-04 17:10:36 -06001511 if (check_for_unit_attention(h, cp))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001512 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001513 if (sense_key == ILLEGAL_REQUEST) {
1514 /*
1515 * SCSI REPORT_LUNS is commonly unsupported on
1516 * Smart Array. Suppress noisy complaint.
1517 */
1518 if (cp->Request.CDB[0] == REPORT_LUNS)
1519 break;
1520
1521 /* If ASC/ASCQ indicate a Logical Unit
1522 * Not Supported condition, warn and stop.
1523 */
1524 if ((asc == 0x25) && (ascq == 0x0)) {
1525 dev_warn(&h->pdev->dev, "cp %p "
1526 "has check condition\n", cp);
1527 break;
1528 }
1529 }
1530
1531 if (sense_key == NOT_READY) {
1532 /* If sense key is Not Ready and ASC/ASCQ
1533 * indicate Logical Unit Not Ready,
1534 * Manual Intervention Required
1535 */
1536 if ((asc == 0x04) && (ascq == 0x03)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001537 dev_warn(&h->pdev->dev, "cp %p "
1538 "has check condition: unit "
1539 "not ready, manual "
1540 "intervention required\n", cp);
1541 break;
1542 }
1543 }
Matt Gates1d3b3602010-02-04 08:43:00 -06001544 if (sense_key == ABORTED_COMMAND) {
1545 /* Aborted command is retryable */
1546 dev_warn(&h->pdev->dev, "cp %p "
1547 "has check condition: aborted command: "
1548 "ASC: 0x%x, ASCQ: 0x%x\n",
1549 cp, asc, ascq);
Stephen M. Cameron2e311fb2013-09-23 13:33:41 -05001550 cmd->result |= DID_SOFT_ERROR << 16;
Matt Gates1d3b3602010-02-04 08:43:00 -06001551 break;
1552 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001553 /* Must be some other type of check condition */
Stephen M. Cameron21b8e4e2012-05-01 11:42:25 -05001554 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001555 "unknown type: "
1556 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1557 "Returning result: 0x%x, "
1558 "cmd=[%02x %02x %02x %02x %02x "
Mike Miller807be732010-02-04 08:43:26 -06001559 "%02x %02x %02x %02x %02x %02x "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001560 "%02x %02x %02x %02x %02x]\n",
1561 cp, sense_key, asc, ascq,
1562 cmd->result,
1563 cmd->cmnd[0], cmd->cmnd[1],
1564 cmd->cmnd[2], cmd->cmnd[3],
1565 cmd->cmnd[4], cmd->cmnd[5],
1566 cmd->cmnd[6], cmd->cmnd[7],
Mike Miller807be732010-02-04 08:43:26 -06001567 cmd->cmnd[8], cmd->cmnd[9],
1568 cmd->cmnd[10], cmd->cmnd[11],
1569 cmd->cmnd[12], cmd->cmnd[13],
1570 cmd->cmnd[14], cmd->cmnd[15]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001571 break;
1572 }
1573
1574
1575 /* Problem was not a check condition
1576 * Pass it up to the upper layers...
1577 */
1578 if (ei->ScsiStatus) {
1579 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1580 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1581 "Returning result: 0x%x\n",
1582 cp, ei->ScsiStatus,
1583 sense_key, asc, ascq,
1584 cmd->result);
1585 } else { /* scsi status is zero??? How??? */
1586 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1587 "Returning no connection.\n", cp),
1588
1589 /* Ordinarily, this case should never happen,
1590 * but there is a bug in some released firmware
1591 * revisions that allows it to happen if, for
1592 * example, a 4100 backplane loses power and
1593 * the tape drive is in it. We assume that
1594 * it's a fatal error of some kind because we
1595 * can't show that it wasn't. We will make it
1596 * look like selection timeout since that is
1597 * the most common reason for this to occur,
1598 * and it's severe enough.
1599 */
1600
1601 cmd->result = DID_NO_CONNECT << 16;
1602 }
1603 break;
1604
1605 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1606 break;
1607 case CMD_DATA_OVERRUN:
1608 dev_warn(&h->pdev->dev, "cp %p has"
1609 " completed with data overrun "
1610 "reported\n", cp);
1611 break;
1612 case CMD_INVALID: {
1613 /* print_bytes(cp, sizeof(*cp), 1, 0);
1614 print_cmd(cp); */
1615 /* We get CMD_INVALID if you address a non-existent device
1616 * instead of a selection timeout (no response). You will
1617 * see this if you yank out a drive, then try to access it.
1618 * This is kind of a shame because it means that any other
1619 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1620 * missing target. */
1621 cmd->result = DID_NO_CONNECT << 16;
1622 }
1623 break;
1624 case CMD_PROTOCOL_ERR:
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05001625 cmd->result = DID_ERROR << 16;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001626 dev_warn(&h->pdev->dev, "cp %p has "
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05001627 "protocol error\n", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001628 break;
1629 case CMD_HARDWARE_ERR:
1630 cmd->result = DID_ERROR << 16;
1631 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1632 break;
1633 case CMD_CONNECTION_LOST:
1634 cmd->result = DID_ERROR << 16;
1635 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1636 break;
1637 case CMD_ABORTED:
1638 cmd->result = DID_ABORT << 16;
1639 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1640 cp, ei->ScsiStatus);
1641 break;
1642 case CMD_ABORT_FAILED:
1643 cmd->result = DID_ERROR << 16;
1644 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1645 break;
1646 case CMD_UNSOLICITED_ABORT:
Stephen M. Cameronf6e76052011-07-26 11:08:52 -05001647 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1648 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001649 "abort\n", cp);
1650 break;
1651 case CMD_TIMEOUT:
1652 cmd->result = DID_TIME_OUT << 16;
1653 dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
1654 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06001655 case CMD_UNABORTABLE:
1656 cmd->result = DID_ERROR << 16;
1657 dev_warn(&h->pdev->dev, "Command unabortable\n");
1658 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001659 case CMD_IOACCEL_DISABLED:
1660 /* This only handles the direct pass-through case since RAID
1661 * offload is handled above. Just attempt a retry.
1662 */
1663 cmd->result = DID_SOFT_ERROR << 16;
1664 dev_warn(&h->pdev->dev,
1665 "cp %p had HP SSD Smart Path error\n", cp);
1666 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001667 default:
1668 cmd->result = DID_ERROR << 16;
1669 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1670 cp, ei->CommandStatus);
1671 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001672 cmd_free(h, cp);
Tomas Henzl2cc5bfa2013-08-01 15:14:00 +02001673 cmd->scsi_done(cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001674}
1675
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001676static void hpsa_pci_unmap(struct pci_dev *pdev,
1677 struct CommandList *c, int sg_used, int data_direction)
1678{
1679 int i;
1680 union u64bit addr64;
1681
1682 for (i = 0; i < sg_used; i++) {
1683 addr64.val32.lower = c->SG[i].Addr.lower;
1684 addr64.val32.upper = c->SG[i].Addr.upper;
1685 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1686 data_direction);
1687 }
1688}
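
/*
 * For reference, SG descriptor addresses are stored as two 32-bit
 * halves: a bus address such as 0x0000001234abcd00 is kept as
 * Addr.lower = 0x34abcd00 and Addr.upper = 0x00000012, and is
 * reassembled through union u64bit (as above) before being handed
 * back to pci_unmap_single().
 */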
1689
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001690static int hpsa_map_one(struct pci_dev *pdev,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001691 struct CommandList *cp,
1692 unsigned char *buf,
1693 size_t buflen,
1694 int data_direction)
1695{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001696 u64 addr64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001697
1698 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1699 cp->Header.SGList = 0;
1700 cp->Header.SGTotal = 0;
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001701 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001702 }
1703
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001704 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
Shuah Khaneceaae12013-02-20 11:24:34 -06001705 if (dma_mapping_error(&pdev->dev, addr64)) {
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001706 /* Prevent subsequent unmap of something never mapped */
Shuah Khaneceaae12013-02-20 11:24:34 -06001707 cp->Header.SGList = 0;
1708 cp->Header.SGTotal = 0;
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001709 return -1;
Shuah Khaneceaae12013-02-20 11:24:34 -06001710 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001711 cp->SG[0].Addr.lower =
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001712 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001713 cp->SG[0].Addr.upper =
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001714 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001715 cp->SG[0].Len = buflen;
Matt Gatese1d9cbf2014-02-18 13:55:12 -06001716 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001717 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1718 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001719 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001720}
1721
1722static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1723 struct CommandList *c)
1724{
1725 DECLARE_COMPLETION_ONSTACK(wait);
1726
1727 c->waiting = &wait;
1728 enqueue_cmd_and_start_io(h, c);
1729 wait_for_completion(&wait);
1730}
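
/*
 * The completion path is expected to call complete(c->waiting) when the
 * controller finishes the command, which wakes the sleeper above; no
 * timeout is applied here, so hung hardware is left to the lockup
 * handling in hpsa_scsi_do_simple_cmd_core_if_no_lockup() below.
 */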
1731
Stephen M. Camerona0c12412011-10-26 16:22:04 -05001732static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1733 struct CommandList *c)
1734{
1735 unsigned long flags;
1736
1737 /* If controller lockup detected, fake a hardware error. */
1738 spin_lock_irqsave(&h->lock, flags);
1739 if (unlikely(h->lockup_detected)) {
1740 spin_unlock_irqrestore(&h->lock, flags);
1741 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
1742 } else {
1743 spin_unlock_irqrestore(&h->lock, flags);
1744 hpsa_scsi_do_simple_cmd_core(h, c);
1745 }
1746}
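
/*
 * Faking CMD_HARDWARE_ERR when a lockup has been detected lets callers
 * see an ordinary failed command (and clean up normally) instead of
 * blocking forever on a controller that will never respond.
 */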
1747
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05001748#define MAX_DRIVER_CMD_RETRIES 25
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001749static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
1750 struct CommandList *c, int data_direction)
1751{
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05001752 int backoff_time = 10, retry_count = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001753
1754 do {
Joe Perches7630abd2011-05-08 23:32:40 -07001755 memset(c->err_info, 0, sizeof(*c->err_info));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001756 hpsa_scsi_do_simple_cmd_core(h, c);
1757 retry_count++;
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05001758 if (retry_count > 3) {
1759 msleep(backoff_time);
1760 if (backoff_time < 1000)
1761 backoff_time *= 2;
1762 }
Matt Bondurant852af202012-05-01 11:42:35 -05001763 } while ((check_for_unit_attention(h, c) ||
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05001764 check_for_busy(h, c)) &&
1765 retry_count <= MAX_DRIVER_CMD_RETRIES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001766 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
1767}
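
/*
 * Worked example of the backoff above: the first four attempts run
 * back-to-back; after that each retry is preceded by a sleep of 10,
 * 20, 40, 80, ... ms, doubling until capped at 1280 ms, for at most
 * MAX_DRIVER_CMD_RETRIES attempts while the device keeps returning
 * unit attention or busy.
 */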
1768
1769static void hpsa_scsi_interpret_error(struct CommandList *cp)
1770{
1771 struct ErrorInfo *ei;
1772 struct device *d = &cp->h->pdev->dev;
1773
1774 ei = cp->err_info;
1775 switch (ei->CommandStatus) {
1776 case CMD_TARGET_STATUS:
1777 dev_warn(d, "cmd %p has completed with errors\n", cp);
1778 dev_warn(d, "cmd %p has SCSI Status = %x\n", cp,
1779 ei->ScsiStatus);
1780 if (ei->ScsiStatus == 0)
1781 dev_warn(d, "SCSI status is abnormally zero. "
1782 "(probably indicates selection timeout "
1783 "reported incorrectly due to a known "
1784 "firmware bug, circa July, 2001.)\n");
1785 break;
1786 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1787 dev_info(d, "UNDERRUN\n");
1788 break;
1789 case CMD_DATA_OVERRUN:
1790 dev_warn(d, "cp %p has completed with data overrun\n", cp);
1791 break;
1792 case CMD_INVALID: {
1793 /* controller unfortunately reports SCSI passthru's
1794 * to non-existent targets as invalid commands.
1795 */
1796 dev_warn(d, "cp %p is reported invalid (probably means "
1797 "target device no longer present)\n", cp);
1798 /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
1799 print_cmd(cp); */
1800 }
1801 break;
1802 case CMD_PROTOCOL_ERR:
1803 dev_warn(d, "cp %p has protocol error \n", cp);
1804 break;
1805 case CMD_HARDWARE_ERR:
1806 /* cmd->result = DID_ERROR << 16; */
1807 dev_warn(d, "cp %p had hardware error\n", cp);
1808 break;
1809 case CMD_CONNECTION_LOST:
1810 dev_warn(d, "cp %p had connection lost\n", cp);
1811 break;
1812 case CMD_ABORTED:
1813 dev_warn(d, "cp %p was aborted\n", cp);
1814 break;
1815 case CMD_ABORT_FAILED:
1816 dev_warn(d, "cp %p reports abort failed\n", cp);
1817 break;
1818 case CMD_UNSOLICITED_ABORT:
1819 dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp);
1820 break;
1821 case CMD_TIMEOUT:
1822 dev_warn(d, "cp %p timed out\n", cp);
1823 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06001824 case CMD_UNABORTABLE:
1825 dev_warn(d, "Command unabortable\n");
1826 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001827 default:
1828 dev_warn(d, "cp %p returned unknown status %x\n", cp,
1829 ei->CommandStatus);
1830 }
1831}
1832
1833static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
1834 unsigned char page, unsigned char *buf,
1835 unsigned char bufsize)
1836{
1837 int rc = IO_OK;
1838 struct CommandList *c;
1839 struct ErrorInfo *ei;
1840
1841 c = cmd_special_alloc(h);
1842
1843 if (c == NULL) { /* trouble... */
1844 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06001845 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001846 }
1847
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001848 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
1849 page, scsi3addr, TYPE_CMD)) {
1850 rc = -1;
1851 goto out;
1852 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001853 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
1854 ei = c->err_info;
1855 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
1856 hpsa_scsi_interpret_error(c);
1857 rc = -1;
1858 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001859out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001860 cmd_special_free(h, c);
1861 return rc;
1862}
1863
Scott Teelbf711ac2014-02-18 13:56:39 -06001864static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
1865 u8 reset_type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001866{
1867 int rc = IO_OK;
1868 struct CommandList *c;
1869 struct ErrorInfo *ei;
1870
1871 c = cmd_special_alloc(h);
1872
1873 if (c == NULL) { /* trouble... */
1874 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
Stephen M. Camerone9ea04a2010-02-25 14:03:06 -06001875 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001876 }
1877
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001878 /* fill_cmd can't fail here, no data buffer to map. */
Scott Teelbf711ac2014-02-18 13:56:39 -06001879 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
1880 scsi3addr, TYPE_MSG);
1881 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001882 hpsa_scsi_do_simple_cmd_core(h, c);
1883 /* no unmap needed here because no data xfer. */
1884
1885 ei = c->err_info;
1886 if (ei->CommandStatus != 0) {
1887 hpsa_scsi_interpret_error(c);
1888 rc = -1;
1889 }
1890 cmd_special_free(h, c);
1891 return rc;
1892}
1893
1894static void hpsa_get_raid_level(struct ctlr_info *h,
1895 unsigned char *scsi3addr, unsigned char *raid_level)
1896{
1897 int rc;
1898 unsigned char *buf;
1899
1900 *raid_level = RAID_UNKNOWN;
1901 buf = kzalloc(64, GFP_KERNEL);
1902 if (!buf)
1903 return;
1904 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64);
1905 if (rc == 0)
1906 *raid_level = buf[8];
1907 if (*raid_level > RAID_UNKNOWN)
1908 *raid_level = RAID_UNKNOWN;
1909 kfree(buf);
1910 return;
1911}
1912
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001913#define HPSA_MAP_DEBUG
1914#ifdef HPSA_MAP_DEBUG
1915static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
1916 struct raid_map_data *map_buff)
1917{
1918 struct raid_map_disk_data *dd = &map_buff->data[0];
1919 int map, row, col;
1920 u16 map_cnt, row_cnt, disks_per_row;
1921
1922 if (rc != 0)
1923 return;
1924
1925 dev_info(&h->pdev->dev, "structure_size = %u\n",
1926 le32_to_cpu(map_buff->structure_size));
1927 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
1928 le32_to_cpu(map_buff->volume_blk_size));
1929 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
1930 le64_to_cpu(map_buff->volume_blk_cnt));
1931 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
1932 map_buff->phys_blk_shift);
1933 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
1934 map_buff->parity_rotation_shift);
1935 dev_info(&h->pdev->dev, "strip_size = %u\n",
1936 le16_to_cpu(map_buff->strip_size));
1937 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
1938 le64_to_cpu(map_buff->disk_starting_blk));
1939 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
1940 le64_to_cpu(map_buff->disk_blk_cnt));
1941 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
1942 le16_to_cpu(map_buff->data_disks_per_row));
1943 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
1944 le16_to_cpu(map_buff->metadata_disks_per_row));
1945 dev_info(&h->pdev->dev, "row_cnt = %u\n",
1946 le16_to_cpu(map_buff->row_cnt));
1947 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
1948 le16_to_cpu(map_buff->layout_map_count));
1949
1950 map_cnt = le16_to_cpu(map_buff->layout_map_count);
1951 for (map = 0; map < map_cnt; map++) {
1952 dev_info(&h->pdev->dev, "Map%u:\n", map);
1953 row_cnt = le16_to_cpu(map_buff->row_cnt);
1954 for (row = 0; row < row_cnt; row++) {
1955 dev_info(&h->pdev->dev, " Row%u:\n", row);
1956 disks_per_row =
1957 le16_to_cpu(map_buff->data_disks_per_row);
1958 for (col = 0; col < disks_per_row; col++, dd++)
1959 dev_info(&h->pdev->dev,
1960 " D%02u: h=0x%04x xor=%u,%u\n",
1961 col, dd->ioaccel_handle,
1962 dd->xor_mult[0], dd->xor_mult[1]);
1963 disks_per_row =
1964 le16_to_cpu(map_buff->metadata_disks_per_row);
1965 for (col = 0; col < disks_per_row; col++, dd++)
1966 dev_info(&h->pdev->dev,
1967 " M%02u: h=0x%04x xor=%u,%u\n",
1968 col, dd->ioaccel_handle,
1969 dd->xor_mult[0], dd->xor_mult[1]);
1970 }
1971 }
1972}
1973#else
1974static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
1975 __attribute__((unused)) int rc,
1976 __attribute__((unused)) struct raid_map_data *map_buff)
1977{
1978}
1979#endif
1980
1981static int hpsa_get_raid_map(struct ctlr_info *h,
1982 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
1983{
1984 int rc = 0;
1985 struct CommandList *c;
1986 struct ErrorInfo *ei;
1987
1988 c = cmd_special_alloc(h);
1989 if (c == NULL) {
1990 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
1991 return -ENOMEM;
1992 }
1993 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
1994 sizeof(this_device->raid_map), 0,
1995 scsi3addr, TYPE_CMD)) {
1996 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
1997 cmd_special_free(h, c);
1998 return -ENOMEM;
1999 }
2000 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2001 ei = c->err_info;
2002 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
2003 hpsa_scsi_interpret_error(c);
2004 cmd_special_free(h, c);
2005 return -1;
2006 }
2007 cmd_special_free(h, c);
2008
2009 /* @todo in the future, dynamically allocate RAID map memory */
2010 if (le32_to_cpu(this_device->raid_map.structure_size) >
2011 sizeof(this_device->raid_map)) {
2012 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2013 rc = -1;
2014 }
2015 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2016 return rc;
2017}
2018
2019static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2020 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2021{
2022 int rc;
2023 unsigned char *buf;
2024 u8 ioaccel_status;
2025
2026 this_device->offload_config = 0;
2027 this_device->offload_enabled = 0;
2028
2029 buf = kzalloc(64, GFP_KERNEL);
2030 if (!buf)
2031 return;
2032 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2033 HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
2034 if (rc != 0)
2035 goto out;
2036
2037#define IOACCEL_STATUS_BYTE 4
2038#define OFFLOAD_CONFIGURED_BIT 0x01
2039#define OFFLOAD_ENABLED_BIT 0x02
2040 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2041 this_device->offload_config =
2042 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2043 if (this_device->offload_config) {
2044 this_device->offload_enabled =
2045 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2046 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2047 this_device->offload_enabled = 0;
2048 }
2049out:
2050 kfree(buf);
2051 return;
2052}
2053
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002054/* Get the device id from inquiry page 0x83 */
2055static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2056 unsigned char *device_id, int buflen)
2057{
2058 int rc;
2059 unsigned char *buf;
2060
2061 if (buflen > 16)
2062 buflen = 16;
2063 buf = kzalloc(64, GFP_KERNEL);
2064 if (!buf)
2065 return -1;
2066 rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64);
2067 if (rc == 0)
2068 memcpy(device_id, &buf[8], buflen);
2069 kfree(buf);
2070 return rc != 0;
2071}
2072
2073static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2074 struct ReportLUNdata *buf, int bufsize,
2075 int extended_response)
2076{
2077 int rc = IO_OK;
2078 struct CommandList *c;
2079 unsigned char scsi3addr[8];
2080 struct ErrorInfo *ei;
2081
2082 c = cmd_special_alloc(h);
2083 if (c == NULL) { /* trouble... */
2084 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2085 return -1;
2086 }
Stephen M. Camerone89c0ae2010-02-04 08:42:04 -06002087 /* address the controller */
2088 memset(scsi3addr, 0, sizeof(scsi3addr));
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002089 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2090 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2091 rc = -1;
2092 goto out;
2093 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002094 if (extended_response)
2095 c->Request.CDB[1] = extended_response;
2096 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2097 ei = c->err_info;
2098 if (ei->CommandStatus != 0 &&
2099 ei->CommandStatus != CMD_DATA_UNDERRUN) {
2100 hpsa_scsi_interpret_error(c);
2101 rc = -1;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002102 } else {
2103 if (buf->extended_response_flag != extended_response) {
2104 dev_err(&h->pdev->dev,
2105 "report luns requested format %u, got %u\n",
2106 extended_response,
2107 buf->extended_response_flag);
2108 rc = -1;
2109 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002110 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002111out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002112 cmd_special_free(h, c);
2113 return rc;
2114}
2115
2116static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2117 struct ReportLUNdata *buf,
2118 int bufsize, int extended_response)
2119{
2120 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2121}
2122
2123static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2124 struct ReportLUNdata *buf, int bufsize)
2125{
2126 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2127}
2128
2129static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2130 int bus, int target, int lun)
2131{
2132 device->bus = bus;
2133 device->target = target;
2134 device->lun = lun;
2135}
2136
2137static int hpsa_update_device_info(struct ctlr_info *h,
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002138 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2139 unsigned char *is_OBDR_device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002140{
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002141
2142#define OBDR_SIG_OFFSET 43
2143#define OBDR_TAPE_SIG "$DR-10"
2144#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2145#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2146
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06002147 unsigned char *inq_buff;
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002148 unsigned char *obdr_sig;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002149
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06002150 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002151 if (!inq_buff)
2152 goto bail_out;
2153
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002154 /* Do an inquiry to the device to see what it is. */
2155 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2156 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2157 /* Inquiry failed (msg printed already) */
2158 dev_err(&h->pdev->dev,
2159 "hpsa_update_device_info: inquiry failed\n");
2160 goto bail_out;
2161 }
2162
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002163 this_device->devtype = (inq_buff[0] & 0x1f);
2164 memcpy(this_device->scsi3addr, scsi3addr, 8);
2165 memcpy(this_device->vendor, &inq_buff[8],
2166 sizeof(this_device->vendor));
2167 memcpy(this_device->model, &inq_buff[16],
2168 sizeof(this_device->model));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002169 memset(this_device->device_id, 0,
2170 sizeof(this_device->device_id));
2171 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2172 sizeof(this_device->device_id));
2173
2174 if (this_device->devtype == TYPE_DISK &&
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002175 is_logical_dev_addr_mode(scsi3addr)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002176 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002177 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2178 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
2179 } else {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002180 this_device->raid_level = RAID_UNKNOWN;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002181 this_device->offload_config = 0;
2182 this_device->offload_enabled = 0;
2183 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002184
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002185 if (is_OBDR_device) {
2186 /* See if this is a One-Button-Disaster-Recovery device
2187 * by looking for "$DR-10" at offset 43 in inquiry data.
2188 */
2189 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2190 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2191 strncmp(obdr_sig, OBDR_TAPE_SIG,
2192 OBDR_SIG_LEN) == 0);
2193 }
2194
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002195 kfree(inq_buff);
2196 return 0;
2197
2198bail_out:
2199 kfree(inq_buff);
2200 return 1;
2201}
2202
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002203static unsigned char *ext_target_model[] = {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002204 "MSA2012",
2205 "MSA2024",
2206 "MSA2312",
2207 "MSA2324",
Stephen M. Cameronfda38512011-05-03 15:00:07 -05002208 "P2000 G3 SAS",
Stephen M. Camerone06c8e52013-09-23 13:33:56 -05002209 "MSA 2040 SAS",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002210 NULL,
2211};
2212
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002213static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002214{
2215 int i;
2216
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002217 for (i = 0; ext_target_model[i]; i++)
2218 if (strncmp(device->model, ext_target_model[i],
2219 strlen(ext_target_model[i])) == 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002220 return 1;
2221 return 0;
2222}
2223
2224/* Helper function to assign bus, target, lun mapping of devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002225 * Puts non-external target logical volumes on bus 0, external target logical
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002226 * volumes on bus 1, physical devices on bus 2, and the HBA on bus 3.
2227 * Logical drive target and lun are assigned at this time, but
2228 * physical device lun and target assignment are deferred (assigned
2229 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2230 */
2231static void figure_bus_target_lun(struct ctlr_info *h,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002232 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002233{
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002234 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002235
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002236 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2237 /* physical device, target and lun filled in later */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002238 if (is_hba_lunid(lunaddrbytes))
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002239 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002240 else
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002241 /* defer target, lun assignment for physical devices */
2242 hpsa_set_bus_target_lun(device, 2, -1, -1);
2243 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002244 }
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002245 /* It's a logical device */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002246 if (is_ext_target(h, device)) {
2247 /* external target way, put logicals on bus 1
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002248 * and match target/lun numbers box
2249 * reports, other smart array, bus 0, target 0, match lunid
2250 */
2251 hpsa_set_bus_target_lun(device,
2252 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2253 return;
2254 }
2255 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002256}
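
/*
 * Example of the decoding above (illustrative values): a logical LUN
 * address whose first four bytes little-endian-decode to lunid
 * 0x00034005 on an external target maps to bus 1, target 3, lun 5;
 * the same lunid on a plain Smart Array logical volume maps to
 * bus 0, target 0, lun 0x0005 (lunid & 0x3fff).
 */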
2257
2258/*
2259 * If there is no lun 0 on a target, linux won't find any devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002260 * For the external targets (arrays), we have to manually detect the enclosure
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002261 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2262 * it for some reason. *tmpdevice is the target we're adding,
2263 * this_device is a pointer into the current element of currentsd[]
2264 * that we're building up in update_scsi_devices(), below.
2265 * lunzerobits is a bitmap that tracks which targets already have a
2266 * lun 0 assigned.
2267 * Returns 1 if an enclosure was added, 0 if not.
2268 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002269static int add_ext_target_dev(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002270 struct hpsa_scsi_dev_t *tmpdevice,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002271 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002272 unsigned long lunzerobits[], int *n_ext_target_devs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002273{
2274 unsigned char scsi3addr[8];
2275
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002276 if (test_bit(tmpdevice->target, lunzerobits))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002277 return 0; /* There is already a lun 0 on this target. */
2278
2279 if (!is_logical_dev_addr_mode(lunaddrbytes))
2280 return 0; /* It's the logical targets that may lack lun 0. */
2281
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002282 if (!is_ext_target(h, tmpdevice))
2283 return 0; /* Only external target devices have this problem. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002284
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002285 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002286 return 0;
2287
Stephen M. Cameronc4f8a292011-01-07 10:55:43 -06002288 memset(scsi3addr, 0, 8);
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002289 scsi3addr[3] = tmpdevice->target;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002290 if (is_hba_lunid(scsi3addr))
2291 return 0; /* Don't add the RAID controller here. */
2292
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002293 if (is_scsi_rev_5(h))
2294 return 0; /* p1210m doesn't need to do this. */
2295
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002296 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
Scott Teelaca4a522012-01-19 14:01:19 -06002297 dev_warn(&h->pdev->dev, "Maximum number of external "
2298 "target devices exceeded. Check your hardware "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002299 "configuration.");
2300 return 0;
2301 }
2302
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002303 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002304 return 0;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002305 (*n_ext_target_devs)++;
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002306 hpsa_set_bus_target_lun(this_device,
2307 tmpdevice->bus, tmpdevice->target, 0);
2308 set_bit(tmpdevice->target, lunzerobits);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002309 return 1;
2310}
2311
2312/*
Scott Teel54b6e9e2014-02-18 13:56:45 -06002313 * Get address of physical disk used for an ioaccel2 mode command:
2314 * 1. Extract ioaccel2 handle from the command.
2315 * 2. Find a matching ioaccel2 handle from list of physical disks.
2316 * 3. Return:
2317 * 1 and set scsi3addr to address of matching physical disk, or
2318 * 0 if no matching physical disk was found.
2319 */
2320static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2321 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2322{
2323 struct ReportExtendedLUNdata *physicals = NULL;
2324 int responsesize = 24; /* size of physical extended response */
2325 int extended = 2; /* flag forces reporting 'other dev info'. */
2326 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2327 u32 nphysicals = 0; /* number of reported physical devs */
2328 int found = 0; /* found match (1) or not (0) */
2329 u32 find; /* handle we need to match */
2330 int i;
2331 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2332 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2333 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2334 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2335 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2336
2337 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2338 return 0; /* no match */
2339
2340 /* point to the ioaccel2 device handle */
2341 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2342 if (c2a == NULL)
2343 return 0; /* no match */
2344
2345 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2346 if (scmd == NULL)
2347 return 0; /* no match */
2348
2349 d = scmd->device->hostdata;
2350 if (d == NULL)
2351 return 0; /* no match */
2352
2353 it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
2354 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
2355 find = c2a->scsi_nexus;
2356
2357 /* Get the list of physical devices */
2358 physicals = kzalloc(reportsize, GFP_KERNEL);
 if (physicals == NULL)
 return 0; /* no match; allocation failed */
2359 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2360 reportsize, extended)) {
2361 dev_err(&h->pdev->dev,
2362 "Can't lookup %s device handle: report physical LUNs failed.\n",
2363 "HP SSD Smart Path");
2364 kfree(physicals);
2365 return 0;
2366 }
2367 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2368 responsesize;
2369
2370
2371 /* find ioaccel2 handle in list of physicals: */
2372 for (i = 0; i < nphysicals; i++) {
2373 /* handle is in bytes 20-23 of each 24-byte lun entry */
2374 if (memcmp(&((struct ReportExtendedLUNdata *)
2375 physicals)->LUN[i][20], &find, 4) != 0) {
2376 continue; /* didn't match */
2377 }
2378 found = 1;
2379 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
2380 physicals)->LUN[i][0], 8);
2381 break; /* found it */
2382 }
2383
2384 kfree(physicals);
2385 if (found)
2386 return 1;
2387 else
2388 return 0;
2389
2390}
2391/*
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002392 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2393 * logdev. The number of luns in physdev and logdev are returned in
2394 * *nphysicals and *nlogicals, respectively.
2395 * Returns 0 on success, -1 otherwise.
2396 */
2397static int hpsa_gather_lun_info(struct ctlr_info *h,
2398 int reportlunsize,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002399 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002400 struct ReportLUNdata *logdev, u32 *nlogicals)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002401{
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002402 int physical_entry_size = 8;
2403
2404 *physical_mode = 0;
2405
2406 /* For I/O accelerator mode we need to read physical device handles */
Mike MIller317d4ad2014-02-18 13:56:20 -06002407 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2408 h->transMethod & CFGTBL_Trans_io_accel2) {
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002409 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2410 physical_entry_size = 24;
2411 }
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002412 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002413 *physical_mode)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002414 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2415 return -1;
2416 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002417 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2418 physical_entry_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002419 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2420 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2421 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2422 *nphysicals - HPSA_MAX_PHYS_LUN);
2423 *nphysicals = HPSA_MAX_PHYS_LUN;
2424 }
2425 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
2426 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2427 return -1;
2428 }
Stephen M. Cameron6df1e952010-02-04 08:42:19 -06002429 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002430 /* Reject Logicals in excess of our max capability. */
2431 if (*nlogicals > HPSA_MAX_LUN) {
2432 dev_warn(&h->pdev->dev,
2433 "maximum logical LUNs (%d) exceeded. "
2434 "%d LUNs ignored.\n", HPSA_MAX_LUN,
2435 *nlogicals - HPSA_MAX_LUN);
2436 *nlogicals = HPSA_MAX_LUN;
2437 }
2438 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2439 dev_warn(&h->pdev->dev,
2440 "maximum logical + physical LUNs (%d) exceeded. "
2441 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2442 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2443 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2444 }
2445 return 0;
2446}
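
/*
 * For example (illustrative numbers): in extended mode each physical
 * entry is 24 bytes, so a returned LUNListLength of 96 yields
 * *nphysicals = 4; in the default 8-byte mode the same length would
 * yield 12.
 */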
2447
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002448static u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002449 int nphysicals, int nlogicals,
2450 struct ReportExtendedLUNdata *physdev_list,
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002451 struct ReportLUNdata *logdev_list)
2452{
2453 /* Helper function: figure out where the LUN ID info comes from, given
2454 * index i, the lists of physical and logical devices, and where in the
2455 * list the raid controller is supposed to appear (first or last).
2456 */
2457
2458 int logicals_start = nphysicals + (raid_ctlr_position == 0);
2459 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2460
2461 if (i == raid_ctlr_position)
2462 return RAID_CTLR_LUNID;
2463
2464 if (i < logicals_start)
2465 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
2466
2467 if (i < last_device)
2468 return &logdev_list->LUN[i - nphysicals -
2469 (raid_ctlr_position == 0)][0];
2470 BUG();
2471 return NULL;
2472}
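
/*
 * Index mapping example: with raid_ctlr_position == 0, i == 0 is the
 * controller itself, i in [1, nphysicals] indexes
 * physdev_list->LUN[i - 1], and larger i (up to last_device - 1)
 * indexes logdev_list; with the controller positioned last, the
 * physical entries start at i == 0 instead.
 */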
2473
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002474static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2475{
2476 /* the idea here is we could get notified
2477 * that some devices have changed, so we do a report
2478 * physical luns and report logical luns cmd, and adjust
2479 * our list of devices accordingly.
2480 *
2481 * The scsi3addr's of devices won't change so long as the
2482 * adapter is not reset. That means we can rescan and
2483 * tell which devices we already know about, vs. new
2484 * devices, vs. disappearing devices.
2485 */
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002486 struct ReportExtendedLUNdata *physdev_list = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002487 struct ReportLUNdata *logdev_list = NULL;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002488 u32 nphysicals = 0;
2489 u32 nlogicals = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002490 int physical_mode = 0;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002491 u32 ndev_allocated = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002492 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
2493 int ncurrent = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002494 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002495 int i, n_ext_target_devs, ndevs_to_allocate;
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002496 int raid_ctlr_position;
Scott Teelaca4a522012-01-19 14:01:19 -06002497 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002498
Scott Teelcfe5bad2011-10-26 16:21:07 -05002499 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002500 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
2501 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002502 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
2503
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002504 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002505 dev_err(&h->pdev->dev, "out of memory\n");
2506 goto out;
2507 }
2508 memset(lunzerobits, 0, sizeof(lunzerobits));
2509
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002510 if (hpsa_gather_lun_info(h, reportlunsize,
2511 (struct ReportLUNdata *) physdev_list, &nphysicals,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002512 &physical_mode, logdev_list, &nlogicals))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002513 goto out;
2514
Scott Teelaca4a522012-01-19 14:01:19 -06002515 /* We might see up to the maximum number of logical and physical disks
2516 * plus external target devices, and a device for the local RAID
2517 * controller.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002518 */
Scott Teelaca4a522012-01-19 14:01:19 -06002519 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002520
2521 /* Allocate the per device structures */
2522 for (i = 0; i < ndevs_to_allocate; i++) {
Scott Teelb7ec0212011-10-26 16:21:12 -05002523 if (i >= HPSA_MAX_DEVICES) {
2524 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
2525 " %d devices ignored.\n", HPSA_MAX_DEVICES,
2526 ndevs_to_allocate - HPSA_MAX_DEVICES);
2527 break;
2528 }
2529
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002530 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
2531 if (!currentsd[i]) {
2532 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
2533 __FILE__, __LINE__);
2534 goto out;
2535 }
2536 ndev_allocated++;
2537 }
2538
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002539 if (unlikely(is_scsi_rev_5(h)))
2540 raid_ctlr_position = 0;
2541 else
2542 raid_ctlr_position = nphysicals + nlogicals;
2543
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002544 /* adjust our table of devices */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002545 n_ext_target_devs = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002546 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002547 u8 *lunaddrbytes, is_OBDR = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002548
2549 /* Figure out where the LUN ID info is coming from */
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002550 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
2551 i, nphysicals, nlogicals, physdev_list, logdev_list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002552 /* skip masked physical devices. */
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002553 if (lunaddrbytes[3] & 0xC0 &&
2554 i < nphysicals + (raid_ctlr_position == 0))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002555 continue;
2556
2557 /* Get device type, vendor, model, device id */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002558 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
2559 &is_OBDR))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002560 continue; /* skip it if we can't talk to it. */
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002561 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002562 this_device = currentsd[ncurrent];
2563
2564 /*
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002565 * For external target devices, we have to insert a LUN 0 which
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002566 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
2567 * is nonetheless an enclosure device there. We have to
2568 * present that otherwise linux won't find anything if
2569 * there is no lun 0.
2570 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002571 if (add_ext_target_dev(h, tmpdevice, this_device,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002572 lunaddrbytes, lunzerobits,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002573 &n_ext_target_devs)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002574 ncurrent++;
2575 this_device = currentsd[ncurrent];
2576 }
2577
2578 *this_device = *tmpdevice;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002579
2580 switch (this_device->devtype) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002581 case TYPE_ROM:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002582 /* We don't *really* support actual CD-ROM devices,
2583 * just "One Button Disaster Recovery" tape drive
2584 * which temporarily pretends to be a CD-ROM drive.
2585 * So we check that the device is really an OBDR tape
2586 * device by checking for "$DR-10" in bytes 43-48 of
2587 * the inquiry data.
2588 */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002589 if (is_OBDR)
2590 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002591 break;
2592 case TYPE_DISK:
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002593 if (i >= nphysicals) {
2594 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002595 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002596 }
2597 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
2598 memcpy(&this_device->ioaccel_handle,
2599 &lunaddrbytes[20],
2600 sizeof(this_device->ioaccel_handle));
2601 ncurrent++;
2602 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002603 break;
2604 case TYPE_TAPE:
2605 case TYPE_MEDIUM_CHANGER:
2606 ncurrent++;
2607 break;
2608 case TYPE_RAID:
2609 /* Only present the Smart Array HBA as a RAID controller.
2610 * If it's a RAID controller other than the HBA itself
2611 * (an external RAID controller, MSA500 or similar)
2612 * don't present it.
2613 */
2614 if (!is_hba_lunid(lunaddrbytes))
2615 break;
2616 ncurrent++;
2617 break;
2618 default:
2619 break;
2620 }
Scott Teelcfe5bad2011-10-26 16:21:07 -05002621 if (ncurrent >= HPSA_MAX_DEVICES)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002622 break;
2623 }
2624 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
2625out:
2626 kfree(tmpdevice);
2627 for (i = 0; i < ndev_allocated; i++)
2628 kfree(currentsd[i]);
2629 kfree(currentsd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002630 kfree(physdev_list);
2631 kfree(logdev_list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002632}
2633
2634/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci
2635 * dma mapping and fills in the scatter gather entries of the
2636 * hpsa command, cp.
2637 */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002638static int hpsa_scatter_gather(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002639 struct CommandList *cp,
2640 struct scsi_cmnd *cmd)
2641{
2642 unsigned int len;
2643 struct scatterlist *sg;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002644 u64 addr64;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002645 int use_sg, i, sg_index, chained;
2646 struct SGDescriptor *curr_sg;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002647
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002648 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002649
2650 use_sg = scsi_dma_map(cmd);
2651 if (use_sg < 0)
2652 return use_sg;
2653
2654 if (!use_sg)
2655 goto sglist_finished;
2656
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002657 curr_sg = cp->SG;
2658 chained = 0;
2659 sg_index = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002660 scsi_for_each_sg(cmd, sg, use_sg, i) {
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002661 if (i == h->max_cmd_sg_entries - 1 &&
2662 use_sg > h->max_cmd_sg_entries) {
2663 chained = 1;
2664 curr_sg = h->cmd_sg_list[cp->cmdindex];
2665 sg_index = 0;
2666 }
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002667 addr64 = (u64) sg_dma_address(sg);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002668 len = sg_dma_len(sg);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002669 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
2670 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
2671 curr_sg->Len = len;
Matt Gatese1d9cbf2014-02-18 13:55:12 -06002672 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002673 curr_sg++;
2674 }
2675
2676 if (use_sg + chained > h->maxSG)
2677 h->maxSG = use_sg + chained;
2678
2679 if (chained) {
2680 cp->Header.SGList = h->max_cmd_sg_entries;
2681 cp->Header.SGTotal = (u16) (use_sg + 1);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06002682 if (hpsa_map_sg_chain_block(h, cp)) {
2683 scsi_dma_unmap(cmd);
2684 return -1;
2685 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06002686 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002687 }
2688
2689sglist_finished:
2690
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002691 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
2692 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002693 return 0;
2694}
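
/*
 * Note: in the chained case above, Header.SGList stays at
 * h->max_cmd_sg_entries (the number of descriptors embedded in the
 * command, the last of which points at the chain block), while
 * Header.SGTotal is use_sg + 1 to account for that extra chain
 * descriptor.
 */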
2695
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002696#define IO_ACCEL_INELIGIBLE (1)
2697static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
2698{
2699 int is_write = 0;
2700 u32 block;
2701 u32 block_cnt;
2702
2703 /* Rewrite 6- and 12-byte read/write CDBs to their 10-byte equivalents */
2704 switch (cdb[0]) {
2705 case WRITE_6:
2706 case WRITE_12:
2707 is_write = 1;
2708 case READ_6:
2709 case READ_12:
2710 if (*cdb_len == 6) {
2711 block = (((u32) (cdb[1] & 0x1f)) << 16) | /* LBA bits 20-16 */
 (((u32) cdb[2]) << 8) | cdb[3];
2712 block_cnt = cdb[4];
 if (block_cnt == 0)
 block_cnt = 256; /* 0 means 256 blocks for 6-byte CDBs */
2713 } else {
2714 BUG_ON(*cdb_len != 12);
2715 block = (((u32) cdb[2]) << 24) |
2716 (((u32) cdb[3]) << 16) |
2717 (((u32) cdb[4]) << 8) |
2718 cdb[5];
2719 block_cnt =
2720 (((u32) cdb[6]) << 24) |
2721 (((u32) cdb[7]) << 16) |
2722 (((u32) cdb[8]) << 8) |
2723 cdb[9];
2724 }
2725 if (block_cnt > 0xffff)
2726 return IO_ACCEL_INELIGIBLE;
2727
2728 cdb[0] = is_write ? WRITE_10 : READ_10;
2729 cdb[1] = 0;
2730 cdb[2] = (u8) (block >> 24);
2731 cdb[3] = (u8) (block >> 16);
2732 cdb[4] = (u8) (block >> 8);
2733 cdb[5] = (u8) (block);
2734 cdb[6] = 0;
2735 cdb[7] = (u8) (block_cnt >> 8);
2736 cdb[8] = (u8) (block_cnt);
2737 cdb[9] = 0;
2738 *cdb_len = 10;
2739 break;
2740 }
2741 return 0;
2742}
2743
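/*
 * Worked example of the fixup above (bytes illustrative): a READ_6 CDB
 * of 08 00 12 34 08 00 decodes, per the code above, to block = 0x1234
 * and block_cnt = 8, and is rewritten as the READ_10 CDB
 * 28 00 00 00 12 34 00 00 08 00, with *cdb_len set to 10.
 */
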
static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
	unsigned int len;
	unsigned int total_len = 0;
	struct scatterlist *sg;
	u64 addr64;
	int use_sg, i;
	struct SGDescriptor *curr_sg;
	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;

	/* TODO: implement chaining support */
	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
		return IO_ACCEL_INELIGIBLE;

	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);

	if (fixup_ioaccel_cdb(cdb, &cdb_len))
		return IO_ACCEL_INELIGIBLE;

	c->cmd_type = CMD_IOACCEL1;

	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (use_sg) {
		curr_sg = cp->SG;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
			curr_sg->Addr.upper =
				(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
			curr_sg->Len = len;

			if (i == (scsi_sg_count(cmd) - 1))
				curr_sg->Ext = HPSA_SG_LAST;
			else
				curr_sg->Ext = 0;  /* we are not chaining */
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			control |= IOACCEL1_CONTROL_DATA_IN;
			break;
		case DMA_NONE:
			control |= IOACCEL1_CONTROL_NODATAXFER;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		control |= IOACCEL1_CONTROL_NODATAXFER;
	}

	c->Header.SGList = use_sg;
	/* Fill out the command structure to submit */
	cp->dev_handle = ioaccel_handle & 0xFFFF;
	cp->transfer_len = total_len;
	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
	cp->control = control;
	memcpy(cp->CDB, cdb, cdb_len);
	memcpy(cp->CISS_LUN, scsi3addr, 8);
	/* Tag was already set at init time. */
	enqueue_cmd_and_start_io(h, c);
	return 0;
}

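/*
 * Note on the BUG_ON(c->busaddr & 0x0000007F) above: the low seven bits
 * of busaddr must be clear, i.e. the accelerated command block must be
 * 128-byte aligned.  Illustrative numbers only: with a pool base of
 * 0x10000 and sizeof(*cp) a multiple of 128, cmdindex 5 yields
 * busaddr = 0x10000 + 5 * sizeof(*cp), which stays aligned.
 */
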
/*
 * Queue a command directly to a device behind the controller using the
 * I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;

	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
		cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
}

static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
	struct ioaccel2_sg_element *curr_sg;
	int use_sg, i;
	struct scatterlist *sg;
	u64 addr64;
	u32 len;
	u32 total_len = 0;

	if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
		return IO_ACCEL_INELIGIBLE;

	if (fixup_ioaccel_cdb(cdb, &cdb_len))
		return IO_ACCEL_INELIGIBLE;
	c->cmd_type = CMD_IOACCEL2;
	/* Adjust the DMA address to point to the accelerated command buffer */
	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
				(c->cmdindex * sizeof(*cp));
	BUG_ON(c->busaddr & 0x0000007F);

	memset(cp, 0, sizeof(*cp));
	cp->IU_type = IOACCEL2_IU_TYPE;

	use_sg = scsi_dma_map(cmd);
	if (use_sg < 0)
		return use_sg;

	if (use_sg) {
		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
		curr_sg = cp->sg;
		scsi_for_each_sg(cmd, sg, use_sg, i) {
			addr64 = (u64) sg_dma_address(sg);
			len = sg_dma_len(sg);
			total_len += len;
			curr_sg->address = cpu_to_le64(addr64);
			curr_sg->length = cpu_to_le32(len);
			curr_sg->reserved[0] = 0;
			curr_sg->reserved[1] = 0;
			curr_sg->reserved[2] = 0;
			curr_sg->chain_indicator = 0;
			curr_sg++;
		}

		switch (cmd->sc_data_direction) {
		case DMA_TO_DEVICE:
			cp->direction = IOACCEL2_DIR_DATA_OUT;
			break;
		case DMA_FROM_DEVICE:
			cp->direction = IOACCEL2_DIR_DATA_IN;
			break;
		case DMA_NONE:
			cp->direction = IOACCEL2_DIR_NO_DATA;
			break;
		default:
			dev_err(&h->pdev->dev, "unknown data direction: %d\n",
				cmd->sc_data_direction);
			BUG();
			break;
		}
	} else {
		cp->direction = IOACCEL2_DIR_NO_DATA;
	}
	cp->scsi_nexus = ioaccel_handle;
	cp->Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
				DIRECT_LOOKUP_BIT;
	memcpy(cp->cdb, cdb, sizeof(cp->cdb));
	memset(cp->cciss_lun, 0, sizeof(cp->cciss_lun));
	cp->cmd_priority_task_attr = 0;

	/* fill in sg elements */
	cp->sg_count = (u8) use_sg;

	cp->data_len = cpu_to_le32(total_len);
	cp->err_ptr = cpu_to_le64(c->busaddr +
			offsetof(struct io_accel2_cmd, error_data));
	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));

	enqueue_cmd_and_start_io(h, c);
	return 0;
}

/*
 * Queue a command to the correct I/O accelerator path.
 */
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr)
{
	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr);
	else
		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
						cdb, cdb_len, scsi3addr);
}

/*
 * Attempt to perform offload RAID mapping for a logical volume I/O.
 */
static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
	struct CommandList *c)
{
	struct scsi_cmnd *cmd = c->scsi_cmd;
	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
	struct raid_map_data *map = &dev->raid_map;
	struct raid_map_disk_data *dd = &map->data[0];
	int is_write = 0;
	u32 map_index;
	u64 first_block, last_block;
	u32 block_cnt;
	u32 blocks_per_row;
	u64 first_row, last_row;
	u32 first_row_offset, last_row_offset;
	u32 first_column, last_column;
	u32 map_row;
	u32 disk_handle;
	u64 disk_block;
	u32 disk_block_cnt;
	u8 cdb[16];
	u8 cdb_len;
#if BITS_PER_LONG == 32
	u64 tmpdiv;
#endif

	BUG_ON(!(dev->offload_config && dev->offload_enabled));

	/* check for valid opcode, get LBA and block count */
	switch (cmd->cmnd[0]) {
	case WRITE_6:
		is_write = 1;
		/* fall through */
	case READ_6:
		first_block =
			(((u64) cmd->cmnd[2]) << 8) |
			cmd->cmnd[3];
		block_cnt = cmd->cmnd[4];
		break;
	case WRITE_10:
		is_write = 1;
		/* fall through */
	case READ_10:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[7]) << 8) |
			cmd->cmnd[8];
		break;
	case WRITE_12:
		is_write = 1;
		/* fall through */
	case READ_12:
		first_block =
			(((u64) cmd->cmnd[2]) << 24) |
			(((u64) cmd->cmnd[3]) << 16) |
			(((u64) cmd->cmnd[4]) << 8) |
			cmd->cmnd[5];
		block_cnt =
			(((u32) cmd->cmnd[6]) << 24) |
			(((u32) cmd->cmnd[7]) << 16) |
			(((u32) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		break;
	case WRITE_16:
		is_write = 1;
		/* fall through */
	case READ_16:
		first_block =
			(((u64) cmd->cmnd[2]) << 56) |
			(((u64) cmd->cmnd[3]) << 48) |
			(((u64) cmd->cmnd[4]) << 40) |
			(((u64) cmd->cmnd[5]) << 32) |
			(((u64) cmd->cmnd[6]) << 24) |
			(((u64) cmd->cmnd[7]) << 16) |
			(((u64) cmd->cmnd[8]) << 8) |
			cmd->cmnd[9];
		block_cnt =
			(((u32) cmd->cmnd[10]) << 24) |
			(((u32) cmd->cmnd[11]) << 16) |
			(((u32) cmd->cmnd[12]) << 8) |
			cmd->cmnd[13];
		break;
	default:
		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
	}
	BUG_ON(block_cnt == 0);
	last_block = first_block + block_cnt - 1;

	/* check for write to non-RAID-0 */
	if (is_write && dev->raid_level != 0)
		return IO_ACCEL_INELIGIBLE;

	/* check for invalid block or wraparound */
	if (last_block >= map->volume_blk_cnt || last_block < first_block)
		return IO_ACCEL_INELIGIBLE;

	/* calculate stripe information for the request */
	blocks_per_row = map->data_disks_per_row * map->strip_size;
#if BITS_PER_LONG == 32
	tmpdiv = first_block;
	(void) do_div(tmpdiv, blocks_per_row);
	first_row = tmpdiv;
	tmpdiv = last_block;
	(void) do_div(tmpdiv, blocks_per_row);
	last_row = tmpdiv;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	tmpdiv = first_row_offset;
	(void) do_div(tmpdiv, map->strip_size);
	first_column = tmpdiv;
	tmpdiv = last_row_offset;
	(void) do_div(tmpdiv, map->strip_size);
	last_column = tmpdiv;
#else
	first_row = first_block / blocks_per_row;
	last_row = last_block / blocks_per_row;
	first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
	last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
	first_column = first_row_offset / map->strip_size;
	last_column = last_row_offset / map->strip_size;
#endif

	/* if this isn't a single row/column then give to the controller */
	if ((first_row != last_row) || (first_column != last_column))
		return IO_ACCEL_INELIGIBLE;

	/* proceeding with driver mapping */
	map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
			map->row_cnt;
	map_index = (map_row * (map->data_disks_per_row +
			map->metadata_disks_per_row)) + first_column;
	if (dev->raid_level == 2) {
		/* Simple round-robin balancing of RAID 1+0 reads across
		 * primary and mirror members.  This is appropriate for SSD
		 * but not optimal for HDD.
		 */
		if (dev->offload_to_mirror)
			map_index += map->data_disks_per_row;
		dev->offload_to_mirror = !dev->offload_to_mirror;
	}
	disk_handle = dd[map_index].ioaccel_handle;
	disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
			(first_row_offset - (first_column * map->strip_size));
	disk_block_cnt = block_cnt;

	/* handle differing logical/physical block sizes */
	if (map->phys_blk_shift) {
		disk_block <<= map->phys_blk_shift;
		disk_block_cnt <<= map->phys_blk_shift;
	}
	BUG_ON(disk_block_cnt > 0xffff);

	/* build the new CDB for the physical disk I/O */
	if (disk_block > 0xffffffff) {
		cdb[0] = is_write ? WRITE_16 : READ_16;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 56);
		cdb[3] = (u8) (disk_block >> 48);
		cdb[4] = (u8) (disk_block >> 40);
		cdb[5] = (u8) (disk_block >> 32);
		cdb[6] = (u8) (disk_block >> 24);
		cdb[7] = (u8) (disk_block >> 16);
		cdb[8] = (u8) (disk_block >> 8);
		cdb[9] = (u8) (disk_block);
		cdb[10] = (u8) (disk_block_cnt >> 24);
		cdb[11] = (u8) (disk_block_cnt >> 16);
		cdb[12] = (u8) (disk_block_cnt >> 8);
		cdb[13] = (u8) (disk_block_cnt);
		cdb[14] = 0;
		cdb[15] = 0;
		cdb_len = 16;
	} else {
		cdb[0] = is_write ? WRITE_10 : READ_10;
		cdb[1] = 0;
		cdb[2] = (u8) (disk_block >> 24);
		cdb[3] = (u8) (disk_block >> 16);
		cdb[4] = (u8) (disk_block >> 8);
		cdb[5] = (u8) (disk_block);
		cdb[6] = 0;
		cdb[7] = (u8) (disk_block_cnt >> 8);
		cdb[8] = (u8) (disk_block_cnt);
		cdb[9] = 0;
		cdb_len = 10;
	}
	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
						dev->scsi3addr);
}

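/*
 * Worked example of the geometry math above (all values illustrative):
 * with strip_size = 128 and data_disks_per_row = 3, blocks_per_row is
 * 384.  A request for first_block = 1000, block_cnt = 8 gives
 * last_block = 1007, so first_row = last_row = 2 (1000 / 384),
 * first_row_offset = 232, last_row_offset = 239, and
 * first_column = last_column = 1 (232 / 128 and 239 / 128): a single
 * row/column, hence eligible for the accelerated path.  The physical
 * start is then
 *	disk_block = disk_starting_blk + 2 * 128 + (232 - 1 * 128)
 *		   = disk_starting_blk + 360.
 */
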
static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	unsigned char scsi3addr[8];
	struct CommandList *c;
	unsigned long flags;
	int rc = 0;

	/* Get the ptr to our adapter structure out of cmd->host. */
	h = sdev_to_hba(cmd->device);
	dev = cmd->device->hostdata;
	if (!dev) {
		cmd->result = DID_NO_CONNECT << 16;
		done(cmd);
		return 0;
	}
	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));

	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		cmd->result = DID_ERROR << 16;
		done(cmd);
		return 0;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	c = cmd_alloc(h);
	if (c == NULL) {		/* trouble... */
		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	/* Fill in the command list header */

	cmd->scsi_done = done;    /* save this for use by completion code */

	/* save c in case we have to abort it */
	cmd->host_scribble = (unsigned char *) c;

	c->cmd_type = CMD_SCSI;
	c->scsi_cmd = cmd;

	/* Call alternate submit routine for I/O accelerated commands.
	 * Retries always go down the normal I/O path.
	 */
	if (likely(cmd->retries == 0 &&
		cmd->request->cmd_type == REQ_TYPE_FS)) {
		if (dev->offload_enabled) {
			rc = hpsa_scsi_ioaccel_raid_map(h, c);
			if (rc == 0)
				return 0; /* Sent on ioaccel path */
			if (rc < 0) {   /* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		} else if (dev->ioaccel_handle) {
			rc = hpsa_scsi_ioaccel_direct_map(h, c);
			if (rc == 0)
				return 0; /* Sent on direct map path */
			if (rc < 0) {   /* scsi_dma_map failed. */
				cmd_free(h, c);
				return SCSI_MLQUEUE_HOST_BUSY;
			}
		}
	}

	c->Header.ReplyQueue = 0;  /* unused in simple mode */
	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
	c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;

	/* Fill in the request block... */

	c->Request.Timeout = 0;
	memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
	BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
	c->Request.CDBLen = cmd->cmd_len;
	memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
	c->Request.Type.Type = TYPE_CMD;
	c->Request.Type.Attribute = ATTR_SIMPLE;
	switch (cmd->sc_data_direction) {
	case DMA_TO_DEVICE:
		c->Request.Type.Direction = XFER_WRITE;
		break;
	case DMA_FROM_DEVICE:
		c->Request.Type.Direction = XFER_READ;
		break;
	case DMA_NONE:
		c->Request.Type.Direction = XFER_NONE;
		break;
	case DMA_BIDIRECTIONAL:
		/* This can happen if a buggy application does a scsi passthru
		 * and sets both inlen and outlen to non-zero. ( see
		 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
		 */

		c->Request.Type.Direction = XFER_RSVD;
		/* This is technically wrong, and hpsa controllers should
		 * reject it with CMD_INVALID, which is the most correct
		 * response, but non-fibre backends appear to let it
		 * slide by, and give the same results as if this field
		 * were set correctly.  Either way is acceptable for
		 * our purposes here.
		 */

		break;

	default:
		dev_err(&h->pdev->dev, "unknown data direction: %d\n",
			cmd->sc_data_direction);
		BUG();
		break;
	}

	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
		cmd_free(h, c);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	enqueue_cmd_and_start_io(h, c);
	/* the cmd'll come back via intr handler in complete_scsi_command() */
	return 0;
}

static DEF_SCSI_QCMD(hpsa_scsi_queue_command)

static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
{
	unsigned long flags;

	/*
	 * Don't let rescans be initiated on a controller known
	 * to be locked up.  If the controller locks up *during*
	 * a rescan, that thread is probably hosed, but at least
	 * we can prevent new rescan threads from piling up on a
	 * locked up controller.
	 */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		spin_lock_irqsave(&h->scan_lock, flags);
		h->scan_finished = 1;
		wake_up_all(&h->scan_wait_queue);
		spin_unlock_irqrestore(&h->scan_lock, flags);
		return 1;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return 0;
}

static void hpsa_scan_start(struct Scsi_Host *sh)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;

	if (do_not_scan_if_controller_locked_up(h))
		return;

	/* wait until any scan already in progress is finished. */
	while (1) {
		spin_lock_irqsave(&h->scan_lock, flags);
		if (h->scan_finished)
			break;
		spin_unlock_irqrestore(&h->scan_lock, flags);
		wait_event(h->scan_wait_queue, h->scan_finished);
		/* Note: We don't need to worry about a race between this
		 * thread and driver unload because the midlayer will
		 * have incremented the reference count, so unload won't
		 * happen if we're in here.
		 */
	}
	h->scan_finished = 0; /* mark scan as in progress */
	spin_unlock_irqrestore(&h->scan_lock, flags);

	if (do_not_scan_if_controller_locked_up(h))
		return;

	hpsa_update_scsi_devices(h, h->scsi_host->host_no);

	spin_lock_irqsave(&h->scan_lock, flags);
	h->scan_finished = 1; /* mark scan as finished. */
	wake_up_all(&h->scan_wait_queue);
	spin_unlock_irqrestore(&h->scan_lock, flags);
}

static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time)
{
	struct ctlr_info *h = shost_to_hba(sh);
	unsigned long flags;
	int finished;

	spin_lock_irqsave(&h->scan_lock, flags);
	finished = h->scan_finished;
	spin_unlock_irqrestore(&h->scan_lock, flags);
	return finished;
}

static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason)
{
	struct ctlr_info *h = sdev_to_hba(sdev);

	if (reason != SCSI_QDEPTH_DEFAULT)
		return -ENOTSUPP;

	if (qdepth < 1)
		qdepth = 1;
	else
		if (qdepth > h->nr_cmds)
			qdepth = h->nr_cmds;
	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
	return sdev->queue_depth;
}

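/*
 * Clamp example (the nr_cmds value is illustrative): with
 * h->nr_cmds == 1024, a requested qdepth of 0 is raised to 1 and a
 * requested qdepth of 4096 is capped to 1024 before being passed to
 * scsi_adjust_queue_depth().
 */
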
static void hpsa_unregister_scsi(struct ctlr_info *h)
{
	/* we are being forcibly unloaded, and may not refuse. */
	scsi_remove_host(h->scsi_host);
	scsi_host_put(h->scsi_host);
	h->scsi_host = NULL;
}

static int hpsa_register_scsi(struct ctlr_info *h)
{
	struct Scsi_Host *sh;
	int error;

	sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
	if (sh == NULL)
		goto fail;

	sh->io_port = 0;
	sh->n_io_port = 0;
	sh->this_id = -1;
	sh->max_channel = 3;
	sh->max_cmd_len = MAX_COMMAND_SIZE;
	sh->max_lun = HPSA_MAX_LUN;
	sh->max_id = HPSA_MAX_LUN;
	sh->can_queue = h->nr_cmds;
	sh->cmd_per_lun = h->nr_cmds;
	sh->sg_tablesize = h->maxsgentries;
	h->scsi_host = sh;
	sh->hostdata[0] = (unsigned long) h;
	sh->irq = h->intr[h->intr_mode];
	sh->unique_id = sh->irq;
	error = scsi_add_host(sh, &h->pdev->dev);
	if (error)
		goto fail_host_put;
	scsi_scan_host(sh);
	return 0;

 fail_host_put:
	dev_err(&h->pdev->dev, "%s: scsi_add_host"
		" failed for controller %d\n", __func__, h->ctlr);
	scsi_host_put(sh);
	return error;
 fail:
	dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
		" failed for controller %d\n", __func__, h->ctlr);
	return -ENOMEM;
}

static int wait_for_device_to_become_ready(struct ctlr_info *h,
	unsigned char lunaddr[])
{
	int rc = 0;
	int count = 0;
	int waittime = 1; /* seconds */
	struct CommandList *c;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "out of memory in "
			"wait_for_device_to_become_ready.\n");
		return IO_ERROR;
	}

	/* Send test unit ready until device ready, or give up. */
	while (count < HPSA_TUR_RETRY_LIMIT) {

		/* Wait for a bit.  do this first, because if we send
		 * the TUR right away, the reset will just abort it.
		 */
		msleep(1000 * waittime);
		count++;

		/* Increase wait time with each try, up to a point. */
		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
			waittime = waittime * 2;

		/* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
		(void) fill_cmd(c, TEST_UNIT_READY, h,
				NULL, 0, 0, lunaddr, TYPE_CMD);
		hpsa_scsi_do_simple_cmd_core(h, c);
		/* no unmap needed here because no data xfer. */

		if (c->err_info->CommandStatus == CMD_SUCCESS)
			break;

		if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
			(c->err_info->SenseInfo[2] == NO_SENSE ||
			c->err_info->SenseInfo[2] == UNIT_ATTENTION))
			break;

		dev_warn(&h->pdev->dev, "waiting %d secs "
			"for device to become ready.\n", waittime);
		rc = 1; /* device not ready. */
	}

	if (rc)
		dev_warn(&h->pdev->dev, "giving up on device.\n");
	else
		dev_warn(&h->pdev->dev, "device is ready.\n");

	cmd_special_free(h, c);
	return rc;
}

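/*
 * The loop above is a doubling backoff: it sleeps 1s before the first
 * TUR, then 2s, 4s, 8s, ...; the interval stops doubling once it
 * reaches HPSA_MAX_WAIT_INTERVAL_SECS, and the number of tries is
 * capped at HPSA_TUR_RETRY_LIMIT (both defined elsewhere in the
 * driver).
 */
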
/* Need at least one of these error handlers to keep ../scsi/hosts.c from
 * complaining.  Doing a host- or bus-reset can't do anything good here.
 */
static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
{
	int rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;

	/* find the controller to which the command to be aborted was sent */
	h = sdev_to_hba(scsicmd->device);
	if (h == NULL) /* paranoia */
		return FAILED;
	dev = scsicmd->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
			"device lookup failed.\n");
		return FAILED;
	}
	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
	/* send a reset to the SCSI LUN which the command was sent to */
	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
		return SUCCESS;

	dev_warn(&h->pdev->dev, "resetting device failed.\n");
	return FAILED;
}

static void swizzle_abort_tag(u8 *tag)
{
	u8 original_tag[8];

	memcpy(original_tag, tag, 8);
	tag[0] = original_tag[3];
	tag[1] = original_tag[2];
	tag[2] = original_tag[1];
	tag[3] = original_tag[0];
	tag[4] = original_tag[7];
	tag[5] = original_tag[6];
	tag[6] = original_tag[5];
	tag[7] = original_tag[4];
}

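/*
 * In other words, each 4-byte half of the 8-byte tag is byte-reversed.
 * Illustrative example: a tag of 01 23 45 67 89 ab cd ef becomes
 * 67 45 23 01 ef cd ab 89.
 */
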
static void hpsa_get_tag(struct ctlr_info *h,
	struct CommandList *c, u32 *taglower, u32 *tagupper)
{
	if (c->cmd_type == CMD_IOACCEL1) {
		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
			&h->ioaccel_cmd_pool[c->cmdindex];
		*tagupper = cm1->Tag.upper;
		*taglower = cm1->Tag.lower;
		return;
	}
	if (c->cmd_type == CMD_IOACCEL2) {
		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
			&h->ioaccel2_cmd_pool[c->cmdindex];
		*tagupper = cm2->Tag.upper;
		*taglower = cm2->Tag.lower;
		return;
	}
	*tagupper = c->Header.Tag.upper;
	*taglower = c->Header.Tag.lower;
}

static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
	struct CommandList *abort, int swizzle)
{
	int rc = IO_OK;
	struct CommandList *c;
	struct ErrorInfo *ei;
	u32 tagupper, taglower;

	c = cmd_special_alloc(h);
	if (c == NULL) {	/* trouble... */
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		return -ENOMEM;
	}

	/* fill_cmd can't fail here, no buffer to map */
	(void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
		0, 0, scsi3addr, TYPE_MSG);
	if (swizzle)
		swizzle_abort_tag(&c->Request.CDB[4]);
	hpsa_scsi_do_simple_cmd_core(h, c);
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
		__func__, tagupper, taglower);
	/* no unmap needed here because no data xfer. */

	ei = c->err_info;
	switch (ei->CommandStatus) {
	case CMD_SUCCESS:
		break;
	case CMD_UNABORTABLE: /* Very common, don't make noise. */
		rc = -1;
		break;
	default:
		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
			__func__, tagupper, taglower);
		hpsa_scsi_interpret_error(c);
		rc = -1;
		break;
	}
	cmd_special_free(h, c);
	/* use the tag fetched above; abort->Header.Tag is only valid for
	 * CMD_SCSI commands, not for the ioaccel paths.
	 */
	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__,
		tagupper, taglower);
	return rc;
}

/*
 * hpsa_find_cmd_in_queue
 *
 * Used to determine whether a command (find) is still present
 * in queue_head.  Optionally excludes the last element of queue_head.
 *
 * This is used to avoid unnecessary aborts.  Commands in h->reqQ have
 * not yet been submitted, and so can be aborted by the driver without
 * sending an abort to the hardware.
 *
 * Returns pointer to command if found in queue, NULL otherwise.
 */
static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
	struct scsi_cmnd *find, struct list_head *queue_head)
{
	unsigned long flags;
	struct CommandList *c = NULL;	/* ptr into cmpQ */

	if (!find)
		return NULL;
	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, queue_head, list) {
		if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
			continue;
		if (c->scsi_cmd == find) {
			spin_unlock_irqrestore(&h->lock, flags);
			return c;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return NULL;
}

static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
	u8 *tag, struct list_head *queue_head)
{
	unsigned long flags;
	struct CommandList *c;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, queue_head, list) {
		if (memcmp(&c->Header.Tag, tag, 8) != 0)
			continue;
		spin_unlock_irqrestore(&h->lock, flags);
		return c;
	}
	spin_unlock_irqrestore(&h->lock, flags);
	return NULL;
}

/* ioaccel2 path firmware cannot handle abort task requests.
 * Change abort requests to physical target reset, and send to the
 * address of the physical disk used for the ioaccel 2 command.
 * Return 0 on success (IO_OK)
 *	 -1 on failure
 */
static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort)
{
	int rc = IO_OK;
	struct scsi_cmnd *scmd; /* scsi command within request being aborted */
	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
	unsigned char *psa = &phys_scsi3addr[0];

	/* Get a pointer to the hpsa logical device. */
	scmd = (struct scsi_cmnd *) abort->scsi_cmd;
	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
	if (dev == NULL) {
		dev_warn(&h->pdev->dev,
			"Cannot abort: no device pointer for command.\n");
		return -1; /* not abortable */
	}

	if (!dev->offload_enabled) {
		dev_warn(&h->pdev->dev,
			"Can't abort: device is not operating in HP SSD Smart Path mode.\n");
		return -1; /* not abortable */
	}

	/* Incoming scsi3addr is logical addr. We need physical disk addr. */
	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
		return -1; /* not abortable */
	}

	/* send the reset */
	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
	if (rc != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return rc; /* failed to reset */
	}

	/* wait for device to recover */
	if (wait_for_device_to_become_ready(h, psa) != 0) {
		dev_warn(&h->pdev->dev,
			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
			psa[0], psa[1], psa[2], psa[3],
			psa[4], psa[5], psa[6], psa[7]);
		return -1; /* failed to recover */
	}

	/* device recovered */
	dev_info(&h->pdev->dev,
		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		psa[0], psa[1], psa[2], psa[3],
		psa[4], psa[5], psa[6], psa[7]);

	return rc; /* success */
}

/* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to
 * tell which kind we're dealing with, so we send the abort both ways.  There
 * shouldn't be any collisions between swizzled and unswizzled tags due to the
 * way we construct our tags but we check anyway in case the assumptions which
 * make this true someday become false.
 */
static int hpsa_send_abort_both_ways(struct ctlr_info *h,
	unsigned char *scsi3addr, struct CommandList *abort)
{
	u8 swizzled_tag[8];
	struct CommandList *c;
	int rc = 0, rc2 = 0;

	/* ioaccel mode 2 commands should be aborted via the
	 * accelerated path, since the RAID path is unaware of these
	 * commands, but the underlying firmware can't handle the abort
	 * TMF.  Change the abort to a physical device reset.
	 */
	if (abort->cmd_type == CMD_IOACCEL2)
		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);

	/* we do not expect to find the swizzled tag in our queue, but
	 * check anyway just to be sure the assumptions which make this
	 * the case haven't become wrong.
	 */
	memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
	swizzle_abort_tag(swizzled_tag);
	c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
	if (c != NULL) {
		dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
		return hpsa_send_abort(h, scsi3addr, abort, 0);
	}
	rc = hpsa_send_abort(h, scsi3addr, abort, 0);

	/* if the command is still in our queue, we can't conclude that it was
	 * aborted (it might have just completed normally) but in any case
	 * we don't need to try to abort it another way.
	 */
	c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
	if (c)
		rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
	return rc && rc2;
}

/* Send an abort for the specified command.
 *	If the device and controller support it,
 *		send a task abort request.
 */
static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
{
	int i, rc;
	struct ctlr_info *h;
	struct hpsa_scsi_dev_t *dev;
	struct CommandList *abort; /* pointer to command to be aborted */
	struct CommandList *found;
	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */
	char msg[256];		/* For debug messaging. */
	int ml = 0;
	u32 tagupper, taglower;

	/* Find the controller of the command to be aborted */
	h = sdev_to_hba(sc->device);
	if (WARN(h == NULL,
			"ABORT REQUEST FAILED, Controller lookup failed.\n"))
		return FAILED;

	/* Check that controller supports some kind of task abort */
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
		!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		return FAILED;

	memset(msg, 0, sizeof(msg));
	ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
		h->scsi_host->host_no, sc->device->channel,
		sc->device->id, sc->device->lun);

	/* Find the device of the command to be aborted */
	dev = sc->device->hostdata;
	if (!dev) {
		dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
				msg);
		return FAILED;
	}

	/* Get SCSI command to be aborted */
	abort = (struct CommandList *) sc->host_scribble;
	if (abort == NULL) {
		dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
				msg);
		return FAILED;
	}
	hpsa_get_tag(h, abort, &taglower, &tagupper);
	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
	as = (struct scsi_cmnd *) abort->scsi_cmd;
	if (as != NULL)
		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
			as->cmnd[0], as->serial_number);
	dev_dbg(&h->pdev->dev, "%s\n", msg);
	dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);

	/* Search reqQ to see if command is queued but not submitted;
	 * if so, complete the command with aborted status and remove
	 * it from the reqQ.
	 */
	found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
	if (found) {
		found->err_info->CommandStatus = CMD_ABORTED;
		finish_cmd(found);
		dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
				msg);
		return SUCCESS;
	}

	/* not in reqQ, if also not in cmpQ, must have already completed */
	found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
	if (!found) {
		dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
				msg);
		return SUCCESS;
	}

	/*
	 * Command is in flight, or possibly already completed
	 * by the firmware (but not to the scsi mid layer) but we can't
	 * distinguish which.  Send the abort down.
	 */
	rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
	if (rc != 0) {
		dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
		dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
			h->scsi_host->host_no,
			dev->bus, dev->target, dev->lun);
		return FAILED;
	}
	dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);

	/* If the abort(s) above completed and actually aborted the
	 * command, then the command to be aborted should already be
	 * completed.  If not, wait around a bit more to see if they
	 * manage to complete normally.
	 */
#define ABORT_COMPLETE_WAIT_SECS 30
	for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
		found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
		if (!found)
			return SUCCESS;
		msleep(100);
	}
	dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
		msg, ABORT_COMPLETE_WAIT_SECS);
	return FAILED;
}

/*
 * For operations that cannot sleep, a command block is allocated at init,
 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
 * which ones are free or in use.  The bitmap is protected internally by
 * h->lock, so do not call this with h->lock already held.
 * cmd_free() is the complement.
 */
static struct CommandList *cmd_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	int i;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	do {
		i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
		if (i == h->nr_cmds) {
			spin_unlock_irqrestore(&h->lock, flags);
			return NULL;
		}
	} while (test_and_set_bit
		 (i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
	spin_unlock_irqrestore(&h->lock, flags);

	c = h->cmd_pool + i;
	memset(c, 0, sizeof(*c));
	cmd_dma_handle = h->cmd_pool_dhandle
	    + i * sizeof(*c);
	c->err_info = h->errinfo_pool + i;
	memset(c->err_info, 0, sizeof(*c->err_info));
	err_dma_handle = h->errinfo_pool_dhandle
	    + i * sizeof(*c->err_info);

	c->cmdindex = i;

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}

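/*
 * Bitmap arithmetic example (illustrative): on a 64-bit kernel
 * (BITS_PER_LONG == 64), command index i = 70 lives at bit
 * (70 & 63) == 6 of word (70 / 64) == 1 of h->cmd_pool_bits; the same
 * arithmetic is used by clear_bit() in cmd_free() below.
 */
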
/* For operations that can wait for kmalloc to possibly sleep,
 * this routine can be called.  Lock need not be held to call
 * cmd_special_alloc.  cmd_special_free() is the complement.
 */
static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
{
	struct CommandList *c;
	union u64bit temp64;
	dma_addr_t cmd_dma_handle, err_dma_handle;

	c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
	if (c == NULL)
		return NULL;
	memset(c, 0, sizeof(*c));

	c->cmd_type = CMD_SCSI;
	c->cmdindex = -1;

	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
		    &err_dma_handle);

	if (c->err_info == NULL) {
		pci_free_consistent(h->pdev,
			sizeof(*c), c, cmd_dma_handle);
		return NULL;
	}
	memset(c->err_info, 0, sizeof(*c->err_info));

	INIT_LIST_HEAD(&c->list);
	c->busaddr = (u32) cmd_dma_handle;
	temp64.val = (u64) err_dma_handle;
	c->ErrDesc.Addr.lower = temp64.val32.lower;
	c->ErrDesc.Addr.upper = temp64.val32.upper;
	c->ErrDesc.Len = sizeof(*c->err_info);

	c->h = h;
	return c;
}

static void cmd_free(struct ctlr_info *h, struct CommandList *c)
{
	int i;
	unsigned long flags;

	i = c - h->cmd_pool;
	spin_lock_irqsave(&h->lock, flags);
	clear_bit(i & (BITS_PER_LONG - 1),
		  h->cmd_pool_bits + (i / BITS_PER_LONG));
	spin_unlock_irqrestore(&h->lock, flags);
}

static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
{
	union u64bit temp64;

	temp64.val32.lower = c->ErrDesc.Addr.lower;
	temp64.val32.upper = c->ErrDesc.Addr.upper;
	pci_free_consistent(h->pdev, sizeof(*c->err_info),
		c->err_info, (dma_addr_t) temp64.val);
	pci_free_consistent(h->pdev, sizeof(*c),
		c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
}

#ifdef CONFIG_COMPAT

static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
{
	IOCTL32_Command_struct __user *arg32 =
	    (IOCTL32_Command_struct __user *) arg;
	IOCTL_Command_struct arg64;
	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

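/*
 * The function above is the usual 32-bit compat-ioctl shim: the 32-bit
 * user struct is widened field by field into the 64-bit layout (with
 * compat_ptr() turning the 32-bit buf handle into a proper user
 * pointer), staged in user-accessible scratch space via
 * compat_alloc_user_space(), and handed to the native hpsa_ioctl().
 * Only error_info is copied back here, because hpsa_ioctl() writes any
 * data directly through arg64.buf, which still points at the caller's
 * original buffer.
 */
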
static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
	int cmd, void *arg)
{
	BIG_IOCTL32_Command_struct __user *arg32 =
	    (BIG_IOCTL32_Command_struct __user *) arg;
	BIG_IOCTL_Command_struct arg64;
	BIG_IOCTL_Command_struct __user *p =
	    compat_alloc_user_space(sizeof(arg64));
	int err;
	u32 cp;

	memset(&arg64, 0, sizeof(arg64));
	err = 0;
	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
			   sizeof(arg64.LUN_info));
	err |= copy_from_user(&arg64.Request, &arg32->Request,
			   sizeof(arg64.Request));
	err |= copy_from_user(&arg64.error_info, &arg32->error_info,
			   sizeof(arg64.error_info));
	err |= get_user(arg64.buf_size, &arg32->buf_size);
	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
	err |= get_user(cp, &arg32->buf);
	arg64.buf = compat_ptr(cp);
	err |= copy_to_user(p, &arg64, sizeof(arg64));

	if (err)
		return -EFAULT;

	err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
	if (err)
		return err;
	err |= copy_in_user(&arg32->error_info, &p->error_info,
			 sizeof(arg32->error_info));
	if (err)
		return -EFAULT;
	return err;
}

static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	switch (cmd) {
	case CCISS_GETPCIINFO:
	case CCISS_GETINTINFO:
	case CCISS_SETINTINFO:
	case CCISS_GETNODENAME:
	case CCISS_SETNODENAME:
	case CCISS_GETHEARTBEAT:
	case CCISS_GETBUSTYPES:
	case CCISS_GETFIRMVER:
	case CCISS_GETDRIVVER:
	case CCISS_REVALIDVOLS:
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
	case CCISS_RESCANDISK:
	case CCISS_GETLUNINFO:
		return hpsa_ioctl(dev, cmd, arg);

	case CCISS_PASSTHRU32:
		return hpsa_ioctl32_passthru(dev, cmd, arg);
	case CCISS_BIG_PASSTHRU32:
		return hpsa_ioctl32_big_passthru(dev, cmd, arg);

	default:
		return -ENOIOCTLCMD;
	}
}
#endif

static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
{
	struct hpsa_pci_info pciinfo;

	if (!argp)
		return -EINVAL;
	pciinfo.domain = pci_domain_nr(h->pdev->bus);
	pciinfo.bus = h->pdev->bus->number;
	pciinfo.dev_fn = h->pdev->devfn;
	pciinfo.board_id = h->board_id;
	if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
		return -EFAULT;
	return 0;
}

static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
{
	DriverVer_type DriverVer;
	unsigned char vmaj, vmin, vsubmin;
	int rc;

	rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
		&vmaj, &vmin, &vsubmin);
	if (rc != 3) {
		dev_info(&h->pdev->dev, "driver version string '%s' "
			"unrecognized.", HPSA_DRIVER_VERSION);
		vmaj = 0;
		vmin = 0;
		vsubmin = 0;
	}
	DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
	if (!argp)
		return -EINVAL;
	if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
		return -EFAULT;
	return 0;
}

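/*
 * Packing example: a driver version string beginning "3.4.0" parses to
 * vmaj = 3, vmin = 4, vsubmin = 0, which packs to
 * DriverVer = (3 << 16) | (4 << 8) | 0 = 0x030400.
 */
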
4093static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4094{
4095 IOCTL_Command_struct iocommand;
4096 struct CommandList *c;
4097 char *buff = NULL;
4098 union u64bit temp64;
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004099 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004100
4101 if (!argp)
4102 return -EINVAL;
4103 if (!capable(CAP_SYS_RAWIO))
4104 return -EPERM;
4105 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4106 return -EFAULT;
4107 if ((iocommand.buf_size < 1) &&
4108 (iocommand.Request.Type.Direction != XFER_NONE)) {
4109 return -EINVAL;
4110 }
4111 if (iocommand.buf_size > 0) {
4112 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4113 if (buff == NULL)
4114 return -EFAULT;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004115 if (iocommand.Request.Type.Direction == XFER_WRITE) {
4116 /* Copy the data into the buffer we created */
4117 if (copy_from_user(buff, iocommand.buf,
4118 iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004119 rc = -EFAULT;
4120 goto out_kfree;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004121 }
4122 } else {
4123 memset(buff, 0, iocommand.buf_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004124 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004125 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004126 c = cmd_special_alloc(h);
4127 if (c == NULL) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004128 rc = -ENOMEM;
4129 goto out_kfree;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004130 }
4131 /* Fill in the command type */
4132 c->cmd_type = CMD_IOCTL_PEND;
4133 /* Fill in Command Header */
4134 c->Header.ReplyQueue = 0; /* unused in simple mode */
4135 if (iocommand.buf_size > 0) { /* buffer to fill */
4136 c->Header.SGList = 1;
4137 c->Header.SGTotal = 1;
4138 } else { /* no buffers to fill */
4139 c->Header.SGList = 0;
4140 c->Header.SGTotal = 0;
4141 }
4142 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4143 /* use the kernel address the cmd block for tag */
4144 c->Header.Tag.lower = c->busaddr;
4145
4146 /* Fill in Request block */
4147 memcpy(&c->Request, &iocommand.Request,
4148 sizeof(c->Request));
4149
4150 /* Fill in the scatter gather information */
4151 if (iocommand.buf_size > 0) {
4152 temp64.val = pci_map_single(h->pdev, buff,
4153 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06004154 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
4155 c->SG[0].Addr.lower = 0;
4156 c->SG[0].Addr.upper = 0;
4157 c->SG[0].Len = 0;
4158 rc = -ENOMEM;
4159 goto out;
4160 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004161 c->SG[0].Addr.lower = temp64.val32.lower;
4162 c->SG[0].Addr.upper = temp64.val32.upper;
4163 c->SG[0].Len = iocommand.buf_size;
Matt Gatese1d9cbf2014-02-18 13:55:12 -06004164 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004165 }
Stephen M. Camerona0c12412011-10-26 16:22:04 -05004166 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
Stephen M. Cameronc2dd32e2011-06-03 09:57:29 -05004167 if (iocommand.buf_size > 0)
4168 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004169 check_ioctl_unit_attention(h, c);
4170
4171 /* Copy the error information out */
4172 memcpy(&iocommand.error_info, c->err_info,
4173 sizeof(iocommand.error_info));
4174 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004175 rc = -EFAULT;
4176 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004177 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004178 if (iocommand.Request.Type.Direction == XFER_READ &&
4179 iocommand.buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004180 /* Copy the data out of the buffer we created */
4181 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004182 rc = -EFAULT;
4183 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004184 }
4185 }
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004186out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004187 cmd_special_free(h, c);
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004188out_kfree:
4189 kfree(buff);
4190 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004191}
4192
4193static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4194{
4195 BIG_IOCTL_Command_struct *ioc;
4196 struct CommandList *c;
4197 unsigned char **buff = NULL;
4198 int *buff_size = NULL;
4199 union u64bit temp64;
4200 BYTE sg_used = 0;
4201 int status = 0;
4202 int i;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06004203 u32 left;
4204 u32 sz;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004205 BYTE __user *data_ptr;
4206
4207 if (!argp)
4208 return -EINVAL;
4209 if (!capable(CAP_SYS_RAWIO))
4210 return -EPERM;
4211 ioc = (BIG_IOCTL_Command_struct *)
4212 kmalloc(sizeof(*ioc), GFP_KERNEL);
4213 if (!ioc) {
4214 status = -ENOMEM;
4215 goto cleanup1;
4216 }
4217 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
4218 status = -EFAULT;
4219 goto cleanup1;
4220 }
4221 if ((ioc->buf_size < 1) &&
4222 (ioc->Request.Type.Direction != XFER_NONE)) {
4223 status = -EINVAL;
4224 goto cleanup1;
4225 }
4226 /* Check kmalloc limits using all SGs */
4227 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
4228 status = -EINVAL;
4229 goto cleanup1;
4230 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06004231 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004232 status = -EINVAL;
4233 goto cleanup1;
4234 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06004235 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004236 if (!buff) {
4237 status = -ENOMEM;
4238 goto cleanup1;
4239 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06004240 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004241 if (!buff_size) {
4242 status = -ENOMEM;
4243 goto cleanup1;
4244 }
4245 left = ioc->buf_size;
4246 data_ptr = ioc->buf;
4247 while (left) {
4248 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
4249 buff_size[sg_used] = sz;
4250 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
4251 if (buff[sg_used] == NULL) {
4252 status = -ENOMEM;
4253 goto cleanup1;
4254 }
4255 if (ioc->Request.Type.Direction == XFER_WRITE) {
4256 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
4257 status = -ENOMEM;
4258 goto cleanup1;
4259 }
		} else
			memset(buff[sg_used], 0, sz);
		left -= sz;
		data_ptr += sz;
		sg_used++;
	}
	c = cmd_special_alloc(h);
	if (c == NULL) {
		status = -ENOMEM;
		goto cleanup1;
	}
	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	c->Header.SGList = c->Header.SGTotal = sg_used;
	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
	c->Header.Tag.lower = c->busaddr;
	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
	if (ioc->buf_size > 0) {
		int i;
		for (i = 0; i < sg_used; i++) {
			temp64.val = pci_map_single(h->pdev, buff[i],
				buff_size[i], PCI_DMA_BIDIRECTIONAL);
			if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
				c->SG[i].Addr.lower = 0;
				c->SG[i].Addr.upper = 0;
				c->SG[i].Len = 0;
				hpsa_pci_unmap(h->pdev, c, i,
					PCI_DMA_BIDIRECTIONAL);
				status = -ENOMEM;
				goto cleanup0;
			}
			c->SG[i].Addr.lower = temp64.val32.lower;
			c->SG[i].Addr.upper = temp64.val32.upper;
			c->SG[i].Len = buff_size[i];
			c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
		}
	}
	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
	if (sg_used)
		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
	check_ioctl_unit_attention(h, c);
	/* Copy the error information out */
	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
	if (copy_to_user(argp, ioc, sizeof(*ioc))) {
		status = -EFAULT;
		goto cleanup0;
	}
	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
		/* Copy the data out of the buffer we created */
		BYTE __user *ptr = ioc->buf;
		for (i = 0; i < sg_used; i++) {
			if (copy_to_user(ptr, buff[i], buff_size[i])) {
				status = -EFAULT;
				goto cleanup0;
			}
			ptr += buff_size[i];
		}
	}
	status = 0;
cleanup0:
	cmd_special_free(h, c);
cleanup1:
	if (buff) {
		for (i = 0; i < sg_used; i++)
			kfree(buff[i]);
		kfree(buff);
	}
	kfree(buff_size);
	kfree(ioc);
	return status;
}

static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
		(void) check_for_unit_attention(h, c);
}

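/*
 * Passthru commands bypass the normal queue-depth accounting, so they
 * are throttled separately: at most HPSA_MAX_CONCURRENT_PASSTHRUS may
 * be in flight at once, and callers that lose the race get -EAGAIN.
 */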
static int increment_passthru_count(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->passthru_count_lock, flags);
	if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
		spin_unlock_irqrestore(&h->passthru_count_lock, flags);
		return -1;
	}
	h->passthru_count++;
	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
	return 0;
}

static void decrement_passthru_count(struct ctlr_info *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->passthru_count_lock, flags);
	if (h->passthru_count <= 0) {
		spin_unlock_irqrestore(&h->passthru_count_lock, flags);
		/* not expecting to get here. */
		dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
		return;
	}
	h->passthru_count--;
	spin_unlock_irqrestore(&h->passthru_count_lock, flags);
}

/*
 * ioctl
 */
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
{
	struct ctlr_info *h;
	void __user *argp = (void __user *)arg;
	int rc;

	h = sdev_to_hba(dev);

	switch (cmd) {
	case CCISS_DEREGDISK:
	case CCISS_REGNEWDISK:
	case CCISS_REGNEWD:
		hpsa_scan_start(h->scsi_host);
		return 0;
	case CCISS_GETPCIINFO:
		return hpsa_getpciinfo_ioctl(h, argp);
	case CCISS_GETDRIVVER:
		return hpsa_getdrivver_ioctl(h, argp);
	case CCISS_PASSTHRU:
		if (increment_passthru_count(h))
			return -EAGAIN;
		rc = hpsa_passthru_ioctl(h, argp);
		decrement_passthru_count(h);
		return rc;
	case CCISS_BIG_PASSTHRU:
		if (increment_passthru_count(h))
			return -EAGAIN;
		rc = hpsa_big_passthru_ioctl(h, argp);
		decrement_passthru_count(h);
		return rc;
	default:
		return -ENOTTY;
	}
}

static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
	u8 reset_type)
{
	struct CommandList *c;

	c = cmd_alloc(h);
	if (!c)
		return -ENOMEM;
	/* fill_cmd can't fail here, no data buffer to map */
	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
		RAID_CTLR_LUNID, TYPE_MSG);
	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
	c->waiting = NULL;
	enqueue_cmd_and_start_io(h, c);
	/* Don't wait for completion, the reset won't complete.  Don't free
	 * the command either.  This is the last command we will send before
	 * re-initializing everything, so it doesn't matter and won't leak.
	 */
	return 0;
}

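/*
 * Build a driver-internal command (TYPE_CMD) or message (TYPE_MSG) in
 * *c and DMA-map its single data buffer, if any.  Returns -1 if the
 * buffer cannot be mapped; unrecognized opcodes are driver bugs and
 * trip BUG().
 */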
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
	int cmd_type)
{
	int pci_dir = XFER_NONE;
	struct CommandList *a; /* for commands to be aborted */

	c->cmd_type = CMD_IOCTL_PEND;
	c->Header.ReplyQueue = 0;
	if (buff != NULL && size > 0) {
		c->Header.SGList = 1;
		c->Header.SGTotal = 1;
	} else {
		c->Header.SGList = 0;
		c->Header.SGTotal = 0;
	}
	c->Header.Tag.lower = c->busaddr;
	memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);

	c->Request.Type.Type = cmd_type;
	if (cmd_type == TYPE_CMD) {
		switch (cmd) {
		case HPSA_INQUIRY:
			/* are we trying to read a vital product page */
			if (page_code != 0) {
				c->Request.CDB[1] = 0x01;
				c->Request.CDB[2] = page_code;
			}
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_INQUIRY;
			c->Request.CDB[4] = size & 0xFF;
			break;
		case HPSA_REPORT_LOG:
		case HPSA_REPORT_PHYS:
			/* Talking to the controller, so it's a physical
			 * command: mode = 00, target = 0.  Nothing to write.
			 */
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		case HPSA_CACHE_FLUSH:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = BMIC_WRITE;
			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
			c->Request.CDB[7] = (size >> 8) & 0xFF;
			c->Request.CDB[8] = size & 0xFF;
			break;
		case TEST_UNIT_READY:
			c->Request.CDBLen = 6;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0;
			break;
		case HPSA_GET_RAID_MAP:
			c->Request.CDBLen = 12;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_READ;
			c->Request.Timeout = 0;
			c->Request.CDB[0] = HPSA_CISS_READ;
			c->Request.CDB[1] = cmd;
			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
			c->Request.CDB[7] = (size >> 16) & 0xFF;
			c->Request.CDB[8] = (size >> 8) & 0xFF;
			c->Request.CDB[9] = size & 0xFF;
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
			BUG();
			return -1;
		}
	} else if (cmd_type == TYPE_MSG) {
		switch (cmd) {

		case HPSA_DEVICE_RESET_MSG:
			c->Request.CDBLen = 16;
			c->Request.Type.Type = TYPE_MSG; /* it's a message, not a command */
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_NONE;
			c->Request.Timeout = 0; /* Don't time out */
			memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
			c->Request.CDB[0] = cmd;
			c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
			/* If bytes 4-7 are zero, it means reset the LunID device */
			c->Request.CDB[4] = 0x00;
			c->Request.CDB[5] = 0x00;
			c->Request.CDB[6] = 0x00;
			c->Request.CDB[7] = 0x00;
			break;
		case HPSA_ABORT_MSG:
			a = buff; /* point to command to be aborted */
			dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
				a->Header.Tag.upper, a->Header.Tag.lower,
				c->Header.Tag.upper, c->Header.Tag.lower);
			c->Request.CDBLen = 16;
			c->Request.Type.Type = TYPE_MSG;
			c->Request.Type.Attribute = ATTR_SIMPLE;
			c->Request.Type.Direction = XFER_WRITE;
			c->Request.Timeout = 0; /* Don't time out */
			c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
			c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
			c->Request.CDB[2] = 0x00; /* reserved */
			c->Request.CDB[3] = 0x00; /* reserved */
			/* Tag to abort goes in CDB[4]-CDB[11] */
			c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
			c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
			c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
			c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
			c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
			c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
			c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
			c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
			c->Request.CDB[12] = 0x00; /* reserved */
			c->Request.CDB[13] = 0x00; /* reserved */
			c->Request.CDB[14] = 0x00; /* reserved */
			c->Request.CDB[15] = 0x00; /* reserved */
			break;
		default:
			dev_warn(&h->pdev->dev, "unknown message type %d\n",
				cmd);
			BUG();
		}
	} else {
		dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
		BUG();
	}

	switch (c->Request.Type.Direction) {
	case XFER_READ:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case XFER_WRITE:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case XFER_NONE:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
	}
	if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
		return -1;
	return 0;
}

/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap_nocache(page_base,
		page_offs + size);

	return page_remapped ? (page_remapped + page_offs) : NULL;
}

/* Takes cmds off the submission queue and sends them to the hardware,
 * then puts them on the queue of cmds waiting for completion.
 */
static void start_io(struct ctlr_info *h)
{
	struct CommandList *c;
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	while (!list_empty(&h->reqQ)) {
		c = list_entry(h->reqQ.next, struct CommandList, list);
		/* can't do anything if fifo is full */
		if ((h->access.fifo_full(h))) {
			h->fifo_recently_full = 1;
			dev_warn(&h->pdev->dev, "fifo full\n");
			break;
		}
		h->fifo_recently_full = 0;

		/* Get the first entry from the Request Q */
		removeQ(c);
		h->Qdepth--;

		/* Put job onto the completed Q */
		addQ(&h->cmpQ, c);

		/* Must increment commands_outstanding before unlocking
		 * and submitting to avoid race checking for fifo full
		 * condition.
		 */
		h->commands_outstanding++;
		if (h->commands_outstanding > h->max_outstanding)
			h->max_outstanding = h->commands_outstanding;

		/* Tell the controller to execute the command */
		spin_unlock_irqrestore(&h->lock, flags);
		h->access.submit_command(h, c);
		spin_lock_irqsave(&h->lock, flags);
	}
	spin_unlock_irqrestore(&h->lock, flags);
}

static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
{
	return h->access.command_completed(h, q);
}

static inline bool interrupt_pending(struct ctlr_info *h)
{
	return h->access.intr_pending(h);
}

static inline long interrupt_not_for_us(struct ctlr_info *h)
{
	return (h->access.intr_pending(h) == 0) ||
		(h->interrupts_enabled == 0);
}

static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
	u32 raw_tag)
{
	if (unlikely(tag_index >= h->nr_cmds)) {
		dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
		return 1;
	}
	return 0;
}

static inline void finish_cmd(struct CommandList *c)
{
	unsigned long flags;
	int io_may_be_stalled = 0;
	struct ctlr_info *h = c->h;

	spin_lock_irqsave(&h->lock, flags);
	removeQ(c);

	/*
	 * Check for possibly stalled i/o.
	 *
	 * If a fifo_full condition is encountered, requests will back up
	 * in h->reqQ.  This queue is only emptied out by start_io which is
	 * only called when a new i/o request comes in.  If no i/o's are
	 * forthcoming, the i/o's in h->reqQ can get stuck.  So we call
	 * start_io from here if we detect such a danger.
	 *
	 * Normally, we shouldn't hit this case, but pounding on the
	 * CCISS_PASSTHRU ioctl can provoke it.  Only call start_io if
	 * commands_outstanding is low.  We want to avoid calling
	 * start_io from in here as much as possible, and esp. don't
	 * want to get in a cycle where we call start_io every time
	 * through here.
	 */
	if (unlikely(h->fifo_recently_full) &&
		h->commands_outstanding < 5)
		io_may_be_stalled = 1;

	spin_unlock_irqrestore(&h->lock, flags);

	dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
			|| c->cmd_type == CMD_IOACCEL2))
		complete_scsi_command(c);
	else if (c->cmd_type == CMD_IOCTL_PEND)
		complete(c->waiting);
	if (unlikely(io_may_be_stalled))
		start_io(h);
}

static inline u32 hpsa_tag_contains_index(u32 tag)
{
	return tag & DIRECT_LOOKUP_BIT;
}

static inline u32 hpsa_tag_to_index(u32 tag)
{
	return tag >> DIRECT_LOOKUP_SHIFT;
}

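/*
 * Completed tags carry error flags in their low-order bits: the low
 * DIRECT_LOOKUP_SHIFT bits in performant mode, only the low two bits
 * in simple mode.  Mask them off to recover the original tag.
 */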
static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
{
#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
#define HPSA_SIMPLE_ERROR_BITS 0x03
	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return tag & ~HPSA_SIMPLE_ERROR_BITS;
	return tag & ~HPSA_PERF_ERROR_BITS;
}

/* process completion of an indexed ("direct lookup") command */
static inline void process_indexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag_index;
	struct CommandList *c;

	tag_index = hpsa_tag_to_index(raw_tag);
	if (!bad_tag(h, tag_index, raw_tag)) {
		c = h->cmd_pool + tag_index;
		finish_cmd(c);
	}
}

/* process completion of a non-indexed command */
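/* (Both sides of the comparison below are masked with 0xFFFFFFE0
 * because the low-order bits of a tag hold flag bits rather than
 * address bits.) */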
static inline void process_nonindexed_cmd(struct ctlr_info *h,
	u32 raw_tag)
{
	u32 tag;
	struct CommandList *c = NULL;
	unsigned long flags;

	tag = hpsa_tag_discard_error_bits(h, raw_tag);
	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(c, &h->cmpQ, list) {
		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
			spin_unlock_irqrestore(&h->lock, flags);
			finish_cmd(c);
			return;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	bad_tag(h, h->nr_cmds + 1, raw_tag);
}

/* Some controllers, like p400, will give us one interrupt
 * after a soft reset, even if we turned interrupts off.
 * Only need to check for this in the hpsa_xxx_discard_completions
 * functions.
 */
static int ignore_bogus_interrupt(struct ctlr_info *h)
{
	if (likely(!reset_devices))
		return 0;

	if (likely(h->interrupts_enabled))
		return 0;

	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
		"(known firmware bug.)  Ignoring.\n");

	return 1;
}

/*
 * Convert &h->q[x] (passed to interrupt handlers) back to h.
 * Relies on (h->q[x] == x) being true for x such that
 * 0 <= x < MAX_REPLY_QUEUES.
 */
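/*
 * Example: for queue == &h->q[2], *queue == 2, so (queue - *queue)
 * is &h->q[0], and container_of() then recovers h.
 */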
static struct ctlr_info *queue_to_hba(u8 *queue)
{
	return container_of((queue - *queue), struct ctlr_info, q[0]);
}

static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u8 q = *(u8 *) queue;
	u32 raw_tag;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY)
			raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (ignore_bogus_interrupt(h))
		return IRQ_NONE;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY)
		raw_tag = next_command(h, q);
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba((u8 *) queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	if (interrupt_not_for_us(h))
		return IRQ_NONE;
	h->last_intr_timestamp = get_jiffies_64();
	while (interrupt_pending(h)) {
		raw_tag = get_next_completion(h, q);
		while (raw_tag != FIFO_EMPTY) {
			if (likely(hpsa_tag_contains_index(raw_tag)))
				process_indexed_cmd(h, raw_tag);
			else
				process_nonindexed_cmd(h, raw_tag);
			raw_tag = next_command(h, q);
		}
	}
	return IRQ_HANDLED;
}

static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
{
	struct ctlr_info *h = queue_to_hba(queue);
	u32 raw_tag;
	u8 q = *(u8 *) queue;

	h->last_intr_timestamp = get_jiffies_64();
	raw_tag = get_next_completion(h, q);
	while (raw_tag != FIFO_EMPTY) {
		if (likely(hpsa_tag_contains_index(raw_tag)))
			process_indexed_cmd(h, raw_tag);
		else
			process_nonindexed_cmd(h, raw_tag);
		raw_tag = next_command(h, q);
	}
	return IRQ_HANDLED;
}

/* Send a message CDB to the firmware.  Careful, this only works
 * in simple mode, not performant mode due to the tag lookup.
 * We only ever use this immediately after a controller reset.
 */
static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
	unsigned char type)
{
	struct Command {
		struct CommandListHeader CommandHeader;
		struct RequestBlock Request;
		struct ErrDescriptor ErrorDescriptor;
	};
	struct Command *cmd;
	static const size_t cmd_sz = sizeof(*cmd) +
		sizeof(cmd->ErrorDescriptor);
	dma_addr_t paddr64;
	uint32_t paddr32, tag;
	void __iomem *vaddr;
	int i, err;

	vaddr = pci_ioremap_bar(pdev, 0);
	if (vaddr == NULL)
		return -ENOMEM;

	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
	 * CCISS commands, so they must be allocated from the lower 4GiB of
	 * memory.
	 */
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
	if (cmd == NULL) {
		iounmap(vaddr);
		return -ENOMEM;
	}

	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
	 * although there's no guarantee, we assume that the address is at
	 * least 4-byte aligned (most likely, it's page-aligned).
	 */
	paddr32 = paddr64;

	cmd->CommandHeader.ReplyQueue = 0;
	cmd->CommandHeader.SGList = 0;
	cmd->CommandHeader.SGTotal = 0;
	cmd->CommandHeader.Tag.lower = paddr32;
	cmd->CommandHeader.Tag.upper = 0;
	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);

	cmd->Request.CDBLen = 16;
	cmd->Request.Type.Type = TYPE_MSG;
	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
	cmd->Request.Type.Direction = XFER_NONE;
	cmd->Request.Timeout = 0; /* Don't time out */
	cmd->Request.CDB[0] = opcode;
	cmd->Request.CDB[1] = type;
	memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
	cmd->ErrorDescriptor.Addr.upper = 0;
	cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);

	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);

	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
			break;
		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
	}

	iounmap(vaddr);

	/* we leak the DMA buffer here ... no choice since the controller could
	 * still complete the command.
	 */
	if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
			opcode, type);
		return -ETIMEDOUT;
	}

	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);

	if (tag & HPSA_ERROR_BIT) {
		dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
			opcode, type);
		return -EIO;
	}

	dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
		opcode, type);
	return 0;
}

#define hpsa_noop(p) hpsa_message(p, 3, 0)

static int hpsa_controller_hard_reset(struct pci_dev *pdev,
	void * __iomem vaddr, u32 use_doorbell)
{
	u16 pmcsr;
	int pos;

	if (use_doorbell) {
		/* For everything after the P600, the PCI power state method
		 * of resetting the controller doesn't work, so we have this
		 * other way using the doorbell register.
		 */
		dev_info(&pdev->dev, "using doorbell to reset controller\n");
		writel(use_doorbell, vaddr + SA5_DOORBELL);

		/* PMC hardware guys tell us we need a 5 second delay after
		 * doorbell reset and before any attempt to talk to the board
		 * at all to ensure that this actually works and doesn't fall
		 * over in some weird corner cases.
		 */
		msleep(5000);
	} else { /* Try to do it the PCI power state way */

		/* Quoting from the Open CISS Specification: "The Power
		 * Management Control/Status Register (CSR) controls the power
		 * state of the device.  The normal operating state is D0,
		 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
		 * the controller, place the interface device in D3 then to D0,
		 * this causes a secondary PCI reset which will reset the
		 * controller." */

		pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
		if (pos == 0) {
			dev_err(&pdev->dev,
				"hpsa_reset_controller: "
				"PCI PM not supported\n");
			return -ENODEV;
		}
		dev_info(&pdev->dev, "using PCI PM to reset controller\n");
		/* enter the D3hot power management state */
		pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D3hot;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		msleep(500);

		/* enter the D0 power management state */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= PCI_D0;
		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);

		/*
		 * The P600 requires a small delay when changing states.
		 * Otherwise we may think the board did not reset and we bail.
		 * This is for kdump only and is particular to the P600.
		 */
		msleep(500);
	}
	return 0;
}

static void init_driver_version(char *driver_version, int len)
{
	memset(driver_version, 0, len);
	strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}

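/*
 * The driver version string is written into the config table before a
 * reset so that controller_reset_failed() can tell, afterwards, whether
 * the firmware really reinitialized the table.
 */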
static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
	char *driver_version;
	int i, size = sizeof(cfgtable->driver_version);

	driver_version = kmalloc(size, GFP_KERNEL);
	if (!driver_version)
		return -ENOMEM;

	init_driver_version(driver_version, size);
	for (i = 0; i < size; i++)
		writeb(driver_version[i], &cfgtable->driver_version[i]);
	kfree(driver_version);
	return 0;
}

static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
	unsigned char *driver_ver)
{
	int i;

	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
		driver_ver[i] = readb(&cfgtable->driver_version[i]);
}

static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
	char *driver_ver, *old_driver_ver;
	int rc, size = sizeof(cfgtable->driver_version);

	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
	if (!old_driver_ver)
		return -ENOMEM;
	driver_ver = old_driver_ver + size;

	/* After a reset, the 32 bytes of "driver version" in the cfgtable
	 * should have been changed, otherwise we know the reset failed.
	 */
	init_driver_version(old_driver_ver, size);
	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
	rc = !memcmp(driver_ver, old_driver_ver, size);
	kfree(old_driver_ver);
	return rc;
}

/* This does a hard reset of the controller using PCI power management
 * states or using the doorbell register.
 */
static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	void __iomem *vaddr;
	unsigned long paddr;
	u32 misc_fw_support;
	int rc;
	struct CfgTable __iomem *cfgtable;
	u32 use_doorbell;
	u32 board_id;
	u16 command_register;

	/* For controllers as old as the P600, this is very nearly
	 * the same thing as
	 *
	 * pci_save_state(pci_dev);
	 * pci_set_power_state(pci_dev, PCI_D3hot);
	 * pci_set_power_state(pci_dev, PCI_D0);
	 * pci_restore_state(pci_dev);
	 *
	 * For controllers newer than the P600, the pci power state
	 * method of resetting doesn't work so we have another way
	 * using the doorbell register.
	 */

	rc = hpsa_lookup_board_id(pdev, &board_id);
	if (rc < 0 || !ctlr_is_resettable(board_id)) {
		dev_warn(&pdev->dev, "Not resetting device.\n");
		return -ENODEV;
	}

	/* if controller is soft- but not hard resettable... */
	if (!ctlr_is_hard_resettable(board_id))
		return -ENOTSUPP; /* try soft reset later. */

	/* Save the PCI command register */
	pci_read_config_word(pdev, 4, &command_register);
	/* Turn the board off.  This is so that later pci_restore_state()
	 * won't turn the board on before the rest of config space is ready.
	 */
	pci_disable_device(pdev);
	pci_save_state(pdev);

	/* find the first memory BAR, so we can find the cfg table */
	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
	if (rc)
		return rc;
	vaddr = remap_pci_mem(paddr, 0x250);
	if (!vaddr)
		return -ENOMEM;

	/* find cfgtable in order to check if reset via doorbell is supported */
	rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		goto unmap_vaddr;
	cfgtable = remap_pci_mem(pci_resource_start(pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
	if (!cfgtable) {
		rc = -ENOMEM;
		goto unmap_vaddr;
	}
	rc = write_driver_ver_to_cfgtable(cfgtable);
	if (rc)
		goto unmap_vaddr;

	/* If reset via doorbell register is supported, use that.
	 * There are two such methods.  Favor the newest method.
	 */
	misc_fw_support = readl(&cfgtable->misc_fw_support);
	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
	if (use_doorbell) {
		use_doorbell = DOORBELL_CTLR_RESET2;
	} else {
		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
		if (use_doorbell) {
			dev_warn(&pdev->dev, "Soft reset not supported. "
				"Firmware update is required.\n");
			rc = -ENOTSUPP; /* try soft reset */
			goto unmap_cfgtable;
		}
	}

	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
	if (rc)
		goto unmap_cfgtable;

	pci_restore_state(pdev);
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_warn(&pdev->dev, "failed to enable device.\n");
		goto unmap_cfgtable;
	}
	pci_write_config_word(pdev, 4, command_register);

	/* Some devices (notably the HP Smart Array 5i Controller)
	   need a little pause here */
	msleep(HPSA_POST_RESET_PAUSE_MSECS);

	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
	if (rc) {
		dev_warn(&pdev->dev,
			"failed waiting for board to become ready "
			"after hard reset\n");
		goto unmap_cfgtable;
	}

	rc = controller_reset_failed(cfgtable); /* check the mapped config table, not BAR 0 */
	if (rc < 0)
		goto unmap_cfgtable;
	if (rc) {
		dev_warn(&pdev->dev, "Unable to successfully reset "
			"controller.  Will try soft reset.\n");
		rc = -ENOTSUPP;
	} else {
		dev_info(&pdev->dev, "board ready after hard reset.\n");
	}

unmap_cfgtable:
	iounmap(cfgtable);

unmap_vaddr:
	iounmap(vaddr);
	return rc;
}

/*
 * We cannot read the structure directly, for portability we must use
 * the io functions.
 * This is for debug only.
 */
static void print_cfg_table(struct device *dev, struct CfgTable *tb)
{
#ifdef HPSA_DEBUG
	int i;
	char temp_name[17];

	dev_info(dev, "Controller Configuration information\n");
	dev_info(dev, "------------------------------------\n");
	for (i = 0; i < 4; i++)
		temp_name[i] = readb(&(tb->Signature[i]));
	temp_name[4] = '\0';
	dev_info(dev, "   Signature = %s\n", temp_name);
	dev_info(dev, "   Spec Number = %d\n", readl(&(tb->SpecValence)));
	dev_info(dev, "   Transport methods supported = 0x%x\n",
		readl(&(tb->TransportSupport)));
	dev_info(dev, "   Transport methods active = 0x%x\n",
		readl(&(tb->TransportActive)));
	dev_info(dev, "   Requested transport Method = 0x%x\n",
		readl(&(tb->HostWrite.TransportRequest)));
	dev_info(dev, "   Coalesce Interrupt Delay = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntDelay)));
	dev_info(dev, "   Coalesce Interrupt Count = 0x%x\n",
		readl(&(tb->HostWrite.CoalIntCount)));
	dev_info(dev, "   Max outstanding commands = %d\n",
		readl(&(tb->CmdsOutMax)));
	dev_info(dev, "   Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
	for (i = 0; i < 16; i++)
		temp_name[i] = readb(&(tb->ServerName[i]));
	temp_name[16] = '\0';
	dev_info(dev, "   Server Name = %s\n", temp_name);
	dev_info(dev, "   Heartbeat Counter = 0x%x\n\n\n",
		readl(&(tb->HeartBeat)));
#endif				/* HPSA_DEBUG */
}

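/*
 * Translate a BAR's config-space offset (relative to PCI_BASE_ADDRESS_0)
 * into a resource index: I/O and 32-bit memory BARs occupy 4 bytes of
 * config space each, 64-bit memory BARs occupy 8.  Returns -1 if no
 * BAR matches.
 */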
static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
{
	int i, offset, mem_type, bar_type;

	if (pci_bar_addr == PCI_BASE_ADDRESS_0)	/* looking for BAR zero? */
		return 0;
	offset = 0;
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
			offset += 4;
		else {
			mem_type = pci_resource_flags(pdev, i) &
				PCI_BASE_ADDRESS_MEM_TYPE_MASK;
			switch (mem_type) {
			case PCI_BASE_ADDRESS_MEM_TYPE_32:
			case PCI_BASE_ADDRESS_MEM_TYPE_1M:
				offset += 4; /* 32 bit */
				break;
			case PCI_BASE_ADDRESS_MEM_TYPE_64:
				offset += 8;
				break;
			default: /* reserved in PCI 2.2 */
				dev_warn(&pdev->dev,
					"base address is invalid\n");
				return -1;
			}
		}
		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
			return i + 1;
	}
	return -1;
}

/* If MSI/MSI-X is supported by the kernel we will try to enable it on
 * controllers that are capable.  If not, we use IO-APIC mode.
 */
static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
	int err, i;
	struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];

	for (i = 0; i < MAX_REPLY_QUEUES; i++) {
		hpsa_msix_entries[i].vector = 0;
		hpsa_msix_entries[i].entry = i;
	}

	/* Some boards advertise MSI but don't really support it */
	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
		goto default_int_mode;
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
		dev_info(&h->pdev->dev, "MSIX\n");
		h->msix_vector = MAX_REPLY_QUEUES;
		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
				      h->msix_vector);
		if (err > 0) {
			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
				"available\n", err);
			h->msix_vector = err;
			err = pci_enable_msix(h->pdev, hpsa_msix_entries,
					      h->msix_vector);
		}
		if (!err) {
			for (i = 0; i < h->msix_vector; i++)
				h->intr[i] = hpsa_msix_entries[i].vector;
			return;
		} else {
			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
				err);
			h->msix_vector = 0;
			goto default_int_mode;
		}
	}
	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
		dev_info(&h->pdev->dev, "MSI\n");
		if (!pci_enable_msi(h->pdev))
			h->msi_vector = 1;
		else
			dev_warn(&h->pdev->dev, "MSI init failed\n");
	}
default_int_mode:
#endif				/* CONFIG_PCI_MSI */
	/* if we get here we're going to use the default interrupt mode */
	h->intr[h->intr_mode] = h->pdev->irq;
}

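/*
 * board_id packs the PCI subsystem device ID into the high 16 bits and
 * the subsystem vendor ID into the low 16, e.g. subsystem device 0xdddd
 * with vendor 0x103C (HP) yields board_id 0xdddd103C.
 */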
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
	int i;
	u32 subsystem_vendor_id, subsystem_device_id;

	subsystem_vendor_id = pdev->subsystem_vendor;
	subsystem_device_id = pdev->subsystem_device;
	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
		    subsystem_vendor_id;

	for (i = 0; i < ARRAY_SIZE(products); i++)
		if (*board_id == products[i].board_id)
			return i;

	if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
		subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
		!hpsa_allow_any) {
		dev_warn(&pdev->dev, "unrecognized board ID: "
			"0x%08x, ignoring.\n", *board_id);
		return -ENODEV;
	}
	return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}

static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar)
{
	int i;

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			/* addressing mode bits already removed */
			*memory_bar = pci_resource_start(pdev, i);
			dev_dbg(&pdev->dev, "memory BAR = %lx\n",
				*memory_bar);
			return 0;
		}
	dev_warn(&pdev->dev, "no memory BAR found\n");
	return -ENODEV;
}

static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready)
{
	int i, iterations;
	u32 scratchpad;

	if (wait_for_ready)
		iterations = HPSA_BOARD_READY_ITERATIONS;
	else
		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;

	for (i = 0; i < iterations; i++) {
		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
		if (wait_for_ready) {
			if (scratchpad == HPSA_FIRMWARE_READY)
				return 0;
		} else {
			if (scratchpad != HPSA_FIRMWARE_READY)
				return 0;
		}
		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
	}
	dev_warn(&pdev->dev, "board not ready, timed out.\n");
	return -ENODEV;
}

static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset)
{
	*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
	*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
	*cfg_base_addr &= (u32) 0x0000ffff;
	*cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
	if (*cfg_base_addr_index == -1) {
		dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
		return -ENODEV;
	}
	return 0;
}

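/*
 * Map the controller's config table and, via its TransMethodOffset
 * field, the performant-mode transfer table.
 */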
static int hpsa_find_cfgtables(struct ctlr_info *h)
{
	u64 cfg_offset;
	u32 cfg_base_addr;
	u64 cfg_base_addr_index;
	u32 trans_offset;
	int rc;

	rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
		&cfg_base_addr_index, &cfg_offset);
	if (rc)
		return rc;
	h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
	if (!h->cfgtable)
		return -ENOMEM;
	rc = write_driver_ver_to_cfgtable(h->cfgtable);
	if (rc)
		return rc;
	/* Find performant mode table. */
	trans_offset = readl(&h->cfgtable->TransMethodOffset);
	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
		cfg_base_addr_index) + cfg_offset + trans_offset,
		sizeof(*h->transtable));
	if (!h->transtable)
		return -ENOMEM;
	return 0;
}

static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));

	/* Limit commands in memory limited kdump scenario. */
	if (reset_devices && h->max_commands > 32)
		h->max_commands = 32;

	if (h->max_commands < 16) {
		dev_warn(&h->pdev->dev, "Controller reports "
			"max supported commands of %d, an obvious lie. "
			"Using 16.  Ensure that firmware is up to date.\n",
			h->max_commands);
		h->max_commands = 16;
	}
}

/* Interrogate the hardware for some limits:
 * max commands, max SG elements without chaining, and with chaining,
 * SG chain block size, etc.
 */
static void hpsa_find_board_params(struct ctlr_info *h)
{
	hpsa_get_max_perf_mode_cmds(h);
	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
	h->fw_support = readl(&(h->cfgtable->misc_fw_support));
	/*
	 * Limit in-command s/g elements to 32 to save DMA-able memory.
	 * However, the spec says if 0, use 31.
	 */
	h->max_cmd_sg_entries = 31;
	if (h->maxsgentries > 512) {
		h->max_cmd_sg_entries = 32;
		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
		h->maxsgentries--; /* save one for chain pointer */
	} else {
		h->maxsgentries = 31; /* default to traditional values */
		h->chainsize = 0;
	}
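	/*
	 * Worked example: maxsgentries == 1024 gives 32 in-command
	 * entries, chainsize = 1024 - 32 + 1 = 993 chained entries, and
	 * maxsgentries drops to 1023 since one slot holds the chain
	 * pointer.  (Illustrative numbers only.)
	 */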

	/* Find out what task management functions are supported and cache */
	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
}

static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
{
	if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
		dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
		return false;
	}
	return true;
}

static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
{
	u32 driver_support;

	/* read outside the #ifdef so driver_support is always initialized */
	driver_support = readl(&(h->cfgtable->driver_support));
	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
#ifdef CONFIG_X86
	driver_support |= ENABLE_SCSI_PREFETCH;
#endif
	driver_support |= ENABLE_UNIT_ATTN;
	writel(driver_support, &(h->cfgtable->driver_support));
}

/* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result
 * in a prefetch beyond physical memory.
 */
static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
{
	u32 dma_prefetch;

	if (h->board_id != 0x3225103C)
		return;
	dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
	dma_prefetch |= 0x8000;
	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}

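/*
 * Wait for the controller to acknowledge a clear-event-notify doorbell
 * write by clearing DOORBELL_CLEAR_EVENTS (bit 6).
 */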
static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
{
	int i;
	u32 doorbell_value;
	unsigned long flags;

	/* wait until the clear_event_notify bit 6 is cleared by controller. */
	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
		spin_lock_irqsave(&h->lock, flags);
		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
		spin_unlock_irqrestore(&h->lock, flags);
		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
			break;
		/* delay and try again */
		msleep(20);
	}
}
5552
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005553static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05005554{
5555 int i;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06005556 u32 doorbell_value;
5557 unsigned long flags;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05005558
5559 /* under certain very rare conditions, this can take awhile.
5560 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
5561 * as we enter this code.)
5562 */
5563 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06005564 spin_lock_irqsave(&h->lock, flags);
5565 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
5566 spin_unlock_irqrestore(&h->lock, flags);
Dan Carpenter382be662011-02-15 15:33:13 -06005567 if (!(doorbell_value & CFGTBL_ChangeReq))
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05005568 break;
5569 /* delay and try again */
Stephen M. Cameron60d3f5b2011-01-06 14:48:34 -06005570 usleep_range(10000, 20000);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05005571 }
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05005572}
5573
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005574static int hpsa_enter_simple_mode(struct ctlr_info *h)
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05005575{
5576 u32 trans_support;
5577
5578 trans_support = readl(&(h->cfgtable->TransportSupport));
5579 if (!(trans_support & SIMPLE_MODE))
5580 return -ENOTSUPP;
5581
5582 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005583
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05005584 /* Update the field, and then ring the doorbell */
5585 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06005586 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05005587 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
5588 hpsa_wait_for_mode_change_ack(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05005589 print_cfg_table(&h->pdev->dev, h->cfgtable);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005590 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
5591 goto error;
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06005592 h->transMethod = CFGTBL_Trans_Simple;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05005593 return 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005594error:
5595 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
5596 return -ENODEV;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05005597}

static int hpsa_pci_init(struct ctlr_info *h)
{
	int prod_index, err;

	prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
	if (prod_index < 0)
		return -ENODEV;
	h->product_name = products[prod_index].product_name;
	h->access = *(products[prod_index].access);

	pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
			       PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);

	err = pci_enable_device(h->pdev);
	if (err) {
		dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
		return err;
	}

	/* Enable bus mastering (pci_disable_device may disable this) */
	pci_set_master(h->pdev);

	err = pci_request_regions(h->pdev, HPSA);
	if (err) {
		dev_err(&h->pdev->dev,
			"cannot obtain PCI resources, aborting\n");
		return err;
	}
	hpsa_interrupt_mode(h);
	err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
	if (err)
		goto err_out_free_res;
	h->vaddr = remap_pci_mem(h->paddr, 0x250);
	if (!h->vaddr) {
		err = -ENOMEM;
		goto err_out_free_res;
	}
	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
	if (err)
		goto err_out_free_res;
	err = hpsa_find_cfgtables(h);
	if (err)
		goto err_out_free_res;
	hpsa_find_board_params(h);

	if (!hpsa_CISS_signature_present(h)) {
		err = -ENODEV;
		goto err_out_free_res;
	}
	hpsa_set_driver_support_bits(h);
	hpsa_p600_dma_prefetch_quirk(h);
	err = hpsa_enter_simple_mode(h);
	if (err)
		goto err_out_free_res;
	return 0;

err_out_free_res:
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	if (h->vaddr)
		iounmap(h->vaddr);
	pci_disable_device(h->pdev);
	pci_release_regions(h->pdev);
	return err;
}

static void hpsa_hba_inquiry(struct ctlr_info *h)
{
	int rc;

#define HBA_INQUIRY_BYTE_COUNT 64
	h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
	if (!h->hba_inquiry_data)
		return;
	rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
		h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
	if (rc != 0) {
		kfree(h->hba_inquiry_data);
		h->hba_inquiry_data = NULL;
	}
}

static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
	int rc, i;

	if (!reset_devices)
		return 0;

	/* Reset the controller with a PCI power-cycle or via doorbell */
	rc = hpsa_kdump_hard_reset_controller(pdev);

	/* -ENOTSUPP here means we cannot reset the controller
	 * but it's already (and still) up and running in
	 * "performant mode". Or, it might be 640x, which can't reset
	 * due to concerns about shared bbwc between the 6402/6404 pair.
	 */
	if (rc == -ENOTSUPP)
		return rc; /* just try to do the kdump anyhow. */
	if (rc)
		return -ENODEV;

	/* Now try to get the controller to respond to a no-op */
	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
		if (hpsa_noop(pdev) == 0)
			break;
		else
			dev_warn(&pdev->dev, "no-op failed%s\n",
					(i < 11 ? "; re-trying" : ""));
	}
	return 0;
}

static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
	h->cmd_pool_bits = kzalloc(
		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
		sizeof(unsigned long), GFP_KERNEL);
	h->cmd_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->cmd_pool),
		    &(h->cmd_pool_dhandle));
	h->errinfo_pool = pci_alloc_consistent(h->pdev,
		    h->nr_cmds * sizeof(*h->errinfo_pool),
		    &(h->errinfo_pool_dhandle));
	if ((h->cmd_pool_bits == NULL)
	    || (h->cmd_pool == NULL)
	    || (h->errinfo_pool == NULL)) {
		dev_err(&h->pdev->dev, "out of memory in %s\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
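
/*
 * Sizing sketch for cmd_pool_bits above (numbers are illustrative): one
 * bit tracks each command slot, so nr_cmds = 1020 on a 64-bit kernel
 * (BITS_PER_LONG == 64) needs DIV_ROUND_UP(1020, 64) = 16 unsigned
 * longs, i.e. 128 bytes of allocation bitmap.
 */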

static void hpsa_free_cmd_pool(struct ctlr_info *h)
{
	kfree(h->cmd_pool_bits);
	if (h->cmd_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct CommandList),
			    h->cmd_pool, h->cmd_pool_dhandle);
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	if (h->errinfo_pool)
		pci_free_consistent(h->pdev,
			    h->nr_cmds * sizeof(struct ErrorInfo),
			    h->errinfo_pool,
			    h->errinfo_pool_dhandle);
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(struct io_accel1_cmd),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
}

static int hpsa_request_irq(struct ctlr_info *h,
	irqreturn_t (*msixhandler)(int, void *),
	irqreturn_t (*intxhandler)(int, void *))
{
	int rc, i;

	/*
	 * initialize h->q[x] = x so that interrupt handlers know which
	 * queue to process.
	 */
	for (i = 0; i < MAX_REPLY_QUEUES; i++)
		h->q[i] = (u8) i;

	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
		/* If performant mode and MSI-X, use multiple reply queues */
		for (i = 0; i < h->msix_vector; i++)
			rc = request_irq(h->intr[i], msixhandler,
					0, h->devname,
					&h->q[i]);
	} else {
		/* Use single reply pool */
		if (h->msix_vector > 0 || h->msi_vector) {
			rc = request_irq(h->intr[h->intr_mode],
				msixhandler, 0, h->devname,
				&h->q[h->intr_mode]);
		} else {
			rc = request_irq(h->intr[h->intr_mode],
				intxhandler, IRQF_SHARED, h->devname,
				&h->q[h->intr_mode]);
		}
	}
	if (rc) {
		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
		       h->intr[h->intr_mode], h->devname);
		return -ENODEV;
	}
	return 0;
}

static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
		HPSA_RESET_TYPE_CONTROLLER)) {
		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
		return -EIO;
	}

	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
		return -1;
	}

	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
		dev_warn(&h->pdev->dev,
			"Board failed to become ready after soft reset.\n");
		return -1;
	}

	return 0;
}

static void free_irqs(struct ctlr_info *h)
{
	int i;

	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
		/* Single reply queue, only one irq to free */
		i = h->intr_mode;
		free_irq(h->intr[i], &h->q[i]);
		return;
	}

	for (i = 0; i < h->msix_vector; i++)
		free_irq(h->intr[i], &h->q[i]);
}

static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
{
	free_irqs(h);
#ifdef CONFIG_PCI_MSI
	if (h->msix_vector) {
		if (h->pdev->msix_enabled)
			pci_disable_msix(h->pdev);
	} else if (h->msi_vector) {
		if (h->pdev->msi_enabled)
			pci_disable_msi(h->pdev);
	}
#endif /* CONFIG_PCI_MSI */
}

static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
{
	hpsa_free_irqs_and_disable_msix(h);
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->blockFetchTable);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	if (h->vaddr)
		iounmap(h->vaddr);
	if (h->transtable)
		iounmap(h->transtable);
	if (h->cfgtable)
		iounmap(h->cfgtable);
	pci_release_regions(h->pdev);
	kfree(h);
}

/* Called when controller lockup detected. */
static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
{
	struct CommandList *c = NULL;

	assert_spin_locked(&h->lock);
	/* Mark all outstanding commands as failed and complete them. */
	while (!list_empty(list)) {
		c = list_entry(list->next, struct CommandList, list);
		c->err_info->CommandStatus = CMD_HARDWARE_ERR;
		finish_cmd(c);
	}
}

static void controller_lockup_detected(struct ctlr_info *h)
{
	unsigned long flags;

	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	spin_lock_irqsave(&h->lock, flags);
	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
	spin_unlock_irqrestore(&h->lock, flags);
	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
			h->lockup_detected);
	pci_disable_device(h->pdev);
	spin_lock_irqsave(&h->lock, flags);
	fail_all_cmds_on_list(h, &h->cmpQ);
	fail_all_cmds_on_list(h, &h->reqQ);
	spin_unlock_irqrestore(&h->lock, flags);
}

static void detect_controller_lockup(struct ctlr_info *h)
{
	u64 now;
	u32 heartbeat;
	unsigned long flags;

	now = get_jiffies_64();
	/* If we've received an interrupt recently, we're ok. */
	if (time_after64(h->last_intr_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/*
	 * If we've already checked the heartbeat recently, we're ok.
	 * This could happen if someone sends us a signal. We
	 * otherwise don't care about signals in this thread.
	 */
	if (time_after64(h->last_heartbeat_timestamp +
				(h->heartbeat_sample_interval), now))
		return;

	/* If heartbeat has not changed since we last looked, we're not ok. */
	spin_lock_irqsave(&h->lock, flags);
	heartbeat = readl(&h->cfgtable->HeartBeat);
	spin_unlock_irqrestore(&h->lock, flags);
	if (h->last_heartbeat == heartbeat) {
		controller_lockup_detected(h);
		return;
	}

	/* We're ok. */
	h->last_heartbeat = heartbeat;
	h->last_heartbeat_timestamp = now;
}
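
/*
 * Detection timeline sketch (the interval value here is illustrative;
 * the real one comes from h->heartbeat_sample_interval): with a 30
 * second interval, any invocation within 30s of the last interrupt or
 * of the last heartbeat check returns early; only a HeartBeat counter
 * that is unchanged across two samples at least 30s apart is declared
 * a lockup.
 */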

static int hpsa_kickoff_rescan(struct ctlr_info *h)
{
	int i;
	char *event_type;

	/* Ask the controller to clear the events we're handling. */
	if ((h->transMethod & (CFGTBL_Trans_io_accel1
			| CFGTBL_Trans_io_accel2)) &&
		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {

		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
			event_type = "state change";
		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
			event_type = "configuration change";
		/* Stop sending new RAID offload reqs via the IO accelerator */
		scsi_block_requests(h->scsi_host);
		for (i = 0; i < h->ndevices; i++)
			h->dev[i]->offload_enabled = 0;
		hpsa_drain_commands(h);
		/* Set 'accelerator path config change' bit */
		dev_warn(&h->pdev->dev,
			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
			h->events, event_type);
		writel(h->events, &(h->cfgtable->clear_event_notify));
		/* Set the "clear event notify field update" bit 6 */
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		/* Wait until ctlr clears 'clear event notify field', bit 6 */
		hpsa_wait_for_clear_event_notify_ack(h);
		scsi_unblock_requests(h->scsi_host);
	} else {
		/* Acknowledge controller notification events. */
		writel(h->events, &(h->cfgtable->clear_event_notify));
		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_clear_event_notify_ack(h);
#if 0
		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
		hpsa_wait_for_mode_change_ack(h);
#endif
	}

	/* Something in the device list may have changed to trigger
	 * the event, so do a rescan.
	 */
	hpsa_scan_start(h->scsi_host);
	/* release reference taken on scsi host in hpsa_ctlr_needs_rescan() */
	scsi_host_put(h->scsi_host);
	return 0;
}

/* Check a register on the controller to see if there are configuration
 * changes (added/changed/removed logical drives, etc.) which mean that
 * we should rescan the controller for devices. If so, take a reference
 * to the associated scsi_host and kick off a rescan.
 */
static void hpsa_ctlr_needs_rescan(struct ctlr_info *h)
{
	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
		return;

	h->events = readl(&(h->cfgtable->event_notify));
	if (!h->events)
		return;

	/*
	 * Take a reference on scsi host for the duration of the scan.
	 * Released in hpsa_kickoff_rescan(). No lock needed for scan_list
	 * as only a single thread accesses this list.
	 */
	scsi_host_get(h->scsi_host);
	hpsa_kickoff_rescan(h);
}

static void hpsa_monitor_ctlr_worker(struct work_struct *work)
{
	unsigned long flags;
	struct ctlr_info *h = container_of(to_delayed_work(work),
					struct ctlr_info, monitor_ctlr_work);
	detect_controller_lockup(h);
	if (h->lockup_detected)
		return;
	hpsa_ctlr_needs_rescan(h);
	spin_lock_irqsave(&h->lock, flags);
	if (h->remove_in_progress) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	spin_unlock_irqrestore(&h->lock, flags);
}

static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int dac, rc;
	struct ctlr_info *h;
	int try_soft_reset = 0;
	unsigned long flags;

	if (number_of_controllers == 0)
		printk(KERN_INFO DRIVER_NAME "\n");

	rc = hpsa_init_reset_devices(pdev);
	if (rc) {
		if (rc != -ENOTSUPP)
			return rc;
		/* If the reset fails in a particular way (it has no way to do
		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
		 * a soft reset once we get the controller configured up to the
		 * point that it can accept a command.
		 */
		try_soft_reset = 1;
		rc = 0;
	}

reinit_after_soft_reset:

	/* Command structures must be aligned on a 128-byte boundary
	 * because the lower bits of the address are used by the hardware
	 * and by the driver. See comments in hpsa.h for more info.
	 */
#define COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
	h = kzalloc(sizeof(*h), GFP_KERNEL);
	if (!h)
		return -ENOMEM;

	h->pdev = pdev;
	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
	INIT_LIST_HEAD(&h->cmpQ);
	INIT_LIST_HEAD(&h->reqQ);
	spin_lock_init(&h->lock);
	spin_lock_init(&h->scan_lock);
	spin_lock_init(&h->passthru_count_lock);
	rc = hpsa_pci_init(h);
	if (rc != 0)
		goto clean1;

	sprintf(h->devname, HPSA "%d", number_of_controllers);
	h->ctlr = number_of_controllers;
	number_of_controllers++;

	/* configure PCI DMA stuff */
	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (rc == 0) {
		dac = 1;
	} else {
		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (rc == 0) {
			dac = 0;
		} else {
			dev_err(&pdev->dev, "no suitable DMA available\n");
			goto clean1;
		}
	}

	/* make sure the board interrupts are off */
	h->access.set_intr_mask(h, HPSA_INTR_OFF);

	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;
	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
	       h->devname, pdev->device,
	       h->intr[h->intr_mode], dac ? "" : " not");
	if (hpsa_allocate_cmd_pool(h))
		goto clean4;
	if (hpsa_allocate_sg_chain_blocks(h))
		goto clean4;
	init_waitqueue_head(&h->scan_wait_queue);
	h->scan_finished = 1; /* no scan currently in progress */

	pci_set_drvdata(pdev, h);
	h->ndevices = 0;
	h->scsi_host = NULL;
	spin_lock_init(&h->devlock);
	hpsa_put_ctlr_into_performant_mode(h);

	/* At this point, the controller is ready to take commands.
	 * Now, if reset_devices and the hard reset didn't work, try
	 * the soft reset and see if that works.
	 */
	if (try_soft_reset) {

		/* This is kind of gross. We may or may not get a completion
		 * from the soft reset command, and if we do, then the value
		 * from the fifo may or may not be valid. So, we wait 10 secs
		 * after the reset throwing away any completions we get during
		 * that time. Unregister the interrupt handler and register
		 * fake ones to scoop up any residual completions.
		 */
		spin_lock_irqsave(&h->lock, flags);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);
		spin_unlock_irqrestore(&h->lock, flags);
		free_irqs(h);
		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
					hpsa_intx_discard_completions);
		if (rc) {
			dev_warn(&h->pdev->dev,
				"Failed to request_irq after soft reset.\n");
			goto clean4;
		}

		rc = hpsa_kdump_soft_reset(h);
		if (rc)
			/* Neither hard nor soft reset worked, we're hosed. */
			goto clean4;

		dev_info(&h->pdev->dev, "Board READY.\n");
		dev_info(&h->pdev->dev,
			"Waiting for stale completions to drain.\n");
		h->access.set_intr_mask(h, HPSA_INTR_ON);
		msleep(10000);
		h->access.set_intr_mask(h, HPSA_INTR_OFF);

		rc = controller_reset_failed(h->cfgtable);
		if (rc)
			dev_info(&h->pdev->dev,
				"Soft reset appears to have failed.\n");

		/* since the controller's reset, we have to go back and re-init
		 * everything. Easiest to just forget what we've done and do it
		 * all over again.
		 */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		if (rc)
			/* don't go to clean4, we already unallocated */
			return -ENODEV;

		goto reinit_after_soft_reset;
	}

	/* Turn the interrupts on so we can service requests */
	h->access.set_intr_mask(h, HPSA_INTR_ON);

	hpsa_hba_inquiry(h);
	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */

	/* Monitor the controller for firmware lockups */
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
	schedule_delayed_work(&h->monitor_ctlr_work,
				h->heartbeat_sample_interval);
	return 0;

clean4:
	hpsa_free_sg_chain_blocks(h);
	hpsa_free_cmd_pool(h);
	free_irqs(h);
clean2:
clean1:
	kfree(h);
	return rc;
}

static void hpsa_flush_cache(struct ctlr_info *h)
{
	char *flush_buf;
	struct CommandList *c;
	unsigned long flags;

	/* Don't bother trying to flush the cache if locked up */
	spin_lock_irqsave(&h->lock, flags);
	if (unlikely(h->lockup_detected)) {
		spin_unlock_irqrestore(&h->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&h->lock, flags);

	flush_buf = kzalloc(4, GFP_KERNEL);
	if (!flush_buf)
		return;

	c = cmd_special_alloc(h);
	if (!c) {
		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
		goto out_of_memory;
	}
	if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
		RAID_CTLR_LUNID, TYPE_CMD)) {
		goto out;
	}
	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
	if (c->err_info->CommandStatus != 0)
out:
		dev_warn(&h->pdev->dev,
			"error flushing cache on controller\n");
	cmd_special_free(h, c);
out_of_memory:
	kfree(flush_buf);
}

static void hpsa_shutdown(struct pci_dev *pdev)
{
	struct ctlr_info *h;

	h = pci_get_drvdata(pdev);
	/* Turn board interrupts off and send the flush cache command,
	 * which writes all data in the battery-backed cache out to disk.
	 */
	hpsa_flush_cache(h);
	h->access.set_intr_mask(h, HPSA_INTR_OFF);
	hpsa_free_irqs_and_disable_msix(h);
}

static void hpsa_free_device_info(struct ctlr_info *h)
{
	int i;

	for (i = 0; i < h->ndevices; i++)
		kfree(h->dev[i]);
}

static void hpsa_remove_one(struct pci_dev *pdev)
{
	struct ctlr_info *h;
	unsigned long flags;

	if (pci_get_drvdata(pdev) == NULL) {
		dev_err(&pdev->dev, "unable to remove device\n");
		return;
	}
	h = pci_get_drvdata(pdev);

	/* Get rid of any controller monitoring work items */
	spin_lock_irqsave(&h->lock, flags);
	h->remove_in_progress = 1;
	cancel_delayed_work(&h->monitor_ctlr_work);
	spin_unlock_irqrestore(&h->lock, flags);

	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
	hpsa_shutdown(pdev);
	iounmap(h->vaddr);
	iounmap(h->transtable);
	iounmap(h->cfgtable);
	hpsa_free_device_info(h);
	hpsa_free_sg_chain_blocks(h);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct CommandList),
		h->cmd_pool, h->cmd_pool_dhandle);
	pci_free_consistent(h->pdev,
		h->nr_cmds * sizeof(struct ErrorInfo),
		h->errinfo_pool, h->errinfo_pool_dhandle);
	pci_free_consistent(h->pdev, h->reply_pool_size,
		h->reply_pool, h->reply_pool_dhandle);
	kfree(h->cmd_pool_bits);
	kfree(h->blockFetchTable);
	kfree(h->ioaccel1_blockFetchTable);
	kfree(h->ioaccel2_blockFetchTable);
	kfree(h->hba_inquiry_data);
	pci_disable_device(pdev);
	pci_release_regions(pdev);
	kfree(h);
}

static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
	__attribute__((unused)) pm_message_t state)
{
	return -ENOSYS;
}

static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
{
	return -ENOSYS;
}

static struct pci_driver hpsa_pci_driver = {
	.name = HPSA,
	.probe = hpsa_init_one,
	.remove = hpsa_remove_one,
	.id_table = hpsa_pci_device_id,	/* id_table */
	.shutdown = hpsa_shutdown,
	.suspend = hpsa_suspend,
	.resume = hpsa_resume,
};

/* Fill in bucket_map[], given nsgs (the max number of
 * scatter gather elements supported) and bucket[],
 * which is an array of 8 integers. The bucket[] array
 * contains 8 different DMA transfer sizes (in 16
 * byte increments) which the controller uses to fetch
 * commands. This function fills in bucket_map[], which
 * maps a given number of scatter gather elements to one of
 * the 8 DMA transfer sizes. The point of it is to allow the
 * controller to only do as much DMA as needed to fetch the
 * command, with the DMA transfer size encoded in the lower
 * bits of the command address.
 */
static void calc_bucket_map(int bucket[], int num_buckets,
	int nsgs, int min_blocks, int *bucket_map)
{
	int i, j, b, size;

	/* Note, bucket_map must have nsgs+1 entries. */
	for (i = 0; i <= nsgs; i++) {
		/* Compute size of a command with i SG entries */
		size = i + min_blocks;
		b = num_buckets; /* Assume the biggest bucket */
		/* Find the bucket that is just big enough */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;
				break;
			}
		}
		/* for a command with i SG entries, use bucket b. */
		bucket_map[i] = b;
	}
}
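
/*
 * Worked example of the mapping (values are illustrative): with
 * bucket[] = {5, 6, 8, 10, 12, 20, 28, 36} and min_blocks = 4, a
 * command carrying 3 SG entries needs 3 + 4 = 7 sixteen-byte blocks;
 * the first bucket >= 7 is bucket[2] = 8, so bucket_map[3] = 2 and the
 * controller fetches 8 * 16 = 128 bytes for that command.
 */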

static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
{
	int i;
	unsigned long register_value;
	unsigned long transMethod = CFGTBL_Trans_Performant |
			(trans_support & CFGTBL_Trans_use_short_tags) |
				CFGTBL_Trans_enable_directed_msix |
			(trans_support & (CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_io_accel2));
	struct access_method access = SA5_performant_access;

	/* This is a bit complicated. There are 8 registers on
	 * the controller which we write to in order to tell it the 8
	 * different sizes of commands which there may be. It's a way of
	 * reducing the DMA done to fetch each command. Encoded into
	 * each command's tag are 3 bits which communicate to the controller
	 * which of the eight sizes that command fits within. The size of
	 * each command depends on how many scatter gather entries there are.
	 * Each SG entry requires 16 bytes. The eight registers are programmed
	 * with the number of 16-byte blocks a command of that size requires.
	 * The smallest command possible requires 5 such 16-byte blocks.
	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
	 * blocks. Note, this only extends to the SG entries contained
	 * within the command block, and does not extend to chained blocks
	 * of SG elements. bft[] contains the eight values we write to
	 * the registers. They are not evenly distributed, but have more
	 * sizes for small commands, and fewer sizes for larger commands.
	 */
	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
#define MIN_IOACCEL2_BFT_ENTRY 5
#define HPSA_IOACCEL2_HEADER_SZ 4
	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
			13, 14, 15, 16, 17, 18, 19,
			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
			16 * MIN_IOACCEL2_BFT_ENTRY);
	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
	/* 5 = 1 s/g entry or 4k
	 * 6 = 2 s/g entry or 8k
	 * 8 = 4 s/g entry or 16k
	 * 10 = 6 s/g entry or 24k
	 */
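	/* To trace the mapping above (illustrative): a command with two
	 * SG entries occupies 2 + 4 = 6 blocks and so falls into bft[1];
	 * that index 1 is what gets encoded in the 3 size bits of the
	 * command's tag.
	 */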

	/* Controller spec: zero out this buffer. */
	memset(h->reply_pool, 0, h->reply_pool_size);

	bft[7] = SG_ENTRIES_IN_CMD + 4;
	calc_bucket_map(bft, ARRAY_SIZE(bft),
				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
	for (i = 0; i < 8; i++)
		writel(bft[i], &h->transtable->BlockFetch[i]);

	/* size of controller ring buffer */
	writel(h->max_commands, &h->transtable->RepQSize);
	writel(h->nreply_queues, &h->transtable->RepQCount);
	writel(0, &h->transtable->RepQCtrAddrLow32);
	writel(0, &h->transtable->RepQCtrAddrHigh32);

	for (i = 0; i < h->nreply_queues; i++) {
		writel(0, &h->transtable->RepQAddr[i].upper);
		writel(h->reply_pool_dhandle +
			(h->max_commands * sizeof(u64) * i),
			&h->transtable->RepQAddr[i].lower);
	}

	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
	/*
	 * Enable outbound interrupt coalescing in accelerator mode.
	 */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		access = SA5_ioaccel_mode1_access;
		writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
		writel(4, &h->cfgtable->HostWrite.CoalIntCount);
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			access = SA5_ioaccel_mode2_access;
			writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
			writel(4, &h->cfgtable->HostWrite.CoalIntCount);
		}
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
	register_value = readl(&(h->cfgtable->TransportActive));
	if (!(register_value & CFGTBL_Trans_Performant)) {
		dev_warn(&h->pdev->dev,
			"unable to get board into performant mode\n");
		return;
	}
	/* Change the access methods to the performant access methods */
	h->access = access;
	h->transMethod = transMethod;

	if (!((trans_support & CFGTBL_Trans_io_accel1) ||
		(trans_support & CFGTBL_Trans_io_accel2)))
		return;

	if (trans_support & CFGTBL_Trans_io_accel1) {
		/* Set up I/O accelerator mode */
		for (i = 0; i < h->nreply_queues; i++) {
			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
			h->reply_queue[i].current_entry =
				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
		}
		bft[7] = h->ioaccel_maxsg + 8;
		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
				h->ioaccel1_blockFetchTable);

		/* initialize all reply queue entries to unused */
		memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
				h->reply_pool_size);

		/* set all the constant fields in the accelerator command
		 * frames once at init time to save CPU cycles later.
		 */
		for (i = 0; i < h->nr_cmds; i++) {
			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];

			cp->function = IOACCEL1_FUNCTION_SCSIIO;
			cp->err_info = (u32) (h->errinfo_pool_dhandle +
					(i * sizeof(struct ErrorInfo)));
			cp->err_info_len = sizeof(struct ErrorInfo);
			cp->sgl_offset = IOACCEL1_SGLOFFSET;
			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
			cp->timeout_sec = 0;
			cp->ReplyQueue = 0;
			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
						DIRECT_LOOKUP_BIT;
			cp->Tag.upper = 0;
			cp->host_addr.lower =
				(u32) (h->ioaccel_cmd_pool_dhandle +
					(i * sizeof(struct io_accel1_cmd)));
			cp->host_addr.upper = 0;
		}
	} else if (trans_support & CFGTBL_Trans_io_accel2) {
		u64 cfg_offset, cfg_base_addr_index;
		u32 bft2_offset, cfg_base_addr;
		int rc;

		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
			&cfg_base_addr_index, &cfg_offset);
		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
				4, h->ioaccel2_blockFetchTable);
		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
		BUILD_BUG_ON(offsetof(struct CfgTable,
				io_accel_request_size_offset) != 0xb8);
		h->ioaccel2_bft2_regs =
			remap_pci_mem(pci_resource_start(h->pdev,
					cfg_base_addr_index) +
					cfg_offset + bft2_offset,
					ARRAY_SIZE(bft2) *
					sizeof(*h->ioaccel2_bft2_regs));
		for (i = 0; i < ARRAY_SIZE(bft2); i++)
			writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
	}
	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
	hpsa_wait_for_mode_change_ack(h);
}

static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
{
	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;

	/* Command structures must be aligned on a 128-byte boundary
	 * because the 7 lower bits of the address are used by the
	 * hardware.
	 */
#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
			IOACCEL1_COMMANDLIST_ALIGNMENT);
	h->ioaccel_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			&(h->ioaccel_cmd_pool_dhandle));

	h->ioaccel1_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel_cmd_pool == NULL) ||
		(h->ioaccel1_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
	kfree(h->ioaccel1_blockFetchTable);
	return 1;
}

static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
{
	/* Allocate ioaccel2 mode command blocks and block fetch table */

	h->ioaccel_maxsg =
		readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;

#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
			IOACCEL2_COMMANDLIST_ALIGNMENT);
	h->ioaccel2_cmd_pool =
		pci_alloc_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			&(h->ioaccel2_cmd_pool_dhandle));

	h->ioaccel2_blockFetchTable =
		kmalloc(((h->ioaccel_maxsg + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->ioaccel2_cmd_pool == NULL) ||
		(h->ioaccel2_blockFetchTable == NULL))
		goto clean_up;

	memset(h->ioaccel2_cmd_pool, 0,
		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
	return 0;

clean_up:
	if (h->ioaccel2_cmd_pool)
		pci_free_consistent(h->pdev,
			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
	kfree(h->ioaccel2_blockFetchTable);
	return 1;
}

static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
	u32 trans_support;
	unsigned long transMethod = CFGTBL_Trans_Performant |
					CFGTBL_Trans_use_short_tags;
	int i;

	if (hpsa_simple_mode)
		return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
		transMethod |= CFGTBL_Trans_io_accel1 |
				CFGTBL_Trans_enable_directed_msix;
		if (hpsa_alloc_ioaccel_cmd_and_bft(h))
			goto clean_up;
	} else {
		if (trans_support & CFGTBL_Trans_io_accel2) {
			transMethod |= CFGTBL_Trans_io_accel2 |
					CFGTBL_Trans_enable_directed_msix;
			if (ioaccel2_alloc_cmds_and_bft(h))
				goto clean_up;
		}
	}

	/* TODO: check that h->nreply_queues is correct */
	h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
	hpsa_get_max_perf_mode_cmds(h);
	/* Performant mode ring buffer and supporting data structures */
	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
			&(h->reply_pool_dhandle));

	for (i = 0; i < h->nreply_queues; i++) {
		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
		h->reply_queue[i].size = h->max_commands;
		h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
		h->reply_queue[i].current_entry = 0;
	}
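	/* Layout sketch (illustrative values): with max_commands = 1000
	 * and nreply_queues = 4, reply_pool_size = 1000 * 8 * 4 = 32000
	 * bytes, and queue i's head points at byte offset i * 8000 into
	 * the one DMA-coherent reply_pool allocation.
	 */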

	/* Need a block fetch table for performant mode */
	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
				sizeof(u32)), GFP_KERNEL);

	if ((h->reply_pool == NULL)
		|| (h->blockFetchTable == NULL))
		goto clean_up;

	hpsa_enter_performant_mode(h, trans_support);
	return;

clean_up:
	if (h->reply_pool)
		pci_free_consistent(h->pdev, h->reply_pool_size,
			h->reply_pool, h->reply_pool_dhandle);
	kfree(h->blockFetchTable);
}

static void hpsa_drain_commands(struct ctlr_info *h)
{
	int cmds_out;
	unsigned long flags;

	do { /* wait for all outstanding commands to drain out */
		spin_lock_irqsave(&h->lock, flags);
		cmds_out = h->commands_outstanding;
		spin_unlock_irqrestore(&h->lock, flags);
		if (cmds_out <= 0)
			break;
		msleep(100);
	} while (1);
}

/*
 * This is it. Register the PCI driver information for the cards we
 * control; the OS will call our registered routines when it finds one
 * of our cards.
 */
static int __init hpsa_init(void)
{
	return pci_register_driver(&hpsa_pci_driver);
}

static void __exit hpsa_cleanup(void)
{
	pci_unregister_driver(&hpsa_pci_driver);
}

static void __attribute__((unused)) verify_offsets(void)
{
#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)

	VERIFY_OFFSET(IU_type, 0);
	VERIFY_OFFSET(direction, 1);
	VERIFY_OFFSET(reply_queue, 2);
	/* VERIFY_OFFSET(reserved1, 3); */
	VERIFY_OFFSET(scsi_nexus, 4);
	VERIFY_OFFSET(Tag, 8);
	VERIFY_OFFSET(cdb, 16);
	VERIFY_OFFSET(cciss_lun, 32);
	VERIFY_OFFSET(data_len, 40);
	VERIFY_OFFSET(cmd_priority_task_attr, 44);
	VERIFY_OFFSET(sg_count, 45);
	/* VERIFY_OFFSET(reserved3 */
	VERIFY_OFFSET(err_ptr, 48);
	VERIFY_OFFSET(err_len, 56);
	/* VERIFY_OFFSET(reserved4 */
	VERIFY_OFFSET(sg, 64);

#undef VERIFY_OFFSET

#define VERIFY_OFFSET(member, offset) \
	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)

	VERIFY_OFFSET(dev_handle, 0x00);
	VERIFY_OFFSET(reserved1, 0x02);
	VERIFY_OFFSET(function, 0x03);
	VERIFY_OFFSET(reserved2, 0x04);
	VERIFY_OFFSET(err_info, 0x0C);
	VERIFY_OFFSET(reserved3, 0x10);
	VERIFY_OFFSET(err_info_len, 0x12);
	VERIFY_OFFSET(reserved4, 0x13);
	VERIFY_OFFSET(sgl_offset, 0x14);
	VERIFY_OFFSET(reserved5, 0x15);
	VERIFY_OFFSET(transfer_len, 0x1C);
	VERIFY_OFFSET(reserved6, 0x20);
	VERIFY_OFFSET(io_flags, 0x24);
	VERIFY_OFFSET(reserved7, 0x26);
	VERIFY_OFFSET(LUN, 0x34);
	VERIFY_OFFSET(control, 0x3C);
	VERIFY_OFFSET(CDB, 0x40);
	VERIFY_OFFSET(reserved8, 0x50);
	VERIFY_OFFSET(host_context_flags, 0x60);
	VERIFY_OFFSET(timeout_sec, 0x62);
	VERIFY_OFFSET(ReplyQueue, 0x64);
	VERIFY_OFFSET(reserved9, 0x65);
	VERIFY_OFFSET(Tag, 0x68);
	VERIFY_OFFSET(host_addr, 0x70);
	VERIFY_OFFSET(CISS_LUN, 0x78);
	VERIFY_OFFSET(SG, 0x78 + 8);
#undef VERIFY_OFFSET
}

module_init(hpsa_init);
module_exit(hpsa_cleanup);