/*
 * Disk Array driver for HP Smart Array SAS controllers
 * Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/compat.h>
#include <linux/blktrace_api.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <linux/cciss_ioctl.h>
#include <linux/string.h>
#include <linux/bitmap.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <asm/div64.h>
#include "hpsa_cmd.h"
#include "hpsa.h"

/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
#define HPSA_DRIVER_VERSION "3.4.0-1"
#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
#define HPSA "hpsa"

/* How long to wait (in milliseconds) for board to go into simple mode */
#define MAX_CONFIG_WAIT 30000
#define MAX_IOCTL_CONFIG_WAIT 1000

/* define how many times we will try a command because of bus resets */
#define MAX_CMD_RETRIES 3

/* Embedded module documentation macros - see modules.h */
MODULE_AUTHOR("Hewlett-Packard Company");
MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \
	HPSA_DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers");
MODULE_VERSION(HPSA_DRIVER_VERSION);
MODULE_LICENSE("GPL");

static int hpsa_allow_any;
module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_allow_any,
		"Allow hpsa driver to access unknown HP Smart Array hardware");
static int hpsa_simple_mode;
module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(hpsa_simple_mode,
	"Use 'simple mode' rather than 'performant mode'");

/* define the PCI info for the cards we can control */
static const struct pci_device_id hpsa_pci_device_id[] = {
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1921},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1922},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1923},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1924},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8},
	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D},
	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088},
	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},
	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
	{0,}
};

MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id);

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{0x3241103C, "Smart Array P212", &SA5_access},
	{0x3243103C, "Smart Array P410", &SA5_access},
	{0x3245103C, "Smart Array P410i", &SA5_access},
	{0x3247103C, "Smart Array P411", &SA5_access},
	{0x3249103C, "Smart Array P812", &SA5_access},
	{0x324A103C, "Smart Array P712m", &SA5_access},
	{0x324B103C, "Smart Array P711m", &SA5_access},
	{0x3350103C, "Smart Array P222", &SA5_access},
	{0x3351103C, "Smart Array P420", &SA5_access},
	{0x3352103C, "Smart Array P421", &SA5_access},
	{0x3353103C, "Smart Array P822", &SA5_access},
	{0x3354103C, "Smart Array P420i", &SA5_access},
	{0x3355103C, "Smart Array P220i", &SA5_access},
	{0x3356103C, "Smart Array P721m", &SA5_access},
	{0x1921103C, "Smart Array P830i", &SA5_access},
	{0x1922103C, "Smart Array P430", &SA5_access},
	{0x1923103C, "Smart Array P431", &SA5_access},
	{0x1924103C, "Smart Array P830", &SA5_access},
	{0x1926103C, "Smart Array P731m", &SA5_access},
	{0x1928103C, "Smart Array P230i", &SA5_access},
	{0x1929103C, "Smart Array P530", &SA5_access},
	{0x21BD103C, "Smart Array", &SA5_access},
	{0x21BE103C, "Smart Array", &SA5_access},
	{0x21BF103C, "Smart Array", &SA5_access},
	{0x21C0103C, "Smart Array", &SA5_access},
	{0x21C1103C, "Smart Array", &SA5_access},
	{0x21C2103C, "Smart Array", &SA5_access},
	{0x21C3103C, "Smart Array", &SA5_access},
	{0x21C4103C, "Smart Array", &SA5_access},
	{0x21C5103C, "Smart Array", &SA5_access},
	{0x21C7103C, "Smart Array", &SA5_access},
	{0x21C8103C, "Smart Array", &SA5_access},
	{0x21C9103C, "Smart Array", &SA5_access},
	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access},
	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access},
	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access},
	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access},
	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},
	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

static int number_of_controllers;

static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);
static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);
static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg);
static void start_io(struct ctlr_info *h);

#ifdef CONFIG_COMPAT
static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg);
#endif

static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
	int cmd_type);
#define VPD_PAGE (1 << 8)

static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
static void hpsa_scan_start(struct Scsi_Host *);
static int hpsa_scan_finished(struct Scsi_Host *sh,
	unsigned long elapsed_time);
static int hpsa_change_queue_depth(struct scsi_device *sdev,
	int qdepth, int reason);

static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
static int hpsa_eh_abort_handler(struct scsi_cmnd *scsicmd);
static int hpsa_slave_alloc(struct scsi_device *sdev);
static void hpsa_slave_destroy(struct scsi_device *sdev);

static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
static void check_ioctl_unit_attention(struct ctlr_info *h,
	struct CommandList *c);
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
	int nsgs, int min_blocks, int *bucket_map);
static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
	u32 *cfg_base_addr, u64 *cfg_base_addr_index,
	u64 *cfg_offset);
static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
	unsigned long *memory_bar);
static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
	int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
static void hpsa_drain_accel_commands(struct ctlr_info *h);
static void hpsa_flush_cache(struct ctlr_info *h);
static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
	u8 *scsi3addr);

static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
{
	unsigned long *priv = shost_priv(sdev->host);
	return (struct ctlr_info *) *priv;
}

static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh)
{
	unsigned long *priv = shost_priv(sh);
	return (struct ctlr_info *) *priv;
}

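/*
 * Descriptive note (derived from the code below): returns 1 if the
 * command's sense data reports a UNIT ATTENTION, after logging which
 * condition (ASC, sense byte 12) was seen; returns 0 otherwise.
 */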
static int check_for_unit_attention(struct ctlr_info *h,
	struct CommandList *c)
{
	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
		return 0;

	switch (c->err_info->SenseInfo[12]) {
	case STATE_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: a state change "
			"detected, command retried\n", h->ctlr);
		break;
	case LUN_FAILED:
		dev_warn(&h->pdev->dev, HPSA "%d: LUN failure "
			"detected, action required\n", h->ctlr);
		break;
	case REPORT_LUNS_CHANGED:
		dev_warn(&h->pdev->dev, HPSA "%d: report LUN data "
			"changed, action required\n", h->ctlr);
	/*
	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the external
	 * target (array) devices.
	 */
		break;
	case POWER_OR_RESET:
		dev_warn(&h->pdev->dev, HPSA "%d: a power on "
			"or device reset detected\n", h->ctlr);
		break;
	case UNIT_ATTENTION_CLEARED:
		dev_warn(&h->pdev->dev, HPSA "%d: unit attention "
		    "cleared by another initiator\n", h->ctlr);
		break;
	default:
		dev_warn(&h->pdev->dev, HPSA "%d: unknown "
			"unit attention detected\n", h->ctlr);
		break;
	}
	return 1;
}

static int check_for_busy(struct ctlr_info *h, struct CommandList *c)
{
	if (c->err_info->CommandStatus != CMD_TARGET_STATUS ||
		(c->err_info->ScsiStatus != SAM_STAT_BUSY &&
		 c->err_info->ScsiStatus != SAM_STAT_TASK_SET_FULL))
		return 0;
	dev_warn(&h->pdev->dev, HPSA "device busy");
	return 1;
}

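/*
 * Descriptive note (derived from the code below): sysfs store handler
 * that enables (non-zero) or disables (zero) the HP SSD Smart Path
 * (I/O accelerator) path for the whole controller via h->acciopath_status.
 */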
static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int status, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &status) != 1)
		return -EINVAL;
	h = shost_to_hba(shost);
	h->acciopath_status = !!status;
	dev_warn(&h->pdev->dev,
		"hpsa: HP SSD Smart Path %s via sysfs update.\n",
		h->acciopath_status ? "enabled" : "disabled");
	return count;
}

static ssize_t host_store_raid_offload_debug(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int debug_level, len;
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	char tmpbuf[10];

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;
	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count;
	strncpy(tmpbuf, buf, len);
	tmpbuf[len] = '\0';
	if (sscanf(tmpbuf, "%d", &debug_level) != 1)
		return -EINVAL;
	if (debug_level < 0)
		debug_level = 0;
	h = shost_to_hba(shost);
	h->raid_offload_debug = debug_level;
	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n",
		h->raid_offload_debug);
	return count;
}

static ssize_t host_store_rescan(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	h = shost_to_hba(shost);
	hpsa_scan_start(h->scsi_host);
	return count;
}

static ssize_t host_show_firmware_revision(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);
	unsigned char *fwrev;

	h = shost_to_hba(shost);
	if (!h->hba_inquiry_data)
		return 0;
	fwrev = &h->hba_inquiry_data[32];
	return snprintf(buf, 20, "%c%c%c%c\n",
		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
}

static ssize_t host_show_commands_outstanding(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct ctlr_info *h = shost_to_hba(shost);

	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
}

static ssize_t host_show_transport_mode(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%s\n",
		h->transMethod & CFGTBL_Trans_Performant ?
			"performant" : "simple");
}

static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 30, "HP SSD Smart Path %s\n",
		(h->acciopath_status == 1) ?  "enabled" : "disabled");
}

/* List of controllers which cannot be hard reset on kexec with reset_devices */
static u32 unresettable_controller[] = {
	0x324a103C, /* Smart Array P712m */
	0x324b103C, /* SmartArray P711m */
	0x3223103C, /* Smart Array P800 */
	0x3234103C, /* Smart Array P400 */
	0x3235103C, /* Smart Array P400i */
	0x3211103C, /* Smart Array E200i */
	0x3212103C, /* Smart Array E200 */
	0x3213103C, /* Smart Array E200i */
	0x3214103C, /* Smart Array E200i */
	0x3215103C, /* Smart Array E200i */
	0x3237103C, /* Smart Array E500 */
	0x323D103C, /* Smart Array P700m */
	0x40800E11, /* Smart Array 5i */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
};

/* List of controllers which cannot even be soft reset */
static u32 soft_unresettable_controller[] = {
	0x40800E11, /* Smart Array 5i */
	0x40700E11, /* Smart Array 5300 */
	0x40820E11, /* Smart Array 532 */
	0x40830E11, /* Smart Array 5312 */
	0x409A0E11, /* Smart Array 641 */
	0x409B0E11, /* Smart Array 642 */
	0x40910E11, /* Smart Array 6i */
	/* Exclude 640x boards.  These are two pci devices in one slot
	 * which share a battery backed cache module.  One controls the
	 * cache, the other accesses the cache through the one that controls
	 * it.  If we reset the one controlling the cache, the other will
	 * likely not be happy.  Just forbid resetting this conjoined mess.
	 * The 640x isn't really supported by hpsa anyway.
	 */
	0x409C0E11, /* Smart Array 6400 */
	0x409D0E11, /* Smart Array 6400 EM */
};

static int ctlr_is_hard_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
		if (unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_soft_resettable(u32 board_id)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
		if (soft_unresettable_controller[i] == board_id)
			return 0;
	return 1;
}

static int ctlr_is_resettable(u32 board_id)
{
	return ctlr_is_hard_resettable(board_id) ||
		ctlr_is_soft_resettable(board_id);
}

static ssize_t host_show_resettable(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct Scsi_Host *shost = class_to_shost(dev);

	h = shost_to_hba(shost);
	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
}

static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
{
	return (scsi3addr[3] & 0xC0) == 0x40;
}

static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
	"1(ADM)", "UNKNOWN"
};
#define HPSA_RAID_0	0
#define HPSA_RAID_4	1
#define HPSA_RAID_1	2	/* also used for RAID 10 */
#define HPSA_RAID_5	3	/* also used for RAID 50 */
#define HPSA_RAID_51	4
#define HPSA_RAID_6	5	/* also used for RAID 60 */
#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */
#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)

static ssize_t raid_level_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	ssize_t l = 0;
	unsigned char rlevel;
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}

	/* Is this even a logical drive? */
	if (!is_logical_dev_addr_mode(hdev->scsi3addr)) {
		spin_unlock_irqrestore(&h->lock, flags);
		l = snprintf(buf, PAGE_SIZE, "N/A\n");
		return l;
	}

	rlevel = hdev->raid_level;
	spin_unlock_irqrestore(&h->lock, flags);
	if (rlevel > RAID_UNKNOWN)
		rlevel = RAID_UNKNOWN;
	l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]);
	return l;
}

static ssize_t lunid_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char lunid[8];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(lunid, hdev->scsi3addr, sizeof(lunid));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		lunid[0], lunid[1], lunid[2], lunid[3],
		lunid[4], lunid[5], lunid[6], lunid[7]);
}

static ssize_t unique_id_show(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	unsigned char sn[16];

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	memcpy(sn, hdev->device_id, sizeof(sn));
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 16 * 2 + 2,
			"%02X%02X%02X%02X%02X%02X%02X%02X"
			"%02X%02X%02X%02X%02X%02X%02X%02X\n",
			sn[0], sn[1], sn[2], sn[3],
			sn[4], sn[5], sn[6], sn[7],
			sn[8], sn[9], sn[10], sn[11],
			sn[12], sn[13], sn[14], sn[15]);
}

static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev,
	     struct device_attribute *attr, char *buf)
{
	struct ctlr_info *h;
	struct scsi_device *sdev;
	struct hpsa_scsi_dev_t *hdev;
	unsigned long flags;
	int offload_enabled;

	sdev = to_scsi_device(dev);
	h = sdev_to_hba(sdev);
	spin_lock_irqsave(&h->lock, flags);
	hdev = sdev->hostdata;
	if (!hdev) {
		spin_unlock_irqrestore(&h->lock, flags);
		return -ENODEV;
	}
	offload_enabled = hdev->offload_enabled;
	spin_unlock_irqrestore(&h->lock, flags);
	return snprintf(buf, 20, "%d\n", offload_enabled);
}

static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO,
			host_show_hp_ssd_smart_path_enabled, NULL);
static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH,
		host_show_hp_ssd_smart_path_status,
		host_store_hp_ssd_smart_path_status);
static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL,
			host_store_raid_offload_debug);
static DEVICE_ATTR(firmware_revision, S_IRUGO,
	host_show_firmware_revision, NULL);
static DEVICE_ATTR(commands_outstanding, S_IRUGO,
	host_show_commands_outstanding, NULL);
static DEVICE_ATTR(transport_mode, S_IRUGO,
	host_show_transport_mode, NULL);
static DEVICE_ATTR(resettable, S_IRUGO,
	host_show_resettable, NULL);

static struct device_attribute *hpsa_sdev_attrs[] = {
	&dev_attr_raid_level,
	&dev_attr_lunid,
	&dev_attr_unique_id,
	&dev_attr_hp_ssd_smart_path_enabled,
	NULL,
};

static struct device_attribute *hpsa_shost_attrs[] = {
	&dev_attr_rescan,
	&dev_attr_firmware_revision,
	&dev_attr_commands_outstanding,
	&dev_attr_transport_mode,
	&dev_attr_resettable,
	&dev_attr_hp_ssd_smart_path_status,
	&dev_attr_raid_offload_debug,
	NULL,
};

static struct scsi_host_template hpsa_driver_template = {
	.module = THIS_MODULE,
	.name = HPSA,
	.proc_name = HPSA,
	.queuecommand = hpsa_scsi_queue_command,
	.scan_start = hpsa_scan_start,
	.scan_finished = hpsa_scan_finished,
	.change_queue_depth = hpsa_change_queue_depth,
	.this_id = -1,
	.use_clustering = ENABLE_CLUSTERING,
	.eh_abort_handler = hpsa_eh_abort_handler,
	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
	.ioctl = hpsa_ioctl,
	.slave_alloc = hpsa_slave_alloc,
	.slave_destroy = hpsa_slave_destroy,
#ifdef CONFIG_COMPAT
	.compat_ioctl = hpsa_compat_ioctl,
#endif
	.sdev_attrs = hpsa_sdev_attrs,
	.shost_attrs = hpsa_shost_attrs,
	.max_sectors = 8192,
	.no_write_same = 1,
};


/* Enqueuing and dequeuing functions for cmdlists. */
static inline void addQ(struct list_head *list, struct CommandList *c)
{
	list_add_tail(&c->list, list);
}

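/*
 * Descriptive note (derived from the code below): pull the next completed
 * command tag off reply queue q.  In performant mode the queue is a ring
 * whose entries carry a toggle bit in bit 0; an entry is valid only while
 * that bit matches rq->wraparound, and the wraparound flag flips each time
 * current_entry wraps back to the start of the ring.  Returns FIFO_EMPTY
 * when nothing is pending.
 */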
static inline u32 next_command(struct ctlr_info *h, u8 q)
{
	u32 a;
	struct reply_pool *rq = &h->reply_queue[q];
	unsigned long flags;

	if (h->transMethod & CFGTBL_Trans_io_accel1)
		return h->access.command_completed(h, q);

	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
		return h->access.command_completed(h, q);

	if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
		a = rq->head[rq->current_entry];
		rq->current_entry++;
		spin_lock_irqsave(&h->lock, flags);
		h->commands_outstanding--;
		spin_unlock_irqrestore(&h->lock, flags);
	} else {
		a = FIFO_EMPTY;
	}
	/* Check for wraparound */
	if (rq->current_entry == h->max_commands) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;
	}
	return a;
}

/*
 * There are some special bits in the bus address of the
 * command that we have to set for the controller to know
 * how to process the command:
 *
 * Normal performant mode:
 * bit 0: 1 means performant mode, 0 means simple mode.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 0)
 *
 * ioaccel1 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-3 = block fetch table entry
 * bits 4-6 = command type (== 110)
 * (command type is needed because ioaccel1 mode
 * commands are submitted through the same register as normal
 * mode commands, so this is how the controller knows whether
 * the command is normal mode or ioaccel1 mode.)
 *
 * ioaccel2 mode:
 * bit 0 = "performant mode" bit.
 * bits 1-4 = block fetch table entry (note extra bit)
 * bits 4-6 = not needed, because ioaccel2 mode has
 * a separate special register for submitting commands.
 */
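/*
 * Illustrative example (follows from set_performant_mode() below): a
 * performant-mode command whose scatter-gather count maps to block fetch
 * table entry 2 gets tagged as busaddr |= 1 | (2 << 1), i.e. the low bits
 * become 0x5 -- bit 0 marks performant mode and bits 1-3 carry the fetch
 * table entry.
 */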

/* set_performant_mode: Modify the tag for cciss performant
 * set bit 0 for pull model, bits 3-1 for block fetch
 * register number
 */
static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
{
	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {
		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
		if (likely(h->msix_vector > 0))
			c->Header.ReplyQueue =
				raw_smp_processor_id() % h->nreply_queues;
	}
}

static void set_ioaccel1_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->ReplyQueue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit (bit 0)
	 *  - pull count (bits 1-3)
	 *  - command type (bits 4-6)
	 */
	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) |
					IOACCEL1_BUSADDR_CMDTYPE;
}

static void set_ioaccel2_performant_mode(struct ctlr_info *h,
						struct CommandList *c)
{
	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];

	/* Tell the controller to post the reply to the queue for this
	 * processor.  This seems to give the best I/O throughput.
	 */
	cp->reply_queue = smp_processor_id() % h->nreply_queues;
	/* Set the bits in the address sent down to include:
	 *  - performant mode bit not used in ioaccel mode 2
	 *  - pull count (bits 0-3)
	 *  - command type isn't needed for ioaccel2
	 */
	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]);
}

static int is_firmware_flash_cmd(u8 *cdb)
{
	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE;
}

/*
 * During firmware flash, the heartbeat register may not update as frequently
 * as it should.  So we dial down lockup detection during firmware flash, and
 * dial it back up when firmware flash completes.
 */
#define HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH (240 * HZ)
#define HEARTBEAT_SAMPLE_INTERVAL (30 * HZ)
static void dial_down_lockup_detection_during_fw_flash(struct ctlr_info *h,
		struct CommandList *c)
{
	if (!is_firmware_flash_cmd(c->Request.CDB))
		return;
	atomic_inc(&h->firmware_flash_in_progress);
	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL_DURING_FLASH;
}

static void dial_up_lockup_detection_on_fw_flash_complete(struct ctlr_info *h,
		struct CommandList *c)
{
	if (is_firmware_flash_cmd(c->Request.CDB) &&
		atomic_dec_and_test(&h->firmware_flash_in_progress))
		h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
}

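/*
 * Descriptive note (derived from the code below): tag the command for
 * whichever submission path it will take (ioaccel1, ioaccel2, or ordinary
 * performant mode), relax the lockup detector if this is a firmware flash,
 * then queue it on h->reqQ and kick off I/O.
 */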
static void enqueue_cmd_and_start_io(struct ctlr_info *h,
	struct CommandList *c)
{
	unsigned long flags;

	switch (c->cmd_type) {
	case CMD_IOACCEL1:
		set_ioaccel1_performant_mode(h, c);
		break;
	case CMD_IOACCEL2:
		set_ioaccel2_performant_mode(h, c);
		break;
	default:
		set_performant_mode(h, c);
	}
	dial_down_lockup_detection_during_fw_flash(h, c);
	spin_lock_irqsave(&h->lock, flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	spin_unlock_irqrestore(&h->lock, flags);
	start_io(h);
}

static inline void removeQ(struct CommandList *c)
{
	if (WARN_ON(list_empty(&c->list)))
		return;
	list_del_init(&c->list);
}

static inline int is_hba_lunid(unsigned char scsi3addr[])
{
	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
}

static inline int is_scsi_rev_5(struct ctlr_info *h)
{
	if (!h->hba_inquiry_data)
		return 0;
	if ((h->hba_inquiry_data[2] & 0x07) == 5)
		return 1;
	return 0;
}

static int hpsa_find_target_lun(struct ctlr_info *h,
	unsigned char scsi3addr[], int bus, int *target, int *lun)
{
	/* finds an unused bus, target, lun for a new physical device
	 * assumes h->devlock is held
	 */
	int i, found = 0;
	DECLARE_BITMAP(lun_taken, HPSA_MAX_DEVICES);

	bitmap_zero(lun_taken, HPSA_MAX_DEVICES);

	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i]->bus == bus && h->dev[i]->target != -1)
			__set_bit(h->dev[i]->target, lun_taken);
	}

	i = find_first_zero_bit(lun_taken, HPSA_MAX_DEVICES);
	if (i < HPSA_MAX_DEVICES) {
		/* *bus = 1; */
		*target = i;
		*lun = 0;
		found = 1;
	}
	return !found;
}

/* Add an entry into h->dev[] array. */
static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno,
		struct hpsa_scsi_dev_t *device,
		struct hpsa_scsi_dev_t *added[], int *nadded)
{
	/* assumes h->devlock is held */
	int n = h->ndevices;
	int i;
	unsigned char addr1[8], addr2[8];
	struct hpsa_scsi_dev_t *sd;

	if (n >= HPSA_MAX_DEVICES) {
		dev_err(&h->pdev->dev, "too many devices, some will be "
			"inaccessible.\n");
		return -1;
	}

	/* physical devices do not have lun or target assigned until now. */
	if (device->lun != -1)
		/* Logical device, lun is already assigned. */
		goto lun_assigned;

	/* If this device is a non-zero lun of a multi-lun device
	 * byte 4 of the 8-byte LUN addr will contain the logical
	 * unit no, zero otherwise.
	 */
	if (device->scsi3addr[4] == 0) {
		/* This is not a non-zero lun of a multi-lun device */
		if (hpsa_find_target_lun(h, device->scsi3addr,
			device->bus, &device->target, &device->lun) != 0)
			return -1;
		goto lun_assigned;
	}

	/* This is a non-zero lun of a multi-lun device.
	 * Search through our list and find the device which
	 * has the same 8 byte LUN address, excepting byte 4.
	 * Assign the same bus and target for this new LUN.
	 * Use the logical unit number from the firmware.
	 */
	memcpy(addr1, device->scsi3addr, 8);
	addr1[4] = 0;
	for (i = 0; i < n; i++) {
		sd = h->dev[i];
		memcpy(addr2, sd->scsi3addr, 8);
		addr2[4] = 0;
		/* differ only in byte 4? */
		if (memcmp(addr1, addr2, 8) == 0) {
			device->bus = sd->bus;
			device->target = sd->target;
			device->lun = device->scsi3addr[4];
			break;
		}
	}
	if (device->lun == -1) {
		dev_warn(&h->pdev->dev, "physical device with no LUN=0,"
			" suspect firmware bug or unsupported hardware "
			"configuration.\n");
			return -1;
	}

lun_assigned:

	h->dev[n] = device;
	h->ndevices++;
	added[*nadded] = device;
	(*nadded)++;

	/* initially, (before registering with scsi layer) we don't
	 * know our hostno and we don't want to print anything first
	 * time anyway (the scsi layer's inquiries will show that info)
	 */
	/* if (hostno != -1) */
		dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n",
			scsi_device_type(device->devtype), hostno,
			device->bus, device->target, device->lun);
	return 0;
}

/* Update an entry in h->dev[] array. */
static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	/* Raid level changed. */
	h->dev[entry]->raid_level = new_entry->raid_level;

	/* Raid offload parameters changed. */
	h->dev[entry]->offload_config = new_entry->offload_config;
	h->dev[entry]->offload_enabled = new_entry->offload_enabled;
	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle;
	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror;
	h->dev[entry]->raid_map = new_entry->raid_map;

	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
		new_entry->target, new_entry->lun);
}

/* Replace an entry from h->dev[] array. */
static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno,
	int entry, struct hpsa_scsi_dev_t *new_entry,
	struct hpsa_scsi_dev_t *added[], int *nadded,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	/*
	 * New physical devices won't have target/lun assigned yet
	 * so we need to preserve the values in the slot we are replacing.
	 */
	if (new_entry->target == -1) {
		new_entry->target = h->dev[entry]->target;
		new_entry->lun = h->dev[entry]->lun;
	}

	h->dev[entry] = new_entry;
	added[*nadded] = new_entry;
	(*nadded)++;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n",
		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,
			new_entry->target, new_entry->lun);
}

/* Remove an entry from h->dev[] array. */
static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry,
	struct hpsa_scsi_dev_t *removed[], int *nremoved)
{
	/* assumes h->devlock is held */
	int i;
	struct hpsa_scsi_dev_t *sd;

	BUG_ON(entry < 0 || entry >= HPSA_MAX_DEVICES);

	sd = h->dev[entry];
	removed[*nremoved] = h->dev[entry];
	(*nremoved)++;

	for (i = entry; i < h->ndevices-1; i++)
		h->dev[i] = h->dev[i+1];
	h->ndevices--;
	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n",
		scsi_device_type(sd->devtype), hostno, sd->bus, sd->target,
		sd->lun);
}

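/* Compare two 8-byte SCSI-3 addresses byte for byte. */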
#define SCSI3ADDR_EQ(a, b) ( \
	(a)[7] == (b)[7] && \
	(a)[6] == (b)[6] && \
	(a)[5] == (b)[5] && \
	(a)[4] == (b)[4] && \
	(a)[3] == (b)[3] && \
	(a)[2] == (b)[2] && \
	(a)[1] == (b)[1] && \
	(a)[0] == (b)[0])

static void fixup_botched_add(struct ctlr_info *h,
	struct hpsa_scsi_dev_t *added)
{
	/* called when scsi_add_device fails in order to re-adjust
	 * h->dev[] to match the mid layer's view.
	 */
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&h->lock, flags);
	for (i = 0; i < h->ndevices; i++) {
		if (h->dev[i] == added) {
			for (j = i; j < h->ndevices-1; j++)
				h->dev[j] = h->dev[j+1];
			h->ndevices--;
			break;
		}
	}
	spin_unlock_irqrestore(&h->lock, flags);
	kfree(added);
}

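/*
 * Descriptive note (derived from the code below): returns 1 if the two
 * entries describe the same device (same SCSI-3 address, device_id, model,
 * vendor, device type and bus); target and lun are deliberately ignored
 * because new entries may not have them assigned yet.
 */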
static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* we compare everything except lun and target as these
	 * are not yet assigned.  Compare parts likely
	 * to differ first
	 */
	if (memcmp(dev1->scsi3addr, dev2->scsi3addr,
		sizeof(dev1->scsi3addr)) != 0)
		return 0;
	if (memcmp(dev1->device_id, dev2->device_id,
		sizeof(dev1->device_id)) != 0)
		return 0;
	if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0)
		return 0;
	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
		return 0;
	if (dev1->devtype != dev2->devtype)
		return 0;
	if (dev1->bus != dev2->bus)
		return 0;
	return 1;
}

static inline int device_updated(struct hpsa_scsi_dev_t *dev1,
	struct hpsa_scsi_dev_t *dev2)
{
	/* Device attributes that can change, but don't mean
	 * that the device is a different device, nor that the OS
	 * needs to be told anything about the change.
	 */
	if (dev1->raid_level != dev2->raid_level)
		return 1;
	if (dev1->offload_config != dev2->offload_config)
		return 1;
	if (dev1->offload_enabled != dev2->offload_enabled)
		return 1;
	return 0;
}

/* Find needle in haystack.  If exact match found, return DEVICE_SAME,
 * and return needle location in *index.  If scsi3addr matches, but not
 * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle
 * location in *index.
 * In the case of a minor device attribute change, such as RAID level, just
 * return DEVICE_UPDATED, along with the updated device's location in index.
 * If needle not found, return DEVICE_NOT_FOUND.
 */
static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,
	struct hpsa_scsi_dev_t *haystack[], int haystack_size,
	int *index)
{
	int i;
#define DEVICE_NOT_FOUND 0
#define DEVICE_CHANGED 1
#define DEVICE_SAME 2
#define DEVICE_UPDATED 3
	for (i = 0; i < haystack_size; i++) {
		if (haystack[i] == NULL) /* previously removed. */
			continue;
		if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) {
			*index = i;
			if (device_is_the_same(needle, haystack[i])) {
				if (device_updated(needle, haystack[i]))
					return DEVICE_UPDATED;
				return DEVICE_SAME;
			} else {
				/* Keep offline devices offline */
				if (needle->volume_offline)
					return DEVICE_NOT_FOUND;
				return DEVICE_CHANGED;
			}
		}
	}
	*index = -1;
	return DEVICE_NOT_FOUND;
}

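/*
 * Descriptive note (derived from the code below): remember a volume that
 * reported itself offline by adding its SCSI-3 address to
 * h->offline_device_list (unless it is already there), so it can be
 * monitored and brought online later.
 */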
Stephen M. Cameron98465902014-02-21 16:25:00 -06001147static void hpsa_monitor_offline_device(struct ctlr_info *h,
1148 unsigned char scsi3addr[])
1149{
1150 struct offline_device_entry *device;
1151 unsigned long flags;
1152
1153 /* Check to see if device is already on the list */
1154 spin_lock_irqsave(&h->offline_device_lock, flags);
1155 list_for_each_entry(device, &h->offline_device_list, offline_list) {
1156 if (memcmp(device->scsi3addr, scsi3addr,
1157 sizeof(device->scsi3addr)) == 0) {
1158 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1159 return;
1160 }
1161 }
1162 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1163
1164 /* Device is not on the list, add it. */
1165 device = kmalloc(sizeof(*device), GFP_KERNEL);
1166 if (!device) {
1167 dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__);
1168 return;
1169 }
1170 memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
1171 spin_lock_irqsave(&h->offline_device_lock, flags);
1172 list_add_tail(&device->offline_list, &h->offline_device_list);
1173 spin_unlock_irqrestore(&h->offline_device_lock, flags);
1174}
1175
1176/* Print a message explaining various offline volume states */
1177static void hpsa_show_volume_status(struct ctlr_info *h,
1178 struct hpsa_scsi_dev_t *sd)
1179{
1180 if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED)
1181 dev_info(&h->pdev->dev,
1182 "C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n",
1183 h->scsi_host->host_no,
1184 sd->bus, sd->target, sd->lun);
1185 switch (sd->volume_offline) {
1186 case HPSA_LV_OK:
1187 break;
1188 case HPSA_LV_UNDERGOING_ERASE:
1189 dev_info(&h->pdev->dev,
1190 "C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n",
1191 h->scsi_host->host_no,
1192 sd->bus, sd->target, sd->lun);
1193 break;
1194 case HPSA_LV_UNDERGOING_RPI:
1195 dev_info(&h->pdev->dev,
1196 "C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n",
1197 h->scsi_host->host_no,
1198 sd->bus, sd->target, sd->lun);
1199 break;
1200 case HPSA_LV_PENDING_RPI:
1201 dev_info(&h->pdev->dev,
1202 "C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n",
1203 h->scsi_host->host_no,
1204 sd->bus, sd->target, sd->lun);
1205 break;
1206 case HPSA_LV_ENCRYPTED_NO_KEY:
1207 dev_info(&h->pdev->dev,
1208 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n",
1209 h->scsi_host->host_no,
1210 sd->bus, sd->target, sd->lun);
1211 break;
1212 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
1213 dev_info(&h->pdev->dev,
1214 "C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n",
1215 h->scsi_host->host_no,
1216 sd->bus, sd->target, sd->lun);
1217 break;
1218 case HPSA_LV_UNDERGOING_ENCRYPTION:
1219 dev_info(&h->pdev->dev,
1220 "C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n",
1221 h->scsi_host->host_no,
1222 sd->bus, sd->target, sd->lun);
1223 break;
1224 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
1225 dev_info(&h->pdev->dev,
1226 "C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n",
1227 h->scsi_host->host_no,
1228 sd->bus, sd->target, sd->lun);
1229 break;
1230 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
1231 dev_info(&h->pdev->dev,
1232 "C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n",
1233 h->scsi_host->host_no,
1234 sd->bus, sd->target, sd->lun);
1235 break;
1236 case HPSA_LV_PENDING_ENCRYPTION:
1237 dev_info(&h->pdev->dev,
1238 "C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n",
1239 h->scsi_host->host_no,
1240 sd->bus, sd->target, sd->lun);
1241 break;
1242 case HPSA_LV_PENDING_ENCRYPTION_REKEYING:
1243 dev_info(&h->pdev->dev,
1244 "C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n",
1245 h->scsi_host->host_no,
1246 sd->bus, sd->target, sd->lun);
1247 break;
1248 }
1249}
1250
Stephen M. Cameron4967bd32010-02-04 08:41:49 -06001251static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001252 struct hpsa_scsi_dev_t *sd[], int nsds)
1253{
1254 /* sd contains scsi3 addresses and devtypes, and inquiry
1255 * data. This function takes what's in sd to be the current
1256 * reality and updates h->dev[] to reflect that reality.
1257 */
1258 int i, entry, device_change, changes = 0;
1259 struct hpsa_scsi_dev_t *csd;
1260 unsigned long flags;
1261 struct hpsa_scsi_dev_t **added, **removed;
1262 int nadded, nremoved;
1263 struct Scsi_Host *sh = NULL;
1264
Scott Teelcfe5bad2011-10-26 16:21:07 -05001265 added = kzalloc(sizeof(*added) * HPSA_MAX_DEVICES, GFP_KERNEL);
1266 removed = kzalloc(sizeof(*removed) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001267
1268 if (!added || !removed) {
1269 dev_warn(&h->pdev->dev, "out of memory in "
1270 "adjust_hpsa_scsi_table\n");
1271 goto free_and_out;
1272 }
1273
1274 spin_lock_irqsave(&h->devlock, flags);
1275
1276 /* find any devices in h->dev[] that are not in
1277 * sd[] and remove them from h->dev[], and for any
1278 * devices which have changed, remove the old device
1279 * info and add the new device info.
Scott Teelbd9244f2012-01-19 14:01:30 -06001280 * If minor device attributes change, just update
1281 * the existing device structure.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001282 */
1283 i = 0;
1284 nremoved = 0;
1285 nadded = 0;
1286 while (i < h->ndevices) {
1287 csd = h->dev[i];
1288 device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry);
1289 if (device_change == DEVICE_NOT_FOUND) {
1290 changes++;
1291 hpsa_scsi_remove_entry(h, hostno, i,
1292 removed, &nremoved);
1293 continue; /* remove ^^^, hence i not incremented */
1294 } else if (device_change == DEVICE_CHANGED) {
1295 changes++;
Stephen M. Cameron2a8ccf32010-02-04 08:43:41 -06001296 hpsa_scsi_replace_entry(h, hostno, i, sd[entry],
1297 added, &nadded, removed, &nremoved);
Stephen M. Cameronc7f172d2010-02-04 08:43:31 -06001298 /* Set it to NULL to prevent it from being freed
1299 * at the bottom of hpsa_update_scsi_devices()
1300 */
1301 sd[entry] = NULL;
Scott Teelbd9244f2012-01-19 14:01:30 -06001302 } else if (device_change == DEVICE_UPDATED) {
1303 hpsa_scsi_update_entry(h, hostno, i, sd[entry]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001304 }
1305 i++;
1306 }
1307
1308 /* Now, make sure every device listed in sd[] is also
1309 * listed in h->dev[], adding them if they aren't found
1310 */
1311
1312 for (i = 0; i < nsds; i++) {
1313 if (!sd[i]) /* if already added above. */
1314 continue;
Stephen M. Cameron98465902014-02-21 16:25:00 -06001315
1316 /* Don't add devices which are NOT READY, FORMAT IN PROGRESS
1317 * as the SCSI mid-layer does not handle such devices well.
1318 * It relentlessly loops sending TUR at 3Hz, then READ(10)
1319 * at 160Hz, and prevents the system from coming up.
1320 */
1321 if (sd[i]->volume_offline) {
1322 hpsa_show_volume_status(h, sd[i]);
1323 dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n",
1324 h->scsi_host->host_no,
1325 sd[i]->bus, sd[i]->target, sd[i]->lun);
1326 continue;
1327 }
1328
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001329 device_change = hpsa_scsi_find_entry(sd[i], h->dev,
1330 h->ndevices, &entry);
1331 if (device_change == DEVICE_NOT_FOUND) {
1332 changes++;
1333 if (hpsa_scsi_add_entry(h, hostno, sd[i],
1334 added, &nadded) != 0)
1335 break;
1336 sd[i] = NULL; /* prevent from being freed later. */
1337 } else if (device_change == DEVICE_CHANGED) {
1338 /* should never happen... */
1339 changes++;
1340 dev_warn(&h->pdev->dev,
1341 "device unexpectedly changed.\n");
1342 /* but if it does happen, we just ignore that device */
1343 }
1344 }
1345 spin_unlock_irqrestore(&h->devlock, flags);
1346
Stephen M. Cameron98465902014-02-21 16:25:00 -06001347 /* Monitor devices which are in one of several NOT READY states to be
1348 * brought online later. This must be done without holding h->devlock,
1349 * so don't touch h->dev[]
1350 */
1351 for (i = 0; i < nsds; i++) {
1352 if (!sd[i]) /* if already added above. */
1353 continue;
1354 if (sd[i]->volume_offline)
1355 hpsa_monitor_offline_device(h, sd[i]->scsi3addr);
1356 }
1357
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001358 /* Don't notify scsi mid layer of any changes the first time through
1359 * (or if there are no changes) scsi_scan_host will do it later the
1360 * first time through.
1361 */
1362 if (hostno == -1 || !changes)
1363 goto free_and_out;
1364
1365 sh = h->scsi_host;
1366 /* Notify scsi mid layer of any removed devices */
1367 for (i = 0; i < nremoved; i++) {
1368 struct scsi_device *sdev =
1369 scsi_device_lookup(sh, removed[i]->bus,
1370 removed[i]->target, removed[i]->lun);
1371 if (sdev != NULL) {
1372 scsi_remove_device(sdev);
1373 scsi_device_put(sdev);
1374 } else {
1375 /* We don't expect to get here.
1376 * Future cmds to this device will get selection
1377 * timeout as if the device was gone.
1378 */
1379 dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d "
1380 " for removal.", hostno, removed[i]->bus,
1381 removed[i]->target, removed[i]->lun);
1382 }
1383 kfree(removed[i]);
1384 removed[i] = NULL;
1385 }
1386
1387 /* Notify scsi mid layer of any added devices */
1388 for (i = 0; i < nadded; i++) {
1389 if (scsi_add_device(sh, added[i]->bus,
1390 added[i]->target, added[i]->lun) == 0)
1391 continue;
1392 dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, "
1393 "device not added.\n", hostno, added[i]->bus,
1394 added[i]->target, added[i]->lun);
1395 /* now we have to remove it from h->dev,
1396 * since it didn't get added to scsi mid layer
1397 */
1398 fixup_botched_add(h, added[i]);
1399 }
1400
1401free_and_out:
1402 kfree(added);
1403 kfree(removed);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001404}
1405
1406/*
Joe Perches9e03aa22013-09-03 13:45:58 -07001407 * Lookup bus/target/lun and return corresponding struct hpsa_scsi_dev_t *
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001408 * Assumes h->devlock is held.
1409 */
1410static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h,
1411 int bus, int target, int lun)
1412{
1413 int i;
1414 struct hpsa_scsi_dev_t *sd;
1415
1416 for (i = 0; i < h->ndevices; i++) {
1417 sd = h->dev[i];
1418 if (sd->bus == bus && sd->target == target && sd->lun == lun)
1419 return sd;
1420 }
1421 return NULL;
1422}
1423
1424/* link sdev->hostdata to our per-device structure. */
1425static int hpsa_slave_alloc(struct scsi_device *sdev)
1426{
1427 struct hpsa_scsi_dev_t *sd;
1428 unsigned long flags;
1429 struct ctlr_info *h;
1430
1431 h = sdev_to_hba(sdev);
1432 spin_lock_irqsave(&h->devlock, flags);
1433 sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev),
1434 sdev_id(sdev), sdev->lun);
1435 if (sd != NULL)
1436 sdev->hostdata = sd;
1437 spin_unlock_irqrestore(&h->devlock, flags);
1438 return 0;
1439}
1440
1441static void hpsa_slave_destroy(struct scsi_device *sdev)
1442{
Stephen M. Cameronbcc44252010-02-04 08:41:54 -06001443 /* nothing to do. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001444}
1445
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001446static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
1447{
1448 int i;
1449
1450 if (!h->cmd_sg_list)
1451 return;
1452 for (i = 0; i < h->nr_cmds; i++) {
1453 kfree(h->cmd_sg_list[i]);
1454 h->cmd_sg_list[i] = NULL;
1455 }
1456 kfree(h->cmd_sg_list);
1457 h->cmd_sg_list = NULL;
1458}
1459
1460static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
1461{
1462 int i;
1463
1464 if (h->chainsize <= 0)
1465 return 0;
1466
1467 h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
1468 GFP_KERNEL);
1469 if (!h->cmd_sg_list)
1470 return -ENOMEM;
1471 for (i = 0; i < h->nr_cmds; i++) {
1472 h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
1473 h->chainsize, GFP_KERNEL);
1474 if (!h->cmd_sg_list[i])
1475 goto clean;
1476 }
1477 return 0;
1478
1479clean:
1480 hpsa_free_sg_chain_blocks(h);
1481 return -ENOMEM;
1482}
1483
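/*
 * Scatter-gather chaining: when a command needs more SG descriptors than
 * fit in the command itself (h->max_cmd_sg_entries), the last embedded
 * descriptor is turned into a chain pointer (Ext = HPSA_SG_CHAIN) that
 * references the per-command chain block allocated above, which holds the
 * remaining descriptors.
 */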
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001484static int hpsa_map_sg_chain_block(struct ctlr_info *h,
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001485 struct CommandList *c)
1486{
1487 struct SGDescriptor *chain_sg, *chain_block;
1488 u64 temp64;
1489
1490 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1491 chain_block = h->cmd_sg_list[c->cmdindex];
1492 chain_sg->Ext = HPSA_SG_CHAIN;
1493 chain_sg->Len = sizeof(*chain_sg) *
1494 (c->Header.SGTotal - h->max_cmd_sg_entries);
1495 temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
1496 PCI_DMA_TODEVICE);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001497 if (dma_mapping_error(&h->pdev->dev, temp64)) {
1498 /* prevent subsequent unmapping */
1499 chain_sg->Addr.lower = 0;
1500 chain_sg->Addr.upper = 0;
1501 return -1;
1502 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001503 chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
1504 chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06001505 return 0;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001506}
1507
1508static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
1509 struct CommandList *c)
1510{
1511 struct SGDescriptor *chain_sg;
1512 union u64bit temp64;
1513
1514 if (c->Header.SGTotal <= h->max_cmd_sg_entries)
1515 return;
1516
1517 chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
1518 temp64.val32.lower = chain_sg->Addr.lower;
1519 temp64.val32.upper = chain_sg->Addr.upper;
1520 pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
1521}
1522
Scott Teela09c1442014-02-18 13:57:21 -06001523
1524/* Decode the various types of errors on ioaccel2 path.
1525 * Return 1 for any error that should generate a RAID path retry.
1526 * Return 0 for errors that don't require a RAID path retry.
1527 */
1528static int handle_ioaccel_mode2_error(struct ctlr_info *h,
Scott Teelc3497752014-02-18 13:56:34 -06001529 struct CommandList *c,
1530 struct scsi_cmnd *cmd,
1531 struct io_accel2_cmd *c2)
1532{
1533 int data_len;
Scott Teela09c1442014-02-18 13:57:21 -06001534 int retry = 0;
Scott Teelc3497752014-02-18 13:56:34 -06001535
1536 switch (c2->error_data.serv_response) {
1537 case IOACCEL2_SERV_RESPONSE_COMPLETE:
1538 switch (c2->error_data.status) {
1539 case IOACCEL2_STATUS_SR_TASK_COMP_GOOD:
1540 break;
1541 case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND:
1542 dev_warn(&h->pdev->dev,
1543 "%s: task complete with check condition.\n",
1544 "HP SSD Smart Path");
1545 if (c2->error_data.data_present !=
1546 IOACCEL2_SENSE_DATA_PRESENT)
1547 break;
1548 /* copy the sense data */
1549 data_len = c2->error_data.sense_data_len;
1550 if (data_len > SCSI_SENSE_BUFFERSIZE)
1551 data_len = SCSI_SENSE_BUFFERSIZE;
1552 if (data_len > sizeof(c2->error_data.sense_data_buff))
1553 data_len =
1554 sizeof(c2->error_data.sense_data_buff);
1555 memcpy(cmd->sense_buffer,
1556 c2->error_data.sense_data_buff, data_len);
1557 cmd->result |= SAM_STAT_CHECK_CONDITION;
Scott Teela09c1442014-02-18 13:57:21 -06001558 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001559 break;
1560 case IOACCEL2_STATUS_SR_TASK_COMP_BUSY:
1561 dev_warn(&h->pdev->dev,
1562 "%s: task complete with BUSY status.\n",
1563 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001564 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001565 break;
1566 case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON:
1567 dev_warn(&h->pdev->dev,
1568 "%s: task complete with reservation conflict.\n",
1569 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001570 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001571 break;
1572 case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL:
1573 /* Make scsi midlayer do unlimited retries */
1574 cmd->result = DID_IMM_RETRY << 16;
1575 break;
1576 case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED:
1577 dev_warn(&h->pdev->dev,
1578 "%s: task complete with aborted status.\n",
1579 "HP SSD Smart Path");
Scott Teela09c1442014-02-18 13:57:21 -06001580 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001581 break;
1582 default:
1583 dev_warn(&h->pdev->dev,
1584 "%s: task complete with unrecognized status: 0x%02x\n",
1585 "HP SSD Smart Path", c2->error_data.status);
Scott Teela09c1442014-02-18 13:57:21 -06001586 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001587 break;
1588 }
1589 break;
1590 case IOACCEL2_SERV_RESPONSE_FAILURE:
1591 /* don't expect to get here. */
1592 dev_warn(&h->pdev->dev,
1593 "unexpected delivery or target failure, status = 0x%02x\n",
1594 c2->error_data.status);
Scott Teela09c1442014-02-18 13:57:21 -06001595 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001596 break;
1597 case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE:
1598 break;
1599 case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS:
1600 break;
1601 case IOACCEL2_SERV_RESPONSE_TMF_REJECTED:
1602 dev_warn(&h->pdev->dev, "task management function rejected.\n");
Scott Teela09c1442014-02-18 13:57:21 -06001603 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001604 break;
1605 case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN:
1606 dev_warn(&h->pdev->dev, "task management function invalid LUN\n");
1607 break;
1608 default:
1609 dev_warn(&h->pdev->dev,
1610 "%s: Unrecognized server response: 0x%02x\n",
Scott Teela09c1442014-02-18 13:57:21 -06001611 "HP SSD Smart Path",
1612 c2->error_data.serv_response);
1613 retry = 1;
Scott Teelc3497752014-02-18 13:56:34 -06001614 break;
1615 }
Scott Teela09c1442014-02-18 13:57:21 -06001616
1617 return retry; /* retry on raid path? */
Scott Teelc3497752014-02-18 13:56:34 -06001618}
1619
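/*
 * Completion handling for ioaccel2 (HP SSD Smart Path) commands: on any
 * error the accelerated path cannot resolve, offload is disabled for the
 * device, a controller rescan is scheduled, and the command completes with
 * DID_SOFT_ERROR so the midlayer retries it on the standard RAID path.
 */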
1620static void process_ioaccel2_completion(struct ctlr_info *h,
1621 struct CommandList *c, struct scsi_cmnd *cmd,
1622 struct hpsa_scsi_dev_t *dev)
1623{
1624 struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex];
Scott Teela09c1442014-02-18 13:57:21 -06001625 int raid_retry = 0;
Scott Teelc3497752014-02-18 13:56:34 -06001626
1627 /* check for good status */
1628 if (likely(c2->error_data.serv_response == 0 &&
1629 c2->error_data.status == 0)) {
1630 cmd_free(h, c);
1631 cmd->scsi_done(cmd);
1632 return;
1633 }
1634
1635 /* Any RAID offload error results in retry which will use
1636 * the normal I/O path so the controller can handle whatever's
1637 * wrong.
1638 */
1639 if (is_logical_dev_addr_mode(dev->scsi3addr) &&
1640 c2->error_data.serv_response ==
1641 IOACCEL2_SERV_RESPONSE_FAILURE) {
Scott Teela09c1442014-02-18 13:57:21 -06001642 if (c2->error_data.status ==
1643 IOACCEL2_STATUS_SR_IOACCEL_DISABLED)
Scott Teelc3497752014-02-18 13:56:34 -06001644 dev_warn(&h->pdev->dev,
Scott Teela09c1442014-02-18 13:57:21 -06001645 "%s: Path is unavailable, retrying on standard path.\n",
1646 "HP SSD Smart Path");
1647 else
1648 dev_warn(&h->pdev->dev,
1649 "%s: Error 0x%02x, retrying on standard path.\n",
Scott Teelc3497752014-02-18 13:56:34 -06001650 "HP SSD Smart Path", c2->error_data.status);
Scott Teela09c1442014-02-18 13:57:21 -06001651
Scott Teelc3497752014-02-18 13:56:34 -06001652 dev->offload_enabled = 0;
Scott Teele863d682014-02-18 13:57:05 -06001653 h->drv_req_rescan = 1; /* schedule controller for a rescan */
Scott Teelc3497752014-02-18 13:56:34 -06001654 cmd->result = DID_SOFT_ERROR << 16;
1655 cmd_free(h, c);
1656 cmd->scsi_done(cmd);
1657 return;
1658 }
Scott Teela09c1442014-02-18 13:57:21 -06001659 raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2);
1660 /* If error found, disable Smart Path, schedule a rescan,
1661 * and force a retry on the standard path.
1662 */
1663 if (raid_retry) {
1664 dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n",
1665 "HP SSD Smart Path");
1666 dev->offload_enabled = 0; /* Disable Smart Path */
1667 h->drv_req_rescan = 1; /* schedule controller rescan */
1668 cmd->result = DID_SOFT_ERROR << 16;
1669 }
Scott Teelc3497752014-02-18 13:56:34 -06001670 cmd_free(h, c);
1671 cmd->scsi_done(cmd);
1672}
1673
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05001674static void complete_scsi_command(struct CommandList *cp)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001675{
1676 struct scsi_cmnd *cmd;
1677 struct ctlr_info *h;
1678 struct ErrorInfo *ei;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001679 struct hpsa_scsi_dev_t *dev;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001680
1681 unsigned char sense_key;
1682 unsigned char asc; /* additional sense code */
1683 unsigned char ascq; /* additional sense code qualifier */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05001684 unsigned long sense_data_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001685
1686 ei = cp->err_info;
1687 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
1688 h = cp->h;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001689 dev = cmd->device->hostdata;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001690
1691 scsi_dma_unmap(cmd); /* undo the DMA mappings */
Matt Gatese1f7de02014-02-18 13:55:17 -06001692 if ((cp->cmd_type == CMD_SCSI) &&
1693 (cp->Header.SGTotal > h->max_cmd_sg_entries))
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06001694 hpsa_unmap_sg_chain_block(h, cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001695
1696 cmd->result = (DID_OK << 16); /* host byte */
1697 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
Scott Teelc3497752014-02-18 13:56:34 -06001698
1699 if (cp->cmd_type == CMD_IOACCEL2)
1700 return process_ioaccel2_completion(h, cp, cmd, dev);
1701
Stephen M. Cameron55126722010-02-25 14:03:01 -06001702 cmd->result |= ei->ScsiStatus;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001703
1704 /* copy the sense data whether we need to or not. */
Stephen M. Camerondb111e12011-06-03 09:57:34 -05001705 if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
1706 sense_data_size = SCSI_SENSE_BUFFERSIZE;
1707 else
1708 sense_data_size = sizeof(ei->SenseInfo);
1709 if (ei->SenseLen < sense_data_size)
1710 sense_data_size = ei->SenseLen;
1711
1712 memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001713 scsi_set_resid(cmd, ei->ResidualCnt);
1714
1715 if (ei->CommandStatus == 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001716 cmd_free(h, cp);
Tomas Henzl2cc5bfa2013-08-01 15:14:00 +02001717 cmd->scsi_done(cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001718 return;
1719 }
1720
Matt Gatese1f7de02014-02-18 13:55:17 -06001721 /* For I/O accelerator commands, copy over some fields to the normal
1722 * CISS header used below for error handling.
1723 */
1724 if (cp->cmd_type == CMD_IOACCEL1) {
1725 struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex];
1726 cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd);
1727 cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK;
1728 cp->Header.Tag.lower = c->Tag.lower;
1729 cp->Header.Tag.upper = c->Tag.upper;
1730 memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8);
1731 memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001732
1733 /* Any RAID offload error results in retry which will use
1734 * the normal I/O path so the controller can handle whatever's
1735 * wrong.
1736 */
1737 if (is_logical_dev_addr_mode(dev->scsi3addr)) {
1738 if (ei->CommandStatus == CMD_IOACCEL_DISABLED)
1739 dev->offload_enabled = 0;
1740 cmd->result = DID_SOFT_ERROR << 16;
1741 cmd_free(h, cp);
1742 cmd->scsi_done(cmd);
1743 return;
1744 }
Matt Gatese1f7de02014-02-18 13:55:17 -06001745 }
1746
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001747 /* an error has occurred */
1748 switch (ei->CommandStatus) {
1749
1750 case CMD_TARGET_STATUS:
1751 if (ei->ScsiStatus) {
1752 /* Get sense key */
1753 sense_key = 0xf & ei->SenseInfo[2];
1754 /* Get additional sense code */
1755 asc = ei->SenseInfo[12];
1756 /* Get addition sense code qualifier */
1757 ascq = ei->SenseInfo[13];
1758 }
1759
1760 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) {
Matt Gates3ce438d2013-12-04 17:10:36 -06001761 if (check_for_unit_attention(h, cp))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001762 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001763 if (sense_key == ILLEGAL_REQUEST) {
1764 /*
1765 * SCSI REPORT_LUNS is commonly unsupported on
1766 * Smart Array. Suppress noisy complaint.
1767 */
1768 if (cp->Request.CDB[0] == REPORT_LUNS)
1769 break;
1770
1771 /* If ASC/ASCQ indicate Logical Unit
1772 * Not Supported condition,
1773 */
1774 if ((asc == 0x25) && (ascq == 0x0)) {
1775 dev_warn(&h->pdev->dev, "cp %p "
1776 "has check condition\n", cp);
1777 break;
1778 }
1779 }
1780
1781 if (sense_key == NOT_READY) {
1782 /* If Sense is Not Ready, Logical Unit
1783 * Not ready, Manual Intervention
1784 * required
1785 */
1786 if ((asc == 0x04) && (ascq == 0x03)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001787 dev_warn(&h->pdev->dev, "cp %p "
1788 "has check condition: unit "
1789 "not ready, manual "
1790 "intervention required\n", cp);
1791 break;
1792 }
1793 }
Matt Gates1d3b3602010-02-04 08:43:00 -06001794 if (sense_key == ABORTED_COMMAND) {
1795 /* Aborted command is retryable */
1796 dev_warn(&h->pdev->dev, "cp %p "
1797 "has check condition: aborted command: "
1798 "ASC: 0x%x, ASCQ: 0x%x\n",
1799 cp, asc, ascq);
Stephen M. Cameron2e311fb2013-09-23 13:33:41 -05001800 cmd->result |= DID_SOFT_ERROR << 16;
Matt Gates1d3b3602010-02-04 08:43:00 -06001801 break;
1802 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001803 /* Must be some other type of check condition */
Stephen M. Cameron21b8e4e2012-05-01 11:42:25 -05001804 dev_dbg(&h->pdev->dev, "cp %p has check condition: "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001805 "unknown type: "
1806 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1807 "Returning result: 0x%x, "
1808 "cmd=[%02x %02x %02x %02x %02x "
Mike Miller807be732010-02-04 08:43:26 -06001809 "%02x %02x %02x %02x %02x %02x "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001810 "%02x %02x %02x %02x %02x]\n",
1811 cp, sense_key, asc, ascq,
1812 cmd->result,
1813 cmd->cmnd[0], cmd->cmnd[1],
1814 cmd->cmnd[2], cmd->cmnd[3],
1815 cmd->cmnd[4], cmd->cmnd[5],
1816 cmd->cmnd[6], cmd->cmnd[7],
Mike Miller807be732010-02-04 08:43:26 -06001817 cmd->cmnd[8], cmd->cmnd[9],
1818 cmd->cmnd[10], cmd->cmnd[11],
1819 cmd->cmnd[12], cmd->cmnd[13],
1820 cmd->cmnd[14], cmd->cmnd[15]);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001821 break;
1822 }
1823
1824
1825 /* Problem was not a check condition
1826 * Pass it up to the upper layers...
1827 */
1828 if (ei->ScsiStatus) {
1829 dev_warn(&h->pdev->dev, "cp %p has status 0x%x "
1830 "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, "
1831 "Returning result: 0x%x\n",
1832 cp, ei->ScsiStatus,
1833 sense_key, asc, ascq,
1834 cmd->result);
1835 } else { /* scsi status is zero??? How??? */
1836 dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. "
1837 "Returning no connection.\n", cp),
1838
1839 /* Ordinarily, this case should never happen,
1840 * but there is a bug in some released firmware
1841 * revisions that allows it to happen if, for
1842 * example, a 4100 backplane loses power and
1843 * the tape drive is in it. We assume that
1844 * it's a fatal error of some kind because we
1845 * can't show that it wasn't. We will make it
1846 * look like selection timeout since that is
1847 * the most common reason for this to occur,
1848 * and it's severe enough.
1849 */
1850
1851 cmd->result = DID_NO_CONNECT << 16;
1852 }
1853 break;
1854
1855 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
1856 break;
1857 case CMD_DATA_OVERRUN:
1858 dev_warn(&h->pdev->dev, "cp %p has"
1859 " completed with data overrun "
1860 "reported\n", cp);
1861 break;
1862 case CMD_INVALID: {
1863 /* print_bytes(cp, sizeof(*cp), 1, 0);
1864 print_cmd(cp); */
1865 /* We get CMD_INVALID if you address a non-existent device
1866 * instead of a selection timeout (no response). You will
1867 * see this if you yank out a drive, then try to access it.
1868 * This is kind of a shame because it means that any other
1869 * CMD_INVALID (e.g. driver bug) will get interpreted as a
1870 * missing target. */
1871 cmd->result = DID_NO_CONNECT << 16;
1872 }
1873 break;
1874 case CMD_PROTOCOL_ERR:
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05001875 cmd->result = DID_ERROR << 16;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001876 dev_warn(&h->pdev->dev, "cp %p has "
Stephen M. Cameron256d0ea2012-09-14 16:34:25 -05001877 "protocol error\n", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001878 break;
1879 case CMD_HARDWARE_ERR:
1880 cmd->result = DID_ERROR << 16;
1881 dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp);
1882 break;
1883 case CMD_CONNECTION_LOST:
1884 cmd->result = DID_ERROR << 16;
1885 dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp);
1886 break;
1887 case CMD_ABORTED:
1888 cmd->result = DID_ABORT << 16;
1889 dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n",
1890 cp, ei->ScsiStatus);
1891 break;
1892 case CMD_ABORT_FAILED:
1893 cmd->result = DID_ERROR << 16;
1894 dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp);
1895 break;
1896 case CMD_UNSOLICITED_ABORT:
Stephen M. Cameronf6e76052011-07-26 11:08:52 -05001897 cmd->result = DID_SOFT_ERROR << 16; /* retry the command */
1898 dev_warn(&h->pdev->dev, "cp %p aborted due to an unsolicited "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001899 "abort\n", cp);
1900 break;
1901 case CMD_TIMEOUT:
1902 cmd->result = DID_TIME_OUT << 16;
1903 dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
1904 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06001905 case CMD_UNABORTABLE:
1906 cmd->result = DID_ERROR << 16;
1907 dev_warn(&h->pdev->dev, "Command unabortable\n");
1908 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06001909 case CMD_IOACCEL_DISABLED:
1910 /* This only handles the direct pass-through case since RAID
1911 * offload is handled above. Just attempt a retry.
1912 */
1913 cmd->result = DID_SOFT_ERROR << 16;
1914 dev_warn(&h->pdev->dev,
1915 "cp %p had HP SSD Smart Path error\n", cp);
1916 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001917 default:
1918 cmd->result = DID_ERROR << 16;
1919 dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
1920 cp, ei->CommandStatus);
1921 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001922 cmd_free(h, cp);
Tomas Henzl2cc5bfa2013-08-01 15:14:00 +02001923 cmd->scsi_done(cmd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001924}
1925
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001926static void hpsa_pci_unmap(struct pci_dev *pdev,
1927 struct CommandList *c, int sg_used, int data_direction)
1928{
1929 int i;
1930 union u64bit addr64;
1931
1932 for (i = 0; i < sg_used; i++) {
1933 addr64.val32.lower = c->SG[i].Addr.lower;
1934 addr64.val32.upper = c->SG[i].Addr.upper;
1935 pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len,
1936 data_direction);
1937 }
1938}
1939
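/*
 * Map a single contiguous buffer for DMA and describe it in SG[0].
 * On mapping failure the header SG counts are zeroed so that a later
 * hpsa_pci_unmap() has nothing to undo, and -1 is returned.
 */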
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001940static int hpsa_map_one(struct pci_dev *pdev,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001941 struct CommandList *cp,
1942 unsigned char *buf,
1943 size_t buflen,
1944 int data_direction)
1945{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001946 u64 addr64;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001947
1948 if (buflen == 0 || data_direction == PCI_DMA_NONE) {
1949 cp->Header.SGList = 0;
1950 cp->Header.SGTotal = 0;
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001951 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001952 }
1953
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001954 addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
Shuah Khaneceaae12013-02-20 11:24:34 -06001955 if (dma_mapping_error(&pdev->dev, addr64)) {
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001956 /* Prevent subsequent unmap of something never mapped */
Shuah Khaneceaae12013-02-20 11:24:34 -06001957 cp->Header.SGList = 0;
1958 cp->Header.SGTotal = 0;
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001959 return -1;
Shuah Khaneceaae12013-02-20 11:24:34 -06001960 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001961 cp->SG[0].Addr.lower =
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001962 (u32) (addr64 & (u64) 0x00000000FFFFFFFF);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001963 cp->SG[0].Addr.upper =
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001964 (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001965 cp->SG[0].Len = buflen;
Matt Gatese1d9cbf2014-02-18 13:55:12 -06001966 cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06001967 cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
1968 cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
Stephen M. Camerona2dac132013-02-20 11:24:41 -06001969 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001970}
1971
1972static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
1973 struct CommandList *c)
1974{
1975 DECLARE_COMPLETION_ONSTACK(wait);
1976
1977 c->waiting = &wait;
1978 enqueue_cmd_and_start_io(h, c);
1979 wait_for_completion(&wait);
1980}
1981
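/*
 * Issue a command only if the controller has not been declared locked up;
 * otherwise fake a CMD_HARDWARE_ERR completion so the caller fails cleanly
 * instead of waiting forever on a dead controller.
 */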
Stephen M. Camerona0c12412011-10-26 16:22:04 -05001982static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,
1983 struct CommandList *c)
1984{
1985 unsigned long flags;
1986
1987 /* If controller lockup detected, fake a hardware error. */
1988 spin_lock_irqsave(&h->lock, flags);
1989 if (unlikely(h->lockup_detected)) {
1990 spin_unlock_irqrestore(&h->lock, flags);
1991 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
1992 } else {
1993 spin_unlock_irqrestore(&h->lock, flags);
1994 hpsa_scsi_do_simple_cmd_core(h, c);
1995 }
1996}
1997
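/*
 * Internally issued commands are retried on UNIT ATTENTION or BUSY, up to
 * MAX_DRIVER_CMD_RETRIES times; the first few attempts are immediate, and
 * later retries sleep between tries, doubling the delay from 10 ms up to a
 * 1 second cap.
 */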
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05001998#define MAX_DRIVER_CMD_RETRIES 25
Stephen M. Cameronedd16362009-12-08 14:09:11 -08001999static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
2000 struct CommandList *c, int data_direction)
2001{
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002002 int backoff_time = 10, retry_count = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002003
2004 do {
Joe Perches7630abd2011-05-08 23:32:40 -07002005 memset(c->err_info, 0, sizeof(*c->err_info));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002006 hpsa_scsi_do_simple_cmd_core(h, c);
2007 retry_count++;
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002008 if (retry_count > 3) {
2009 msleep(backoff_time);
2010 if (backoff_time < 1000)
2011 backoff_time *= 2;
2012 }
Matt Bondurant852af202012-05-01 11:42:35 -05002013 } while ((check_for_unit_attention(h, c) ||
Stephen M. Cameron9c2fc162012-05-01 11:42:40 -05002014 check_for_busy(h, c)) &&
2015 retry_count <= MAX_DRIVER_CMD_RETRIES);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002016 hpsa_pci_unmap(h->pdev, c, 1, data_direction);
2017}
2018
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002019static void hpsa_print_cmd(struct ctlr_info *h, char *txt,
2020 struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002021{
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002022 const u8 *cdb = c->Request.CDB;
2023 const u8 *lun = c->Header.LUN.LunAddrBytes;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002024
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002025 dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x"
2026 " CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2027 txt, lun[0], lun[1], lun[2], lun[3],
2028 lun[4], lun[5], lun[6], lun[7],
2029 cdb[0], cdb[1], cdb[2], cdb[3],
2030 cdb[4], cdb[5], cdb[6], cdb[7],
2031 cdb[8], cdb[9], cdb[10], cdb[11],
2032 cdb[12], cdb[13], cdb[14], cdb[15]);
2033}
2034
2035static void hpsa_scsi_interpret_error(struct ctlr_info *h,
2036 struct CommandList *cp)
2037{
2038 const struct ErrorInfo *ei = cp->err_info;
2039 struct device *d = &cp->h->pdev->dev;
2040 const u8 *sd = ei->SenseInfo;
2041
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002042 switch (ei->CommandStatus) {
2043 case CMD_TARGET_STATUS:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002044 hpsa_print_cmd(h, "SCSI status", cp);
2045 if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION)
2046 dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n",
2047 sd[2] & 0x0f, sd[12], sd[13]);
2048 else
2049 dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002050 if (ei->ScsiStatus == 0)
2051 dev_warn(d, "SCSI status is abnormally zero. "
2052 "(probably indicates selection timeout "
2053 "reported incorrectly due to a known "
2054 "firmware bug, circa July, 2001.)\n");
2055 break;
2056 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002057 break;
2058 case CMD_DATA_OVERRUN:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002059 hpsa_print_cmd(h, "overrun condition", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002060 break;
2061 case CMD_INVALID: {
2062 /* controller unfortunately reports SCSI pass-throughs
2063 * to non-existent targets as invalid commands.
2064 */
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002065 hpsa_print_cmd(h, "invalid command", cp);
2066 dev_warn(d, "probably means device no longer present\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002067 }
2068 break;
2069 case CMD_PROTOCOL_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002070 hpsa_print_cmd(h, "protocol error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002071 break;
2072 case CMD_HARDWARE_ERR:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002073 hpsa_print_cmd(h, "hardware error", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002074 break;
2075 case CMD_CONNECTION_LOST:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002076 hpsa_print_cmd(h, "connection lost", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002077 break;
2078 case CMD_ABORTED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002079 hpsa_print_cmd(h, "aborted", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002080 break;
2081 case CMD_ABORT_FAILED:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002082 hpsa_print_cmd(h, "abort failed", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002083 break;
2084 case CMD_UNSOLICITED_ABORT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002085 hpsa_print_cmd(h, "unsolicited abort", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002086 break;
2087 case CMD_TIMEOUT:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002088 hpsa_print_cmd(h, "timed out", cp);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002089 break;
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002090 case CMD_UNABORTABLE:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002091 hpsa_print_cmd(h, "unabortable", cp);
Stephen M. Cameron1d5e2ed2011-01-07 10:55:48 -06002092 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002093 default:
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002094 hpsa_print_cmd(h, "unknown status", cp);
2095 dev_warn(d, "Unknown command status %x\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002096 ei->CommandStatus);
2097 }
2098}
2099
2100static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002101 u16 page, unsigned char *buf,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002102 unsigned char bufsize)
2103{
2104 int rc = IO_OK;
2105 struct CommandList *c;
2106 struct ErrorInfo *ei;
2107
2108 c = cmd_special_alloc(h);
2109
2110 if (c == NULL) { /* trouble... */
2111 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06002112 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002113 }
2114
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002115 if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
2116 page, scsi3addr, TYPE_CMD)) {
2117 rc = -1;
2118 goto out;
2119 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002120 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2121 ei = c->err_info;
2122 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002123 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002124 rc = -1;
2125 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002126out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002127 cmd_special_free(h, c);
2128 return rc;
2129}
2130
Scott Teelbf711ac2014-02-18 13:56:39 -06002131static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr,
2132 u8 reset_type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002133{
2134 int rc = IO_OK;
2135 struct CommandList *c;
2136 struct ErrorInfo *ei;
2137
2138 c = cmd_special_alloc(h);
2139
2140 if (c == NULL) { /* trouble... */
2141 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
Stephen M. Camerone9ea04a2010-02-25 14:03:06 -06002142 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002143 }
2144
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002145 /* fill_cmd can't fail here, no data buffer to map. */
Scott Teelbf711ac2014-02-18 13:56:39 -06002146 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
2147 scsi3addr, TYPE_MSG);
2148 c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002149 hpsa_scsi_do_simple_cmd_core(h, c);
2150 /* no unmap needed here because no data xfer. */
2151
2152 ei = c->err_info;
2153 if (ei->CommandStatus != 0) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002154 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002155 rc = -1;
2156 }
2157 cmd_special_free(h, c);
2158 return rc;
2159}
2160
2161static void hpsa_get_raid_level(struct ctlr_info *h,
2162 unsigned char *scsi3addr, unsigned char *raid_level)
2163{
2164 int rc;
2165 unsigned char *buf;
2166
2167 *raid_level = RAID_UNKNOWN;
2168 buf = kzalloc(64, GFP_KERNEL);
2169 if (!buf)
2170 return;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002171 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002172 if (rc == 0)
2173 *raid_level = buf[8];
2174 if (*raid_level > RAID_UNKNOWN)
2175 *raid_level = RAID_UNKNOWN;
2176 kfree(buf);
2177 return;
2178}
2179
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002180#define HPSA_MAP_DEBUG
2181#ifdef HPSA_MAP_DEBUG
2182static void hpsa_debug_map_buff(struct ctlr_info *h, int rc,
2183 struct raid_map_data *map_buff)
2184{
2185 struct raid_map_disk_data *dd = &map_buff->data[0];
2186 int map, row, col;
2187 u16 map_cnt, row_cnt, disks_per_row;
2188
2189 if (rc != 0)
2190 return;
2191
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002192 /* Show details only if debugging has been activated. */
2193 if (h->raid_offload_debug < 2)
2194 return;
2195
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002196 dev_info(&h->pdev->dev, "structure_size = %u\n",
2197 le32_to_cpu(map_buff->structure_size));
2198 dev_info(&h->pdev->dev, "volume_blk_size = %u\n",
2199 le32_to_cpu(map_buff->volume_blk_size));
2200 dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n",
2201 le64_to_cpu(map_buff->volume_blk_cnt));
2202 dev_info(&h->pdev->dev, "physicalBlockShift = %u\n",
2203 map_buff->phys_blk_shift);
2204 dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n",
2205 map_buff->parity_rotation_shift);
2206 dev_info(&h->pdev->dev, "strip_size = %u\n",
2207 le16_to_cpu(map_buff->strip_size));
2208 dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n",
2209 le64_to_cpu(map_buff->disk_starting_blk));
2210 dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n",
2211 le64_to_cpu(map_buff->disk_blk_cnt));
2212 dev_info(&h->pdev->dev, "data_disks_per_row = %u\n",
2213 le16_to_cpu(map_buff->data_disks_per_row));
2214 dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n",
2215 le16_to_cpu(map_buff->metadata_disks_per_row));
2216 dev_info(&h->pdev->dev, "row_cnt = %u\n",
2217 le16_to_cpu(map_buff->row_cnt));
2218 dev_info(&h->pdev->dev, "layout_map_count = %u\n",
2219 le16_to_cpu(map_buff->layout_map_count));
Scott Teeldd0e19f2014-02-18 13:57:31 -06002220 dev_info(&h->pdev->dev, "flags = %u\n",
2221 le16_to_cpu(map_buff->flags));
2222 if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON)
2223 dev_info(&h->pdev->dev, "encrypytion = ON\n");
2224 else
2225 dev_info(&h->pdev->dev, "encrypytion = OFF\n");
2226 dev_info(&h->pdev->dev, "dekindex = %u\n",
2227 le16_to_cpu(map_buff->dekindex));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002228
2229 map_cnt = le16_to_cpu(map_buff->layout_map_count);
2230 for (map = 0; map < map_cnt; map++) {
2231 dev_info(&h->pdev->dev, "Map%u:\n", map);
2232 row_cnt = le16_to_cpu(map_buff->row_cnt);
2233 for (row = 0; row < row_cnt; row++) {
2234 dev_info(&h->pdev->dev, " Row%u:\n", row);
2235 disks_per_row =
2236 le16_to_cpu(map_buff->data_disks_per_row);
2237 for (col = 0; col < disks_per_row; col++, dd++)
2238 dev_info(&h->pdev->dev,
2239 " D%02u: h=0x%04x xor=%u,%u\n",
2240 col, dd->ioaccel_handle,
2241 dd->xor_mult[0], dd->xor_mult[1]);
2242 disks_per_row =
2243 le16_to_cpu(map_buff->metadata_disks_per_row);
2244 for (col = 0; col < disks_per_row; col++, dd++)
2245 dev_info(&h->pdev->dev,
2246 " M%02u: h=0x%04x xor=%u,%u\n",
2247 col, dd->ioaccel_handle,
2248 dd->xor_mult[0], dd->xor_mult[1]);
2249 }
2250 }
2251}
2252#else
2253static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h,
2254 __attribute__((unused)) int rc,
2255 __attribute__((unused)) struct raid_map_data *map_buff)
2256{
2257}
2258#endif
2259
2260static int hpsa_get_raid_map(struct ctlr_info *h,
2261 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2262{
2263 int rc = 0;
2264 struct CommandList *c;
2265 struct ErrorInfo *ei;
2266
2267 c = cmd_special_alloc(h);
2268 if (c == NULL) {
2269 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2270 return -ENOMEM;
2271 }
2272 if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map,
2273 sizeof(this_device->raid_map), 0,
2274 scsi3addr, TYPE_CMD)) {
2275 dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n");
2276 cmd_special_free(h, c);
2277 return -ENOMEM;
2278 }
2279 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2280 ei = c->err_info;
2281 if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002282 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002283 cmd_special_free(h, c);
2284 return -1;
2285 }
2286 cmd_special_free(h, c);
2287
2288 /* @todo in the future, dynamically allocate RAID map memory */
2289 if (le32_to_cpu(this_device->raid_map.structure_size) >
2290 sizeof(this_device->raid_map)) {
2291 dev_warn(&h->pdev->dev, "RAID map size is too large!\n");
2292 rc = -1;
2293 }
2294 hpsa_debug_map_buff(h, rc, &this_device->raid_map);
2295 return rc;
2296}
2297
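/*
 * Check whether a device supports a given VPD page by walking the
 * supported-pages VPD page (HPSA_VPD_SUPPORTED_PAGES): the header is read
 * first to learn how many page codes follow, then the full list is fetched
 * and scanned for the requested page.
 */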
Stephen M. Cameron1b70150a2014-02-18 13:57:16 -06002298static int hpsa_vpd_page_supported(struct ctlr_info *h,
2299 unsigned char scsi3addr[], u8 page)
2300{
2301 int rc;
2302 int i;
2303 int pages;
2304 unsigned char *buf, bufsize;
2305
2306 buf = kzalloc(256, GFP_KERNEL);
2307 if (!buf)
2308 return 0;
2309
2310 /* Get the size of the page list first */
2311 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2312 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2313 buf, HPSA_VPD_HEADER_SZ);
2314 if (rc != 0)
2315 goto exit_unsupported;
2316 pages = buf[3];
2317 if ((pages + HPSA_VPD_HEADER_SZ) <= 255)
2318 bufsize = pages + HPSA_VPD_HEADER_SZ;
2319 else
2320 bufsize = 255;
2321
2322 /* Get the whole VPD page list */
2323 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
2324 VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES,
2325 buf, bufsize);
2326 if (rc != 0)
2327 goto exit_unsupported;
2328
2329 pages = buf[3];
2330 for (i = 1; i <= pages; i++)
2331 if (buf[3 + i] == page)
2332 goto exit_supported;
2333exit_unsupported:
2334 kfree(buf);
2335 return 0;
2336exit_supported:
2337 kfree(buf);
2338 return 1;
2339}
2340
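/*
 * Determine whether I/O accelerator (HP SSD Smart Path) offload is
 * configured and enabled for a logical volume by reading the
 * HPSA_VPD_LV_IOACCEL_STATUS VPD page and decoding byte 4: bit 0 means
 * offload is configured, bit 1 means it is enabled. Offload is only left
 * enabled if a valid RAID map can also be fetched.
 */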
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002341static void hpsa_get_ioaccel_status(struct ctlr_info *h,
2342 unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device)
2343{
2344 int rc;
2345 unsigned char *buf;
2346 u8 ioaccel_status;
2347
2348 this_device->offload_config = 0;
2349 this_device->offload_enabled = 0;
2350
2351 buf = kzalloc(64, GFP_KERNEL);
2352 if (!buf)
2353 return;
Stephen M. Cameron1b70150a2014-02-18 13:57:16 -06002354 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS))
2355 goto out;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002356 rc = hpsa_scsi_do_inquiry(h, scsi3addr,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002357 VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002358 if (rc != 0)
2359 goto out;
2360
2361#define IOACCEL_STATUS_BYTE 4
2362#define OFFLOAD_CONFIGURED_BIT 0x01
2363#define OFFLOAD_ENABLED_BIT 0x02
2364 ioaccel_status = buf[IOACCEL_STATUS_BYTE];
2365 this_device->offload_config =
2366 !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
2367 if (this_device->offload_config) {
2368 this_device->offload_enabled =
2369 !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
2370 if (hpsa_get_raid_map(h, scsi3addr, this_device))
2371 this_device->offload_enabled = 0;
2372 }
2373out:
2374 kfree(buf);
2375 return;
2376}
2377
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002378/* Get the device id from inquiry page 0x83 */
2379static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,
2380 unsigned char *device_id, int buflen)
2381{
2382 int rc;
2383 unsigned char *buf;
2384
2385 if (buflen > 16)
2386 buflen = 16;
2387 buf = kzalloc(64, GFP_KERNEL);
2388 if (!buf)
2389 return -1;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06002390 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002391 if (rc == 0)
2392 memcpy(device_id, &buf[8], buflen);
2393 kfree(buf);
2394 return rc != 0;
2395}
2396
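/*
 * Issue CISS REPORT LUNS (logical or physical) addressed to the controller
 * itself (all-zero scsi3addr) and verify that the firmware honored the
 * requested extended-response format before trusting the returned list.
 */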
2397static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
2398 struct ReportLUNdata *buf, int bufsize,
2399 int extended_response)
2400{
2401 int rc = IO_OK;
2402 struct CommandList *c;
2403 unsigned char scsi3addr[8];
2404 struct ErrorInfo *ei;
2405
2406 c = cmd_special_alloc(h);
2407 if (c == NULL) { /* trouble... */
2408 dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
2409 return -1;
2410 }
Stephen M. Camerone89c0ae2010-02-04 08:42:04 -06002411 /* address the controller */
2412 memset(scsi3addr, 0, sizeof(scsi3addr));
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002413 if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
2414 buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
2415 rc = -1;
2416 goto out;
2417 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002418 if (extended_response)
2419 c->Request.CDB[1] = extended_response;
2420 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
2421 ei = c->err_info;
2422 if (ei->CommandStatus != 0 &&
2423 ei->CommandStatus != CMD_DATA_UNDERRUN) {
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06002424 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002425 rc = -1;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002426 } else {
2427 if (buf->extended_response_flag != extended_response) {
2428 dev_err(&h->pdev->dev,
2429 "report luns requested format %u, got %u\n",
2430 extended_response,
2431 buf->extended_response_flag);
2432 rc = -1;
2433 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002434 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06002435out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002436 cmd_special_free(h, c);
2437 return rc;
2438}
2439
2440static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h,
2441 struct ReportLUNdata *buf,
2442 int bufsize, int extended_response)
2443{
2444 return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response);
2445}
2446
2447static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h,
2448 struct ReportLUNdata *buf, int bufsize)
2449{
2450 return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0);
2451}
2452
2453static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,
2454 int bus, int target, int lun)
2455{
2456 device->bus = bus;
2457 device->target = target;
2458 device->lun = lun;
2459}
2460
Stephen M. Cameron98465902014-02-21 16:25:00 -06002461/* Use VPD inquiry to get details of volume status */
2462static int hpsa_get_volume_status(struct ctlr_info *h,
2463 unsigned char scsi3addr[])
2464{
2465 int rc;
2466 int status;
2467 int size;
2468 unsigned char *buf;
2469
2470 buf = kzalloc(64, GFP_KERNEL);
2471 if (!buf)
2472 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2473
2474 /* Does controller have VPD for logical volume status? */
2475 if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) {
2476 dev_warn(&h->pdev->dev, "Logical volume status VPD page is unsupported.\n");
2477 goto exit_failed;
2478 }
2479
2480 /* Get the size of the VPD return buffer */
2481 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2482 buf, HPSA_VPD_HEADER_SZ);
2483 if (rc != 0) {
2484 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2485 goto exit_failed;
2486 }
2487 size = buf[3];
2488
2489 /* Now get the whole VPD buffer */
2490 rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS,
2491 buf, size + HPSA_VPD_HEADER_SZ);
2492 if (rc != 0) {
2493 dev_warn(&h->pdev->dev, "Logical volume status VPD inquiry failed.\n");
2494 goto exit_failed;
2495 }
2496 status = buf[4]; /* status byte */
2497
2498 kfree(buf);
2499 return status;
2500exit_failed:
2501 kfree(buf);
2502 return HPSA_VPD_LV_STATUS_UNSUPPORTED;
2503}
2504
2505/* Determine offline status of a volume.
2506 * Return either:
2507 * 0 (not offline)
2508 * -1 (offline for unknown reasons)
2509 * # (integer code indicating one of several NOT READY states
2510 * describing why a volume is to be kept offline)
2511 */
2512static unsigned char hpsa_volume_offline(struct ctlr_info *h,
2513 unsigned char scsi3addr[])
2514{
2515 struct CommandList *c;
2516 unsigned char *sense, sense_key, asc, ascq;
2517 int ldstat = 0;
2518 u16 cmd_status;
2519 u8 scsi_status;
2520#define ASC_LUN_NOT_READY 0x04
2521#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04
2522#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02
2523
2524 c = cmd_alloc(h);
2525 if (!c)
2526 return 0;
2527 (void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD);
2528 hpsa_scsi_do_simple_cmd_core(h, c);
2529 sense = c->err_info->SenseInfo;
2530 sense_key = sense[2];
2531 asc = sense[12];
2532 ascq = sense[13];
2533 cmd_status = c->err_info->CommandStatus;
2534 scsi_status = c->err_info->ScsiStatus;
2535 cmd_free(h, c);
2536 /* Is the volume 'not ready'? */
2537 if (cmd_status != CMD_TARGET_STATUS ||
2538 scsi_status != SAM_STAT_CHECK_CONDITION ||
2539 sense_key != NOT_READY ||
2540 asc != ASC_LUN_NOT_READY) {
2541 return 0;
2542 }
2543
2544 /* Determine the reason for not ready state */
2545 ldstat = hpsa_get_volume_status(h, scsi3addr);
2546
2547 /* Keep volume offline in certain cases: */
2548 switch (ldstat) {
2549 case HPSA_LV_UNDERGOING_ERASE:
2550 case HPSA_LV_UNDERGOING_RPI:
2551 case HPSA_LV_PENDING_RPI:
2552 case HPSA_LV_ENCRYPTED_NO_KEY:
2553 case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER:
2554 case HPSA_LV_UNDERGOING_ENCRYPTION:
2555 case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING:
2556 case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
2557 return ldstat;
2558 case HPSA_VPD_LV_STATUS_UNSUPPORTED:
2559 /* If VPD status page isn't available,
2560 * use ASC/ASCQ to determine state
2561 */
2562 if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) ||
2563 (ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ))
2564 return ldstat;
2565 break;
2566 default:
2567 break;
2568 }
2569 return 0;
2570}
2571
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002572static int hpsa_update_device_info(struct ctlr_info *h,
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002573 unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,
2574 unsigned char *is_OBDR_device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002575{
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002576
2577#define OBDR_SIG_OFFSET 43
2578#define OBDR_TAPE_SIG "$DR-10"
2579#define OBDR_SIG_LEN (sizeof(OBDR_TAPE_SIG) - 1)
2580#define OBDR_TAPE_INQ_SIZE (OBDR_SIG_OFFSET + OBDR_SIG_LEN)
2581
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06002582 unsigned char *inq_buff;
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002583 unsigned char *obdr_sig;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002584
Stephen M. Cameronea6d3bc2010-02-04 08:42:09 -06002585 inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002586 if (!inq_buff)
2587 goto bail_out;
2588
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002589 /* Do an inquiry to the device to see what it is. */
2590 if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff,
2591 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) {
2592 /* Inquiry failed (msg printed already) */
2593 dev_err(&h->pdev->dev,
2594 "hpsa_update_device_info: inquiry failed\n");
2595 goto bail_out;
2596 }
2597
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002598 this_device->devtype = (inq_buff[0] & 0x1f);
2599 memcpy(this_device->scsi3addr, scsi3addr, 8);
2600 memcpy(this_device->vendor, &inq_buff[8],
2601 sizeof(this_device->vendor));
2602 memcpy(this_device->model, &inq_buff[16],
2603 sizeof(this_device->model));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002604 memset(this_device->device_id, 0,
2605 sizeof(this_device->device_id));
2606 hpsa_get_device_id(h, scsi3addr, this_device->device_id,
2607 sizeof(this_device->device_id));
2608
2609 if (this_device->devtype == TYPE_DISK &&
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002610 is_logical_dev_addr_mode(scsi3addr)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002611 hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002612 if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC)
2613 hpsa_get_ioaccel_status(h, scsi3addr, this_device);
Stephen M. Cameron98465902014-02-21 16:25:00 -06002614 this_device->volume_offline =
2615 hpsa_volume_offline(h, scsi3addr);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002616 } else {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002617 this_device->raid_level = RAID_UNKNOWN;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002618 this_device->offload_config = 0;
2619 this_device->offload_enabled = 0;
Stephen M. Cameron98465902014-02-21 16:25:00 -06002620 this_device->volume_offline = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002621 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002622
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002623 if (is_OBDR_device) {
2624 /* See if this is a One-Button-Disaster-Recovery device
2625 * by looking for "$DR-10" at offset 43 in inquiry data.
2626 */
2627 obdr_sig = &inq_buff[OBDR_SIG_OFFSET];
2628 *is_OBDR_device = (this_device->devtype == TYPE_ROM &&
2629 strncmp(obdr_sig, OBDR_TAPE_SIG,
2630 OBDR_SIG_LEN) == 0);
2631 }
2632
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002633 kfree(inq_buff);
2634 return 0;
2635
2636bail_out:
2637 kfree(inq_buff);
2638 return 1;
2639}
2640
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002641static unsigned char *ext_target_model[] = {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002642 "MSA2012",
2643 "MSA2024",
2644 "MSA2312",
2645 "MSA2324",
Stephen M. Cameronfda38512011-05-03 15:00:07 -05002646 "P2000 G3 SAS",
Stephen M. Camerone06c8e52013-09-23 13:33:56 -05002647 "MSA 2040 SAS",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002648 NULL,
2649};
2650
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002651static int is_ext_target(struct ctlr_info *h, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002652{
2653 int i;
2654
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002655 for (i = 0; ext_target_model[i]; i++)
2656 if (strncmp(device->model, ext_target_model[i],
2657 strlen(ext_target_model[i])) == 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002658 return 1;
2659 return 0;
2660}
2661
2662/* Helper function to assign bus, target, lun mapping of devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002663 * Puts non-external target logical volumes on bus 0, external target logical
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002664 * volumes on bus 1, physical devices on bus 2. and the hba on bus 3.
2665 * Logical drive target and lun are assigned at this time, but
2666 * physical device lun and target assignment are deferred (assigned
2667 * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.)
2668 */
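/*
 * Illustrative decode of the 32-bit lunid for a logical volume (values
 * below are hypothetical): for an external target array, lunid 0x00120005
 * maps to bus 1, target 0x0012, lun 0x05; for an internal logical volume
 * the same field maps to bus 0, target 0, lun = lunid & 0x3fff.
 */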
2669static void figure_bus_target_lun(struct ctlr_info *h,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002670 u8 *lunaddrbytes, struct hpsa_scsi_dev_t *device)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002671{
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002672 u32 lunid = le32_to_cpu(*((__le32 *) lunaddrbytes));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002673
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002674 if (!is_logical_dev_addr_mode(lunaddrbytes)) {
2675 /* physical device, target and lun filled in later */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002676 if (is_hba_lunid(lunaddrbytes))
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002677 hpsa_set_bus_target_lun(device, 3, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002678 else
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002679 /* defer target, lun assignment for physical devices */
2680 hpsa_set_bus_target_lun(device, 2, -1, -1);
2681 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002682 }
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002683 /* It's a logical device */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002684 if (is_ext_target(h, device)) {
2685 /* external target way, put logicals on bus 1
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002686 * and match target/lun numbers box
2687 * reports, other smart array, bus 0, target 0, match lunid
2688 */
2689 hpsa_set_bus_target_lun(device,
2690 1, (lunid >> 16) & 0x3fff, lunid & 0x00ff);
2691 return;
2692 }
2693 hpsa_set_bus_target_lun(device, 0, 0, lunid & 0x3fff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002694}
2695
2696/*
2697 * If there is no lun 0 on a target, linux won't find any devices.
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002698 * For the external targets (arrays), we have to manually detect the enclosure
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002699 * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report
2700 * it for some reason. *tmpdevice is the target we're adding,
2701 * this_device is a pointer into the current element of currentsd[]
2702 * that we're building up in update_scsi_devices(), below.
2703 * lunzerobits is a bitmap that tracks which targets already have a
2704 * lun 0 assigned.
2705 * Returns 1 if an enclosure was added, 0 if not.
2706 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002707static int add_ext_target_dev(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002708 struct hpsa_scsi_dev_t *tmpdevice,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002709 struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002710 unsigned long lunzerobits[], int *n_ext_target_devs)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002711{
2712 unsigned char scsi3addr[8];
2713
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002714 if (test_bit(tmpdevice->target, lunzerobits))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002715 return 0; /* There is already a lun 0 on this target. */
2716
2717 if (!is_logical_dev_addr_mode(lunaddrbytes))
2718 return 0; /* It's the logical targets that may lack lun 0. */
2719
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002720 if (!is_ext_target(h, tmpdevice))
2721 return 0; /* Only external target devices have this problem. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002722
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002723 if (tmpdevice->lun == 0) /* if lun is 0, then we have a lun 0. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002724 return 0;
2725
Stephen M. Cameronc4f8a292011-01-07 10:55:43 -06002726 memset(scsi3addr, 0, 8);
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002727 scsi3addr[3] = tmpdevice->target;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002728 if (is_hba_lunid(scsi3addr))
2729 return 0; /* Don't add the RAID controller here. */
2730
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002731 if (is_scsi_rev_5(h))
2732 return 0; /* p1210m doesn't need to do this. */
2733
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002734 if (*n_ext_target_devs >= MAX_EXT_TARGETS) {
Scott Teelaca4a522012-01-19 14:01:19 -06002735 dev_warn(&h->pdev->dev, "Maximum number of external "
2736 "target devices exceeded. Check your hardware "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002737 "configuration.");
2738 return 0;
2739 }
2740
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002741 if (hpsa_update_device_info(h, scsi3addr, this_device, NULL))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002742 return 0;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002743 (*n_ext_target_devs)++;
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06002744 hpsa_set_bus_target_lun(this_device,
2745 tmpdevice->bus, tmpdevice->target, 0);
2746 set_bit(tmpdevice->target, lunzerobits);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002747 return 1;
2748}
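
/*
 * Sketch of add_ext_target_dev() in action, for a hypothetical external
 * target logical drive reported at target 4, lun 2: no lun 0 has been
 * seen for target 4 yet, so an enclosure device is synthesized at
 * scsi3addr { 0, 0, 0, 4, 0, 0, 0, 0 }, added on tmpdevice's bus/target
 * with lun 0, and bit 4 of lunzerobits is set so only one enclosure is
 * added per target.
 */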
2749
2750/*
Scott Teel54b6e9e2014-02-18 13:56:45 -06002751 * Get address of physical disk used for an ioaccel2 mode command:
2752 * 1. Extract ioaccel2 handle from the command.
2753 * 2. Find a matching ioaccel2 handle from list of physical disks.
2754 * 3. Return:
2755 * 1 and set scsi3addr to the address of the matching physical disk, or
2756 * 0 if no matching physical disk was found.
2757 */
2758static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h,
2759 struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr)
2760{
2761 struct ReportExtendedLUNdata *physicals = NULL;
2762 int responsesize = 24; /* size of physical extended response */
2763 int extended = 2; /* flag forces reporting 'other dev info'. */
2764 int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize;
2765 u32 nphysicals = 0; /* number of reported physical devs */
2766 int found = 0; /* found match (1) or not (0) */
2767 u32 find; /* handle we need to match */
2768 int i;
2769 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
2770 struct hpsa_scsi_dev_t *d; /* device of request being aborted */
2771 struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */
2772 u32 it_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2773 u32 scsi_nexus; /* 4 byte device handle for the ioaccel2 cmd */
2774
2775 if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2)
2776 return 0; /* no match */
2777
2778 /* point to the ioaccel2 device handle */
2779 c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex];
2780 if (c2a == NULL)
2781 return 0; /* no match */
2782
2783 scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd;
2784 if (scmd == NULL)
2785 return 0; /* no match */
2786
2787 d = scmd->device->hostdata;
2788 if (d == NULL)
2789 return 0; /* no match */
2790
2791 it_nexus = cpu_to_le32((u32) d->ioaccel_handle);
2792 scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus);
2793 find = c2a->scsi_nexus;
2794
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002795 if (h->raid_offload_debug > 0)
2796 dev_info(&h->pdev->dev,
2797 "%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
2798 __func__, scsi_nexus,
2799 d->device_id[0], d->device_id[1], d->device_id[2],
2800 d->device_id[3], d->device_id[4], d->device_id[5],
2801 d->device_id[6], d->device_id[7], d->device_id[8],
2802 d->device_id[9], d->device_id[10], d->device_id[11],
2803 d->device_id[12], d->device_id[13], d->device_id[14],
2804 d->device_id[15]);
2805
Scott Teel54b6e9e2014-02-18 13:56:45 -06002806 /* Get the list of physical devices */
2807 physicals = kzalloc(reportsize, GFP_KERNEL);
 if (!physicals) /* bail out if the allocation failed */
 return 0; /* no match */
2808 if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals,
2809 reportsize, extended)) {
2810 dev_err(&h->pdev->dev,
2811 "Can't lookup %s device handle: report physical LUNs failed.\n",
2812 "HP SSD Smart Path");
2813 kfree(physicals);
2814 return 0;
2815 }
2816 nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) /
2817 responsesize;
2818
2820 /* find ioaccel2 handle in list of physicals: */
2821 for (i = 0; i < nphysicals; i++) {
2822 /* handle is in bytes 28-31 of each lun */
2823 if (memcmp(&((struct ReportExtendedLUNdata *)
2824 physicals)->LUN[i][20], &find, 4) != 0) {
2825 continue; /* didn't match */
2826 }
2827 found = 1;
2828 memcpy(scsi3addr, &((struct ReportExtendedLUNdata *)
2829 physicals)->LUN[i][0], 8);
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06002830 if (h->raid_offload_debug > 0)
2831 dev_info(&h->pdev->dev,
2832 "%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
2833 __func__, find,
2834 ((struct ReportExtendedLUNdata *)
2835 physicals)->LUN[i][20],
2836 scsi3addr[0], scsi3addr[1], scsi3addr[2],
2837 scsi3addr[3], scsi3addr[4], scsi3addr[5],
2838 scsi3addr[6], scsi3addr[7]);
Scott Teel54b6e9e2014-02-18 13:56:45 -06002839 break; /* found it */
2840 }
2841
2842 kfree(physicals);
2843 if (found)
2844 return 1;
2845 else
2846 return 0;
2847
2848}

2849/*
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002850 * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev,
2851 * logdev. The number of luns in physdev and logdev are returned in
2852 * *nphysicals and *nlogicals, respectively.
2853 * Returns 0 on success, -1 otherwise.
2854 */
2855static int hpsa_gather_lun_info(struct ctlr_info *h,
2856 int reportlunsize,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002857 struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002858 struct ReportLUNdata *logdev, u32 *nlogicals)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002859{
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002860 int physical_entry_size = 8;
2861
2862 *physical_mode = 0;
2863
2864 /* For I/O accelerator mode we need to read physical device handles */
Mike MIller317d4ad2014-02-18 13:56:20 -06002865 if (h->transMethod & CFGTBL_Trans_io_accel1 ||
2866 h->transMethod & CFGTBL_Trans_io_accel2) {
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002867 *physical_mode = HPSA_REPORT_PHYS_EXTENDED;
2868 physical_entry_size = 24;
2869 }
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002870 if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002871 *physical_mode)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002872 dev_err(&h->pdev->dev, "report physical LUNs failed.\n");
2873 return -1;
2874 }
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002875 *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) /
2876 physical_entry_size;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002877 if (*nphysicals > HPSA_MAX_PHYS_LUN) {
2878 dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."
2879 " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2880 *nphysicals - HPSA_MAX_PHYS_LUN);
2881 *nphysicals = HPSA_MAX_PHYS_LUN;
2882 }
2883 if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) {
2884 dev_err(&h->pdev->dev, "report logical LUNs failed.\n");
2885 return -1;
2886 }
Stephen M. Cameron6df1e952010-02-04 08:42:19 -06002887 *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002888 /* Reject Logicals in excess of our max capability. */
2889 if (*nlogicals > HPSA_MAX_LUN) {
2890 dev_warn(&h->pdev->dev,
2891 "maximum logical LUNs (%d) exceeded. "
2892 "%d LUNs ignored.\n", HPSA_MAX_LUN,
2893 *nlogicals - HPSA_MAX_LUN);
2894 *nlogicals = HPSA_MAX_LUN;
2895 }
2896 if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) {
2897 dev_warn(&h->pdev->dev,
2898 "maximum logical + physical LUNs (%d) exceeded. "
2899 "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN,
2900 *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN);
2901 *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals;
2902 }
2903 return 0;
2904}
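
/*
 * Worked example for the LUN count math above (hypothetical lengths):
 * with ioaccel enabled each physical entry is 24 bytes, so a physdev
 * LUNListLength of 96 yields nphysicals = 4; logical entries are always
 * 8 bytes, so a logdev LUNListLength of 24 yields nlogicals = 3.
 */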
2905
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002906u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002907 int nphysicals, int nlogicals,
2908 struct ReportExtendedLUNdata *physdev_list,
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002909 struct ReportLUNdata *logdev_list)
2910{
2911 /* Helper function: figure out where the LUN ID info is coming from,
2912 * given index i, the lists of physical and logical devices, and where in
2913 * the list the raid controller is supposed to appear (first or last).
2914 */
2915
2916 int logicals_start = nphysicals + (raid_ctlr_position == 0);
2917 int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0);
2918
2919 if (i == raid_ctlr_position)
2920 return RAID_CTLR_LUNID;
2921
2922 if (i < logicals_start)
2923 return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0];
2924
2925 if (i < last_device)
2926 return &logdev_list->LUN[i - nphysicals -
2927 (raid_ctlr_position == 0)][0];
2928 BUG();
2929 return NULL;
2930}
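
/*
 * Illustrative index walk for figure_lunaddrbytes(), with hypothetical
 * counts nphysicals = 2 and nlogicals = 3: if the controller is reported
 * last (raid_ctlr_position = 5), i = 0..1 map to physdev_list->LUN[0..1],
 * i = 2..4 map to logdev_list->LUN[0..2], and i = 5 returns
 * RAID_CTLR_LUNID; if it is reported first (raid_ctlr_position = 0),
 * i = 0 is the controller and every remaining device appears one index
 * later.
 */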
2931
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002932static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)
2933{
2934 /* the idea here is we could get notified
2935 * that some devices have changed, so we do a report
2936 * physical luns and report logical luns cmd, and adjust
2937 * our list of devices accordingly.
2938 *
2939 * The scsi3addr's of devices won't change so long as the
2940 * adapter is not reset. That means we can rescan and
2941 * tell which devices we already know about, vs. new
2942 * devices, vs. disappearing devices.
2943 */
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002944 struct ReportExtendedLUNdata *physdev_list = NULL;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002945 struct ReportLUNdata *logdev_list = NULL;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002946 u32 nphysicals = 0;
2947 u32 nlogicals = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002948 int physical_mode = 0;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06002949 u32 ndev_allocated = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002950 struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;
2951 int ncurrent = 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002952 int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;
Scott Teel4f4eb9f2012-01-19 14:01:25 -06002953 int i, n_ext_target_devs, ndevs_to_allocate;
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002954 int raid_ctlr_position;
Scott Teelaca4a522012-01-19 14:01:19 -06002955 DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002956
Scott Teelcfe5bad2011-10-26 16:21:07 -05002957 currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002958 physdev_list = kzalloc(reportlunsize, GFP_KERNEL);
2959 logdev_list = kzalloc(reportlunsize, GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002960 tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL);
2961
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05002962 if (!currentsd || !physdev_list || !logdev_list || !tmpdevice) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002963 dev_err(&h->pdev->dev, "out of memory\n");
2964 goto out;
2965 }
2966 memset(lunzerobits, 0, sizeof(lunzerobits));
2967
Matt Gatesa93aa1f2014-02-18 13:55:07 -06002968 if (hpsa_gather_lun_info(h, reportlunsize,
2969 (struct ReportLUNdata *) physdev_list, &nphysicals,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06002970 &physical_mode, logdev_list, &nlogicals))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002971 goto out;
2972
Scott Teelaca4a522012-01-19 14:01:19 -06002973 /* We might see up to the maximum number of logical and physical disks
2974 * plus external target devices, and a device for the local RAID
2975 * controller.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002976 */
Scott Teelaca4a522012-01-19 14:01:19 -06002977 ndevs_to_allocate = nphysicals + nlogicals + MAX_EXT_TARGETS + 1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002978
2979 /* Allocate the per device structures */
2980 for (i = 0; i < ndevs_to_allocate; i++) {
Scott Teelb7ec0212011-10-26 16:21:12 -05002981 if (i >= HPSA_MAX_DEVICES) {
2982 dev_warn(&h->pdev->dev, "maximum devices (%d) exceeded."
2983 " %d devices ignored.\n", HPSA_MAX_DEVICES,
2984 ndevs_to_allocate - HPSA_MAX_DEVICES);
2985 break;
2986 }
2987
Stephen M. Cameronedd16362009-12-08 14:09:11 -08002988 currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL);
2989 if (!currentsd[i]) {
2990 dev_warn(&h->pdev->dev, "out of memory at %s:%d\n",
2991 __FILE__, __LINE__);
2992 goto out;
2993 }
2994 ndev_allocated++;
2995 }
2996
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06002997 if (unlikely(is_scsi_rev_5(h)))
2998 raid_ctlr_position = 0;
2999 else
3000 raid_ctlr_position = nphysicals + nlogicals;
3001
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003002 /* adjust our table of devices */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003003 n_ext_target_devs = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003004 for (i = 0; i < nphysicals + nlogicals + 1; i++) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003005 u8 *lunaddrbytes, is_OBDR = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003006
3007 /* Figure out where the LUN ID info is coming from */
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003008 lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position,
3009 i, nphysicals, nlogicals, physdev_list, logdev_list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003010 /* skip masked physical devices. */
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06003011 if (lunaddrbytes[3] & 0xC0 &&
3012 i < nphysicals + (raid_ctlr_position == 0))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003013 continue;
3014
3015 /* Get device type, vendor, model, device id */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003016 if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice,
3017 &is_OBDR))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003018 continue; /* skip it if we can't talk to it. */
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003019 figure_bus_target_lun(h, lunaddrbytes, tmpdevice);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003020 this_device = currentsd[ncurrent];
3021
3022 /*
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003023 * For external target devices, we have to insert a LUN 0 which
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003024 * doesn't show up in CCISS_REPORT_PHYSICAL data, but there
3025 * is nonetheless an enclosure device there. We have to
3026 * present it; otherwise Linux won't find anything if
3027 * there is no lun 0.
3028 */
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003029 if (add_ext_target_dev(h, tmpdevice, this_device,
Stephen M. Cameron1f310bd2012-01-19 14:01:14 -06003030 lunaddrbytes, lunzerobits,
Scott Teel4f4eb9f2012-01-19 14:01:25 -06003031 &n_ext_target_devs)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003032 ncurrent++;
3033 this_device = currentsd[ncurrent];
3034 }
3035
3036 *this_device = *tmpdevice;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003037
3038 switch (this_device->devtype) {
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003039 case TYPE_ROM:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003040 /* We don't *really* support actual CD-ROM devices,
3041 * just "One Button Disaster Recovery" tape drive
3042 * which temporarily pretends to be a CD-ROM drive.
3043 * So we check that the device is really an OBDR tape
3044 * device by checking for "$DR-10" in bytes 43-48 of
3045 * the inquiry data.
3046 */
Stephen M. Cameron0b0e1d62011-08-09 08:17:30 -05003047 if (is_OBDR)
3048 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003049 break;
3050 case TYPE_DISK:
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003051 if (i >= nphysicals) {
3052 ncurrent++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003053 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003054 }
3055 if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) {
3056 memcpy(&this_device->ioaccel_handle,
3057 &lunaddrbytes[20],
3058 sizeof(this_device->ioaccel_handle));
3059 ncurrent++;
3060 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003061 break;
3062 case TYPE_TAPE:
3063 case TYPE_MEDIUM_CHANGER:
3064 ncurrent++;
3065 break;
3066 case TYPE_RAID:
3067 /* Only present the Smartarray HBA as a RAID controller.
3068 * If it's a RAID controller other than the HBA itself
3069 * (an external RAID controller, MSA500 or similar)
3070 * don't present it.
3071 */
3072 if (!is_hba_lunid(lunaddrbytes))
3073 break;
3074 ncurrent++;
3075 break;
3076 default:
3077 break;
3078 }
Scott Teelcfe5bad2011-10-26 16:21:07 -05003079 if (ncurrent >= HPSA_MAX_DEVICES)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003080 break;
3081 }
3082 adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent);
3083out:
3084 kfree(tmpdevice);
3085 for (i = 0; i < ndev_allocated; i++)
3086 kfree(currentsd[i]);
3087 kfree(currentsd);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003088 kfree(physdev_list);
3089 kfree(logdev_list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003090}
3091
3092/* hpsa_scatter_gather takes a struct scsi_cmnd (cmd), does the PCI
3093 * DMA mapping, and fills in the scatter-gather entries of the
3094 * hpsa command, cp.
3095 */
3095 */
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003096static int hpsa_scatter_gather(struct ctlr_info *h,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003097 struct CommandList *cp,
3098 struct scsi_cmnd *cmd)
3099{
3100 unsigned int len;
3101 struct scatterlist *sg;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003102 u64 addr64;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003103 int use_sg, i, sg_index, chained;
3104 struct SGDescriptor *curr_sg;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003105
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003106 BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003107
3108 use_sg = scsi_dma_map(cmd);
3109 if (use_sg < 0)
3110 return use_sg;
3111
3112 if (!use_sg)
3113 goto sglist_finished;
3114
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003115 curr_sg = cp->SG;
3116 chained = 0;
3117 sg_index = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003118 scsi_for_each_sg(cmd, sg, use_sg, i) {
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003119 if (i == h->max_cmd_sg_entries - 1 &&
3120 use_sg > h->max_cmd_sg_entries) {
3121 chained = 1;
3122 curr_sg = h->cmd_sg_list[cp->cmdindex];
3123 sg_index = 0;
3124 }
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003125 addr64 = (u64) sg_dma_address(sg);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003126 len = sg_dma_len(sg);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003127 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3128 curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3129 curr_sg->Len = len;
Matt Gatese1d9cbf2014-02-18 13:55:12 -06003130 curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 0 : HPSA_SG_LAST;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003131 curr_sg++;
3132 }
3133
3134 if (use_sg + chained > h->maxSG)
3135 h->maxSG = use_sg + chained;
3136
3137 if (chained) {
3138 cp->Header.SGList = h->max_cmd_sg_entries;
3139 cp->Header.SGTotal = (u16) (use_sg + 1);
Stephen M. Camerone2bea6d2013-02-20 11:24:46 -06003140 if (hpsa_map_sg_chain_block(h, cp)) {
3141 scsi_dma_unmap(cmd);
3142 return -1;
3143 }
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003144 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003145 }
3146
3147sglist_finished:
3148
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06003149 cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */
3150 cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003151 return 0;
3152}
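
/*
 * Example of the SG chaining above, with hypothetical sizes: if
 * h->max_cmd_sg_entries is 32 and scsi_dma_map() returns use_sg = 40,
 * the first 31 descriptors land in cp->SG[], the walk then switches to
 * h->cmd_sg_list[cp->cmdindex] for the remaining 9, and the header gets
 * SGList = 32 and SGTotal = 41 before hpsa_map_sg_chain_block() maps
 * the chain block.
 */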
3153
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003154#define IO_ACCEL_INELIGIBLE (1)
3155static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len)
3156{
3157 int is_write = 0;
3158 u32 block;
3159 u32 block_cnt;
3160
3161 /* Perform some CDB fixups if needed using 10 byte reads/writes only */
3162 switch (cdb[0]) {
3163 case WRITE_6:
3164 case WRITE_12:
3165 is_write = 1;
 /* fall through */
3166 case READ_6:
3167 case READ_12:
3168 if (*cdb_len == 6) {
3169 block = (((u32) cdb[2]) << 8) | cdb[3];
3170 block_cnt = cdb[4];
3171 } else {
3172 BUG_ON(*cdb_len != 12);
3173 block = (((u32) cdb[2]) << 24) |
3174 (((u32) cdb[3]) << 16) |
3175 (((u32) cdb[4]) << 8) |
3176 cdb[5];
3177 block_cnt =
3178 (((u32) cdb[6]) << 24) |
3179 (((u32) cdb[7]) << 16) |
3180 (((u32) cdb[8]) << 8) |
3181 cdb[9];
3182 }
3183 if (block_cnt > 0xffff)
3184 return IO_ACCEL_INELIGIBLE;
3185
3186 cdb[0] = is_write ? WRITE_10 : READ_10;
3187 cdb[1] = 0;
3188 cdb[2] = (u8) (block >> 24);
3189 cdb[3] = (u8) (block >> 16);
3190 cdb[4] = (u8) (block >> 8);
3191 cdb[5] = (u8) (block);
3192 cdb[6] = 0;
3193 cdb[7] = (u8) (block_cnt >> 8);
3194 cdb[8] = (u8) (block_cnt);
3195 cdb[9] = 0;
3196 *cdb_len = 10;
3197 break;
3198 }
3199 return 0;
3200}
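
/*
 * Worked example for fixup_ioaccel_cdb(), using a hypothetical CDB: a
 * READ_12 for LBA 0x12345 with a 16-block transfer length is rewritten
 * as a READ_10 with the same LBA and count, and *cdb_len becomes 10.
 * A request for more than 0xffff blocks cannot be expressed as a
 * 10-byte CDB, so it returns IO_ACCEL_INELIGIBLE instead.
 */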
3201
Scott Teelc3497752014-02-18 13:56:34 -06003202static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h,
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003203 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3204 u8 *scsi3addr)
Matt Gatese1f7de02014-02-18 13:55:17 -06003205{
3206 struct scsi_cmnd *cmd = c->scsi_cmd;
Matt Gatese1f7de02014-02-18 13:55:17 -06003207 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex];
3208 unsigned int len;
3209 unsigned int total_len = 0;
3210 struct scatterlist *sg;
3211 u64 addr64;
3212 int use_sg, i;
3213 struct SGDescriptor *curr_sg;
3214 u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE;
3215
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003216 /* TODO: implement chaining support */
3217 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3218 return IO_ACCEL_INELIGIBLE;
3219
Matt Gatese1f7de02014-02-18 13:55:17 -06003220 BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX);
3221
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003222 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3223 return IO_ACCEL_INELIGIBLE;
3224
Matt Gatese1f7de02014-02-18 13:55:17 -06003225 c->cmd_type = CMD_IOACCEL1;
3226
3227 /* Adjust the DMA address to point to the accelerated command buffer */
3228 c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle +
3229 (c->cmdindex * sizeof(*cp));
3230 BUG_ON(c->busaddr & 0x0000007F);
3231
3232 use_sg = scsi_dma_map(cmd);
3233 if (use_sg < 0)
3234 return use_sg;
3235
3236 if (use_sg) {
3237 curr_sg = cp->SG;
3238 scsi_for_each_sg(cmd, sg, use_sg, i) {
3239 addr64 = (u64) sg_dma_address(sg);
3240 len = sg_dma_len(sg);
3241 total_len += len;
3242 curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
3243 curr_sg->Addr.upper =
3244 (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
3245 curr_sg->Len = len;
3246
3247 if (i == (scsi_sg_count(cmd) - 1))
3248 curr_sg->Ext = HPSA_SG_LAST;
3249 else
3250 curr_sg->Ext = 0; /* we are not chaining */
3251 curr_sg++;
3252 }
3253
3254 switch (cmd->sc_data_direction) {
3255 case DMA_TO_DEVICE:
3256 control |= IOACCEL1_CONTROL_DATA_OUT;
3257 break;
3258 case DMA_FROM_DEVICE:
3259 control |= IOACCEL1_CONTROL_DATA_IN;
3260 break;
3261 case DMA_NONE:
3262 control |= IOACCEL1_CONTROL_NODATAXFER;
3263 break;
3264 default:
3265 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3266 cmd->sc_data_direction);
3267 BUG();
3268 break;
3269 }
3270 } else {
3271 control |= IOACCEL1_CONTROL_NODATAXFER;
3272 }
3273
Scott Teelc3497752014-02-18 13:56:34 -06003274 c->Header.SGList = use_sg;
Matt Gatese1f7de02014-02-18 13:55:17 -06003275 /* Fill out the command structure to submit */
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003276 cp->dev_handle = ioaccel_handle & 0xFFFF;
Matt Gatese1f7de02014-02-18 13:55:17 -06003277 cp->transfer_len = total_len;
3278 cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ |
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003279 (cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK);
Matt Gatese1f7de02014-02-18 13:55:17 -06003280 cp->control = control;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003281 memcpy(cp->CDB, cdb, cdb_len);
3282 memcpy(cp->CISS_LUN, scsi3addr, 8);
Scott Teelc3497752014-02-18 13:56:34 -06003283 /* Tag was already set at init time. */
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003284 enqueue_cmd_and_start_io(h, c);
Matt Gatese1f7de02014-02-18 13:55:17 -06003285 return 0;
3286}
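
/*
 * Note on the busaddr math above, with hypothetical numbers: each
 * ioaccel1 command sits at ioaccel_cmd_pool_dhandle +
 * cmdindex * sizeof(*cp), and the BUG_ON enforces 128-byte alignment of
 * the result (low 7 bits zero).  Assuming the pool base is itself
 * 128-byte aligned and the command size is a multiple of 128, e.g.
 * cmdindex 5 on a pool at 0x10000000 yields an address the hardware
 * will accept.
 */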
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003287
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003288/*
3289 * Queue a command directly to a device behind the controller using the
3290 * I/O accelerator path.
3291 */
3292static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h,
3293 struct CommandList *c)
3294{
3295 struct scsi_cmnd *cmd = c->scsi_cmd;
3296 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3297
3298 return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle,
3299 cmd->cmnd, cmd->cmd_len, dev->scsi3addr);
3300}
3301
Scott Teeldd0e19f2014-02-18 13:57:31 -06003302/*
3303 * Set encryption parameters for the ioaccel2 request
3304 */
3305static void set_encrypt_ioaccel2(struct ctlr_info *h,
3306 struct CommandList *c, struct io_accel2_cmd *cp)
3307{
3308 struct scsi_cmnd *cmd = c->scsi_cmd;
3309 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3310 struct raid_map_data *map = &dev->raid_map;
3311 u64 first_block;
3312
3313 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3314
3315 /* Are we doing encryption on this device */
3316 if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON))
3317 return;
3318 /* Set the data encryption key index. */
3319 cp->dekindex = map->dekindex;
3320
3321 /* Set the encryption enable flag, encoded into direction field. */
3322 cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK;
3323
3324 /* Set encryption tweak values based on logical block address.
3325 * If block size is 512, tweak value is LBA.
3326 * For other block sizes, tweak value is (LBA * block size) / 512.
3327 */
3328 switch (cmd->cmnd[0]) {
3329 /* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */
3330 case WRITE_6:
3331 case READ_6:
3332 if (map->volume_blk_size == 512) {
3333 cp->tweak_lower =
3334 (((u32) cmd->cmnd[2]) << 8) |
3335 cmd->cmnd[3];
3336 cp->tweak_upper = 0;
3337 } else {
3338 first_block =
3339 (((u64) cmd->cmnd[2]) << 8) |
3340 cmd->cmnd[3];
3341 first_block = (first_block * map->volume_blk_size)/512;
3342 cp->tweak_lower = (u32)first_block;
3343 cp->tweak_upper = (u32)(first_block >> 32);
3344 }
3345 break;
3346 case WRITE_10:
3347 case READ_10:
3348 if (map->volume_blk_size == 512) {
3349 cp->tweak_lower =
3350 (((u32) cmd->cmnd[2]) << 24) |
3351 (((u32) cmd->cmnd[3]) << 16) |
3352 (((u32) cmd->cmnd[4]) << 8) |
3353 cmd->cmnd[5];
3354 cp->tweak_upper = 0;
3355 } else {
3356 first_block =
3357 (((u64) cmd->cmnd[2]) << 24) |
3358 (((u64) cmd->cmnd[3]) << 16) |
3359 (((u64) cmd->cmnd[4]) << 8) |
3360 cmd->cmnd[5];
3361 first_block = (first_block * map->volume_blk_size)/512;
3362 cp->tweak_lower = (u32)first_block;
3363 cp->tweak_upper = (u32)(first_block >> 32);
3364 }
3365 break;
3366 /* Required? 12-byte cdbs eliminated by fixup_ioaccel_cdb */
3367 case WRITE_12:
3368 case READ_12:
3369 if (map->volume_blk_size == 512) {
3370 cp->tweak_lower =
3371 (((u32) cmd->cmnd[2]) << 24) |
3372 (((u32) cmd->cmnd[3]) << 16) |
3373 (((u32) cmd->cmnd[4]) << 8) |
3374 cmd->cmnd[5];
3375 cp->tweak_upper = 0;
3376 } else {
3377 first_block =
3378 (((u64) cmd->cmnd[2]) << 24) |
3379 (((u64) cmd->cmnd[3]) << 16) |
3380 (((u64) cmd->cmnd[4]) << 8) |
3381 cmd->cmnd[5];
3382 first_block = (first_block * map->volume_blk_size)/512;
3383 cp->tweak_lower = (u32)first_block;
3384 cp->tweak_upper = (u32)(first_block >> 32);
3385 }
3386 break;
3387 case WRITE_16:
3388 case READ_16:
3389 if (map->volume_blk_size == 512) {
3390 cp->tweak_lower =
3391 (((u32) cmd->cmnd[6]) << 24) |
3392 (((u32) cmd->cmnd[7]) << 16) |
3393 (((u32) cmd->cmnd[8]) << 8) |
3394 cmd->cmnd[9];
3395 cp->tweak_upper =
3396 (((u32) cmd->cmnd[2]) << 24) |
3397 (((u32) cmd->cmnd[3]) << 16) |
3398 (((u32) cmd->cmnd[4]) << 8) |
3399 cmd->cmnd[5];
3400 } else {
3401 first_block =
3402 (((u64) cmd->cmnd[2]) << 56) |
3403 (((u64) cmd->cmnd[3]) << 48) |
3404 (((u64) cmd->cmnd[4]) << 40) |
3405 (((u64) cmd->cmnd[5]) << 32) |
3406 (((u64) cmd->cmnd[6]) << 24) |
3407 (((u64) cmd->cmnd[7]) << 16) |
3408 (((u64) cmd->cmnd[8]) << 8) |
3409 cmd->cmnd[9];
3410 first_block = (first_block * map->volume_blk_size)/512;
3411 cp->tweak_lower = (u32)first_block;
3412 cp->tweak_upper = (u32)(first_block >> 32);
3413 }
3414 break;
3415 default:
3416 dev_err(&h->pdev->dev,
3417 "ERROR: %s: IOACCEL request CDB size not supported for encryption\n",
3418 __func__);
3419 BUG();
3420 break;
3421 }
3422}
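
/*
 * Worked tweak example for a hypothetical volume: a READ_10 at LBA
 * 0x1000 on a 512-byte-block volume gets tweak_lower = 0x1000; on a
 * 4096-byte-block volume the same LBA gives
 * first_block = (0x1000 * 4096) / 512 = 0x8000, so tweak_lower = 0x8000
 * and tweak_upper = 0.
 */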
3423
Scott Teelc3497752014-02-18 13:56:34 -06003424static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h,
3425 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3426 u8 *scsi3addr)
3427{
3428 struct scsi_cmnd *cmd = c->scsi_cmd;
3429 struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex];
3430 struct ioaccel2_sg_element *curr_sg;
3431 int use_sg, i;
3432 struct scatterlist *sg;
3433 u64 addr64;
3434 u32 len;
3435 u32 total_len = 0;
3436
3437 if (scsi_sg_count(cmd) > h->ioaccel_maxsg)
3438 return IO_ACCEL_INELIGIBLE;
3439
3440 if (fixup_ioaccel_cdb(cdb, &cdb_len))
3441 return IO_ACCEL_INELIGIBLE;
3442 c->cmd_type = CMD_IOACCEL2;
3443 /* Adjust the DMA address to point to the accelerated command buffer */
3444 c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle +
3445 (c->cmdindex * sizeof(*cp));
3446 BUG_ON(c->busaddr & 0x0000007F);
3447
3448 memset(cp, 0, sizeof(*cp));
3449 cp->IU_type = IOACCEL2_IU_TYPE;
3450
3451 use_sg = scsi_dma_map(cmd);
3452 if (use_sg < 0)
3453 return use_sg;
3454
3455 if (use_sg) {
3456 BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES);
3457 curr_sg = cp->sg;
3458 scsi_for_each_sg(cmd, sg, use_sg, i) {
3459 addr64 = (u64) sg_dma_address(sg);
3460 len = sg_dma_len(sg);
3461 total_len += len;
3462 curr_sg->address = cpu_to_le64(addr64);
3463 curr_sg->length = cpu_to_le32(len);
3464 curr_sg->reserved[0] = 0;
3465 curr_sg->reserved[1] = 0;
3466 curr_sg->reserved[2] = 0;
3467 curr_sg->chain_indicator = 0;
3468 curr_sg++;
3469 }
3470
3471 switch (cmd->sc_data_direction) {
3472 case DMA_TO_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06003473 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3474 cp->direction |= IOACCEL2_DIR_DATA_OUT;
Scott Teelc3497752014-02-18 13:56:34 -06003475 break;
3476 case DMA_FROM_DEVICE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06003477 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3478 cp->direction |= IOACCEL2_DIR_DATA_IN;
Scott Teelc3497752014-02-18 13:56:34 -06003479 break;
3480 case DMA_NONE:
Scott Teeldd0e19f2014-02-18 13:57:31 -06003481 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3482 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06003483 break;
3484 default:
3485 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3486 cmd->sc_data_direction);
3487 BUG();
3488 break;
3489 }
3490 } else {
Scott Teeldd0e19f2014-02-18 13:57:31 -06003491 cp->direction &= ~IOACCEL2_DIRECTION_MASK;
3492 cp->direction |= IOACCEL2_DIR_NO_DATA;
Scott Teelc3497752014-02-18 13:56:34 -06003493 }
Scott Teeldd0e19f2014-02-18 13:57:31 -06003494
3495 /* Set encryption parameters, if necessary */
3496 set_encrypt_ioaccel2(h, c, cp);
3497
Scott Teelc3497752014-02-18 13:56:34 -06003498 cp->scsi_nexus = ioaccel_handle;
Scott Teeldd0e19f2014-02-18 13:57:31 -06003499 cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) |
Scott Teelc3497752014-02-18 13:56:34 -06003500 DIRECT_LOOKUP_BIT;
3501 memcpy(cp->cdb, cdb, sizeof(cp->cdb));
3502 memset(cp->cciss_lun, 0, sizeof(cp->cciss_lun));
3503 cp->cmd_priority_task_attr = 0;
3504
3505 /* fill in sg elements */
3506 cp->sg_count = (u8) use_sg;
3507
3508 cp->data_len = cpu_to_le32(total_len);
3509 cp->err_ptr = cpu_to_le64(c->busaddr +
3510 offsetof(struct io_accel2_cmd, error_data));
3511 cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data));
3512
3513 enqueue_cmd_and_start_io(h, c);
3514 return 0;
3515}
3516
3517/*
3518 * Queue a command to the correct I/O accelerator path.
3519 */
3520static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h,
3521 struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len,
3522 u8 *scsi3addr)
3523{
3524 if (h->transMethod & CFGTBL_Trans_io_accel1)
3525 return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle,
3526 cdb, cdb_len, scsi3addr);
3527 else
3528 return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle,
3529 cdb, cdb_len, scsi3addr);
3530}
3531
Scott Teel6b80b182014-02-18 13:56:55 -06003532static void raid_map_helper(struct raid_map_data *map,
3533 int offload_to_mirror, u32 *map_index, u32 *current_group)
3534{
3535 if (offload_to_mirror == 0) {
3536 /* use physical disk in the first mirrored group. */
3537 *map_index %= map->data_disks_per_row;
3538 return;
3539 }
3540 do {
3541 /* determine mirror group that *map_index indicates */
3542 *current_group = *map_index / map->data_disks_per_row;
3543 if (offload_to_mirror == *current_group)
3544 continue;
3545 if (*current_group < (map->layout_map_count - 1)) {
3546 /* select map index from next group */
3547 *map_index += map->data_disks_per_row;
3548 (*current_group)++;
3549 } else {
3550 /* select map index from first group */
3551 *map_index %= map->data_disks_per_row;
3552 *current_group = 0;
3553 }
3554 } while (offload_to_mirror != *current_group);
3555}
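
/*
 * Example of raid_map_helper() on a hypothetical 3-way mirror
 * (layout_map_count = 3, data_disks_per_row = 4): a starting map_index
 * of 5 lies in mirror group 1; with offload_to_mirror = 2 the loop
 * advances it to 9 (group 2, same column), while offload_to_mirror = 0
 * simply reduces it modulo 4 back into the first group.
 */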
3556
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003557/*
3558 * Attempt to perform offload RAID mapping for a logical volume I/O.
3559 */
3560static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
3561 struct CommandList *c)
3562{
3563 struct scsi_cmnd *cmd = c->scsi_cmd;
3564 struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
3565 struct raid_map_data *map = &dev->raid_map;
3566 struct raid_map_disk_data *dd = &map->data[0];
3567 int is_write = 0;
3568 u32 map_index;
3569 u64 first_block, last_block;
3570 u32 block_cnt;
3571 u32 blocks_per_row;
3572 u64 first_row, last_row;
3573 u32 first_row_offset, last_row_offset;
3574 u32 first_column, last_column;
Scott Teel6b80b182014-02-18 13:56:55 -06003575 u64 r0_first_row, r0_last_row;
3576 u32 r5or6_blocks_per_row;
3577 u64 r5or6_first_row, r5or6_last_row;
3578 u32 r5or6_first_row_offset, r5or6_last_row_offset;
3579 u32 r5or6_first_column, r5or6_last_column;
3580 u32 total_disks_per_row;
3581 u32 stripesize;
3582 u32 first_group, last_group, current_group;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003583 u32 map_row;
3584 u32 disk_handle;
3585 u64 disk_block;
3586 u32 disk_block_cnt;
3587 u8 cdb[16];
3588 u8 cdb_len;
3589#if BITS_PER_LONG == 32
3590 u64 tmpdiv;
3591#endif
Scott Teel6b80b182014-02-18 13:56:55 -06003592 int offload_to_mirror;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003593
3594 BUG_ON(!(dev->offload_config && dev->offload_enabled));
3595
3596 /* check for valid opcode, get LBA and block count */
3597 switch (cmd->cmnd[0]) {
3598 case WRITE_6:
3599 is_write = 1;
3600 case READ_6:
3601 first_block =
3602 (((u64) cmd->cmnd[2]) << 8) |
3603 cmd->cmnd[3];
3604 block_cnt = cmd->cmnd[4];
3605 break;
3606 case WRITE_10:
3607 is_write = 1;
3608 case READ_10:
3609 first_block =
3610 (((u64) cmd->cmnd[2]) << 24) |
3611 (((u64) cmd->cmnd[3]) << 16) |
3612 (((u64) cmd->cmnd[4]) << 8) |
3613 cmd->cmnd[5];
3614 block_cnt =
3615 (((u32) cmd->cmnd[7]) << 8) |
3616 cmd->cmnd[8];
3617 break;
3618 case WRITE_12:
3619 is_write = 1;
3620 case READ_12:
3621 first_block =
3622 (((u64) cmd->cmnd[2]) << 24) |
3623 (((u64) cmd->cmnd[3]) << 16) |
3624 (((u64) cmd->cmnd[4]) << 8) |
3625 cmd->cmnd[5];
3626 block_cnt =
3627 (((u32) cmd->cmnd[6]) << 24) |
3628 (((u32) cmd->cmnd[7]) << 16) |
3629 (((u32) cmd->cmnd[8]) << 8) |
3630 cmd->cmnd[9];
3631 break;
3632 case WRITE_16:
3633 is_write = 1;
3634 case READ_16:
3635 first_block =
3636 (((u64) cmd->cmnd[2]) << 56) |
3637 (((u64) cmd->cmnd[3]) << 48) |
3638 (((u64) cmd->cmnd[4]) << 40) |
3639 (((u64) cmd->cmnd[5]) << 32) |
3640 (((u64) cmd->cmnd[6]) << 24) |
3641 (((u64) cmd->cmnd[7]) << 16) |
3642 (((u64) cmd->cmnd[8]) << 8) |
3643 cmd->cmnd[9];
3644 block_cnt =
3645 (((u32) cmd->cmnd[10]) << 24) |
3646 (((u32) cmd->cmnd[11]) << 16) |
3647 (((u32) cmd->cmnd[12]) << 8) |
3648 cmd->cmnd[13];
3649 break;
3650 default:
3651 return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */
3652 }
3653 BUG_ON(block_cnt == 0);
3654 last_block = first_block + block_cnt - 1;
3655
3656 /* check for write to non-RAID-0 */
3657 if (is_write && dev->raid_level != 0)
3658 return IO_ACCEL_INELIGIBLE;
3659
3660 /* check for invalid block or wraparound */
3661 if (last_block >= map->volume_blk_cnt || last_block < first_block)
3662 return IO_ACCEL_INELIGIBLE;
3663
3664 /* calculate stripe information for the request */
3665 blocks_per_row = map->data_disks_per_row * map->strip_size;
3666#if BITS_PER_LONG == 32
3667 tmpdiv = first_block;
3668 (void) do_div(tmpdiv, blocks_per_row);
3669 first_row = tmpdiv;
3670 tmpdiv = last_block;
3671 (void) do_div(tmpdiv, blocks_per_row);
3672 last_row = tmpdiv;
3673 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3674 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3675 tmpdiv = first_row_offset;
3676 (void) do_div(tmpdiv, map->strip_size);
3677 first_column = tmpdiv;
3678 tmpdiv = last_row_offset;
3679 (void) do_div(tmpdiv, map->strip_size);
3680 last_column = tmpdiv;
3681#else
3682 first_row = first_block / blocks_per_row;
3683 last_row = last_block / blocks_per_row;
3684 first_row_offset = (u32) (first_block - (first_row * blocks_per_row));
3685 last_row_offset = (u32) (last_block - (last_row * blocks_per_row));
3686 first_column = first_row_offset / map->strip_size;
3687 last_column = last_row_offset / map->strip_size;
3688#endif
3689
3690 /* if this isn't a single row/column then give to the controller */
3691 if ((first_row != last_row) || (first_column != last_column))
3692 return IO_ACCEL_INELIGIBLE;
3693
3694 /* proceeding with driver mapping */
Scott Teel6b80b182014-02-18 13:56:55 -06003695 total_disks_per_row = map->data_disks_per_row +
3696 map->metadata_disks_per_row;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003697 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3698 map->row_cnt;
Scott Teel6b80b182014-02-18 13:56:55 -06003699 map_index = (map_row * total_disks_per_row) + first_column;
3700
3701 switch (dev->raid_level) {
3702 case HPSA_RAID_0:
3703 break; /* nothing special to do */
3704 case HPSA_RAID_1:
3705 /* Handles load balance across RAID 1 members.
3706 * (2-drive R1 and R10 with even # of drives.)
3707 * Appropriate for SSDs, not optimal for HDDs
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003708 */
Scott Teel6b80b182014-02-18 13:56:55 -06003709 BUG_ON(map->layout_map_count != 2);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003710 if (dev->offload_to_mirror)
3711 map_index += map->data_disks_per_row;
3712 dev->offload_to_mirror = !dev->offload_to_mirror;
Scott Teel6b80b182014-02-18 13:56:55 -06003713 break;
3714 case HPSA_RAID_ADM:
3715 /* Handles N-way mirrors (R1-ADM)
3716 * and R10 with # of drives divisible by 3.
3717 */
3718 BUG_ON(map->layout_map_count != 3);
3719
3720 offload_to_mirror = dev->offload_to_mirror;
3721 raid_map_helper(map, offload_to_mirror,
3722 &map_index, &current_group);
3723 /* set mirror group to use next time */
3724 offload_to_mirror =
3725 (offload_to_mirror >= map->layout_map_count - 1)
3726 ? 0 : offload_to_mirror + 1;
3727 /* FIXME: remove after debug/dev */
3728 BUG_ON(offload_to_mirror >= map->layout_map_count);
3729 dev_warn(&h->pdev->dev,
3730 "DEBUG: Using physical disk map index %d from mirror group %d\n",
3731 map_index, offload_to_mirror);
3732 dev->offload_to_mirror = offload_to_mirror;
3733 /* Avoid direct use of dev->offload_to_mirror within this
3734 * function since multiple threads might simultaneously
3735 * increment it beyond the range of map->layout_map_count - 1.
3736 */
3737 break;
3738 case HPSA_RAID_5:
3739 case HPSA_RAID_6:
3740 if (map->layout_map_count <= 1)
3741 break;
3742
3743 /* Verify first and last block are in same RAID group */
3744 r5or6_blocks_per_row =
3745 map->strip_size * map->data_disks_per_row;
3746 BUG_ON(r5or6_blocks_per_row == 0);
3747 stripesize = r5or6_blocks_per_row * map->layout_map_count;
3748#if BITS_PER_LONG == 32
3749 tmpdiv = first_block;
3750 first_group = do_div(tmpdiv, stripesize);
3751 tmpdiv = first_group;
3752 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3753 first_group = tmpdiv;
3754 tmpdiv = last_block;
3755 last_group = do_div(tmpdiv, stripesize);
3756 tmpdiv = last_group;
3757 (void) do_div(tmpdiv, r5or6_blocks_per_row);
3758 last_group = tmpdiv;
3759#else
3760 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
3761 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
3762 if (first_group != last_group)
3763#endif
3764 return IO_ACCEL_INELIGIBLE;
3765
3766 /* Verify request is in a single row of RAID 5/6 */
3767#if BITS_PER_LONG == 32
3768 tmpdiv = first_block;
3769 (void) do_div(tmpdiv, stripesize);
3770 first_row = r5or6_first_row = r0_first_row = tmpdiv;
3771 tmpdiv = last_block;
3772 (void) do_div(tmpdiv, stripesize);
3773 r5or6_last_row = r0_last_row = tmpdiv;
3774#else
3775 first_row = r5or6_first_row = r0_first_row =
3776 first_block / stripesize;
3777 r5or6_last_row = r0_last_row = last_block / stripesize;
3778#endif
3779 if (r5or6_first_row != r5or6_last_row)
3780 return IO_ACCEL_INELIGIBLE;
3781
3782
3783 /* Verify request is in a single column */
3784#if BITS_PER_LONG == 32
3785 tmpdiv = first_block;
3786 first_row_offset = do_div(tmpdiv, stripesize);
3787 tmpdiv = first_row_offset;
3788 first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row);
3789 r5or6_first_row_offset = first_row_offset;
3790 tmpdiv = last_block;
3791 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
3792 tmpdiv = r5or6_last_row_offset;
3793 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
3794 tmpdiv = r5or6_first_row_offset;
3795 (void) do_div(tmpdiv, map->strip_size);
3796 first_column = r5or6_first_column = tmpdiv;
3797 tmpdiv = r5or6_last_row_offset;
3798 (void) do_div(tmpdiv, map->strip_size);
3799 r5or6_last_column = tmpdiv;
3800#else
3801 first_row_offset = r5or6_first_row_offset =
3802 (u32)((first_block % stripesize) %
3803 r5or6_blocks_per_row);
3804
3805 r5or6_last_row_offset =
3806 (u32)((last_block % stripesize) %
3807 r5or6_blocks_per_row);
3808
3809 first_column = r5or6_first_column =
3810 r5or6_first_row_offset / map->strip_size;
3811 r5or6_last_column =
3812 r5or6_last_row_offset / map->strip_size;
3813#endif
3814 if (r5or6_first_column != r5or6_last_column)
3815 return IO_ACCEL_INELIGIBLE;
3816
3817 /* Request is eligible */
3818 map_row = ((u32)(first_row >> map->parity_rotation_shift)) %
3819 map->row_cnt;
3820
3821 map_index = (first_group *
3822 (map->row_cnt * total_disks_per_row)) +
3823 (map_row * total_disks_per_row) + first_column;
3824 break;
3825 default:
3826 return IO_ACCEL_INELIGIBLE;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003827 }
Scott Teel6b80b182014-02-18 13:56:55 -06003828
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003829 disk_handle = dd[map_index].ioaccel_handle;
3830 disk_block = map->disk_starting_blk + (first_row * map->strip_size) +
3831 (first_row_offset - (first_column * map->strip_size));
3832 disk_block_cnt = block_cnt;
3833
3834 /* handle differing logical/physical block sizes */
3835 if (map->phys_blk_shift) {
3836 disk_block <<= map->phys_blk_shift;
3837 disk_block_cnt <<= map->phys_blk_shift;
3838 }
3839 BUG_ON(disk_block_cnt > 0xffff);
3840
3841 /* build the new CDB for the physical disk I/O */
3842 if (disk_block > 0xffffffff) {
3843 cdb[0] = is_write ? WRITE_16 : READ_16;
3844 cdb[1] = 0;
3845 cdb[2] = (u8) (disk_block >> 56);
3846 cdb[3] = (u8) (disk_block >> 48);
3847 cdb[4] = (u8) (disk_block >> 40);
3848 cdb[5] = (u8) (disk_block >> 32);
3849 cdb[6] = (u8) (disk_block >> 24);
3850 cdb[7] = (u8) (disk_block >> 16);
3851 cdb[8] = (u8) (disk_block >> 8);
3852 cdb[9] = (u8) (disk_block);
3853 cdb[10] = (u8) (disk_block_cnt >> 24);
3854 cdb[11] = (u8) (disk_block_cnt >> 16);
3855 cdb[12] = (u8) (disk_block_cnt >> 8);
3856 cdb[13] = (u8) (disk_block_cnt);
3857 cdb[14] = 0;
3858 cdb[15] = 0;
3859 cdb_len = 16;
3860 } else {
3861 cdb[0] = is_write ? WRITE_10 : READ_10;
3862 cdb[1] = 0;
3863 cdb[2] = (u8) (disk_block >> 24);
3864 cdb[3] = (u8) (disk_block >> 16);
3865 cdb[4] = (u8) (disk_block >> 8);
3866 cdb[5] = (u8) (disk_block);
3867 cdb[6] = 0;
3868 cdb[7] = (u8) (disk_block_cnt >> 8);
3869 cdb[8] = (u8) (disk_block_cnt);
3870 cdb[9] = 0;
3871 cdb_len = 10;
3872 }
3873 return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len,
3874 dev->scsi3addr);
3875}
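
/*
 * Worked example of the mapping math above, for a hypothetical RAID 0
 * layout with strip_size = 128 and data_disks_per_row = 3
 * (blocks_per_row = 384): a READ of 8 blocks at first_block = 500 gives
 * first_row = last_row = 1, first_row_offset = 116 and first_column =
 * last_column = 0, so the request is eligible and goes to the disk at
 * map_index = map_row * total_disks_per_row + 0, starting at
 * disk_starting_blk + 1 * 128 + 116 = disk_starting_blk + 244.
 */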
3876
Jeff Garzikf2812332010-11-16 02:10:29 -05003877static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003878 void (*done)(struct scsi_cmnd *))
3879{
3880 struct ctlr_info *h;
3881 struct hpsa_scsi_dev_t *dev;
3882 unsigned char scsi3addr[8];
3883 struct CommandList *c;
3884 unsigned long flags;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003885 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003886
3887 /* Get the ptr to our adapter structure out of cmd->host. */
3888 h = sdev_to_hba(cmd->device);
3889 dev = cmd->device->hostdata;
3890 if (!dev) {
3891 cmd->result = DID_NO_CONNECT << 16;
3892 done(cmd);
3893 return 0;
3894 }
3895 memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr));
3896
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003897 spin_lock_irqsave(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05003898 if (unlikely(h->lockup_detected)) {
3899 spin_unlock_irqrestore(&h->lock, flags);
3900 cmd->result = DID_ERROR << 16;
3901 done(cmd);
3902 return 0;
3903 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003904 spin_unlock_irqrestore(&h->lock, flags);
Matt Gatese16a33a2012-05-01 11:43:11 -05003905 c = cmd_alloc(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003906 if (c == NULL) { /* trouble... */
3907 dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n");
3908 return SCSI_MLQUEUE_HOST_BUSY;
3909 }
3910
3911 /* Fill in the command list header */
3912
3913 cmd->scsi_done = done; /* save this for use by completion code */
3914
3915 /* save c in case we have to abort it */
3916 cmd->host_scribble = (unsigned char *) c;
3917
3918 c->cmd_type = CMD_SCSI;
3919 c->scsi_cmd = cmd;
Matt Gatese1f7de02014-02-18 13:55:17 -06003920
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003921 /* Call alternate submit routine for I/O accelerated commands.
3922 * Retries always go down the normal I/O path.
3923 */
3924 if (likely(cmd->retries == 0 &&
Scott Teelda0697b2014-02-18 13:57:00 -06003925 cmd->request->cmd_type == REQ_TYPE_FS &&
3926 h->acciopath_status)) {
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06003927 if (dev->offload_enabled) {
3928 rc = hpsa_scsi_ioaccel_raid_map(h, c);
3929 if (rc == 0)
3930 return 0; /* Sent on ioaccel path */
3931 if (rc < 0) { /* scsi_dma_map failed. */
3932 cmd_free(h, c);
3933 return SCSI_MLQUEUE_HOST_BUSY;
3934 }
3935 } else if (dev->ioaccel_handle) {
3936 rc = hpsa_scsi_ioaccel_direct_map(h, c);
3937 if (rc == 0)
3938 return 0; /* Sent on direct map path */
3939 if (rc < 0) { /* scsi_dma_map failed. */
3940 cmd_free(h, c);
3941 return SCSI_MLQUEUE_HOST_BUSY;
3942 }
3943 }
3944 }
Matt Gatese1f7de02014-02-18 13:55:17 -06003945
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003946 c->Header.ReplyQueue = 0; /* unused in simple mode */
3947 memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
Don Brace303932f2010-02-04 08:42:40 -06003948 c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT);
3949 c->Header.Tag.lower |= DIRECT_LOOKUP_BIT;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003950
3951 /* Fill in the request block... */
3952
3953 c->Request.Timeout = 0;
3954 memset(c->Request.CDB, 0, sizeof(c->Request.CDB));
3955 BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB));
3956 c->Request.CDBLen = cmd->cmd_len;
3957 memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len);
3958 c->Request.Type.Type = TYPE_CMD;
3959 c->Request.Type.Attribute = ATTR_SIMPLE;
3960 switch (cmd->sc_data_direction) {
3961 case DMA_TO_DEVICE:
3962 c->Request.Type.Direction = XFER_WRITE;
3963 break;
3964 case DMA_FROM_DEVICE:
3965 c->Request.Type.Direction = XFER_READ;
3966 break;
3967 case DMA_NONE:
3968 c->Request.Type.Direction = XFER_NONE;
3969 break;
3970 case DMA_BIDIRECTIONAL:
3971 /* This can happen if a buggy application does a scsi passthru
3972 * and sets both inlen and outlen to non-zero. ( see
3973 * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
3974 */
3975
3976 c->Request.Type.Direction = XFER_RSVD;
3977 /* This is technically wrong, and hpsa controllers should
3978 * reject it with CMD_INVALID, which is the most correct
3979 * response, but non-fibre backends appear to let it
3980 * slide by, and give the same results as if this field
3981 * were set correctly. Either way is acceptable for
3982 * our purposes here.
3983 */
3984
3985 break;
3986
3987 default:
3988 dev_err(&h->pdev->dev, "unknown data direction: %d\n",
3989 cmd->sc_data_direction);
3990 BUG();
3991 break;
3992 }
3993
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06003994 if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08003995 cmd_free(h, c);
3996 return SCSI_MLQUEUE_HOST_BUSY;
3997 }
3998 enqueue_cmd_and_start_io(h, c);
3999 /* the cmd'll come back via intr handler in complete_scsi_command() */
4000 return 0;
4001}
4002
Jeff Garzikf2812332010-11-16 02:10:29 -05004003static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
4004
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004005static int do_not_scan_if_controller_locked_up(struct ctlr_info *h)
4006{
4007 unsigned long flags;
4008
4009 /*
4010 * Don't let rescans be initiated on a controller known
4011 * to be locked up. If the controller locks up *during*
4012 * a rescan, that thread is probably hosed, but at least
4013 * we can prevent new rescan threads from piling up on a
4014 * locked up controller.
4015 */
4016 spin_lock_irqsave(&h->lock, flags);
4017 if (unlikely(h->lockup_detected)) {
4018 spin_unlock_irqrestore(&h->lock, flags);
4019 spin_lock_irqsave(&h->scan_lock, flags);
4020 h->scan_finished = 1;
4021 wake_up_all(&h->scan_wait_queue);
4022 spin_unlock_irqrestore(&h->scan_lock, flags);
4023 return 1;
4024 }
4025 spin_unlock_irqrestore(&h->lock, flags);
4026 return 0;
4027}
4028
Stephen M. Camerona08a84712010-02-04 08:43:16 -06004029static void hpsa_scan_start(struct Scsi_Host *sh)
4030{
4031 struct ctlr_info *h = shost_to_hba(sh);
4032 unsigned long flags;
4033
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004034 if (do_not_scan_if_controller_locked_up(h))
4035 return;
4036
Stephen M. Camerona08a84712010-02-04 08:43:16 -06004037 /* wait until any scan already in progress is finished. */
4038 while (1) {
4039 spin_lock_irqsave(&h->scan_lock, flags);
4040 if (h->scan_finished)
4041 break;
4042 spin_unlock_irqrestore(&h->scan_lock, flags);
4043 wait_event(h->scan_wait_queue, h->scan_finished);
4044 /* Note: We don't need to worry about a race between this
4045 * thread and driver unload because the midlayer will
4046 * have incremented the reference count, so unload won't
4047 * happen if we're in here.
4048 */
4049 }
4050 h->scan_finished = 0; /* mark scan as in progress */
4051 spin_unlock_irqrestore(&h->scan_lock, flags);
4052
Stephen M. Cameron5f389362014-02-18 13:55:48 -06004053 if (do_not_scan_if_controller_locked_up(h))
4054 return;
4055
Stephen M. Camerona08a84712010-02-04 08:43:16 -06004056 hpsa_update_scsi_devices(h, h->scsi_host->host_no);
4057
4058 spin_lock_irqsave(&h->scan_lock, flags);
4059 h->scan_finished = 1; /* mark scan as finished. */
4060 wake_up_all(&h->scan_wait_queue);
4061 spin_unlock_irqrestore(&h->scan_lock, flags);
4062}
4063
4064static int hpsa_scan_finished(struct Scsi_Host *sh,
4065 unsigned long elapsed_time)
4066{
4067 struct ctlr_info *h = shost_to_hba(sh);
4068 unsigned long flags;
4069 int finished;
4070
4071 spin_lock_irqsave(&h->scan_lock, flags);
4072 finished = h->scan_finished;
4073 spin_unlock_irqrestore(&h->scan_lock, flags);
4074 return finished;
4075}
4076
Stephen M. Cameron667e23d2010-02-25 14:02:51 -06004077static int hpsa_change_queue_depth(struct scsi_device *sdev,
4078 int qdepth, int reason)
4079{
4080 struct ctlr_info *h = sdev_to_hba(sdev);
4081
4082 if (reason != SCSI_QDEPTH_DEFAULT)
4083 return -ENOTSUPP;
4084
4085 if (qdepth < 1)
4086 qdepth = 1;
4087 else
4088 if (qdepth > h->nr_cmds)
4089 qdepth = h->nr_cmds;
4090 scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4091 return sdev->queue_depth;
4092}
4093
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004094static void hpsa_unregister_scsi(struct ctlr_info *h)
4095{
4096 /* we are being forcibly unloaded, and may not refuse. */
4097 scsi_remove_host(h->scsi_host);
4098 scsi_host_put(h->scsi_host);
4099 h->scsi_host = NULL;
4100}
4101
4102static int hpsa_register_scsi(struct ctlr_info *h)
4103{
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004104 struct Scsi_Host *sh;
4105 int error;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004106
Stephen M. Cameronb7056902012-01-19 14:00:53 -06004107 sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h));
4108 if (sh == NULL)
4109 goto fail;
4110
4111 sh->io_port = 0;
4112 sh->n_io_port = 0;
4113 sh->this_id = -1;
4114 sh->max_channel = 3;
4115 sh->max_cmd_len = MAX_COMMAND_SIZE;
4116 sh->max_lun = HPSA_MAX_LUN;
4117 sh->max_id = HPSA_MAX_LUN;
4118 sh->can_queue = h->nr_cmds;
4119 sh->cmd_per_lun = h->nr_cmds;
4120 sh->sg_tablesize = h->maxsgentries;
4121 h->scsi_host = sh;
4122 sh->hostdata[0] = (unsigned long) h;
4123 sh->irq = h->intr[h->intr_mode];
4124 sh->unique_id = sh->irq;
4125 error = scsi_add_host(sh, &h->pdev->dev);
4126 if (error)
4127 goto fail_host_put;
4128 scsi_scan_host(sh);
4129 return 0;
4130
4131 fail_host_put:
4132 dev_err(&h->pdev->dev, "%s: scsi_add_host"
4133 " failed for controller %d\n", __func__, h->ctlr);
4134 scsi_host_put(sh);
4135 return error;
4136 fail:
4137 dev_err(&h->pdev->dev, "%s: scsi_host_alloc"
4138 " failed for controller %d\n", __func__, h->ctlr);
4139 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004140}
4141
4142static int wait_for_device_to_become_ready(struct ctlr_info *h,
4143 unsigned char lunaddr[])
4144{
Tomas Henzl89193582014-02-21 16:25:05 -06004145 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004146 int count = 0;
4147 int waittime = 1; /* seconds */
4148 struct CommandList *c;
4149
4150 c = cmd_special_alloc(h);
4151 if (!c) {
4152 dev_warn(&h->pdev->dev, "out of memory in "
4153 "wait_for_device_to_become_ready.\n");
4154 return IO_ERROR;
4155 }
4156
4157 /* Send test unit ready until device ready, or give up. */
4158 while (count < HPSA_TUR_RETRY_LIMIT) {
4159
4160 /* Wait for a bit. Do this first, because if we send
4161 * the TUR right away, the reset will just abort it.
4162 */
4163 msleep(1000 * waittime);
4164 count++;
Tomas Henzl89193582014-02-21 16:25:05 -06004165 rc = 0; /* Device ready. */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004166
4167 /* Increase wait time with each try, up to a point. */
4168 if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
4169 waittime = waittime * 2;
4170
Stephen M. Camerona2dac132013-02-20 11:24:41 -06004171 /* Send the Test Unit Ready, fill_cmd can't fail, no mapping */
4172 (void) fill_cmd(c, TEST_UNIT_READY, h,
4173 NULL, 0, 0, lunaddr, TYPE_CMD);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004174 hpsa_scsi_do_simple_cmd_core(h, c);
4175 /* no unmap needed here because no data xfer. */
4176
4177 if (c->err_info->CommandStatus == CMD_SUCCESS)
4178 break;
4179
4180 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
4181 c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION &&
4182 (c->err_info->SenseInfo[2] == NO_SENSE ||
4183 c->err_info->SenseInfo[2] == UNIT_ATTENTION))
4184 break;
4185
4186 dev_warn(&h->pdev->dev, "waiting %d secs "
4187 "for device to become ready.\n", waittime);
4188 rc = 1; /* device not ready. */
4189 }
4190
4191 if (rc)
4192 dev_warn(&h->pdev->dev, "giving up on device.\n");
4193 else
4194 dev_warn(&h->pdev->dev, "device is ready.\n");
4195
4196 cmd_special_free(h, c);
4197 return rc;
4198}
4199
4200/* Need at least one of these error handlers to keep ../scsi/hosts.c from
4201 * complaining. Doing a host- or bus-reset can't do anything good here.
4202 */
4203static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
4204{
4205 int rc;
4206 struct ctlr_info *h;
4207 struct hpsa_scsi_dev_t *dev;
4208
4209 /* find the controller to which the command to be aborted was sent */
4210 h = sdev_to_hba(scsicmd->device);
4211 if (h == NULL) /* paranoia */
4212 return FAILED;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004213 dev = scsicmd->device->hostdata;
4214 if (!dev) {
4215 dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: "
4216 "device lookup failed.\n");
4217 return FAILED;
4218 }
Stephen M. Camerond416b0c2010-02-04 08:43:21 -06004219 dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",
4220 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004221 /* send a reset to the SCSI LUN which the command was sent to */
Scott Teelbf711ac2014-02-18 13:56:39 -06004222 rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004223 if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)
4224 return SUCCESS;
4225
4226 dev_warn(&h->pdev->dev, "resetting device failed.\n");
4227 return FAILED;
4228}
4229
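/*
 * The abort tag is 8 bytes, handled as two 32-bit words.  Controllers
 * that want the tag "swizzled" expect the byte order reversed within
 * each word, so bytes 0 1 2 3 4 5 6 7 become 3 2 1 0 7 6 5 4; see
 * hpsa_send_abort_both_ways() below for when this is applied.
 */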
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004230static void swizzle_abort_tag(u8 *tag)
4231{
4232 u8 original_tag[8];
4233
4234 memcpy(original_tag, tag, 8);
4235 tag[0] = original_tag[3];
4236 tag[1] = original_tag[2];
4237 tag[2] = original_tag[1];
4238 tag[3] = original_tag[0];
4239 tag[4] = original_tag[7];
4240 tag[5] = original_tag[6];
4241 tag[6] = original_tag[5];
4242 tag[7] = original_tag[4];
4243}
4244
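/*
 * Extract the tag of a command regardless of which command format was
 * used: ioaccel1 and ioaccel2 commands carry the tag in their own
 * structures, everything else uses c->Header.Tag.  ioaccel2 has no
 * upper tag word, so *tagupper is zeroed in that case.
 */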
Scott Teel17eb87d2014-02-18 13:55:28 -06004245static void hpsa_get_tag(struct ctlr_info *h,
4246 struct CommandList *c, u32 *taglower, u32 *tagupper)
4247{
4248 if (c->cmd_type == CMD_IOACCEL1) {
4249 struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *)
4250 &h->ioaccel_cmd_pool[c->cmdindex];
4251 *tagupper = cm1->Tag.upper;
4252 *taglower = cm1->Tag.lower;
Scott Teel54b6e9e2014-02-18 13:56:45 -06004253 return;
Scott Teel17eb87d2014-02-18 13:55:28 -06004254 }
Scott Teel54b6e9e2014-02-18 13:56:45 -06004255 if (c->cmd_type == CMD_IOACCEL2) {
4256 struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *)
4257 &h->ioaccel2_cmd_pool[c->cmdindex];
Scott Teeldd0e19f2014-02-18 13:57:31 -06004258 /* upper tag not used in ioaccel2 mode */
4259 memset(tagupper, 0, sizeof(*tagupper));
4260 *taglower = cm2->Tag;
Scott Teel54b6e9e2014-02-18 13:56:45 -06004261 return;
4262 }
4263 *tagupper = c->Header.Tag.upper;
4264 *taglower = c->Header.Tag.lower;
Scott Teel17eb87d2014-02-18 13:55:28 -06004265}
4266
Scott Teel54b6e9e2014-02-18 13:56:45 -06004267
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004268static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004269 struct CommandList *abort, int swizzle)
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004270{
4271 int rc = IO_OK;
4272 struct CommandList *c;
4273 struct ErrorInfo *ei;
Scott Teel17eb87d2014-02-18 13:55:28 -06004274 u32 tagupper, taglower;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004275
4276 c = cmd_special_alloc(h);
4277 if (c == NULL) { /* trouble... */
4278 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
4279 return -ENOMEM;
4280 }
4281
Stephen M. Camerona2dac132013-02-20 11:24:41 -06004282 /* fill_cmd can't fail here, no buffer to map */
4283 (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
4284 0, 0, scsi3addr, TYPE_MSG);
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004285 if (swizzle)
4286 swizzle_abort_tag(&c->Request.CDB[4]);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004287 hpsa_scsi_do_simple_cmd_core(h, c);
Scott Teel17eb87d2014-02-18 13:55:28 -06004288 hpsa_get_tag(h, abort, &taglower, &tagupper);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004289 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06004290 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004291 /* no unmap needed here because no data xfer. */
4292
4293 ei = c->err_info;
4294 switch (ei->CommandStatus) {
4295 case CMD_SUCCESS:
4296 break;
4297 case CMD_UNABORTABLE: /* Very common, don't make noise. */
4298 rc = -1;
4299 break;
4300 default:
4301 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n",
Scott Teel17eb87d2014-02-18 13:55:28 -06004302 __func__, tagupper, taglower);
Stephen M. Camerond1e8bea2014-02-18 13:57:47 -06004303 hpsa_scsi_interpret_error(h, c);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004304 rc = -1;
4305 break;
4306 }
4307 cmd_special_free(h, c);
Scott Teeldd0e19f2014-02-18 13:57:31 -06004308 dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n",
4309 __func__, tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004310 return rc;
4311}
4312
4313/*
4314 * hpsa_find_cmd_in_queue
4315 *
4316 * Used to determine whether a command (find) is still present
4317 * in queue_head. Optionally excludes the last element of queue_head.
4318 *
4319 * This is used to avoid unnecessary aborts. Commands in h->reqQ have
4320 * not yet been submitted, and so can be aborted by the driver without
4321 * sending an abort to the hardware.
4322 *
4323 * Returns pointer to command if found in queue, NULL otherwise.
4324 */
4325static struct CommandList *hpsa_find_cmd_in_queue(struct ctlr_info *h,
4326 struct scsi_cmnd *find, struct list_head *queue_head)
4327{
4328 unsigned long flags;
4329 struct CommandList *c = NULL; /* ptr into cmpQ */
4330
4331 if (!find)
4332		return NULL;
4333 spin_lock_irqsave(&h->lock, flags);
4334 list_for_each_entry(c, queue_head, list) {
4335 if (c->scsi_cmd == NULL) /* e.g.: passthru ioctl */
4336 continue;
4337 if (c->scsi_cmd == find) {
4338 spin_unlock_irqrestore(&h->lock, flags);
4339 return c;
4340 }
4341 }
4342 spin_unlock_irqrestore(&h->lock, flags);
4343 return NULL;
4344}
4345
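/*
 * Like hpsa_find_cmd_in_queue() above, but matches on the raw 8-byte
 * tag rather than on the scsi_cmnd pointer.  Used to check whether a
 * (possibly swizzled) abort tag is already present in a queue.
 */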
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004346static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,
4347 u8 *tag, struct list_head *queue_head)
4348{
4349 unsigned long flags;
4350 struct CommandList *c;
4351
4352 spin_lock_irqsave(&h->lock, flags);
4353 list_for_each_entry(c, queue_head, list) {
4354 if (memcmp(&c->Header.Tag, tag, 8) != 0)
4355 continue;
4356 spin_unlock_irqrestore(&h->lock, flags);
4357 return c;
4358 }
4359 spin_unlock_irqrestore(&h->lock, flags);
4360 return NULL;
4361}
4362
Scott Teel54b6e9e2014-02-18 13:56:45 -06004363/* ioaccel2 path firmware cannot handle abort task requests.
4364 * Change abort requests to physical target reset, and send to the
4365 * address of the physical disk used for the ioaccel 2 command.
4366 * Return 0 on success (IO_OK)
4367 * -1 on failure
4368 */
4369
4370static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h,
4371 unsigned char *scsi3addr, struct CommandList *abort)
4372{
4373 int rc = IO_OK;
4374 struct scsi_cmnd *scmd; /* scsi command within request being aborted */
4375 struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */
4376 unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */
4377 unsigned char *psa = &phys_scsi3addr[0];
4378
4379 /* Get a pointer to the hpsa logical device. */
4380 scmd = (struct scsi_cmnd *) abort->scsi_cmd;
4381 dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata);
4382 if (dev == NULL) {
4383 dev_warn(&h->pdev->dev,
4384 "Cannot abort: no device pointer for command.\n");
4385 return -1; /* not abortable */
4386 }
4387
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06004388 if (h->raid_offload_debug > 0)
4389 dev_info(&h->pdev->dev,
4390 "Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4391 h->scsi_host->host_no, dev->bus, dev->target, dev->lun,
4392 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
4393 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]);
4394
Scott Teel54b6e9e2014-02-18 13:56:45 -06004395 if (!dev->offload_enabled) {
4396 dev_warn(&h->pdev->dev,
4397 "Can't abort: device is not operating in HP SSD Smart Path mode.\n");
4398 return -1; /* not abortable */
4399 }
4400
4401 /* Incoming scsi3addr is logical addr. We need physical disk addr. */
4402 if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) {
4403 dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n");
4404 return -1; /* not abortable */
4405 }
4406
4407 /* send the reset */
Stephen M. Cameron2ba8bfc2014-02-18 13:57:52 -06004408 if (h->raid_offload_debug > 0)
4409 dev_info(&h->pdev->dev,
4410 "Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4411 psa[0], psa[1], psa[2], psa[3],
4412 psa[4], psa[5], psa[6], psa[7]);
Scott Teel54b6e9e2014-02-18 13:56:45 -06004413 rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET);
4414 if (rc != 0) {
4415 dev_warn(&h->pdev->dev,
4416 "Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4417 psa[0], psa[1], psa[2], psa[3],
4418 psa[4], psa[5], psa[6], psa[7]);
4419 return rc; /* failed to reset */
4420 }
4421
4422 /* wait for device to recover */
4423 if (wait_for_device_to_become_ready(h, psa) != 0) {
4424 dev_warn(&h->pdev->dev,
4425 "Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4426 psa[0], psa[1], psa[2], psa[3],
4427 psa[4], psa[5], psa[6], psa[7]);
4428 return -1; /* failed to recover */
4429 }
4430
4431 /* device recovered */
4432 dev_info(&h->pdev->dev,
4433 "Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
4434 psa[0], psa[1], psa[2], psa[3],
4435 psa[4], psa[5], psa[6], psa[7]);
4436
4437 return rc; /* success */
4438}
4439
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004440/* Some Smart Arrays need the abort tag swizzled, and some don't. It's hard to
4441 * tell which kind we're dealing with, so we send the abort both ways. There
4442 * shouldn't be any collisions between swizzled and unswizzled tags due to the
4443 * way we construct our tags but we check anyway in case the assumptions which
4444 * make this true someday become false.
4445 */
4446static int hpsa_send_abort_both_ways(struct ctlr_info *h,
4447 unsigned char *scsi3addr, struct CommandList *abort)
4448{
4449 u8 swizzled_tag[8];
4450 struct CommandList *c;
4451 int rc = 0, rc2 = 0;
4452
Scott Teel54b6e9e2014-02-18 13:56:45 -06004453	/* ioaccelerator mode 2 commands should be aborted via the
4454	 * accelerated path, since the RAID path is unaware of these commands,
4455	 * but the underlying firmware can't handle an abort TMF.
4456 * Change abort to physical device reset.
4457 */
4458 if (abort->cmd_type == CMD_IOACCEL2)
4459 return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort);
4460
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004461 /* we do not expect to find the swizzled tag in our queue, but
4462 * check anyway just to be sure the assumptions which make this
4463 * the case haven't become wrong.
4464 */
4465 memcpy(swizzled_tag, &abort->Request.CDB[4], 8);
4466 swizzle_abort_tag(swizzled_tag);
4467 c = hpsa_find_cmd_in_queue_by_tag(h, swizzled_tag, &h->cmpQ);
4468 if (c != NULL) {
4469 dev_warn(&h->pdev->dev, "Unexpectedly found byte-swapped tag in completion queue.\n");
4470 return hpsa_send_abort(h, scsi3addr, abort, 0);
4471 }
4472 rc = hpsa_send_abort(h, scsi3addr, abort, 0);
4473
4474 /* if the command is still in our queue, we can't conclude that it was
4475 * aborted (it might have just completed normally) but in any case
4476 * we don't need to try to abort it another way.
4477 */
4478 c = hpsa_find_cmd_in_queue(h, abort->scsi_cmd, &h->cmpQ);
4479 if (c)
4480 rc2 = hpsa_send_abort(h, scsi3addr, abort, 1);
4481 return rc && rc2;
4482}
4483
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004484/* Send an abort for the specified command.
4485 * If the device and controller support it,
4486 * send a task abort request.
4487 */
4488static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)
4489{
4490
4491 int i, rc;
4492 struct ctlr_info *h;
4493 struct hpsa_scsi_dev_t *dev;
4494 struct CommandList *abort; /* pointer to command to be aborted */
4495 struct CommandList *found;
4496 struct scsi_cmnd *as; /* ptr to scsi cmd inside aborted command. */
4497 char msg[256]; /* For debug messaging. */
4498 int ml = 0;
Scott Teel17eb87d2014-02-18 13:55:28 -06004499 u32 tagupper, taglower;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004500
4501 /* Find the controller of the command to be aborted */
4502 h = sdev_to_hba(sc->device);
4503 if (WARN(h == NULL,
4504 "ABORT REQUEST FAILED, Controller lookup failed.\n"))
4505 return FAILED;
4506
4507 /* Check that controller supports some kind of task abort */
4508 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags) &&
4509 !(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
4510 return FAILED;
4511
4512 memset(msg, 0, sizeof(msg));
4513 ml += sprintf(msg+ml, "ABORT REQUEST on C%d:B%d:T%d:L%d ",
4514 h->scsi_host->host_no, sc->device->channel,
4515 sc->device->id, sc->device->lun);
4516
4517 /* Find the device of the command to be aborted */
4518 dev = sc->device->hostdata;
4519 if (!dev) {
4520 dev_err(&h->pdev->dev, "%s FAILED, Device lookup failed.\n",
4521 msg);
4522 return FAILED;
4523 }
4524
4525 /* Get SCSI command to be aborted */
4526 abort = (struct CommandList *) sc->host_scribble;
4527 if (abort == NULL) {
4528 dev_err(&h->pdev->dev, "%s FAILED, Command to abort is NULL.\n",
4529 msg);
4530 return FAILED;
4531 }
Scott Teel17eb87d2014-02-18 13:55:28 -06004532 hpsa_get_tag(h, abort, &taglower, &tagupper);
4533 ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004534 as = (struct scsi_cmnd *) abort->scsi_cmd;
4535 if (as != NULL)
4536 ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ",
4537 as->cmnd[0], as->serial_number);
4538 dev_dbg(&h->pdev->dev, "%s\n", msg);
4539 dev_warn(&h->pdev->dev, "Abort request on C%d:B%d:T%d:L%d\n",
4540 h->scsi_host->host_no, dev->bus, dev->target, dev->lun);
4541
4542 /* Search reqQ to See if command is queued but not submitted,
4543 * if so, complete the command with aborted status and remove
4544 * it from the reqQ.
4545 */
4546 found = hpsa_find_cmd_in_queue(h, sc, &h->reqQ);
4547 if (found) {
4548 found->err_info->CommandStatus = CMD_ABORTED;
4549 finish_cmd(found);
4550 dev_info(&h->pdev->dev, "%s Request SUCCEEDED (driver queue).\n",
4551 msg);
4552 return SUCCESS;
4553 }
4554
4555 /* not in reqQ, if also not in cmpQ, must have already completed */
4556 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4557 if (!found) {
Stephen M. Camerond6ebd0f2012-07-26 11:34:17 -05004558 dev_dbg(&h->pdev->dev, "%s Request SUCCEEDED (not known to driver).\n",
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004559 msg);
4560 return SUCCESS;
4561 }
4562
4563 /*
4564 * Command is in flight, or possibly already completed
4565 * by the firmware (but not to the scsi mid layer) but we can't
4566 * distinguish which. Send the abort down.
4567 */
Stephen M. Cameron6cba3f12012-05-01 11:42:56 -05004568 rc = hpsa_send_abort_both_ways(h, dev->scsi3addr, abort);
Stephen M. Cameron75167d22012-05-01 11:42:51 -05004569 if (rc != 0) {
4570 dev_dbg(&h->pdev->dev, "%s Request FAILED.\n", msg);
4571 dev_warn(&h->pdev->dev, "FAILED abort on device C%d:B%d:T%d:L%d\n",
4572 h->scsi_host->host_no,
4573 dev->bus, dev->target, dev->lun);
4574 return FAILED;
4575 }
4576 dev_info(&h->pdev->dev, "%s REQUEST SUCCEEDED.\n", msg);
4577
4578 /* If the abort(s) above completed and actually aborted the
4579 * command, then the command to be aborted should already be
4580 * completed. If not, wait around a bit more to see if they
4581 * manage to complete normally.
4582 */
4583#define ABORT_COMPLETE_WAIT_SECS 30
4584 for (i = 0; i < ABORT_COMPLETE_WAIT_SECS * 10; i++) {
4585 found = hpsa_find_cmd_in_queue(h, sc, &h->cmpQ);
4586 if (!found)
4587 return SUCCESS;
4588 msleep(100);
4589 }
4590 dev_warn(&h->pdev->dev, "%s FAILED. Aborted command has not completed after %d seconds.\n",
4591 msg, ABORT_COMPLETE_WAIT_SECS);
4592 return FAILED;
4593}
4594
4595
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004596/*
4597 * For operations that cannot sleep, a command block is allocated at init,
4598 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
4599 * which ones are free or in use.  cmd_alloc() and cmd_free() acquire
4600 * h->lock internally, so do not call them with that lock already held.
4601 */
4602static struct CommandList *cmd_alloc(struct ctlr_info *h)
4603{
4604 struct CommandList *c;
4605 int i;
4606 union u64bit temp64;
4607 dma_addr_t cmd_dma_handle, err_dma_handle;
Matt Gatese16a33a2012-05-01 11:43:11 -05004608 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004609
Matt Gatese16a33a2012-05-01 11:43:11 -05004610 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004611 do {
4612 i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds);
Matt Gatese16a33a2012-05-01 11:43:11 -05004613 if (i == h->nr_cmds) {
4614 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004615 return NULL;
Matt Gatese16a33a2012-05-01 11:43:11 -05004616 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004617 } while (test_and_set_bit
4618 (i & (BITS_PER_LONG - 1),
4619 h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0);
Matt Gatese16a33a2012-05-01 11:43:11 -05004620 spin_unlock_irqrestore(&h->lock, flags);
4621
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004622 c = h->cmd_pool + i;
4623 memset(c, 0, sizeof(*c));
4624 cmd_dma_handle = h->cmd_pool_dhandle
4625 + i * sizeof(*c);
4626 c->err_info = h->errinfo_pool + i;
4627 memset(c->err_info, 0, sizeof(*c->err_info));
4628 err_dma_handle = h->errinfo_pool_dhandle
4629 + i * sizeof(*c->err_info);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004630
4631 c->cmdindex = i;
4632
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06004633 INIT_LIST_HEAD(&c->list);
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06004634 c->busaddr = (u32) cmd_dma_handle;
4635 temp64.val = (u64) err_dma_handle;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004636 c->ErrDesc.Addr.lower = temp64.val32.lower;
4637 c->ErrDesc.Addr.upper = temp64.val32.upper;
4638 c->ErrDesc.Len = sizeof(*c->err_info);
4639
4640 c->h = h;
4641 return c;
4642}
4643
4644/* For operations that can wait for kmalloc to possibly sleep,
4645 * this routine can be called. Lock need not be held to call
4646 * cmd_special_alloc. cmd_special_free() is the complement.
4647 */
4648static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
4649{
4650 struct CommandList *c;
4651 union u64bit temp64;
4652 dma_addr_t cmd_dma_handle, err_dma_handle;
4653
4654 c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle);
4655 if (c == NULL)
4656 return NULL;
4657 memset(c, 0, sizeof(*c));
4658
Matt Gatese1f7de02014-02-18 13:55:17 -06004659 c->cmd_type = CMD_SCSI;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004660 c->cmdindex = -1;
4661
4662 c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info),
4663 &err_dma_handle);
4664
4665 if (c->err_info == NULL) {
4666 pci_free_consistent(h->pdev,
4667 sizeof(*c), c, cmd_dma_handle);
4668 return NULL;
4669 }
4670 memset(c->err_info, 0, sizeof(*c->err_info));
4671
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06004672 INIT_LIST_HEAD(&c->list);
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06004673 c->busaddr = (u32) cmd_dma_handle;
4674 temp64.val = (u64) err_dma_handle;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004675 c->ErrDesc.Addr.lower = temp64.val32.lower;
4676 c->ErrDesc.Addr.upper = temp64.val32.upper;
4677 c->ErrDesc.Len = sizeof(*c->err_info);
4678
4679 c->h = h;
4680 return c;
4681}
4682
4683static void cmd_free(struct ctlr_info *h, struct CommandList *c)
4684{
4685 int i;
Matt Gatese16a33a2012-05-01 11:43:11 -05004686 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004687
4688 i = c - h->cmd_pool;
Matt Gatese16a33a2012-05-01 11:43:11 -05004689 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004690 clear_bit(i & (BITS_PER_LONG - 1),
4691 h->cmd_pool_bits + (i / BITS_PER_LONG));
Matt Gatese16a33a2012-05-01 11:43:11 -05004692 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004693}
4694
4695static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
4696{
4697 union u64bit temp64;
4698
4699 temp64.val32.lower = c->ErrDesc.Addr.lower;
4700 temp64.val32.upper = c->ErrDesc.Addr.upper;
4701 pci_free_consistent(h->pdev, sizeof(*c->err_info),
4702 c->err_info, (dma_addr_t) temp64.val);
4703 pci_free_consistent(h->pdev, sizeof(*c),
Stephen M. Camerond896f3f2011-01-06 14:47:53 -06004704 c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004705}
4706
4707#ifdef CONFIG_COMPAT
4708
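/*
 * 32-bit compat ioctl shims: copy the 32-bit ioctl structure from user
 * space into a native structure allocated with compat_alloc_user_space(),
 * hand that to the regular hpsa_ioctl(), then copy the error info back
 * into the caller's 32-bit structure.
 */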
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004709static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
4710{
4711 IOCTL32_Command_struct __user *arg32 =
4712 (IOCTL32_Command_struct __user *) arg;
4713 IOCTL_Command_struct arg64;
4714 IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
4715 int err;
4716 u32 cp;
4717
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06004718 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004719 err = 0;
4720 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4721 sizeof(arg64.LUN_info));
4722 err |= copy_from_user(&arg64.Request, &arg32->Request,
4723 sizeof(arg64.Request));
4724 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4725 sizeof(arg64.error_info));
4726 err |= get_user(arg64.buf_size, &arg32->buf_size);
4727 err |= get_user(cp, &arg32->buf);
4728 arg64.buf = compat_ptr(cp);
4729 err |= copy_to_user(p, &arg64, sizeof(arg64));
4730
4731 if (err)
4732 return -EFAULT;
4733
Stephen M. Camerone39eeae2010-02-04 08:43:46 -06004734 err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004735 if (err)
4736 return err;
4737 err |= copy_in_user(&arg32->error_info, &p->error_info,
4738 sizeof(arg32->error_info));
4739 if (err)
4740 return -EFAULT;
4741 return err;
4742}
4743
4744static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
4745 int cmd, void *arg)
4746{
4747 BIG_IOCTL32_Command_struct __user *arg32 =
4748 (BIG_IOCTL32_Command_struct __user *) arg;
4749 BIG_IOCTL_Command_struct arg64;
4750 BIG_IOCTL_Command_struct __user *p =
4751 compat_alloc_user_space(sizeof(arg64));
4752 int err;
4753 u32 cp;
4754
Vasiliy Kulikov938abd82011-01-07 10:55:53 -06004755 memset(&arg64, 0, sizeof(arg64));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004756 err = 0;
4757 err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
4758 sizeof(arg64.LUN_info));
4759 err |= copy_from_user(&arg64.Request, &arg32->Request,
4760 sizeof(arg64.Request));
4761 err |= copy_from_user(&arg64.error_info, &arg32->error_info,
4762 sizeof(arg64.error_info));
4763 err |= get_user(arg64.buf_size, &arg32->buf_size);
4764 err |= get_user(arg64.malloc_size, &arg32->malloc_size);
4765 err |= get_user(cp, &arg32->buf);
4766 arg64.buf = compat_ptr(cp);
4767 err |= copy_to_user(p, &arg64, sizeof(arg64));
4768
4769 if (err)
4770 return -EFAULT;
4771
Stephen M. Camerone39eeae2010-02-04 08:43:46 -06004772 err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004773 if (err)
4774 return err;
4775 err |= copy_in_user(&arg32->error_info, &p->error_info,
4776 sizeof(arg32->error_info));
4777 if (err)
4778 return -EFAULT;
4779 return err;
4780}
Stephen M. Cameron71fe75a2010-02-04 08:43:51 -06004781
4782static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg)
4783{
4784 switch (cmd) {
4785 case CCISS_GETPCIINFO:
4786 case CCISS_GETINTINFO:
4787 case CCISS_SETINTINFO:
4788 case CCISS_GETNODENAME:
4789 case CCISS_SETNODENAME:
4790 case CCISS_GETHEARTBEAT:
4791 case CCISS_GETBUSTYPES:
4792 case CCISS_GETFIRMVER:
4793 case CCISS_GETDRIVVER:
4794 case CCISS_REVALIDVOLS:
4795 case CCISS_DEREGDISK:
4796 case CCISS_REGNEWDISK:
4797 case CCISS_REGNEWD:
4798 case CCISS_RESCANDISK:
4799 case CCISS_GETLUNINFO:
4800 return hpsa_ioctl(dev, cmd, arg);
4801
4802 case CCISS_PASSTHRU32:
4803 return hpsa_ioctl32_passthru(dev, cmd, arg);
4804 case CCISS_BIG_PASSTHRU32:
4805 return hpsa_ioctl32_big_passthru(dev, cmd, arg);
4806
4807 default:
4808 return -ENOIOCTLCMD;
4809 }
4810}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004811#endif
4812
4813static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp)
4814{
4815 struct hpsa_pci_info pciinfo;
4816
4817 if (!argp)
4818 return -EINVAL;
4819 pciinfo.domain = pci_domain_nr(h->pdev->bus);
4820 pciinfo.bus = h->pdev->bus->number;
4821 pciinfo.dev_fn = h->pdev->devfn;
4822 pciinfo.board_id = h->board_id;
4823 if (copy_to_user(argp, &pciinfo, sizeof(pciinfo)))
4824 return -EFAULT;
4825 return 0;
4826}
4827
4828static int hpsa_getdrivver_ioctl(struct ctlr_info *h, void __user *argp)
4829{
4830 DriverVer_type DriverVer;
4831 unsigned char vmaj, vmin, vsubmin;
4832 int rc;
4833
4834 rc = sscanf(HPSA_DRIVER_VERSION, "%hhu.%hhu.%hhu",
4835 &vmaj, &vmin, &vsubmin);
4836 if (rc != 3) {
4837 dev_info(&h->pdev->dev, "driver version string '%s' "
4838 "unrecognized.", HPSA_DRIVER_VERSION);
4839 vmaj = 0;
4840 vmin = 0;
4841 vsubmin = 0;
4842 }
4843 DriverVer = (vmaj << 16) | (vmin << 8) | vsubmin;
4844 if (!argp)
4845 return -EINVAL;
4846 if (copy_to_user(argp, &DriverVer, sizeof(DriverVer_type)))
4847 return -EFAULT;
4848 return 0;
4849}
4850
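/*
 * CCISS_PASSTHRU: build a single-SG command from the user-supplied CDB
 * and optional data buffer, run it synchronously, then copy the error
 * info (and, for reads, the data) back out to user space.
 */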
4851static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4852{
4853 IOCTL_Command_struct iocommand;
4854 struct CommandList *c;
4855 char *buff = NULL;
4856 union u64bit temp64;
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004857 int rc = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004858
4859 if (!argp)
4860 return -EINVAL;
4861 if (!capable(CAP_SYS_RAWIO))
4862 return -EPERM;
4863 if (copy_from_user(&iocommand, argp, sizeof(iocommand)))
4864 return -EFAULT;
4865 if ((iocommand.buf_size < 1) &&
4866 (iocommand.Request.Type.Direction != XFER_NONE)) {
4867 return -EINVAL;
4868 }
4869 if (iocommand.buf_size > 0) {
4870 buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
4871 if (buff == NULL)
4872			return -ENOMEM;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004873 if (iocommand.Request.Type.Direction == XFER_WRITE) {
4874 /* Copy the data into the buffer we created */
4875 if (copy_from_user(buff, iocommand.buf,
4876 iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004877 rc = -EFAULT;
4878 goto out_kfree;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004879 }
4880 } else {
4881 memset(buff, 0, iocommand.buf_size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004882 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004883 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004884 c = cmd_special_alloc(h);
4885 if (c == NULL) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004886 rc = -ENOMEM;
4887 goto out_kfree;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004888 }
4889 /* Fill in the command type */
4890 c->cmd_type = CMD_IOCTL_PEND;
4891 /* Fill in Command Header */
4892 c->Header.ReplyQueue = 0; /* unused in simple mode */
4893 if (iocommand.buf_size > 0) { /* buffer to fill */
4894 c->Header.SGList = 1;
4895 c->Header.SGTotal = 1;
4896 } else { /* no buffers to fill */
4897 c->Header.SGList = 0;
4898 c->Header.SGTotal = 0;
4899 }
4900 memcpy(&c->Header.LUN, &iocommand.LUN_info, sizeof(c->Header.LUN));
4901	/* use the kernel address of the cmd block for the tag */
4902 c->Header.Tag.lower = c->busaddr;
4903
4904 /* Fill in Request block */
4905 memcpy(&c->Request, &iocommand.Request,
4906 sizeof(c->Request));
4907
4908 /* Fill in the scatter gather information */
4909 if (iocommand.buf_size > 0) {
4910 temp64.val = pci_map_single(h->pdev, buff,
4911 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06004912 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
4913 c->SG[0].Addr.lower = 0;
4914 c->SG[0].Addr.upper = 0;
4915 c->SG[0].Len = 0;
4916 rc = -ENOMEM;
4917 goto out;
4918 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004919 c->SG[0].Addr.lower = temp64.val32.lower;
4920 c->SG[0].Addr.upper = temp64.val32.upper;
4921 c->SG[0].Len = iocommand.buf_size;
Matt Gatese1d9cbf2014-02-18 13:55:12 -06004922 c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004923 }
Stephen M. Camerona0c12412011-10-26 16:22:04 -05004924 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
Stephen M. Cameronc2dd32e2011-06-03 09:57:29 -05004925 if (iocommand.buf_size > 0)
4926 hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004927 check_ioctl_unit_attention(h, c);
4928
4929 /* Copy the error information out */
4930 memcpy(&iocommand.error_info, c->err_info,
4931 sizeof(iocommand.error_info));
4932 if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004933 rc = -EFAULT;
4934 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004935 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06004936 if (iocommand.Request.Type.Direction == XFER_READ &&
4937 iocommand.buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004938 /* Copy the data out of the buffer we created */
4939 if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004940 rc = -EFAULT;
4941 goto out;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004942 }
4943 }
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004944out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004945 cmd_special_free(h, c);
Stephen M. Cameronc1f63c82013-02-20 11:24:52 -06004946out_kfree:
4947 kfree(buff);
4948 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004949}
4950
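/*
 * CCISS_BIG_PASSTHRU: like CCISS_PASSTHRU, but the data buffer may be
 * larger than a single kmalloc allocation.  It is split into up to
 * SG_ENTRIES_IN_CMD chunks of at most ioc->malloc_size bytes each, and
 * each chunk gets its own scatter-gather entry.
 */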
4951static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
4952{
4953 BIG_IOCTL_Command_struct *ioc;
4954 struct CommandList *c;
4955 unsigned char **buff = NULL;
4956 int *buff_size = NULL;
4957 union u64bit temp64;
4958 BYTE sg_used = 0;
4959 int status = 0;
4960 int i;
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06004961 u32 left;
4962 u32 sz;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004963 BYTE __user *data_ptr;
4964
4965 if (!argp)
4966 return -EINVAL;
4967 if (!capable(CAP_SYS_RAWIO))
4968 return -EPERM;
4969	ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
4971 if (!ioc) {
4972 status = -ENOMEM;
4973 goto cleanup1;
4974 }
4975 if (copy_from_user(ioc, argp, sizeof(*ioc))) {
4976 status = -EFAULT;
4977 goto cleanup1;
4978 }
4979 if ((ioc->buf_size < 1) &&
4980 (ioc->Request.Type.Direction != XFER_NONE)) {
4981 status = -EINVAL;
4982 goto cleanup1;
4983 }
4984 /* Check kmalloc limits using all SGs */
4985 if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
4986 status = -EINVAL;
4987 goto cleanup1;
4988 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06004989 if (ioc->buf_size > ioc->malloc_size * SG_ENTRIES_IN_CMD) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004990 status = -EINVAL;
4991 goto cleanup1;
4992 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06004993 buff = kzalloc(SG_ENTRIES_IN_CMD * sizeof(char *), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004994 if (!buff) {
4995 status = -ENOMEM;
4996 goto cleanup1;
4997 }
Stephen M. Camerond66ae082012-01-19 14:00:48 -06004998 buff_size = kmalloc(SG_ENTRIES_IN_CMD * sizeof(int), GFP_KERNEL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08004999 if (!buff_size) {
5000 status = -ENOMEM;
5001 goto cleanup1;
5002 }
5003 left = ioc->buf_size;
5004 data_ptr = ioc->buf;
5005 while (left) {
5006 sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
5007 buff_size[sg_used] = sz;
5008 buff[sg_used] = kmalloc(sz, GFP_KERNEL);
5009 if (buff[sg_used] == NULL) {
5010 status = -ENOMEM;
5011 goto cleanup1;
5012 }
5013 if (ioc->Request.Type.Direction == XFER_WRITE) {
5014 if (copy_from_user(buff[sg_used], data_ptr, sz)) {
5015				status = -EFAULT;
5016 goto cleanup1;
5017 }
5018 } else
5019 memset(buff[sg_used], 0, sz);
5020 left -= sz;
5021 data_ptr += sz;
5022 sg_used++;
5023 }
5024 c = cmd_special_alloc(h);
5025 if (c == NULL) {
5026 status = -ENOMEM;
5027 goto cleanup1;
5028 }
5029 c->cmd_type = CMD_IOCTL_PEND;
5030 c->Header.ReplyQueue = 0;
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005031 c->Header.SGList = c->Header.SGTotal = sg_used;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005032 memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
5033 c->Header.Tag.lower = c->busaddr;
5034 memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
5035 if (ioc->buf_size > 0) {
5036 int i;
5037 for (i = 0; i < sg_used; i++) {
5038 temp64.val = pci_map_single(h->pdev, buff[i],
5039 buff_size[i], PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005040 if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
5041 c->SG[i].Addr.lower = 0;
5042 c->SG[i].Addr.upper = 0;
5043 c->SG[i].Len = 0;
5044 hpsa_pci_unmap(h->pdev, c, i,
5045 PCI_DMA_BIDIRECTIONAL);
5046 status = -ENOMEM;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005047 goto cleanup0;
Stephen M. Cameronbcc48ff2013-02-20 11:24:57 -06005048 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005049 c->SG[i].Addr.lower = temp64.val32.lower;
5050 c->SG[i].Addr.upper = temp64.val32.upper;
5051 c->SG[i].Len = buff_size[i];
Matt Gatese1d9cbf2014-02-18 13:55:12 -06005052 c->SG[i].Ext = i < sg_used - 1 ? 0 : HPSA_SG_LAST;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005053 }
5054 }
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005055 hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005056 if (sg_used)
5057 hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005058 check_ioctl_unit_attention(h, c);
5059 /* Copy the error information out */
5060 memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
5061 if (copy_to_user(argp, ioc, sizeof(*ioc))) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005062 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005063 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005064 }
Stephen M. Cameronb03a7772011-01-06 14:47:48 -06005065 if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005066 /* Copy the data out of the buffer we created */
5067 BYTE __user *ptr = ioc->buf;
5068 for (i = 0; i < sg_used; i++) {
5069 if (copy_to_user(ptr, buff[i], buff_size[i])) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005070 status = -EFAULT;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005071 goto cleanup0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005072 }
5073 ptr += buff_size[i];
5074 }
5075 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005076 status = 0;
Stephen M. Camerone2d4a1f2013-09-23 13:33:51 -05005077cleanup0:
5078 cmd_special_free(h, c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005079cleanup1:
5080 if (buff) {
5081 for (i = 0; i < sg_used; i++)
5082 kfree(buff[i]);
5083 kfree(buff);
5084 }
5085 kfree(buff_size);
5086 kfree(ioc);
5087 return status;
5088}
5089
5090static void check_ioctl_unit_attention(struct ctlr_info *h,
5091 struct CommandList *c)
5092{
5093 if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
5094 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
5095 (void) check_for_unit_attention(h, c);
5096}
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005097
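/*
 * Throttle passthru ioctls: at most HPSA_MAX_CONCURRENT_PASSTHRUS may be
 * in flight at once.  increment_passthru_count() returns -1 when the
 * limit is reached, and hpsa_ioctl() turns that into -EAGAIN.
 */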
5098static int increment_passthru_count(struct ctlr_info *h)
5099{
5100 unsigned long flags;
5101
5102 spin_lock_irqsave(&h->passthru_count_lock, flags);
5103 if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) {
5104 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5105 return -1;
5106 }
5107 h->passthru_count++;
5108 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5109 return 0;
5110}
5111
5112static void decrement_passthru_count(struct ctlr_info *h)
5113{
5114 unsigned long flags;
5115
5116 spin_lock_irqsave(&h->passthru_count_lock, flags);
5117 if (h->passthru_count <= 0) {
5118 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5119 /* not expecting to get here. */
5120 dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n");
5121 return;
5122 }
5123 h->passthru_count--;
5124 spin_unlock_irqrestore(&h->passthru_count_lock, flags);
5125}
5126
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005127/*
5128 * ioctl
5129 */
5130static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
5131{
5132 struct ctlr_info *h;
5133 void __user *argp = (void __user *)arg;
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005134 int rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005135
5136 h = sdev_to_hba(dev);
5137
5138 switch (cmd) {
5139 case CCISS_DEREGDISK:
5140 case CCISS_REGNEWDISK:
5141 case CCISS_REGNEWD:
Stephen M. Camerona08a84712010-02-04 08:43:16 -06005142 hpsa_scan_start(h->scsi_host);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005143 return 0;
5144 case CCISS_GETPCIINFO:
5145 return hpsa_getpciinfo_ioctl(h, argp);
5146 case CCISS_GETDRIVVER:
5147 return hpsa_getdrivver_ioctl(h, argp);
5148 case CCISS_PASSTHRU:
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005149 if (increment_passthru_count(h))
5150 return -EAGAIN;
5151 rc = hpsa_passthru_ioctl(h, argp);
5152 decrement_passthru_count(h);
5153 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005154 case CCISS_BIG_PASSTHRU:
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05005155 if (increment_passthru_count(h))
5156 return -EAGAIN;
5157 rc = hpsa_big_passthru_ioctl(h, argp);
5158 decrement_passthru_count(h);
5159 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005160 default:
5161 return -ENOTTY;
5162 }
5163}
5164
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005165static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
5166 u8 reset_type)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005167{
5168 struct CommandList *c;
5169
5170 c = cmd_alloc(h);
5171 if (!c)
5172 return -ENOMEM;
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005173 /* fill_cmd can't fail here, no data buffer to map */
5174 (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005175 RAID_CTLR_LUNID, TYPE_MSG);
5176	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */
5177 c->waiting = NULL;
5178 enqueue_cmd_and_start_io(h, c);
5179 /* Don't wait for completion, the reset won't complete. Don't free
5180 * the command either. This is the last command we will send before
5181 * re-initializing everything, so it doesn't matter and won't leak.
5182 */
5183 return 0;
5184}
5185
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005186static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06005187 void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005188 int cmd_type)
5189{
5190 int pci_dir = XFER_NONE;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005191 struct CommandList *a; /* for commands to be aborted */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005192
5193 c->cmd_type = CMD_IOCTL_PEND;
5194 c->Header.ReplyQueue = 0;
5195 if (buff != NULL && size > 0) {
5196 c->Header.SGList = 1;
5197 c->Header.SGTotal = 1;
5198 } else {
5199 c->Header.SGList = 0;
5200 c->Header.SGTotal = 0;
5201 }
5202 c->Header.Tag.lower = c->busaddr;
5203 memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
5204
5205 c->Request.Type.Type = cmd_type;
5206 if (cmd_type == TYPE_CMD) {
5207 switch (cmd) {
5208 case HPSA_INQUIRY:
5209 /* are we trying to read a vital product page */
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06005210 if (page_code & VPD_PAGE) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005211 c->Request.CDB[1] = 0x01;
Stephen M. Cameronb7bb24e2014-02-18 13:57:11 -06005212 c->Request.CDB[2] = (page_code & 0xff);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005213 }
5214 c->Request.CDBLen = 6;
5215 c->Request.Type.Attribute = ATTR_SIMPLE;
5216 c->Request.Type.Direction = XFER_READ;
5217 c->Request.Timeout = 0;
5218 c->Request.CDB[0] = HPSA_INQUIRY;
5219 c->Request.CDB[4] = size & 0xFF;
5220 break;
5221 case HPSA_REPORT_LOG:
5222 case HPSA_REPORT_PHYS:
5223			/* Talking to the controller, so it's a physical command:
5224			   mode = 00, target = 0.  Nothing to write.
5225 */
5226 c->Request.CDBLen = 12;
5227 c->Request.Type.Attribute = ATTR_SIMPLE;
5228 c->Request.Type.Direction = XFER_READ;
5229 c->Request.Timeout = 0;
5230 c->Request.CDB[0] = cmd;
5231 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5232 c->Request.CDB[7] = (size >> 16) & 0xFF;
5233 c->Request.CDB[8] = (size >> 8) & 0xFF;
5234 c->Request.CDB[9] = size & 0xFF;
5235 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005236 case HPSA_CACHE_FLUSH:
5237 c->Request.CDBLen = 12;
5238 c->Request.Type.Attribute = ATTR_SIMPLE;
5239 c->Request.Type.Direction = XFER_WRITE;
5240 c->Request.Timeout = 0;
5241 c->Request.CDB[0] = BMIC_WRITE;
5242 c->Request.CDB[6] = BMIC_CACHE_FLUSH;
Stephen M. Cameronbb158ea2011-10-26 16:21:17 -05005243 c->Request.CDB[7] = (size >> 8) & 0xFF;
5244 c->Request.CDB[8] = size & 0xFF;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005245 break;
5246 case TEST_UNIT_READY:
5247 c->Request.CDBLen = 6;
5248 c->Request.Type.Attribute = ATTR_SIMPLE;
5249 c->Request.Type.Direction = XFER_NONE;
5250 c->Request.Timeout = 0;
5251 break;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06005252 case HPSA_GET_RAID_MAP:
5253 c->Request.CDBLen = 12;
5254 c->Request.Type.Attribute = ATTR_SIMPLE;
5255 c->Request.Type.Direction = XFER_READ;
5256 c->Request.Timeout = 0;
5257 c->Request.CDB[0] = HPSA_CISS_READ;
5258 c->Request.CDB[1] = cmd;
5259 c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */
5260 c->Request.CDB[7] = (size >> 16) & 0xFF;
5261 c->Request.CDB[8] = (size >> 8) & 0xFF;
5262 c->Request.CDB[9] = size & 0xFF;
5263 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005264 default:
5265			dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
5266 BUG();
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005267 return -1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005268 }
5269 } else if (cmd_type == TYPE_MSG) {
5270 switch (cmd) {
5271
5272 case HPSA_DEVICE_RESET_MSG:
5273 c->Request.CDBLen = 16;
5274			c->Request.Type.Type = TYPE_MSG; /* It is a MSG not a CMD */
5275 c->Request.Type.Attribute = ATTR_SIMPLE;
5276 c->Request.Type.Direction = XFER_NONE;
5277 c->Request.Timeout = 0; /* Don't time out */
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005278 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
5279 c->Request.CDB[0] = cmd;
Stephen M. Cameron21e89af2012-07-26 11:34:10 -05005280 c->Request.CDB[1] = HPSA_RESET_TYPE_LUN;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005281 /* If bytes 4-7 are zero, it means reset the */
5282 /* LunID device */
5283 c->Request.CDB[4] = 0x00;
5284 c->Request.CDB[5] = 0x00;
5285 c->Request.CDB[6] = 0x00;
5286 c->Request.CDB[7] = 0x00;
Stephen M. Cameron75167d22012-05-01 11:42:51 -05005287 break;
5288 case HPSA_ABORT_MSG:
5289 a = buff; /* point to command to be aborted */
5290 dev_dbg(&h->pdev->dev, "Abort Tag:0x%08x:%08x using request Tag:0x%08x:%08x\n",
5291 a->Header.Tag.upper, a->Header.Tag.lower,
5292 c->Header.Tag.upper, c->Header.Tag.lower);
5293 c->Request.CDBLen = 16;
5294 c->Request.Type.Type = TYPE_MSG;
5295 c->Request.Type.Attribute = ATTR_SIMPLE;
5296 c->Request.Type.Direction = XFER_WRITE;
5297 c->Request.Timeout = 0; /* Don't time out */
5298 c->Request.CDB[0] = HPSA_TASK_MANAGEMENT;
5299 c->Request.CDB[1] = HPSA_TMF_ABORT_TASK;
5300 c->Request.CDB[2] = 0x00; /* reserved */
5301 c->Request.CDB[3] = 0x00; /* reserved */
5302 /* Tag to abort goes in CDB[4]-CDB[11] */
5303 c->Request.CDB[4] = a->Header.Tag.lower & 0xFF;
5304 c->Request.CDB[5] = (a->Header.Tag.lower >> 8) & 0xFF;
5305 c->Request.CDB[6] = (a->Header.Tag.lower >> 16) & 0xFF;
5306 c->Request.CDB[7] = (a->Header.Tag.lower >> 24) & 0xFF;
5307 c->Request.CDB[8] = a->Header.Tag.upper & 0xFF;
5308 c->Request.CDB[9] = (a->Header.Tag.upper >> 8) & 0xFF;
5309 c->Request.CDB[10] = (a->Header.Tag.upper >> 16) & 0xFF;
5310 c->Request.CDB[11] = (a->Header.Tag.upper >> 24) & 0xFF;
5311 c->Request.CDB[12] = 0x00; /* reserved */
5312 c->Request.CDB[13] = 0x00; /* reserved */
5313 c->Request.CDB[14] = 0x00; /* reserved */
5314 c->Request.CDB[15] = 0x00; /* reserved */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005315 break;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005316 default:
5317 dev_warn(&h->pdev->dev, "unknown message type %d\n",
5318 cmd);
5319 BUG();
5320 }
5321 } else {
5322 dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type);
5323 BUG();
5324 }
5325
5326 switch (c->Request.Type.Direction) {
5327 case XFER_READ:
5328 pci_dir = PCI_DMA_FROMDEVICE;
5329 break;
5330 case XFER_WRITE:
5331 pci_dir = PCI_DMA_TODEVICE;
5332 break;
5333 case XFER_NONE:
5334 pci_dir = PCI_DMA_NONE;
5335 break;
5336 default:
5337 pci_dir = PCI_DMA_BIDIRECTIONAL;
5338 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06005339 if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
5340 return -1;
5341 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005342}
5343
5344/*
5345 * Map (physical) PCI mem into (virtual) kernel space
5346 */
5347static void __iomem *remap_pci_mem(ulong base, ulong size)
5348{
5349 ulong page_base = ((ulong) base) & PAGE_MASK;
5350 ulong page_offs = ((ulong) base) - page_base;
Stephen M. Cameron088ba34c2012-07-26 11:34:23 -05005351 void __iomem *page_remapped = ioremap_nocache(page_base,
5352 page_offs + size);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005353
5354 return page_remapped ? (page_remapped + page_offs) : NULL;
5355}
5356
5357/* Takes cmds off the submission queue and sends them to the hardware,
5358 * then puts them on the queue of cmds waiting for completion.
5359 */
5360static void start_io(struct ctlr_info *h)
5361{
5362 struct CommandList *c;
Matt Gatese16a33a2012-05-01 11:43:11 -05005363 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005364
Matt Gatese16a33a2012-05-01 11:43:11 -05005365 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06005366 while (!list_empty(&h->reqQ)) {
5367 c = list_entry(h->reqQ.next, struct CommandList, list);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005368 /* can't do anything if fifo is full */
5369 if ((h->access.fifo_full(h))) {
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005370 h->fifo_recently_full = 1;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005371 dev_warn(&h->pdev->dev, "fifo full\n");
5372 break;
5373 }
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005374 h->fifo_recently_full = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005375
5376 /* Get the first entry from the Request Q */
5377 removeQ(c);
5378 h->Qdepth--;
5379
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005380 /* Put job onto the completed Q */
5381 addQ(&h->cmpQ, c);
Matt Gatese16a33a2012-05-01 11:43:11 -05005382
5383 /* Must increment commands_outstanding before unlocking
5384 * and submitting to avoid race checking for fifo full
5385 * condition.
5386 */
5387 h->commands_outstanding++;
5388 if (h->commands_outstanding > h->max_outstanding)
5389 h->max_outstanding = h->commands_outstanding;
5390
5391 /* Tell the controller execute command */
5392 spin_unlock_irqrestore(&h->lock, flags);
5393 h->access.submit_command(h, c);
5394 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005395 }
Matt Gatese16a33a2012-05-01 11:43:11 -05005396 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005397}
5398
Matt Gates254f7962012-05-01 11:43:06 -05005399static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005400{
Matt Gates254f7962012-05-01 11:43:06 -05005401 return h->access.command_completed(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005402}
5403
Stephen M. Cameron900c5442010-02-04 08:42:35 -06005404static inline bool interrupt_pending(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005405{
5406 return h->access.intr_pending(h);
5407}
5408
5409static inline long interrupt_not_for_us(struct ctlr_info *h)
5410{
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005411 return (h->access.intr_pending(h) == 0) ||
5412 (h->interrupts_enabled == 0);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005413}
5414
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06005415static inline int bad_tag(struct ctlr_info *h, u32 tag_index,
5416 u32 raw_tag)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005417{
5418 if (unlikely(tag_index >= h->nr_cmds)) {
5419 dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag);
5420 return 1;
5421 }
5422 return 0;
5423}
5424
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05005425static inline void finish_cmd(struct CommandList *c)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005426{
Matt Gatese16a33a2012-05-01 11:43:11 -05005427 unsigned long flags;
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005428 int io_may_be_stalled = 0;
5429 struct ctlr_info *h = c->h;
Matt Gatese16a33a2012-05-01 11:43:11 -05005430
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005431 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005432 removeQ(c);
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005433
5434 /*
5435 * Check for possibly stalled i/o.
5436 *
5437 * If a fifo_full condition is encountered, requests will back up
5438 * in h->reqQ. This queue is only emptied out by start_io which is
5439 * only called when a new i/o request comes in. If no i/o's are
5440 * forthcoming, the i/o's in h->reqQ can get stuck. So we call
5441 * start_io from here if we detect such a danger.
5442 *
5443 * Normally, we shouldn't hit this case, but pounding on the
5444 * CCISS_PASSTHRU ioctl can provoke it. Only call start_io if
5445 * commands_outstanding is low. We want to avoid calling
5446 * start_io from in here as much as possible, and esp. don't
5447 * want to get in a cycle where we call start_io every time
5448 * through here.
5449 */
5450 if (unlikely(h->fifo_recently_full) &&
5451 h->commands_outstanding < 5)
5452 io_may_be_stalled = 1;
5453
5454 spin_unlock_irqrestore(&h->lock, flags);
5455
Stephen M. Camerone85c5972012-05-01 11:43:42 -05005456 dial_up_lockup_detection_on_fw_flash_complete(c->h, c);
Scott Teelc3497752014-02-18 13:56:34 -06005457 if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI
5458 || c->cmd_type == CMD_IOACCEL2))
Stephen M. Cameron1fb011f2011-05-03 14:59:00 -05005459 complete_scsi_command(c);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005460 else if (c->cmd_type == CMD_IOCTL_PEND)
5461 complete(c->waiting);
Stephen M. Cameron396883e2013-09-23 13:34:17 -05005462 if (unlikely(io_may_be_stalled))
5463 start_io(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005464}
5465
Stephen M. Camerona104c992010-02-04 08:42:24 -06005466static inline u32 hpsa_tag_contains_index(u32 tag)
5467{
Stephen M. Camerona104c992010-02-04 08:42:24 -06005468 return tag & DIRECT_LOOKUP_BIT;
5469}
5470
5471static inline u32 hpsa_tag_to_index(u32 tag)
5472{
Stephen M. Camerona104c992010-02-04 08:42:24 -06005473 return tag >> DIRECT_LOOKUP_SHIFT;
5474}
5475
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005476
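/*
 * In a completed tag the low-order bits carry controller error/status
 * flags rather than identifying the command: 2 bits in simple mode,
 * DIRECT_LOOKUP_SHIFT bits in performant mode.  Strip them before
 * comparing the tag against command addresses.
 */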
5477static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
Stephen M. Camerona104c992010-02-04 08:42:24 -06005478{
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005479#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
5480#define HPSA_SIMPLE_ERROR_BITS 0x03
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06005481 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005482 return tag & ~HPSA_SIMPLE_ERROR_BITS;
5483 return tag & ~HPSA_PERF_ERROR_BITS;
Stephen M. Camerona104c992010-02-04 08:42:24 -06005484}
5485
Don Brace303932f2010-02-04 08:42:40 -06005486/* process completion of an indexed ("direct lookup") command */
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005487static inline void process_indexed_cmd(struct ctlr_info *h,
Don Brace303932f2010-02-04 08:42:40 -06005488 u32 raw_tag)
5489{
5490 u32 tag_index;
5491 struct CommandList *c;
5492
5493 tag_index = hpsa_tag_to_index(raw_tag);
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005494 if (!bad_tag(h, tag_index, raw_tag)) {
5495 c = h->cmd_pool + tag_index;
5496 finish_cmd(c);
5497 }
Don Brace303932f2010-02-04 08:42:40 -06005498}
5499
5500/* process completion of a non-indexed command */
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005501static inline void process_nonindexed_cmd(struct ctlr_info *h,
Don Brace303932f2010-02-04 08:42:40 -06005502 u32 raw_tag)
5503{
5504 u32 tag;
5505 struct CommandList *c = NULL;
Matt Gatese16a33a2012-05-01 11:43:11 -05005506 unsigned long flags;
Don Brace303932f2010-02-04 08:42:40 -06005507
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005508 tag = hpsa_tag_discard_error_bits(h, raw_tag);
Matt Gatese16a33a2012-05-01 11:43:11 -05005509 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06005510 list_for_each_entry(c, &h->cmpQ, list) {
Don Brace303932f2010-02-04 08:42:40 -06005511 if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
Matt Gatese16a33a2012-05-01 11:43:11 -05005512 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05005513 finish_cmd(c);
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005514 return;
Don Brace303932f2010-02-04 08:42:40 -06005515 }
5516 }
Matt Gatese16a33a2012-05-01 11:43:11 -05005517 spin_unlock_irqrestore(&h->lock, flags);
Don Brace303932f2010-02-04 08:42:40 -06005518 bad_tag(h, h->nr_cmds + 1, raw_tag);
Don Brace303932f2010-02-04 08:42:40 -06005519}
5520
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005521/* Some controllers, like p400, will give us one interrupt
5522 * after a soft reset, even if we turned interrupts off.
5523 * Only need to check for this in the hpsa_xxx_discard_completions
5524 * functions.
5525 */
5526static int ignore_bogus_interrupt(struct ctlr_info *h)
5527{
5528 if (likely(!reset_devices))
5529 return 0;
5530
5531 if (likely(h->interrupts_enabled))
5532 return 0;
5533
5534 dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
5535 "(known firmware bug.) Ignoring.\n");
5536
5537 return 1;
5538}
5539
Matt Gates254f7962012-05-01 11:43:06 -05005540/*
5541 * Convert &h->q[x] (passed to interrupt handlers) back to h.
5542 * Relies on (h->q[x] == x) being true for x such that
5543 * 0 <= x < MAX_REPLY_QUEUES.
5544 */
5545static struct ctlr_info *queue_to_hba(u8 *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005546{
Matt Gates254f7962012-05-01 11:43:06 -05005547 return container_of((queue - *queue), struct ctlr_info, q[0]);
5548}
5549
5550static irqreturn_t hpsa_intx_discard_completions(int irq, void *queue)
5551{
5552 struct ctlr_info *h = queue_to_hba(queue);
5553 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005554 u32 raw_tag;
5555
5556 if (ignore_bogus_interrupt(h))
5557 return IRQ_NONE;
5558
5559 if (interrupt_not_for_us(h))
5560 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005561 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005562 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05005563 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005564 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05005565 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005566 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005567 return IRQ_HANDLED;
5568}
5569
Matt Gates254f7962012-05-01 11:43:06 -05005570static irqreturn_t hpsa_msix_discard_completions(int irq, void *queue)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005571{
Matt Gates254f7962012-05-01 11:43:06 -05005572 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005573 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05005574 u8 q = *(u8 *) queue;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005575
5576 if (ignore_bogus_interrupt(h))
5577 return IRQ_NONE;
5578
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005579 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05005580 raw_tag = get_next_completion(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005581 while (raw_tag != FIFO_EMPTY)
Matt Gates254f7962012-05-01 11:43:06 -05005582 raw_tag = next_command(h, q);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005583 return IRQ_HANDLED;
5584}
5585
Matt Gates254f7962012-05-01 11:43:06 -05005586static irqreturn_t do_hpsa_intr_intx(int irq, void *queue)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005587{
Matt Gates254f7962012-05-01 11:43:06 -05005588 struct ctlr_info *h = queue_to_hba((u8 *) queue);
Don Brace303932f2010-02-04 08:42:40 -06005589 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05005590 u8 q = *(u8 *) queue;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005591
5592 if (interrupt_not_for_us(h))
5593 return IRQ_NONE;
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005594 h->last_intr_timestamp = get_jiffies_64();
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005595 while (interrupt_pending(h)) {
Matt Gates254f7962012-05-01 11:43:06 -05005596 raw_tag = get_next_completion(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005597 while (raw_tag != FIFO_EMPTY) {
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005598 if (likely(hpsa_tag_contains_index(raw_tag)))
5599 process_indexed_cmd(h, raw_tag);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005600 else
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005601 process_nonindexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05005602 raw_tag = next_command(h, q);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005603 }
5604 }
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005605 return IRQ_HANDLED;
5606}
5607
Matt Gates254f7962012-05-01 11:43:06 -05005608static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005609{
Matt Gates254f7962012-05-01 11:43:06 -05005610 struct ctlr_info *h = queue_to_hba(queue);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005611 u32 raw_tag;
Matt Gates254f7962012-05-01 11:43:06 -05005612 u8 q = *(u8 *) queue;
Stephen M. Cameron10f66012010-06-16 13:51:50 -05005613
Stephen M. Camerona0c12412011-10-26 16:22:04 -05005614 h->last_intr_timestamp = get_jiffies_64();
Matt Gates254f7962012-05-01 11:43:06 -05005615 raw_tag = get_next_completion(h, q);
Don Brace303932f2010-02-04 08:42:40 -06005616 while (raw_tag != FIFO_EMPTY) {
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005617 if (likely(hpsa_tag_contains_index(raw_tag)))
5618 process_indexed_cmd(h, raw_tag);
Don Brace303932f2010-02-04 08:42:40 -06005619 else
Stephen M. Cameron1d94f942012-05-01 11:43:01 -05005620 process_nonindexed_cmd(h, raw_tag);
Matt Gates254f7962012-05-01 11:43:06 -05005621 raw_tag = next_command(h, q);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005622 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005623 return IRQ_HANDLED;
5624}
5625
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005626/* Send a message CDB to the firmware. Careful, this only works
5627 * in simple mode, not performant mode due to the tag lookup.
5628 * We only ever use this immediately after a controller reset.
5629 */
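/* The mechanism: the command's 32-bit bus address is written to the inbound
 * post register (SA5_REQUEST_PORT_OFFSET), then the outbound reply register
 * (SA5_REPLY_PORT_OFFSET) is polled until that same address comes back, with
 * the low-order error bits masked off.
 */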
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005630static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
5631 unsigned char type)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005632{
5633 struct Command {
5634 struct CommandListHeader CommandHeader;
5635 struct RequestBlock Request;
5636 struct ErrDescriptor ErrorDescriptor;
5637 };
5638 struct Command *cmd;
5639 static const size_t cmd_sz = sizeof(*cmd) +
5640 sizeof(cmd->ErrorDescriptor);
5641 dma_addr_t paddr64;
5642 uint32_t paddr32, tag;
5643 void __iomem *vaddr;
5644 int i, err;
5645
5646 vaddr = pci_ioremap_bar(pdev, 0);
5647 if (vaddr == NULL)
5648 return -ENOMEM;
5649
5650 /* The Inbound Post Queue only accepts 32-bit physical addresses for the
5651 * CCISS commands, so they must be allocated from the lower 4GiB of
5652 * memory.
5653 */
5654 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
5655 if (err) {
5656 iounmap(vaddr);
5657 return -ENOMEM;
5658 }
5659
5660 cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
5661 if (cmd == NULL) {
5662 iounmap(vaddr);
5663 return -ENOMEM;
5664 }
5665
5666 /* This must fit, because of the 32-bit consistent DMA mask. Also,
5667 * although there's no guarantee, we assume that the address is at
5668 * least 4-byte aligned (most likely, it's page-aligned).
5669 */
5670 paddr32 = paddr64;
5671
5672 cmd->CommandHeader.ReplyQueue = 0;
5673 cmd->CommandHeader.SGList = 0;
5674 cmd->CommandHeader.SGTotal = 0;
5675 cmd->CommandHeader.Tag.lower = paddr32;
5676 cmd->CommandHeader.Tag.upper = 0;
5677 memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
5678
5679 cmd->Request.CDBLen = 16;
5680 cmd->Request.Type.Type = TYPE_MSG;
5681 cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
5682 cmd->Request.Type.Direction = XFER_NONE;
5683 cmd->Request.Timeout = 0; /* Don't time out */
5684 cmd->Request.CDB[0] = opcode;
5685 cmd->Request.CDB[1] = type;
5686 memset(&cmd->Request.CDB[2], 0, 14); /* rest of the CDB is reserved */
5687 cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(*cmd);
5688 cmd->ErrorDescriptor.Addr.upper = 0;
5689 cmd->ErrorDescriptor.Len = sizeof(struct ErrorInfo);
5690
5691 writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
5692
5693 for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
5694 tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06005695 if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005696 break;
5697 msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
5698 }
5699
5700 iounmap(vaddr);
5701
5702 /* we leak the DMA buffer here ... no choice since the controller could
5703 * still complete the command.
5704 */
5705 if (i == HPSA_MSG_SEND_RETRY_LIMIT) {
5706 dev_err(&pdev->dev, "controller message %02x:%02x timed out\n",
5707 opcode, type);
5708 return -ETIMEDOUT;
5709 }
5710
5711 pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
5712
5713 if (tag & HPSA_ERROR_BIT) {
5714 dev_err(&pdev->dev, "controller message %02x:%02x failed\n",
5715 opcode, type);
5716 return -EIO;
5717 }
5718
5719 dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n",
5720 opcode, type);
5721 return 0;
5722}
5723
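/* Message opcode 3 with type 0 is used as a no-op; hpsa_init_reset_devices()
 * sends it after a reset to confirm the controller is answering commands
 * again.
 */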
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005724#define hpsa_noop(p) hpsa_message(p, 3, 0)
5725
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005726static int hpsa_controller_hard_reset(struct pci_dev *pdev,
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005727 void * __iomem vaddr, u32 use_doorbell)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005728{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005729 u16 pmcsr;
5730 int pos;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005731
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005732 if (use_doorbell) {
5733 /* For everything after the P600, the PCI power state method
5734 * of resetting the controller doesn't work, so we have this
5735 * other way using the doorbell register.
5736 */
5737 dev_info(&pdev->dev, "using doorbell to reset controller\n");
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005738 writel(use_doorbell, vaddr + SA5_DOORBELL);
Stephen M. Cameron85009232013-09-23 13:33:36 -05005739
5740 /* PMC hardware guys tell us we need a 5 second delay after
5741 * doorbell reset and before any attempt to talk to the board
5742 * at all to ensure that this actually works and doesn't fall
5743 * over in some weird corner cases.
5744 */
5745 msleep(5000);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005746 } else { /* Try to do it the PCI power state way */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005747
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005748 /* Quoting from the Open CISS Specification: "The Power
5749 * Management Control/Status Register (CSR) controls the power
5750 * state of the device. The normal operating state is D0,
5751 * CSR=00h. The software off state is D3, CSR=03h. To reset
5752 * the controller, place the interface device in D3 then to D0,
5753 * this causes a secondary PCI reset which will reset the
5754 * controller." */
5755
5756 pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
5757 if (pos == 0) {
5758 dev_err(&pdev->dev,
5759 "hpsa_reset_controller: "
5760 "PCI PM not supported\n");
5761 return -ENODEV;
5762 }
5763 dev_info(&pdev->dev, "using PCI PM to reset controller\n");
5764 /* enter the D3hot power management state */
5765 pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
5766 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5767 pmcsr |= PCI_D3hot;
5768 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
5769
5770 msleep(500);
5771
5772 /* enter the D0 power management state */
5773 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
5774 pmcsr |= PCI_D0;
5775 pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
Mike Millerc4853ef2011-10-21 08:19:43 +02005776
5777 /*
5778 * The P600 requires a small delay when changing states.
5779 * Otherwise we may think the board did not reset and we bail.
5780 * This for kdump only and is particular to the P600.
5781 */
5782 msleep(500);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005783 }
5784 return 0;
5785}
5786
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005787static void init_driver_version(char *driver_version, int len)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005788{
5789 memset(driver_version, 0, len);
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06005790 strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005791}
5792
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005793static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005794{
5795 char *driver_version;
5796 int i, size = sizeof(cfgtable->driver_version);
5797
5798 driver_version = kmalloc(size, GFP_KERNEL);
5799 if (!driver_version)
5800 return -ENOMEM;
5801
5802 init_driver_version(driver_version, size);
5803 for (i = 0; i < size; i++)
5804 writeb(driver_version[i], &cfgtable->driver_version[i]);
5805 kfree(driver_version);
5806 return 0;
5807}
5808
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005809static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
5810 unsigned char *driver_ver)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005811{
5812 int i;
5813
5814 for (i = 0; i < sizeof(cfgtable->driver_version); i++)
5815 driver_ver[i] = readb(&cfgtable->driver_version[i]);
5816}
5817
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005818static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005819{
5820
5821 char *driver_ver, *old_driver_ver;
5822 int rc, size = sizeof(cfgtable->driver_version);
5823
5824 old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
5825 if (!old_driver_ver)
5826 return -ENOMEM;
5827 driver_ver = old_driver_ver + size;
5828
5829 /* After a reset, the 32 bytes of "driver version" in the cfgtable
5830 * should have been changed, otherwise we know the reset failed.
5831 */
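	/* (write_driver_ver_to_cfgtable() stored the driver's own version
	 * string in the config table just before the reset was requested; a
	 * successful reset is expected to wipe it when the firmware
	 * reinitializes the table.)
	 */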
5832 init_driver_version(old_driver_ver, size);
5833 read_driver_ver_from_cfgtable(cfgtable, driver_ver);
5834 rc = !memcmp(driver_ver, old_driver_ver, size);
5835 kfree(old_driver_ver);
5836 return rc;
5837}
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005838/* This does a hard reset of the controller using PCI power management
 5839 * states or the doorbell register.
5840 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08005841static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005842{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005843 u64 cfg_offset;
5844 u32 cfg_base_addr;
5845 u64 cfg_base_addr_index;
5846 void __iomem *vaddr;
5847 unsigned long paddr;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005848 u32 misc_fw_support;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005849 int rc;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005850 struct CfgTable __iomem *cfgtable;
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005851 u32 use_doorbell;
Stephen M. Cameron18867652010-06-16 13:51:45 -05005852 u32 board_id;
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005853 u16 command_register;
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005854
5855 /* For controllers as old as the P600, this is very nearly
5856 * the same thing as
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005857 *
5858 * pci_save_state(pci_dev);
5859 * pci_set_power_state(pci_dev, PCI_D3hot);
5860 * pci_set_power_state(pci_dev, PCI_D0);
5861 * pci_restore_state(pci_dev);
5862 *
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005863 * For controllers newer than the P600, the pci power state
5864 * method of resetting doesn't work so we have another way
5865 * using the doorbell register.
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005866 */
Stephen M. Cameron18867652010-06-16 13:51:45 -05005867
Stephen M. Cameron25c1e56a2011-01-06 14:48:18 -06005868 rc = hpsa_lookup_board_id(pdev, &board_id);
Stephen M. Cameron46380782011-05-03 15:00:01 -05005869 if (rc < 0 || !ctlr_is_resettable(board_id)) {
Stephen M. Cameron25c1e56a2011-01-06 14:48:18 -06005870 dev_warn(&pdev->dev, "Not resetting device.\n");
5871 return -ENODEV;
5872 }
Stephen M. Cameron46380782011-05-03 15:00:01 -05005873
5874 /* if controller is soft- but not hard resettable... */
5875 if (!ctlr_is_hard_resettable(board_id))
5876 return -ENOTSUPP; /* try soft reset later. */
Stephen M. Cameron18867652010-06-16 13:51:45 -05005877
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005878 /* Save the PCI command register */
5879 pci_read_config_word(pdev, 4, &command_register);
5880 /* Turn the board off. This is so that later pci_restore_state()
5881 * won't turn the board on before the rest of config space is ready.
5882 */
5883 pci_disable_device(pdev);
5884 pci_save_state(pdev);
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005885
5886 /* find the first memory BAR, so we can find the cfg table */
5887 rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
5888 if (rc)
5889 return rc;
5890 vaddr = remap_pci_mem(paddr, 0x250);
5891 if (!vaddr)
5892 return -ENOMEM;
5893
5894 /* find cfgtable in order to check if reset via doorbell is supported */
5895 rc = hpsa_find_cfg_addrs(pdev, vaddr, &cfg_base_addr,
5896 &cfg_base_addr_index, &cfg_offset);
5897 if (rc)
5898 goto unmap_vaddr;
5899 cfgtable = remap_pci_mem(pci_resource_start(pdev,
5900 cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable));
5901 if (!cfgtable) {
5902 rc = -ENOMEM;
5903 goto unmap_vaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005904 }
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005905 rc = write_driver_ver_to_cfgtable(cfgtable);
5906 if (rc)
5907 goto unmap_vaddr;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005908
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005909 /* If reset via doorbell register is supported, use that.
5910 * There are two such methods. Favor the newest method.
5911 */
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005912 misc_fw_support = readl(&cfgtable->misc_fw_support);
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005913 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
5914 if (use_doorbell) {
5915 use_doorbell = DOORBELL_CTLR_RESET2;
5916 } else {
5917 use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
5918 if (use_doorbell) {
Mike Millerfba63092011-10-13 11:44:06 -05005919 dev_warn(&pdev->dev, "Soft reset not supported. "
5920 "Firmware update is required.\n");
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005921 rc = -ENOTSUPP; /* try soft reset */
Stephen M. Cameroncf0b08d2011-05-03 14:59:46 -05005922 goto unmap_cfgtable;
5923 }
5924 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005925
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005926 rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
5927 if (rc)
5928 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005929
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005930 pci_restore_state(pdev);
5931 rc = pci_enable_device(pdev);
5932 if (rc) {
5933 dev_warn(&pdev->dev, "failed to enable device.\n");
5934 goto unmap_cfgtable;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005935 }
Stephen M. Cameron270d05d2011-01-06 14:48:08 -06005936 pci_write_config_word(pdev, 4, command_register);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005937
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005938 /* Some devices (notably the HP Smart Array 5i Controller)
5939 need a little pause here */
5940 msleep(HPSA_POST_RESET_PAUSE_MSECS);
5941
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06005942 rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
5943 if (rc) {
5944 dev_warn(&pdev->dev,
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005945 "failed waiting for board to become ready "
5946 "after hard reset\n");
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06005947 goto unmap_cfgtable;
5948 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06005949
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005950	rc = controller_reset_failed(cfgtable);
5951 if (rc < 0)
5952 goto unmap_cfgtable;
5953 if (rc) {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005954 dev_warn(&pdev->dev, "Unable to successfully reset "
5955 "controller. Will try soft reset.\n");
5956 rc = -ENOTSUPP;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05005957 } else {
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05005958 dev_info(&pdev->dev, "board ready after hard reset.\n");
Stephen M. Cameron1df85522010-06-16 13:51:40 -05005959 }
5960
5961unmap_cfgtable:
5962 iounmap(cfgtable);
5963
5964unmap_vaddr:
5965 iounmap(vaddr);
5966 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005967}
5968
5969/*
5970 * We cannot read the structure directly, for portability we must use
5971 * the io functions.
5972 * This is for debug only.
5973 */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005974static void print_cfg_table(struct device *dev, struct CfgTable *tb)
5975{
Stephen M. Cameron58f86652010-05-27 15:13:58 -05005976#ifdef HPSA_DEBUG
Stephen M. Cameronedd16362009-12-08 14:09:11 -08005977 int i;
5978 char temp_name[17];
5979
5980 dev_info(dev, "Controller Configuration information\n");
5981 dev_info(dev, "------------------------------------\n");
5982 for (i = 0; i < 4; i++)
5983 temp_name[i] = readb(&(tb->Signature[i]));
5984 temp_name[4] = '\0';
5985 dev_info(dev, " Signature = %s\n", temp_name);
5986 dev_info(dev, " Spec Number = %d\n", readl(&(tb->SpecValence)));
5987 dev_info(dev, " Transport methods supported = 0x%x\n",
5988 readl(&(tb->TransportSupport)));
5989 dev_info(dev, " Transport methods active = 0x%x\n",
5990 readl(&(tb->TransportActive)));
5991 dev_info(dev, " Requested transport Method = 0x%x\n",
5992 readl(&(tb->HostWrite.TransportRequest)));
5993 dev_info(dev, " Coalesce Interrupt Delay = 0x%x\n",
5994 readl(&(tb->HostWrite.CoalIntDelay)));
5995 dev_info(dev, " Coalesce Interrupt Count = 0x%x\n",
5996 readl(&(tb->HostWrite.CoalIntCount)));
 5997	dev_info(dev, "   Max outstanding commands = %d\n",
5998 readl(&(tb->CmdsOutMax)));
5999 dev_info(dev, " Bus Types = 0x%x\n", readl(&(tb->BusTypes)));
6000 for (i = 0; i < 16; i++)
6001 temp_name[i] = readb(&(tb->ServerName[i]));
6002 temp_name[16] = '\0';
6003 dev_info(dev, " Server Name = %s\n", temp_name);
6004 dev_info(dev, " Heartbeat Counter = 0x%x\n\n\n",
6005 readl(&(tb->HeartBeat)));
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006006#endif /* HPSA_DEBUG */
Stephen M. Cameron58f86652010-05-27 15:13:58 -05006007}
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006008
6009static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
6010{
6011 int i, offset, mem_type, bar_type;
6012
6013 if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
6014 return 0;
6015 offset = 0;
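	/* Walk the BARs, accumulating their size in PCI config space (4 bytes
	 * for I/O and 32-bit memory BARs, 8 bytes for 64-bit memory BARs),
	 * until the accumulated offset matches the register address we were
	 * given.
	 */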
6016 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
6017 bar_type = pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE;
6018 if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
6019 offset += 4;
6020 else {
6021 mem_type = pci_resource_flags(pdev, i) &
6022 PCI_BASE_ADDRESS_MEM_TYPE_MASK;
6023 switch (mem_type) {
6024 case PCI_BASE_ADDRESS_MEM_TYPE_32:
6025 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
6026 offset += 4; /* 32 bit */
6027 break;
6028 case PCI_BASE_ADDRESS_MEM_TYPE_64:
6029 offset += 8;
6030 break;
6031 default: /* reserved in PCI 2.2 */
6032 dev_warn(&pdev->dev,
6033 "base address is invalid\n");
6034 return -1;
6035 break;
6036 }
6037 }
6038 if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
6039 return i + 1;
6040 }
6041 return -1;
6042}
6043
6044/* If MSI/MSI-X is supported by the kernel we will try to enable it on
6045 * controllers that are capable. If not, we use IO-APIC mode.
6046 */
6047
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006048static void hpsa_interrupt_mode(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006049{
6050#ifdef CONFIG_PCI_MSI
Matt Gates254f7962012-05-01 11:43:06 -05006051 int err, i;
6052 struct msix_entry hpsa_msix_entries[MAX_REPLY_QUEUES];
6053
6054 for (i = 0; i < MAX_REPLY_QUEUES; i++) {
6055 hpsa_msix_entries[i].vector = 0;
6056 hpsa_msix_entries[i].entry = i;
6057 }
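	/* Ask for MAX_REPLY_QUEUES MSI-X vectors; if pci_enable_msix() reports
	 * (via a positive return value) that only fewer vectors are available,
	 * the request is retried below with that smaller count.
	 */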
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006058
6059 /* Some boards advertise MSI but don't really support it */
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05006060 if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
6061 (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006062 goto default_int_mode;
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006063 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
6064 dev_info(&h->pdev->dev, "MSIX\n");
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006065 h->msix_vector = MAX_REPLY_QUEUES;
Matt Gates254f7962012-05-01 11:43:06 -05006066 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006067 h->msix_vector);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006068 if (err > 0) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006069 dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006070 "available\n", err);
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006071 h->msix_vector = err;
6072 err = pci_enable_msix(h->pdev, hpsa_msix_entries,
6073 h->msix_vector);
6074 }
6075 if (!err) {
6076 for (i = 0; i < h->msix_vector; i++)
6077 h->intr[i] = hpsa_msix_entries[i].vector;
6078 return;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006079 } else {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006080 dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006081 err);
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006082 h->msix_vector = 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006083 goto default_int_mode;
6084 }
6085 }
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006086 if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
6087 dev_info(&h->pdev->dev, "MSI\n");
6088 if (!pci_enable_msi(h->pdev))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006089 h->msi_vector = 1;
6090 else
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006091 dev_warn(&h->pdev->dev, "MSI init failed\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006092 }
6093default_int_mode:
6094#endif /* CONFIG_PCI_MSI */
6095 /* if we get here we're going to use the default interrupt mode */
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006096 h->intr[h->intr_mode] = h->pdev->irq;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006097}
6098
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006099static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006100{
6101 int i;
6102 u32 subsystem_vendor_id, subsystem_device_id;
6103
6104 subsystem_vendor_id = pdev->subsystem_vendor;
6105 subsystem_device_id = pdev->subsystem_device;
6106 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
6107 subsystem_vendor_id;
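	/* e.g. PCI subsystem vendor 0x103C, device 0x3354 yields a board_id
	 * of 0x3354103C, which is what the products[] table is keyed on.
	 */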
6108
6109 for (i = 0; i < ARRAY_SIZE(products); i++)
6110 if (*board_id == products[i].board_id)
6111 return i;
6112
Stephen M. Cameron6798cc02010-06-16 13:51:20 -05006113 if ((subsystem_vendor_id != PCI_VENDOR_ID_HP &&
6114 subsystem_vendor_id != PCI_VENDOR_ID_COMPAQ) ||
6115 !hpsa_allow_any) {
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006116 dev_warn(&pdev->dev, "unrecognized board ID: "
6117 "0x%08x, ignoring.\n", *board_id);
6118 return -ENODEV;
6119 }
6120 return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
6121}
6122
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006123static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
6124 unsigned long *memory_bar)
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006125{
6126 int i;
6127
6128 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006129 if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006130 /* addressing mode bits already removed */
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006131 *memory_bar = pci_resource_start(pdev, i);
6132 dev_dbg(&pdev->dev, "memory BAR = %lx\n",
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006133 *memory_bar);
6134 return 0;
6135 }
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006136 dev_warn(&pdev->dev, "no memory BAR found\n");
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006137 return -ENODEV;
6138}
6139
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006140static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
6141 int wait_for_ready)
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006142{
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006143 int i, iterations;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006144 u32 scratchpad;
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006145 if (wait_for_ready)
6146 iterations = HPSA_BOARD_READY_ITERATIONS;
6147 else
6148 iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006149
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006150 for (i = 0; i < iterations; i++) {
6151 scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
6152 if (wait_for_ready) {
6153 if (scratchpad == HPSA_FIRMWARE_READY)
6154 return 0;
6155 } else {
6156 if (scratchpad != HPSA_FIRMWARE_READY)
6157 return 0;
6158 }
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006159 msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
6160 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006161 dev_warn(&pdev->dev, "board not ready, timed out.\n");
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006162 return -ENODEV;
6163}
6164
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006165static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
6166 u32 *cfg_base_addr, u64 *cfg_base_addr_index,
6167 u64 *cfg_offset)
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006168{
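	/* The controller publishes the config table location in two BAR0
	 * registers: SA5_CTCFG_OFFSET holds (in its low 16 bits) the config
	 * space address of the BAR containing the table, which
	 * find_PCI_BAR_index() converts to a resource index, and
	 * SA5_CTMEM_OFFSET holds the table's byte offset within that BAR.
	 */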
6169 *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
6170 *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
6171 *cfg_base_addr &= (u32) 0x0000ffff;
6172 *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr);
6173 if (*cfg_base_addr_index == -1) {
6174 dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n");
6175 return -ENODEV;
6176 }
6177 return 0;
6178}
6179
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006180static int hpsa_find_cfgtables(struct ctlr_info *h)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006181{
Stephen M. Cameron01a02ff2010-02-04 08:41:33 -06006182 u64 cfg_offset;
6183 u32 cfg_base_addr;
6184 u64 cfg_base_addr_index;
Don Brace303932f2010-02-04 08:42:40 -06006185 u32 trans_offset;
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006186 int rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006187
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006188 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
6189 &cfg_base_addr_index, &cfg_offset);
6190 if (rc)
6191 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006192 h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006193 cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006194 if (!h->cfgtable)
6195 return -ENOMEM;
Stephen M. Cameron580ada32011-05-03 14:59:10 -05006196 rc = write_driver_ver_to_cfgtable(h->cfgtable);
6197 if (rc)
6198 return rc;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006199 /* Find performant mode table. */
Stephen M. Camerona51fd472010-06-16 13:51:30 -05006200 trans_offset = readl(&h->cfgtable->TransMethodOffset);
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006201 h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
6202 cfg_base_addr_index)+cfg_offset+trans_offset,
6203 sizeof(*h->transtable));
6204 if (!h->transtable)
6205 return -ENOMEM;
6206 return 0;
6207}
6208
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006209static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006210{
6211 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
Stephen M. Cameron72ceeae2011-01-06 14:48:13 -06006212
6213 /* Limit commands in memory limited kdump scenario. */
6214 if (reset_devices && h->max_commands > 32)
6215 h->max_commands = 32;
6216
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006217 if (h->max_commands < 16) {
6218 dev_warn(&h->pdev->dev, "Controller reports "
6219 "max supported commands of %d, an obvious lie. "
6220 "Using 16. Ensure that firmware is up to date.\n",
6221 h->max_commands);
6222 h->max_commands = 16;
6223 }
6224}
6225
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006226/* Interrogate the hardware for some limits:
6227 * max commands, max SG elements without chaining, and with chaining,
6228 * SG chain block size, etc.
6229 */
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006230static void hpsa_find_board_params(struct ctlr_info *h)
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006231{
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05006232 hpsa_get_max_perf_mode_cmds(h);
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006233 h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
6234 h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006235 h->fw_support = readl(&(h->cfgtable->misc_fw_support));
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006236 /*
 6237	 * Limit in-command s/g elements to 32 to save DMA'able memory.
 6238	 * However, the spec says if 0, use 31.
6239 */
6240 h->max_cmd_sg_entries = 31;
6241 if (h->maxsgentries > 512) {
6242 h->max_cmd_sg_entries = 32;
6243 h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
6244 h->maxsgentries--; /* save one for chain pointer */
6245 } else {
6246 h->maxsgentries = 31; /* default to traditional values */
6247 h->chainsize = 0;
6248 }
Stephen M. Cameron75167d22012-05-01 11:42:51 -05006249
6250 /* Find out what task management functions are supported and cache */
6251 h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags));
Scott Teel0e7a7fc2014-02-18 13:55:59 -06006252 if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags))
6253 dev_warn(&h->pdev->dev, "Physical aborts not supported\n");
6254 if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags))
6255 dev_warn(&h->pdev->dev, "Logical aborts not supported\n");
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006256}
6257
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006258static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)
6259{
Akinobu Mita0fc9fd42012-04-04 22:14:59 +09006260 if (!check_signature(h->cfgtable->Signature, "CISS", 4)) {
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006261 dev_warn(&h->pdev->dev, "not a valid CISS config table\n");
6262 return false;
6263 }
6264 return true;
6265}
6266
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006267static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006268{
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006269 u32 driver_support;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006270
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006271	driver_support = readl(&(h->cfgtable->driver_support));
Stephen M. Cameron28e13442013-12-04 17:10:21 -06006272#ifdef CONFIG_X86
 6273	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
 6274	driver_support |= ENABLE_SCSI_PREFETCH;
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006275#endif
Stephen M. Cameron28e13442013-12-04 17:10:21 -06006276 driver_support |= ENABLE_UNIT_ATTN;
6277 writel(driver_support, &(h->cfgtable->driver_support));
Stephen M. Cameronf7c39102010-05-27 15:13:38 -05006278}
6279
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05006280/* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result
6281 * in a prefetch beyond physical memory.
6282 */
6283static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
6284{
6285 u32 dma_prefetch;
6286
6287 if (h->board_id != 0x3225103C)
6288 return;
6289 dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG);
6290 dma_prefetch |= 0x8000;
6291 writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
6292}
6293
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006294static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h)
6295{
6296 int i;
6297 u32 doorbell_value;
6298 unsigned long flags;
6299 /* wait until the clear_event_notify bit 6 is cleared by controller. */
6300 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
6301 spin_lock_irqsave(&h->lock, flags);
6302 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6303 spin_unlock_irqrestore(&h->lock, flags);
6304 if (!(doorbell_value & DOORBELL_CLEAR_EVENTS))
6305 break;
6306 /* delay and try again */
6307 msleep(20);
6308 }
6309}
6310
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006311static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006312{
6313 int i;
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006314 u32 doorbell_value;
6315 unsigned long flags;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006316
 6317	/* under certain very rare conditions, this can take a while.
6318 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
6319 * as we enter this code.)
6320 */
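	/* The controller acknowledges the mode change by clearing the
	 * CFGTBL_ChangeReq bit in the doorbell register; poll for that, up to
	 * MAX_CONFIG_WAIT times.
	 */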
6321 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006322 spin_lock_irqsave(&h->lock, flags);
6323 doorbell_value = readl(h->vaddr + SA5_DOORBELL);
6324 spin_unlock_irqrestore(&h->lock, flags);
Dan Carpenter382be662011-02-15 15:33:13 -06006325 if (!(doorbell_value & CFGTBL_ChangeReq))
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006326 break;
6327 /* delay and try again */
Stephen M. Cameron60d3f5b2011-01-06 14:48:34 -06006328 usleep_range(10000, 20000);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006329 }
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006330}
6331
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006332static int hpsa_enter_simple_mode(struct ctlr_info *h)
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006333{
6334 u32 trans_support;
6335
6336 trans_support = readl(&(h->cfgtable->TransportSupport));
6337 if (!(trans_support & SIMPLE_MODE))
6338 return -ENOTSUPP;
6339
6340 h->max_commands = readl(&(h->cfgtable->CmdsOutMax));
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006341
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006342 /* Update the field, and then ring the doorbell */
6343 writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest));
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06006344 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05006345 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6346 hpsa_wait_for_mode_change_ack(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006347 print_cfg_table(&h->pdev->dev, h->cfgtable);
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006348 if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
6349 goto error;
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06006350 h->transMethod = CFGTBL_Trans_Simple;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006351 return 0;
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006352error:
6353 dev_warn(&h->pdev->dev, "unable to get board into simple mode\n");
6354 return -ENODEV;
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006355}
6356
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006357static int hpsa_pci_init(struct ctlr_info *h)
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006358{
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006359 int prod_index, err;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006360
Stephen M. Camerone5c880d2010-05-27 15:12:52 -05006361 prod_index = hpsa_lookup_board_id(h->pdev, &h->board_id);
6362 if (prod_index < 0)
6363 return -ENODEV;
6364 h->product_name = products[prod_index].product_name;
6365 h->access = *(products[prod_index].access);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006366
Matthew Garrette5a44df2011-11-11 11:14:23 -05006367 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
6368 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
6369
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006370 err = pci_enable_device(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006371 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006372 dev_warn(&h->pdev->dev, "unable to enable PCI device\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006373 return err;
6374 }
6375
Stephen M. Cameron5cb460a2012-05-01 11:42:20 -05006376 /* Enable bus mastering (pci_disable_device may disable this) */
6377 pci_set_master(h->pdev);
6378
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06006379 err = pci_request_regions(h->pdev, HPSA);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006380 if (err) {
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006381 dev_err(&h->pdev->dev,
6382 "cannot obtain PCI resources, aborting\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006383 return err;
6384 }
Stephen M. Cameron6b3f4c52010-05-27 15:13:02 -05006385 hpsa_interrupt_mode(h);
Stephen M. Cameron12d2cd42010-06-16 13:51:25 -05006386 err = hpsa_pci_find_memory_BAR(h->pdev, &h->paddr);
Stephen M. Cameron3a7774c2010-05-27 15:13:07 -05006387 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006388 goto err_out_free_res;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006389 h->vaddr = remap_pci_mem(h->paddr, 0x250);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05006390 if (!h->vaddr) {
6391 err = -ENOMEM;
6392 goto err_out_free_res;
6393 }
Stephen M. Cameronfe5389c2011-01-06 14:48:03 -06006394 err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
Stephen M. Cameron2c4c8c82010-05-27 15:13:12 -05006395 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006396 goto err_out_free_res;
Stephen M. Cameron77c44952010-05-27 15:13:17 -05006397 err = hpsa_find_cfgtables(h);
6398 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006399 goto err_out_free_res;
Stephen M. Cameronb93d7532010-05-27 15:13:27 -05006400 hpsa_find_board_params(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006401
Stephen M. Cameron76c46e42010-05-27 15:13:32 -05006402 if (!hpsa_CISS_signature_present(h)) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006403 err = -ENODEV;
6404 goto err_out_free_res;
6405 }
Stephen M. Cameron97a5e982013-12-04 17:10:16 -06006406 hpsa_set_driver_support_bits(h);
Stephen M. Cameron3d0eab62010-05-27 15:13:43 -05006407 hpsa_p600_dma_prefetch_quirk(h);
Stephen M. Cameroneb6b2ae2010-05-27 15:13:48 -05006408 err = hpsa_enter_simple_mode(h);
6409 if (err)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006410 goto err_out_free_res;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006411 return 0;
6412
6413err_out_free_res:
Stephen M. Cameron204892e2010-05-27 15:13:22 -05006414 if (h->transtable)
6415 iounmap(h->transtable);
6416 if (h->cfgtable)
6417 iounmap(h->cfgtable);
6418 if (h->vaddr)
6419 iounmap(h->vaddr);
Stephen M. Cameronf0bd0b682012-05-01 11:42:09 -05006420 pci_disable_device(h->pdev);
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006421 pci_release_regions(h->pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006422 return err;
6423}
6424
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006425static void hpsa_hba_inquiry(struct ctlr_info *h)
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06006426{
6427 int rc;
6428
6429#define HBA_INQUIRY_BYTE_COUNT 64
6430 h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL);
6431 if (!h->hba_inquiry_data)
6432 return;
6433 rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0,
6434 h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT);
6435 if (rc != 0) {
6436 kfree(h->hba_inquiry_data);
6437 h->hba_inquiry_data = NULL;
6438 }
6439}
6440
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006441static int hpsa_init_reset_devices(struct pci_dev *pdev)
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006442{
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006443 int rc, i;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006444
6445 if (!reset_devices)
6446 return 0;
6447
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006448 /* Reset the controller with a PCI power-cycle or via doorbell */
6449 rc = hpsa_kdump_hard_reset_controller(pdev);
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006450
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006451 /* -ENOTSUPP here means we cannot reset the controller
6452 * but it's already (and still) up and running in
Stephen M. Cameron18867652010-06-16 13:51:45 -05006453 * "performant mode". Or, it might be 640x, which can't reset
6454 * due to concerns about shared bbwc between 6402/6404 pair.
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006455 */
6456 if (rc == -ENOTSUPP)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006457 return rc; /* just try to do the kdump anyhow. */
Stephen M. Cameron1df85522010-06-16 13:51:40 -05006458 if (rc)
6459 return -ENODEV;
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006460
6461 /* Now try to get the controller to respond to a no-op */
Stephen M. Cameron2b870cb2011-05-03 14:59:36 -05006462 dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006463 for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
6464 if (hpsa_noop(pdev) == 0)
6465 break;
6466 else
6467 dev_warn(&pdev->dev, "no-op failed%s\n",
6468 (i < 11 ? "; re-trying" : ""));
6469 }
6470 return 0;
6471}
6472
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006473static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006474{
6475 h->cmd_pool_bits = kzalloc(
6476 DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
6477 sizeof(unsigned long), GFP_KERNEL);
6478 h->cmd_pool = pci_alloc_consistent(h->pdev,
6479 h->nr_cmds * sizeof(*h->cmd_pool),
6480 &(h->cmd_pool_dhandle));
6481 h->errinfo_pool = pci_alloc_consistent(h->pdev,
6482 h->nr_cmds * sizeof(*h->errinfo_pool),
6483 &(h->errinfo_pool_dhandle));
6484 if ((h->cmd_pool_bits == NULL)
6485 || (h->cmd_pool == NULL)
6486 || (h->errinfo_pool == NULL)) {
6487 dev_err(&h->pdev->dev, "out of memory in %s", __func__);
6488 return -ENOMEM;
6489 }
6490 return 0;
6491}
6492
6493static void hpsa_free_cmd_pool(struct ctlr_info *h)
6494{
6495 kfree(h->cmd_pool_bits);
6496 if (h->cmd_pool)
6497 pci_free_consistent(h->pdev,
6498 h->nr_cmds * sizeof(struct CommandList),
6499 h->cmd_pool, h->cmd_pool_dhandle);
Stephen M. Cameronaca90122014-02-18 13:56:14 -06006500 if (h->ioaccel2_cmd_pool)
6501 pci_free_consistent(h->pdev,
6502 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
6503 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006504 if (h->errinfo_pool)
6505 pci_free_consistent(h->pdev,
6506 h->nr_cmds * sizeof(struct ErrorInfo),
6507 h->errinfo_pool,
6508 h->errinfo_pool_dhandle);
Matt Gatese1f7de02014-02-18 13:55:17 -06006509 if (h->ioaccel_cmd_pool)
6510 pci_free_consistent(h->pdev,
6511 h->nr_cmds * sizeof(struct io_accel1_cmd),
6512 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006513}
6514
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006515static int hpsa_request_irq(struct ctlr_info *h,
6516 irqreturn_t (*msixhandler)(int, void *),
6517 irqreturn_t (*intxhandler)(int, void *))
6518{
Matt Gates254f7962012-05-01 11:43:06 -05006519 int rc, i;
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006520
Matt Gates254f7962012-05-01 11:43:06 -05006521 /*
6522 * initialize h->q[x] = x so that interrupt handlers know which
6523 * queue to process.
6524 */
6525 for (i = 0; i < MAX_REPLY_QUEUES; i++)
6526 h->q[i] = (u8) i;
6527
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006528 if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {
Matt Gates254f7962012-05-01 11:43:06 -05006529 /* If performant mode and MSI-X, use multiple reply queues */
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006530 for (i = 0; i < h->msix_vector; i++)
Matt Gates254f7962012-05-01 11:43:06 -05006531 rc = request_irq(h->intr[i], msixhandler,
6532 0, h->devname,
6533 &h->q[i]);
6534 } else {
6535 /* Use single reply pool */
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006536 if (h->msix_vector > 0 || h->msi_vector) {
Matt Gates254f7962012-05-01 11:43:06 -05006537 rc = request_irq(h->intr[h->intr_mode],
6538 msixhandler, 0, h->devname,
6539 &h->q[h->intr_mode]);
6540 } else {
6541 rc = request_irq(h->intr[h->intr_mode],
6542 intxhandler, IRQF_SHARED, h->devname,
6543 &h->q[h->intr_mode]);
6544 }
6545 }
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006546 if (rc) {
6547 dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
6548 h->intr[h->intr_mode], h->devname);
6549 return -ENODEV;
6550 }
6551 return 0;
6552}
6553
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006554static int hpsa_kdump_soft_reset(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006555{
6556 if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
6557 HPSA_RESET_TYPE_CONTROLLER)) {
6558 dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
6559 return -EIO;
6560 }
6561
6562 dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
6563 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
6564 dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
6565 return -1;
6566 }
6567
6568 dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
6569 if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
6570 dev_warn(&h->pdev->dev, "Board failed to become ready "
6571 "after soft reset.\n");
6572 return -1;
6573 }
6574
6575 return 0;
6576}
6577
Matt Gates254f7962012-05-01 11:43:06 -05006578static void free_irqs(struct ctlr_info *h)
6579{
6580 int i;
6581
6582 if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {
6583 /* Single reply queue, only one irq to free */
6584 i = h->intr_mode;
6585 free_irq(h->intr[i], &h->q[i]);
6586 return;
6587 }
6588
Hannes Reineckeeee0f032014-01-15 13:30:53 +01006589 for (i = 0; i < h->msix_vector; i++)
Matt Gates254f7962012-05-01 11:43:06 -05006590 free_irq(h->intr[i], &h->q[i]);
6591}
6592
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006593static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006594{
Matt Gates254f7962012-05-01 11:43:06 -05006595 free_irqs(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006596#ifdef CONFIG_PCI_MSI
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006597 if (h->msix_vector) {
6598 if (h->pdev->msix_enabled)
6599 pci_disable_msix(h->pdev);
6600 } else if (h->msi_vector) {
6601 if (h->pdev->msi_enabled)
6602 pci_disable_msi(h->pdev);
6603 }
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006604#endif /* CONFIG_PCI_MSI */
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05006605}
6606
6607static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
6608{
6609 hpsa_free_irqs_and_disable_msix(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006610 hpsa_free_sg_chain_blocks(h);
6611 hpsa_free_cmd_pool(h);
Matt Gatese1f7de02014-02-18 13:55:17 -06006612 kfree(h->ioaccel1_blockFetchTable);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006613 kfree(h->blockFetchTable);
6614 pci_free_consistent(h->pdev, h->reply_pool_size,
6615 h->reply_pool, h->reply_pool_dhandle);
6616 if (h->vaddr)
6617 iounmap(h->vaddr);
6618 if (h->transtable)
6619 iounmap(h->transtable);
6620 if (h->cfgtable)
6621 iounmap(h->cfgtable);
6622 pci_release_regions(h->pdev);
6623 kfree(h);
6624}
6625
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006626/* Called when controller lockup detected. */
6627static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)
6628{
6629 struct CommandList *c = NULL;
6630
6631 assert_spin_locked(&h->lock);
6632 /* Mark all outstanding commands as failed and complete them. */
6633 while (!list_empty(list)) {
6634 c = list_entry(list->next, struct CommandList, list);
6635 c->err_info->CommandStatus = CMD_HARDWARE_ERR;
Stephen M. Cameron5a3d16f2012-05-01 11:42:46 -05006636 finish_cmd(c);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006637 }
6638}
6639
6640static void controller_lockup_detected(struct ctlr_info *h)
6641{
6642 unsigned long flags;
6643
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006644 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6645 spin_lock_irqsave(&h->lock, flags);
6646 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
6647 spin_unlock_irqrestore(&h->lock, flags);
6648 dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n",
6649 h->lockup_detected);
6650 pci_disable_device(h->pdev);
6651 spin_lock_irqsave(&h->lock, flags);
6652 fail_all_cmds_on_list(h, &h->cmpQ);
6653 fail_all_cmds_on_list(h, &h->reqQ);
6654 spin_unlock_irqrestore(&h->lock, flags);
6655}
6656
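/* The firmware keeps a heartbeat counter in the config table; if it has not
 * advanced since the last sample (and no interrupt has arrived in the
 * meantime), the controller is presumed locked up: all outstanding commands
 * are failed and the device is disabled.
 */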
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006657static void detect_controller_lockup(struct ctlr_info *h)
6658{
6659 u64 now;
6660 u32 heartbeat;
6661 unsigned long flags;
6662
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006663 now = get_jiffies_64();
6664 /* If we've received an interrupt recently, we're ok. */
6665 if (time_after64(h->last_intr_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05006666 (h->heartbeat_sample_interval), now))
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006667 return;
6668
6669 /*
6670 * If we've already checked the heartbeat recently, we're ok.
6671 * This could happen if someone sends us a signal. We
6672 * otherwise don't care about signals in this thread.
6673 */
6674 if (time_after64(h->last_heartbeat_timestamp +
Stephen M. Camerone85c5972012-05-01 11:43:42 -05006675 (h->heartbeat_sample_interval), now))
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006676 return;
6677
6678 /* If heartbeat has not changed since we last looked, we're not ok. */
6679 spin_lock_irqsave(&h->lock, flags);
6680 heartbeat = readl(&h->cfgtable->HeartBeat);
6681 spin_unlock_irqrestore(&h->lock, flags);
6682 if (h->last_heartbeat == heartbeat) {
6683 controller_lockup_detected(h);
6684 return;
6685 }
6686
6687 /* We're ok. */
6688 h->last_heartbeat = heartbeat;
6689 h->last_heartbeat_timestamp = now;
6690}
6691
Stephen M. Cameron98465902014-02-21 16:25:00 -06006692static void hpsa_ack_ctlr_events(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006693{
6694 int i;
6695 char *event_type;
6696
Scott Teele863d682014-02-18 13:57:05 -06006697 /* Clear the driver-requested rescan flag */
6698 h->drv_req_rescan = 0;
6699
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006700 /* Ask the controller to clear the events we're handling. */
Stephen M. Cameron1f7cee82014-02-18 13:56:09 -06006701 if ((h->transMethod & (CFGTBL_Trans_io_accel1
6702 | CFGTBL_Trans_io_accel2)) &&
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006703 (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE ||
6704 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) {
6705
6706 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE)
6707 event_type = "state change";
6708 if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)
6709 event_type = "configuration change";
6710 /* Stop sending new RAID offload reqs via the IO accelerator */
6711 scsi_block_requests(h->scsi_host);
6712 for (i = 0; i < h->ndevices; i++)
6713 h->dev[i]->offload_enabled = 0;
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06006714 hpsa_drain_accel_commands(h);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006715 /* Set 'accelerator path config change' bit */
6716 dev_warn(&h->pdev->dev,
6717 "Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n",
6718 h->events, event_type);
6719 writel(h->events, &(h->cfgtable->clear_event_notify));
6720 /* Set the "clear event notify field update" bit 6 */
6721 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6722 /* Wait until ctlr clears 'clear event notify field', bit 6 */
6723 hpsa_wait_for_clear_event_notify_ack(h);
6724 scsi_unblock_requests(h->scsi_host);
6725 } else {
6726 /* Acknowledge controller notification events. */
6727 writel(h->events, &(h->cfgtable->clear_event_notify));
6728 writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);
6729 hpsa_wait_for_clear_event_notify_ack(h);
6730#if 0
6731 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
6732 hpsa_wait_for_mode_change_ack(h);
6733#endif
6734 }
Stephen M. Cameron98465902014-02-21 16:25:00 -06006735 return;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006736}
6737
6738/* Check a register on the controller to see if there are configuration
6739 * changes (added/changed/removed logical drives, etc.) which mean that
Scott Teele863d682014-02-18 13:57:05 -06006740 * we should rescan the controller for devices.
6741 * Also check flag for driver-initiated rescan.
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006742 */
Stephen M. Cameron98465902014-02-21 16:25:00 -06006743static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006744{
Stephen M. Cameron98465902014-02-21 16:25:00 -06006745 if (h->drv_req_rescan)
6746 return 1;
6747
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006748 if (!(h->fw_support & MISC_FW_EVENT_NOTIFY))
Stephen M. Cameron98465902014-02-21 16:25:00 -06006749 return 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006750
6751 h->events = readl(&(h->cfgtable->event_notify));
Stephen M. Cameron98465902014-02-21 16:25:00 -06006752 return h->events & RESCAN_REQUIRED_EVENT_BITS;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06006753}
6754
Stephen M. Cameron98465902014-02-21 16:25:00 -06006755/*
6756 * Check if any of the offline devices have become ready
6757 */
6758static int hpsa_offline_devices_ready(struct ctlr_info *h)
6759{
6760 unsigned long flags;
6761 struct offline_device_entry *d;
6762 struct list_head *this, *tmp;
6763
6764 spin_lock_irqsave(&h->offline_device_lock, flags);
6765 list_for_each_safe(this, tmp, &h->offline_device_list) {
6766 d = list_entry(this, struct offline_device_entry,
6767 offline_list);
6768 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6769 if (!hpsa_volume_offline(h, d->scsi3addr))
6770 return 1;
6771 spin_lock_irqsave(&h->offline_device_lock, flags);
6772 }
6773 spin_unlock_irqrestore(&h->offline_device_lock, flags);
6774 return 0;
6775}
6776
6777
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006778static void hpsa_monitor_ctlr_worker(struct work_struct *work)
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006779{
6780 unsigned long flags;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006781 struct ctlr_info *h = container_of(to_delayed_work(work),
6782 struct ctlr_info, monitor_ctlr_work);
6783 detect_controller_lockup(h);
6784 if (h->lockup_detected)
6785 return;
Stephen M. Cameron98465902014-02-21 16:25:00 -06006786
6787 if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) {
6788 scsi_host_get(h->scsi_host);
6789 h->drv_req_rescan = 0;
6790 hpsa_ack_ctlr_events(h);
6791 hpsa_scan_start(h->scsi_host);
6792 scsi_host_put(h->scsi_host);
6793 }
6794
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006795 spin_lock_irqsave(&h->lock, flags);
6796 if (h->remove_in_progress) {
6797 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006798 return;
6799 }
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006800 schedule_delayed_work(&h->monitor_ctlr_work,
6801 h->heartbeat_sample_interval);
6802 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Camerona0c12412011-10-26 16:22:04 -05006803}
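
/*
 * Editor's note: the worker above is an instance of the self-rearming
 * delayed-work pattern.  The sketch below strips it to the essentials so
 * the re-arm/teardown interaction is easier to see.  It is illustrative
 * only; struct demo_ctx, demo_worker() and the one-second period are
 * hypothetical names, not part of this driver.
 */
#if 0	/* illustrative sketch, not compiled into the driver */
struct demo_ctx {
	struct delayed_work work;
	spinlock_t lock;
	int remove_in_progress;
};

static void demo_worker(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(to_delayed_work(work),
					struct demo_ctx, work);
	unsigned long flags;

	/* ... periodic checks (lockup detection, rescans, ...) ... */

	spin_lock_irqsave(&ctx->lock, flags);
	if (!ctx->remove_in_progress)
		schedule_delayed_work(&ctx->work, HZ);	/* re-arm in ~1 second */
	spin_unlock_irqrestore(&ctx->lock, flags);
}
#endif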
6804
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08006805static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006806{
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006807 int dac, rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006808 struct ctlr_info *h;
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006809 int try_soft_reset = 0;
6810 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006811
6812 if (number_of_controllers == 0)
6813 printk(KERN_INFO DRIVER_NAME "\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006814
Stephen M. Cameron4c2a8c42010-06-16 13:51:35 -05006815 rc = hpsa_init_reset_devices(pdev);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006816 if (rc) {
6817 if (rc != -ENOTSUPP)
6818 return rc;
6819 /* If the reset fails in a particular way (it has no way to do
6820 * a proper hard reset, so returns -ENOTSUPP) we can try to do
6821 * a soft reset once we get the controller configured up to the
6822 * point that it can accept a command.
6823 */
6824 try_soft_reset = 1;
6825 rc = 0;
6826 }
6827
6828reinit_after_soft_reset:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006829
Don Brace303932f2010-02-04 08:42:40 -06006830	/* Command structures must be aligned on a 128-byte boundary because
 6831	 * the low-order bits of the address are used by the hardware and by
 6832	 * the driver.  See comments in hpsa.h for more info.
6833 */
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06006834#define COMMANDLIST_ALIGNMENT 128
Don Brace303932f2010-02-04 08:42:40 -06006835 BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006836 h = kzalloc(sizeof(*h), GFP_KERNEL);
6837 if (!h)
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06006838 return -ENOMEM;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006839
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006840 h->pdev = pdev;
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006841 h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
Stephen M. Cameron9e0fc762011-02-15 15:32:48 -06006842 INIT_LIST_HEAD(&h->cmpQ);
6843 INIT_LIST_HEAD(&h->reqQ);
Stephen M. Cameron98465902014-02-21 16:25:00 -06006844 INIT_LIST_HEAD(&h->offline_device_list);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006845 spin_lock_init(&h->lock);
Stephen M. Cameron98465902014-02-21 16:25:00 -06006846 spin_lock_init(&h->offline_device_lock);
Stephen M. Cameron6eaf46f2011-01-06 14:48:24 -06006847 spin_lock_init(&h->scan_lock);
Stephen M. Cameron0390f0c2013-09-23 13:34:12 -05006848 spin_lock_init(&h->passthru_count_lock);
Stephen M. Cameron55c06c72010-05-27 15:12:46 -05006849 rc = hpsa_pci_init(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06006850 if (rc != 0)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006851 goto clean1;
6852
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06006853 sprintf(h->devname, HPSA "%d", number_of_controllers);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006854 h->ctlr = number_of_controllers;
6855 number_of_controllers++;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006856
6857 /* configure PCI DMA stuff */
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06006858 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
6859 if (rc == 0) {
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006860 dac = 1;
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06006861 } else {
6862 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6863 if (rc == 0) {
6864 dac = 0;
6865 } else {
6866 dev_err(&pdev->dev, "no suitable DMA available\n");
6867 goto clean1;
6868 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006869 }
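
	/*
	 * Editor's note: on kernels that provide it, the same 64-bit DMA
	 * probe with a 32-bit fallback can be expressed with the combined
	 * helper, e.g.:
	 *
	 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
	 *		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	 *
	 * This is only an illustrative alternative; the driver version shown
	 * here uses the pci_set_dma_mask() interface.
	 */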
6870
6871 /* make sure the board interrupts are off */
6872 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron10f66012010-06-16 13:51:50 -05006873
Stephen M. Cameron0ae01a32011-05-03 14:59:25 -05006874 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006875 goto clean2;
Don Brace303932f2010-02-04 08:42:40 -06006876 dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
6877 h->devname, pdev->device,
Stephen M. Camerona9a3a272011-02-15 15:32:53 -06006878 h->intr[h->intr_mode], dac ? "" : " not");
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006879 if (hpsa_allocate_cmd_pool(h))
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006880 goto clean4;
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06006881 if (hpsa_allocate_sg_chain_blocks(h))
6882 goto clean4;
Stephen M. Camerona08a84712010-02-04 08:43:16 -06006883 init_waitqueue_head(&h->scan_wait_queue);
6884 h->scan_finished = 1; /* no scan currently in progress */
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006885
6886 pci_set_drvdata(pdev, h);
Stephen M. Cameron9a413382011-05-03 14:59:41 -05006887 h->ndevices = 0;
6888 h->scsi_host = NULL;
6889 spin_lock_init(&h->devlock);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006890 hpsa_put_ctlr_into_performant_mode(h);
6891
6892 /* At this point, the controller is ready to take commands.
6893 * Now, if reset_devices and the hard reset didn't work, try
6894 * the soft reset and see if that works.
6895 */
6896 if (try_soft_reset) {
6897
6898 /* This is kind of gross. We may or may not get a completion
6899 * from the soft reset command, and if we do, then the value
6900 * from the fifo may or may not be valid. So, we wait 10 secs
6901 * after the reset throwing away any completions we get during
6902 * that time. Unregister the interrupt handler and register
6903 * fake ones to scoop up any residual completions.
6904 */
6905 spin_lock_irqsave(&h->lock, flags);
6906 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6907 spin_unlock_irqrestore(&h->lock, flags);
Matt Gates254f7962012-05-01 11:43:06 -05006908 free_irqs(h);
Stephen M. Cameron64670ac2011-05-03 14:59:51 -05006909 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
6910 hpsa_intx_discard_completions);
6911 if (rc) {
6912 dev_warn(&h->pdev->dev, "Failed to request_irq after "
6913 "soft reset.\n");
6914 goto clean4;
6915 }
6916
6917 rc = hpsa_kdump_soft_reset(h);
6918 if (rc)
6919 /* Neither hard nor soft reset worked, we're hosed. */
6920 goto clean4;
6921
6922 dev_info(&h->pdev->dev, "Board READY.\n");
6923 dev_info(&h->pdev->dev,
6924 "Waiting for stale completions to drain.\n");
6925 h->access.set_intr_mask(h, HPSA_INTR_ON);
6926 msleep(10000);
6927 h->access.set_intr_mask(h, HPSA_INTR_OFF);
6928
6929 rc = controller_reset_failed(h->cfgtable);
6930 if (rc)
6931 dev_info(&h->pdev->dev,
6932 "Soft reset appears to have failed.\n");
6933
6934 /* since the controller's reset, we have to go back and re-init
6935 * everything. Easiest to just forget what we've done and do it
6936 * all over again.
6937 */
6938 hpsa_undo_allocations_after_kdump_soft_reset(h);
6939 try_soft_reset = 0;
6940 if (rc)
6941 /* don't go to clean4, we already unallocated */
6942 return -ENODEV;
6943
6944 goto reinit_after_soft_reset;
6945 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006946
Scott Teelda0697b2014-02-18 13:57:00 -06006947 /* Enable Accelerated IO path at driver layer */
6948 h->acciopath_status = 1;
6949
Scott Teele863d682014-02-18 13:57:05 -06006950 h->drv_req_rescan = 0;
6951
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006952 /* Turn the interrupts on so we can service requests */
6953 h->access.set_intr_mask(h, HPSA_INTR_ON);
6954
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06006955 hpsa_hba_inquiry(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006956 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06006957
6958 /* Monitor the controller for firmware lockups */
6959 h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL;
6960 INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker);
6961 schedule_delayed_work(&h->monitor_ctlr_work,
6962 h->heartbeat_sample_interval);
Stephen M. Cameron88bf6d62013-11-01 11:02:25 -05006963 return 0;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006964
6965clean4:
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06006966 hpsa_free_sg_chain_blocks(h);
Stephen M. Cameron2e9d1b32011-05-03 14:59:20 -05006967 hpsa_free_cmd_pool(h);
Matt Gates254f7962012-05-01 11:43:06 -05006968 free_irqs(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006969clean2:
6970clean1:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006971 kfree(h);
Stephen M. Cameronecd9aad2010-02-04 08:41:59 -06006972 return rc;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006973}
6974
6975static void hpsa_flush_cache(struct ctlr_info *h)
6976{
6977 char *flush_buf;
6978 struct CommandList *c;
Stephen M. Cameron702890e2013-09-23 13:33:30 -05006979 unsigned long flags;
6980
6981 /* Don't bother trying to flush the cache if locked up */
6982 spin_lock_irqsave(&h->lock, flags);
6983 if (unlikely(h->lockup_detected)) {
6984 spin_unlock_irqrestore(&h->lock, flags);
6985 return;
6986 }
6987 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08006988
6989 flush_buf = kzalloc(4, GFP_KERNEL);
6990 if (!flush_buf)
6991 return;
6992
6993 c = cmd_special_alloc(h);
6994 if (!c) {
6995 dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
6996 goto out_of_memory;
6997 }
Stephen M. Camerona2dac132013-02-20 11:24:41 -06006998 if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
6999 RAID_CTLR_LUNID, TYPE_CMD)) {
7000 goto out;
7001 }
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007002 hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
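	/* Editor's note: the fill_cmd() failure path above jumps into the body
	 * of the following if-statement via the "out" label, so a fill_cmd()
	 * error and a non-zero CommandStatus produce the same warning before
	 * the command is freed.
	 */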
7003 if (c->err_info->CommandStatus != 0)
Stephen M. Camerona2dac132013-02-20 11:24:41 -06007004out:
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007005 dev_warn(&h->pdev->dev,
7006 "error flushing cache on controller\n");
7007 cmd_special_free(h, c);
7008out_of_memory:
7009 kfree(flush_buf);
7010}
7011
7012static void hpsa_shutdown(struct pci_dev *pdev)
7013{
7014 struct ctlr_info *h;
7015
7016 h = pci_get_drvdata(pdev);
7017	/* Turn board interrupts off and send the flush-cache command so
7018	 * that all data in the battery-backed cache is written to disk
7019	 * before the system goes down.
7020 */
7021 hpsa_flush_cache(h);
7022 h->access.set_intr_mask(h, HPSA_INTR_OFF);
Stephen M. Cameron0097f0f2012-05-01 11:43:21 -05007023 hpsa_free_irqs_and_disable_msix(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007024}
7025
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007026static void hpsa_free_device_info(struct ctlr_info *h)
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06007027{
7028 int i;
7029
7030 for (i = 0; i < h->ndevices; i++)
7031 kfree(h->dev[i]);
7032}
7033
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007034static void hpsa_remove_one(struct pci_dev *pdev)
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007035{
7036 struct ctlr_info *h;
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007037 unsigned long flags;
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007038
7039 if (pci_get_drvdata(pdev) == NULL) {
Stephen M. Camerona0c12412011-10-26 16:22:04 -05007040 dev_err(&pdev->dev, "unable to remove device\n");
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007041 return;
7042 }
7043 h = pci_get_drvdata(pdev);
Stephen M. Cameron8a98db732013-12-04 17:10:07 -06007044
7045 /* Get rid of any controller monitoring work items */
7046 spin_lock_irqsave(&h->lock, flags);
7047 h->remove_in_progress = 1;
7048 cancel_delayed_work(&h->monitor_ctlr_work);
7049 spin_unlock_irqrestore(&h->lock, flags);
7050
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007051 hpsa_unregister_scsi(h); /* unhook from SCSI subsystem */
7052 hpsa_shutdown(pdev);
7053 iounmap(h->vaddr);
Stephen M. Cameron204892e2010-05-27 15:13:22 -05007054 iounmap(h->transtable);
7055 iounmap(h->cfgtable);
Stephen M. Cameron55e14e72012-01-19 14:00:42 -06007056 hpsa_free_device_info(h);
Stephen M. Cameron33a2ffc2010-02-25 14:03:27 -06007057 hpsa_free_sg_chain_blocks(h);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007058 pci_free_consistent(h->pdev,
7059 h->nr_cmds * sizeof(struct CommandList),
7060 h->cmd_pool, h->cmd_pool_dhandle);
7061 pci_free_consistent(h->pdev,
7062 h->nr_cmds * sizeof(struct ErrorInfo),
7063 h->errinfo_pool, h->errinfo_pool_dhandle);
Don Brace303932f2010-02-04 08:42:40 -06007064 pci_free_consistent(h->pdev, h->reply_pool_size,
7065 h->reply_pool, h->reply_pool_dhandle);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007066 kfree(h->cmd_pool_bits);
Don Brace303932f2010-02-04 08:42:40 -06007067 kfree(h->blockFetchTable);
Matt Gatese1f7de02014-02-18 13:55:17 -06007068 kfree(h->ioaccel1_blockFetchTable);
Stephen M. Cameronaca90122014-02-18 13:56:14 -06007069 kfree(h->ioaccel2_blockFetchTable);
Stephen M. Cameron339b2b12010-02-04 08:42:50 -06007070 kfree(h->hba_inquiry_data);
Stephen M. Cameronf0bd0b682012-05-01 11:42:09 -05007071 pci_disable_device(pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007072 pci_release_regions(pdev);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007073 kfree(h);
7074}
7075
7076static int hpsa_suspend(__attribute__((unused)) struct pci_dev *pdev,
7077 __attribute__((unused)) pm_message_t state)
7078{
7079 return -ENOSYS;
7080}
7081
7082static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
7083{
7084 return -ENOSYS;
7085}
7086
7087static struct pci_driver hpsa_pci_driver = {
Stephen M. Cameronf79cfec2012-01-19 14:00:59 -06007088 .name = HPSA,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007089 .probe = hpsa_init_one,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007090 .remove = hpsa_remove_one,
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007091 .id_table = hpsa_pci_device_id, /* id_table */
7092 .shutdown = hpsa_shutdown,
7093 .suspend = hpsa_suspend,
7094 .resume = hpsa_resume,
7095};
7096
Don Brace303932f2010-02-04 08:42:40 -06007097/* Fill in bucket_map[], given nsgs (the max number of
7098 * scatter gather elements supported) and bucket[],
7099 * which is an array of 8 integers. The bucket[] array
7100 * contains 8 different DMA transfer sizes (in 16
7101 * byte increments) which the controller uses to fetch
7102 * commands. This function fills in bucket_map[], which
7103 * maps a given number of scatter gather elements to one of
7104 * the 8 DMA transfer sizes. The point of it is to allow the
7105 * controller to only do as much DMA as needed to fetch the
7106 * command, with the DMA transfer size encoded in the lower
7107 * bits of the command address.
7108 */
7109static void calc_bucket_map(int bucket[], int num_buckets,
Matt Gatese1f7de02014-02-18 13:55:17 -06007110 int nsgs, int min_blocks, int *bucket_map)
Don Brace303932f2010-02-04 08:42:40 -06007111{
7112 int i, j, b, size;
7113
Don Brace303932f2010-02-04 08:42:40 -06007114 /* Note, bucket_map must have nsgs+1 entries. */
7115 for (i = 0; i <= nsgs; i++) {
7116 /* Compute size of a command with i SG entries */
Matt Gatese1f7de02014-02-18 13:55:17 -06007117 size = i + min_blocks;
Don Brace303932f2010-02-04 08:42:40 -06007118 b = num_buckets; /* Assume the biggest bucket */
7119 /* Find the bucket that is just big enough */
Matt Gatese1f7de02014-02-18 13:55:17 -06007120 for (j = 0; j < num_buckets; j++) {
Don Brace303932f2010-02-04 08:42:40 -06007121 if (bucket[j] >= size) {
7122 b = j;
7123 break;
7124 }
7125 }
7126 /* for a command with i SG entries, use bucket b. */
7127 bucket_map[i] = b;
7128 }
7129}
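
/*
 * Editor's worked example: hpsa_enter_performant_mode() below calls
 * calc_bucket_map() with bucket[] = {5, 6, 8, 10, 12, 20, 28,
 * SG_ENTRIES_IN_CMD + 4} and min_blocks = 4.  A command with i = 10
 * scatter-gather entries then needs size = 10 + 4 = 14 sixteen-byte
 * blocks; the first bucket >= 14 is bucket[5] = 20, so bucket_map[10] = 5
 * and the controller fetches 20 * 16 = 320 bytes for that command.  A
 * command with no SG entries maps to bucket_map[0] = 0 (5 blocks, or 80
 * bytes).  The numbers are taken from the call site below, not from
 * controller documentation.
 */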
7130
Matt Gatese1f7de02014-02-18 13:55:17 -06007131static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
Don Brace303932f2010-02-04 08:42:40 -06007132{
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007133 int i;
7134 unsigned long register_value;
Matt Gatese1f7de02014-02-18 13:55:17 -06007135 unsigned long transMethod = CFGTBL_Trans_Performant |
7136 (trans_support & CFGTBL_Trans_use_short_tags) |
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007137 CFGTBL_Trans_enable_directed_msix |
7138 (trans_support & (CFGTBL_Trans_io_accel1 |
7139 CFGTBL_Trans_io_accel2));
Matt Gatese1f7de02014-02-18 13:55:17 -06007140 struct access_method access = SA5_performant_access;
Stephen M. Camerondef342b2010-05-27 15:14:39 -05007141
7142 /* This is a bit complicated. There are 8 registers on
7143	 * the controller which we write to tell it the 8 different
7144	 * command sizes it may be given.  It's a way of
7145 * reducing the DMA done to fetch each command. Encoded into
7146 * each command's tag are 3 bits which communicate to the controller
7147 * which of the eight sizes that command fits within. The size of
7148 * each command depends on how many scatter gather entries there are.
7149 * Each SG entry requires 16 bytes. The eight registers are programmed
7150 * with the number of 16-byte blocks a command of that size requires.
7151	 * The smallest command possible requires 5 such 16-byte blocks.
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007152	 * The largest command possible requires SG_ENTRIES_IN_CMD + 4 16-byte
Stephen M. Camerondef342b2010-05-27 15:14:39 -05007153 * blocks. Note, this only extends to the SG entries contained
7154 * within the command block, and does not extend to chained blocks
7155 * of SG elements. bft[] contains the eight values we write to
7156 * the registers. They are not evenly distributed, but have more
7157 * sizes for small commands, and fewer sizes for larger commands.
7158 */
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007159 int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4};
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007160#define MIN_IOACCEL2_BFT_ENTRY 5
7161#define HPSA_IOACCEL2_HEADER_SZ 4
7162 int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12,
7163 13, 14, 15, 16, 17, 18, 19,
7164 HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES};
7165 BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16);
7166 BUILD_BUG_ON(ARRAY_SIZE(bft) != 8);
7167 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) >
7168 16 * MIN_IOACCEL2_BFT_ENTRY);
7169 BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007170 BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);
Don Brace303932f2010-02-04 08:42:40 -06007171 /* 5 = 1 s/g entry or 4k
7172 * 6 = 2 s/g entry or 8k
7173 * 8 = 4 s/g entry or 16k
7174 * 10 = 6 s/g entry or 24k
7175 */
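	/*
	 * Editor's worked example: with these buckets, a command described by
	 * 2 SG entries needs 2 + 4 = 6 sixteen-byte blocks, so it falls in
	 * the "6" bucket and the controller DMAs only 6 * 16 = 96 bytes of
	 * the command block rather than the whole struct CommandList.
	 * (Derived from bft[] and calc_bucket_map() above.)
	 */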
Don Brace303932f2010-02-04 08:42:40 -06007176
Don Brace303932f2010-02-04 08:42:40 -06007177 /* Controller spec: zero out this buffer. */
7178 memset(h->reply_pool, 0, h->reply_pool_size);
Don Brace303932f2010-02-04 08:42:40 -06007179
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007180 bft[7] = SG_ENTRIES_IN_CMD + 4;
7181 calc_bucket_map(bft, ARRAY_SIZE(bft),
Matt Gatese1f7de02014-02-18 13:55:17 -06007182 SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);
Don Brace303932f2010-02-04 08:42:40 -06007183 for (i = 0; i < 8; i++)
7184 writel(bft[i], &h->transtable->BlockFetch[i]);
7185
7186 /* size of controller ring buffer */
7187 writel(h->max_commands, &h->transtable->RepQSize);
Matt Gates254f7962012-05-01 11:43:06 -05007188 writel(h->nreply_queues, &h->transtable->RepQCount);
Don Brace303932f2010-02-04 08:42:40 -06007189 writel(0, &h->transtable->RepQCtrAddrLow32);
7190 writel(0, &h->transtable->RepQCtrAddrHigh32);
Matt Gates254f7962012-05-01 11:43:06 -05007191
7192 for (i = 0; i < h->nreply_queues; i++) {
7193 writel(0, &h->transtable->RepQAddr[i].upper);
7194 writel(h->reply_pool_dhandle +
7195 (h->max_commands * sizeof(u64) * i),
7196 &h->transtable->RepQAddr[i].lower);
7197 }
7198
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007199 writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);
Matt Gatese1f7de02014-02-18 13:55:17 -06007200 writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest));
7201 /*
7202 * enable outbound interrupt coalescing in accelerator mode;
7203 */
7204 if (trans_support & CFGTBL_Trans_io_accel1) {
7205 access = SA5_ioaccel_mode1_access;
7206 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7207 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
Scott Teelc3497752014-02-18 13:56:34 -06007208 } else {
7209 if (trans_support & CFGTBL_Trans_io_accel2) {
7210 access = SA5_ioaccel_mode2_access;
7211 writel(10, &h->cfgtable->HostWrite.CoalIntDelay);
7212 writel(4, &h->cfgtable->HostWrite.CoalIntCount);
7213 }
Matt Gatese1f7de02014-02-18 13:55:17 -06007214 }
Don Brace303932f2010-02-04 08:42:40 -06007215 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
Stephen M. Cameron3f4336f2010-05-27 15:14:08 -05007216 hpsa_wait_for_mode_change_ack(h);
Don Brace303932f2010-02-04 08:42:40 -06007217 register_value = readl(&(h->cfgtable->TransportActive));
7218 if (!(register_value & CFGTBL_Trans_Performant)) {
7219 dev_warn(&h->pdev->dev, "unable to get board into"
7220 " performant mode\n");
7221 return;
7222 }
Stephen M. Cameron960a30e2011-02-15 15:33:03 -06007223 /* Change the access methods to the performant access methods */
Matt Gatese1f7de02014-02-18 13:55:17 -06007224 h->access = access;
7225 h->transMethod = transMethod;
7226
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007227 if (!((trans_support & CFGTBL_Trans_io_accel1) ||
7228 (trans_support & CFGTBL_Trans_io_accel2)))
Matt Gatese1f7de02014-02-18 13:55:17 -06007229 return;
7230
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007231 if (trans_support & CFGTBL_Trans_io_accel1) {
7232 /* Set up I/O accelerator mode */
7233 for (i = 0; i < h->nreply_queues; i++) {
7234 writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX);
7235 h->reply_queue[i].current_entry =
7236 readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX);
7237 }
7238 bft[7] = h->ioaccel_maxsg + 8;
7239 calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8,
7240 h->ioaccel1_blockFetchTable);
7241
7242 /* initialize all reply queue entries to unused */
7243 memset(h->reply_pool, (u8) IOACCEL_MODE1_REPLY_UNUSED,
7244 h->reply_pool_size);
7245
7246 /* set all the constant fields in the accelerator command
7247 * frames once at init time to save CPU cycles later.
7248 */
7249 for (i = 0; i < h->nr_cmds; i++) {
7250 struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i];
7251
7252 cp->function = IOACCEL1_FUNCTION_SCSIIO;
7253 cp->err_info = (u32) (h->errinfo_pool_dhandle +
7254 (i * sizeof(struct ErrorInfo)));
7255 cp->err_info_len = sizeof(struct ErrorInfo);
7256 cp->sgl_offset = IOACCEL1_SGLOFFSET;
7257 cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT;
7258 cp->timeout_sec = 0;
7259 cp->ReplyQueue = 0;
7260 cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) |
7261 DIRECT_LOOKUP_BIT;
7262 cp->Tag.upper = 0;
7263 cp->host_addr.lower =
7264 (u32) (h->ioaccel_cmd_pool_dhandle +
7265 (i * sizeof(struct io_accel1_cmd)));
7266 cp->host_addr.upper = 0;
7267 }
7268 } else if (trans_support & CFGTBL_Trans_io_accel2) {
7269 u64 cfg_offset, cfg_base_addr_index;
7270 u32 bft2_offset, cfg_base_addr;
7271 int rc;
7272
7273 rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr,
7274 &cfg_base_addr_index, &cfg_offset);
7275 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64);
7276 bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ;
7277 calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg,
7278 4, h->ioaccel2_blockFetchTable);
7279 bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset);
7280 BUILD_BUG_ON(offsetof(struct CfgTable,
7281 io_accel_request_size_offset) != 0xb8);
7282 h->ioaccel2_bft2_regs =
7283 remap_pci_mem(pci_resource_start(h->pdev,
7284 cfg_base_addr_index) +
7285 cfg_offset + bft2_offset,
7286 ARRAY_SIZE(bft2) *
7287 sizeof(*h->ioaccel2_bft2_regs));
7288 for (i = 0; i < ARRAY_SIZE(bft2); i++)
7289 writel(bft2[i], &h->ioaccel2_bft2_regs[i]);
Matt Gatese1f7de02014-02-18 13:55:17 -06007290 }
Stephen M. Cameronb9af4932014-02-18 13:56:29 -06007291 writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
7292 hpsa_wait_for_mode_change_ack(h);
Matt Gatese1f7de02014-02-18 13:55:17 -06007293}
7294
7295static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h)
7296{
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007297 h->ioaccel_maxsg =
7298 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7299 if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES)
7300 h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES;
7301
Matt Gatese1f7de02014-02-18 13:55:17 -06007302 /* Command structures must be aligned on a 128-byte boundary
7303 * because the 7 lower bits of the address are used by the
7304 * hardware.
7305 */
7306#define IOACCEL1_COMMANDLIST_ALIGNMENT 128
7307 BUILD_BUG_ON(sizeof(struct io_accel1_cmd) %
7308 IOACCEL1_COMMANDLIST_ALIGNMENT);
7309 h->ioaccel_cmd_pool =
7310 pci_alloc_consistent(h->pdev,
7311 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7312 &(h->ioaccel_cmd_pool_dhandle));
7313
7314 h->ioaccel1_blockFetchTable =
Stephen M. Cameron283b4a92014-02-18 13:55:33 -06007315 kmalloc(((h->ioaccel_maxsg + 1) *
Matt Gatese1f7de02014-02-18 13:55:17 -06007316 sizeof(u32)), GFP_KERNEL);
7317
7318 if ((h->ioaccel_cmd_pool == NULL) ||
7319 (h->ioaccel1_blockFetchTable == NULL))
7320 goto clean_up;
7321
7322 memset(h->ioaccel_cmd_pool, 0,
7323 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool));
7324 return 0;
7325
7326clean_up:
7327 if (h->ioaccel_cmd_pool)
7328 pci_free_consistent(h->pdev,
7329 h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
7330 h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle);
7331 kfree(h->ioaccel1_blockFetchTable);
7332 return 1;
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007333}
7334
Stephen M. Cameronaca90122014-02-18 13:56:14 -06007335static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h)
7336{
7337 /* Allocate ioaccel2 mode command blocks and block fetch table */
7338
7339 h->ioaccel_maxsg =
7340 readl(&(h->cfgtable->io_accel_max_embedded_sg_count));
7341 if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES)
7342 h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES;
7343
7344#define IOACCEL2_COMMANDLIST_ALIGNMENT 128
7345 BUILD_BUG_ON(sizeof(struct io_accel2_cmd) %
7346 IOACCEL2_COMMANDLIST_ALIGNMENT);
7347 h->ioaccel2_cmd_pool =
7348 pci_alloc_consistent(h->pdev,
7349 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7350 &(h->ioaccel2_cmd_pool_dhandle));
7351
7352 h->ioaccel2_blockFetchTable =
7353 kmalloc(((h->ioaccel_maxsg + 1) *
7354 sizeof(u32)), GFP_KERNEL);
7355
7356 if ((h->ioaccel2_cmd_pool == NULL) ||
7357 (h->ioaccel2_blockFetchTable == NULL))
7358 goto clean_up;
7359
7360 memset(h->ioaccel2_cmd_pool, 0,
7361 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool));
7362 return 0;
7363
7364clean_up:
7365 if (h->ioaccel2_cmd_pool)
7366 pci_free_consistent(h->pdev,
7367 h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
7368 h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);
7369 kfree(h->ioaccel2_blockFetchTable);
7370 return 1;
7371}
7372
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08007373static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007374{
7375 u32 trans_support;
Matt Gatese1f7de02014-02-18 13:55:17 -06007376 unsigned long transMethod = CFGTBL_Trans_Performant |
7377 CFGTBL_Trans_use_short_tags;
Matt Gates254f7962012-05-01 11:43:06 -05007378 int i;
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007379
Stephen M. Cameron02ec19c2011-01-06 14:48:29 -06007380 if (hpsa_simple_mode)
7381 return;

	trans_support = readl(&(h->cfgtable->TransportSupport));
	if (!(trans_support & PERFORMANT_MODE))
		return;

	/* Check for I/O accelerator mode support */
	if (trans_support & CFGTBL_Trans_io_accel1) {
7385 transMethod |= CFGTBL_Trans_io_accel1 |
7386 CFGTBL_Trans_enable_directed_msix;
7387 if (hpsa_alloc_ioaccel_cmd_and_bft(h))
7388 goto clean_up;
Stephen M. Cameronaca90122014-02-18 13:56:14 -06007389 } else {
7390 if (trans_support & CFGTBL_Trans_io_accel2) {
7391 transMethod |= CFGTBL_Trans_io_accel2 |
7392 CFGTBL_Trans_enable_directed_msix;
7393 if (ioaccel2_alloc_cmds_and_bft(h))
7394 goto clean_up;
7395 }
Matt Gatese1f7de02014-02-18 13:55:17 -06007396 }
7397
	/* TODO: check that h->nreply_queues below is set correctly */
Hannes Reineckeeee0f032014-01-15 13:30:53 +01007403 h->nreply_queues = h->msix_vector > 0 ? h->msix_vector : 1;
Stephen M. Cameroncba3d382010-06-16 13:51:56 -05007404 hpsa_get_max_perf_mode_cmds(h);
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007405 /* Performant mode ring buffer and supporting data structures */
Matt Gates254f7962012-05-01 11:43:06 -05007406 h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues;
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007407 h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size,
7408 &(h->reply_pool_dhandle));
7409
Matt Gates254f7962012-05-01 11:43:06 -05007410 for (i = 0; i < h->nreply_queues; i++) {
7411 h->reply_queue[i].head = &h->reply_pool[h->max_commands * i];
7412 h->reply_queue[i].size = h->max_commands;
7413 h->reply_queue[i].wraparound = 1; /* spec: init to 1 */
7414 h->reply_queue[i].current_entry = 0;
7415 }
7416
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007417 /* Need a block fetch table for performant mode */
Stephen M. Camerond66ae082012-01-19 14:00:48 -06007418 h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *
Stephen M. Cameron6c311b52010-05-27 15:14:19 -05007419 sizeof(u32)), GFP_KERNEL);
7420
7421 if ((h->reply_pool == NULL)
7422 || (h->blockFetchTable == NULL))
7423 goto clean_up;
7424
Matt Gatese1f7de02014-02-18 13:55:17 -06007425 hpsa_enter_performant_mode(h, trans_support);
Don Brace303932f2010-02-04 08:42:40 -06007426 return;
7427
7428clean_up:
7429 if (h->reply_pool)
7430 pci_free_consistent(h->pdev, h->reply_pool_size,
7431 h->reply_pool, h->reply_pool_dhandle);
7432 kfree(h->blockFetchTable);
7433}
7434
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007435static int is_accelerated_cmd(struct CommandList *c)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007436{
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007437 return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2;
7438}
7439
7440static void hpsa_drain_accel_commands(struct ctlr_info *h)
7441{
7442 struct CommandList *c = NULL;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007443 unsigned long flags;
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007444 int accel_cmds_out;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007445
7446 do { /* wait for all outstanding commands to drain out */
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007447 accel_cmds_out = 0;
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007448 spin_lock_irqsave(&h->lock, flags);
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007449 list_for_each_entry(c, &h->cmpQ, list)
7450 accel_cmds_out += is_accelerated_cmd(c);
7451 list_for_each_entry(c, &h->reqQ, list)
7452 accel_cmds_out += is_accelerated_cmd(c);
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007453 spin_unlock_irqrestore(&h->lock, flags);
Stephen M. Cameron23100dd2014-02-18 13:57:37 -06007454 if (accel_cmds_out <= 0)
Stephen M. Cameron76438d02014-02-18 13:55:43 -06007455 break;
7456 msleep(100);
7457 } while (1);
7458}
7459
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007460/*
7461 * This is it.  Register the PCI driver information for the cards we control;
7462 * the OS will call our registered routines when it finds one of our cards.
7463 */
7464static int __init hpsa_init(void)
7465{
Mike Miller31468402010-02-25 14:03:12 -06007466 return pci_register_driver(&hpsa_pci_driver);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007467}
7468
7469static void __exit hpsa_cleanup(void)
7470{
7471 pci_unregister_driver(&hpsa_pci_driver);
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007472}
7473
Matt Gatese1f7de02014-02-18 13:55:17 -06007474static void __attribute__((unused)) verify_offsets(void)
7475{
7476#define VERIFY_OFFSET(member, offset) \
Scott Teeldd0e19f2014-02-18 13:57:31 -06007477 BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
7478
7479 VERIFY_OFFSET(structure_size, 0);
7480 VERIFY_OFFSET(volume_blk_size, 4);
7481 VERIFY_OFFSET(volume_blk_cnt, 8);
7482 VERIFY_OFFSET(phys_blk_shift, 16);
7483 VERIFY_OFFSET(parity_rotation_shift, 17);
7484 VERIFY_OFFSET(strip_size, 18);
7485 VERIFY_OFFSET(disk_starting_blk, 20);
7486 VERIFY_OFFSET(disk_blk_cnt, 28);
7487 VERIFY_OFFSET(data_disks_per_row, 36);
7488 VERIFY_OFFSET(metadata_disks_per_row, 38);
7489 VERIFY_OFFSET(row_cnt, 40);
7490 VERIFY_OFFSET(layout_map_count, 42);
7491 VERIFY_OFFSET(flags, 44);
7492 VERIFY_OFFSET(dekindex, 46);
7493 /* VERIFY_OFFSET(reserved, 48 */
7494 VERIFY_OFFSET(data, 64);
7495
7496#undef VERIFY_OFFSET
7497
7498#define VERIFY_OFFSET(member, offset) \
Mike Millerb66cc252014-02-18 13:56:04 -06007499 BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
7500
7501 VERIFY_OFFSET(IU_type, 0);
7502 VERIFY_OFFSET(direction, 1);
7503 VERIFY_OFFSET(reply_queue, 2);
7504 /* VERIFY_OFFSET(reserved1, 3); */
7505 VERIFY_OFFSET(scsi_nexus, 4);
7506 VERIFY_OFFSET(Tag, 8);
7507 VERIFY_OFFSET(cdb, 16);
7508 VERIFY_OFFSET(cciss_lun, 32);
7509 VERIFY_OFFSET(data_len, 40);
7510 VERIFY_OFFSET(cmd_priority_task_attr, 44);
7511 VERIFY_OFFSET(sg_count, 45);
7512 /* VERIFY_OFFSET(reserved3 */
7513 VERIFY_OFFSET(err_ptr, 48);
7514 VERIFY_OFFSET(err_len, 56);
7515 /* VERIFY_OFFSET(reserved4 */
7516 VERIFY_OFFSET(sg, 64);
7517
7518#undef VERIFY_OFFSET
7519
7520#define VERIFY_OFFSET(member, offset) \
Matt Gatese1f7de02014-02-18 13:55:17 -06007521 BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
7522
7523 VERIFY_OFFSET(dev_handle, 0x00);
7524 VERIFY_OFFSET(reserved1, 0x02);
7525 VERIFY_OFFSET(function, 0x03);
7526 VERIFY_OFFSET(reserved2, 0x04);
7527 VERIFY_OFFSET(err_info, 0x0C);
7528 VERIFY_OFFSET(reserved3, 0x10);
7529 VERIFY_OFFSET(err_info_len, 0x12);
7530 VERIFY_OFFSET(reserved4, 0x13);
7531 VERIFY_OFFSET(sgl_offset, 0x14);
7532 VERIFY_OFFSET(reserved5, 0x15);
7533 VERIFY_OFFSET(transfer_len, 0x1C);
7534 VERIFY_OFFSET(reserved6, 0x20);
7535 VERIFY_OFFSET(io_flags, 0x24);
7536 VERIFY_OFFSET(reserved7, 0x26);
7537 VERIFY_OFFSET(LUN, 0x34);
7538 VERIFY_OFFSET(control, 0x3C);
7539 VERIFY_OFFSET(CDB, 0x40);
7540 VERIFY_OFFSET(reserved8, 0x50);
7541 VERIFY_OFFSET(host_context_flags, 0x60);
7542 VERIFY_OFFSET(timeout_sec, 0x62);
7543 VERIFY_OFFSET(ReplyQueue, 0x64);
7544 VERIFY_OFFSET(reserved9, 0x65);
7545 VERIFY_OFFSET(Tag, 0x68);
7546 VERIFY_OFFSET(host_addr, 0x70);
7547 VERIFY_OFFSET(CISS_LUN, 0x78);
7548 VERIFY_OFFSET(SG, 0x78 + 8);
7549#undef VERIFY_OFFSET
7550}
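
/*
 * Editor's note: verify_offsets() relies on the fact that
 * BUILD_BUG_ON(offsetof(...)) is evaluated at compile time, so any layout
 * drift in a hardware-visible structure breaks the build instead of
 * silently corrupting commands.  A minimal sketch of the same idea, using
 * a hypothetical structure that is not part of this driver:
 */
#if 0	/* illustrative sketch only */
struct demo_wire_hdr {
	u8  type;	/* offset 0 */
	u8  flags;	/* offset 1 */
	u16 len;	/* offset 2 */
	u32 tag;	/* offset 4 */
};

static void __attribute__((unused)) verify_demo_offsets(void)
{
	BUILD_BUG_ON(offsetof(struct demo_wire_hdr, len) != 2);
	BUILD_BUG_ON(offsetof(struct demo_wire_hdr, tag) != 4);
	BUILD_BUG_ON(sizeof(struct demo_wire_hdr) != 8);
}
#endif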
7551
Stephen M. Cameronedd16362009-12-08 14:09:11 -08007552module_init(hpsa_init);
7553module_exit(hpsa_cleanup);