/*
 *    driver for Microsemi PQI-based storage controllers
 *    Copyright (c) 2016-2017 Microsemi Corporation
 *    Copyright (c) 2016 PMC-Sierra, Inc.
 *
 *    This program is free software; you can redistribute it and/or modify
 *    it under the terms of the GNU General Public License as published by
 *    the Free Software Foundation; version 2 of the License.
 *
 *    This program is distributed in the hope that it will be useful,
 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 *    Questions/Comments/Bugfixes to esc.storagedev@microsemi.com
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/cciss_ioctl.h>
#include <linux/blk-mq-pci.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_transport_sas.h>
#include <asm/unaligned.h>
#include "smartpqi.h"
#include "smartpqi_sis.h"

#if !defined(BUILD_TIMESTAMP)
#define BUILD_TIMESTAMP
#endif

#define DRIVER_VERSION		"0.9.13-370"
#define DRIVER_MAJOR		0
#define DRIVER_MINOR		9
#define DRIVER_RELEASE		13
#define DRIVER_REVISION		370

#define DRIVER_NAME		"Microsemi PQI Driver (v" DRIVER_VERSION ")"
#define DRIVER_NAME_SHORT	"smartpqi"

#define PQI_EXTRA_SGL_MEMORY	(12 * sizeof(struct pqi_sg_descriptor))

MODULE_AUTHOR("Microsemi");
MODULE_DESCRIPTION("Driver for Microsemi Smart Family Controller version "
	DRIVER_VERSION);
MODULE_SUPPORTED_DEVICE("Microsemi Smart Family Controllers");
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");

static char *hpe_branded_controller = "HPE Smart Array Controller";
static char *microsemi_branded_controller = "Microsemi Smart Family Controller";

static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info);
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info);
static void pqi_scan_start(struct Scsi_Host *shost);
static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
	struct pqi_queue_group *queue_group, enum pqi_io_path path,
	struct pqi_io_request *io_request);
static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
	struct pqi_iu_header *request, unsigned int flags,
	struct pqi_raid_error_info *error_info, unsigned long timeout_msecs);
static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
	struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
	unsigned int cdb_length, struct pqi_queue_group *queue_group,
	struct pqi_encryption_info *encryption_info);

/* for flags argument to pqi_submit_raid_request_synchronous() */
#define PQI_SYNC_FLAGS_INTERRUPTABLE	0x1

static struct scsi_transport_template *pqi_sas_transport_template;

static atomic_t pqi_controller_count = ATOMIC_INIT(0);

static unsigned int pqi_supported_event_types[] = {
	PQI_EVENT_TYPE_HOTPLUG,
	PQI_EVENT_TYPE_HARDWARE,
	PQI_EVENT_TYPE_PHYSICAL_DEVICE,
	PQI_EVENT_TYPE_LOGICAL_DEVICE,
	PQI_EVENT_TYPE_AIO_STATE_CHANGE,
	PQI_EVENT_TYPE_AIO_CONFIG_CHANGE,
};

static int pqi_disable_device_id_wildcards;
module_param_named(disable_device_id_wildcards,
	pqi_disable_device_id_wildcards, int, 0644);
MODULE_PARM_DESC(disable_device_id_wildcards,
	"Disable device ID wildcards.");

static char *raid_levels[] = {
	"RAID-0",
	"RAID-4",
	"RAID-1(1+0)",
	"RAID-5",
	"RAID-5+1",
	"RAID-ADG",
	"RAID-1(ADM)",
};

static char *pqi_raid_level_to_string(u8 raid_level)
{
	if (raid_level < ARRAY_SIZE(raid_levels))
		return raid_levels[raid_level];

	return "";
}

#define SA_RAID_0		0
#define SA_RAID_4		1
#define SA_RAID_1		2	/* also used for RAID 10 */
#define SA_RAID_5		3	/* also used for RAID 50 */
#define SA_RAID_51		4
#define SA_RAID_6		5	/* also used for RAID 60 */
#define SA_RAID_ADM		6	/* also used for RAID 1+0 ADM */
#define SA_RAID_MAX		SA_RAID_ADM
#define SA_RAID_UNKNOWN		0xff

static inline void pqi_scsi_done(struct scsi_cmnd *scmd)
{
	pqi_prep_for_scsi_done(scmd);
	scmd->scsi_done(scmd);
}

static inline bool pqi_scsi3addr_equal(u8 *scsi3addr1, u8 *scsi3addr2)
{
	return memcmp(scsi3addr1, scsi3addr2, 8) == 0;
}

static inline struct pqi_ctrl_info *shost_to_hba(struct Scsi_Host *shost)
{
	void *hostdata = shost_priv(shost);

	return *((struct pqi_ctrl_info **)hostdata);
}

static inline bool pqi_is_logical_device(struct pqi_scsi_dev *device)
{
	return !device->is_physical_device;
}

static inline bool pqi_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
{
	return !ctrl_info->controller_online;
}

static inline void pqi_check_ctrl_health(struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->controller_online)
		if (!sis_is_firmware_running(ctrl_info))
			pqi_take_ctrl_offline(ctrl_info);
}

static inline bool pqi_is_hba_lunid(u8 *scsi3addr)
{
	return pqi_scsi3addr_equal(scsi3addr, RAID_CTLR_LUNID);
}

static inline enum pqi_ctrl_mode pqi_get_ctrl_mode(
	struct pqi_ctrl_info *ctrl_info)
{
	return sis_read_driver_scratch(ctrl_info);
}

static inline void pqi_save_ctrl_mode(struct pqi_ctrl_info *ctrl_info,
	enum pqi_ctrl_mode mode)
{
	sis_write_driver_scratch(ctrl_info, mode);
}

#define PQI_RESCAN_WORK_INTERVAL	(10 * HZ)
static inline void pqi_ctrl_block_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = true;
	scsi_block_requests(ctrl_info->scsi_host);
}

static inline void pqi_ctrl_unblock_requests(struct pqi_ctrl_info *ctrl_info)
{
	ctrl_info->block_requests = false;
	wake_up_all(&ctrl_info->block_requests_wait);
	scsi_unblock_requests(ctrl_info->scsi_host);
}

static inline bool pqi_ctrl_blocked(struct pqi_ctrl_info *ctrl_info)
{
	return ctrl_info->block_requests;
}

static unsigned long pqi_wait_if_ctrl_blocked(struct pqi_ctrl_info *ctrl_info,
	unsigned long timeout_msecs)
{
	unsigned long remaining_msecs;

	if (!pqi_ctrl_blocked(ctrl_info))
		return timeout_msecs;

	atomic_inc(&ctrl_info->num_blocked_threads);

	if (timeout_msecs == NO_TIMEOUT) {
		wait_event(ctrl_info->block_requests_wait,
			!pqi_ctrl_blocked(ctrl_info));
		remaining_msecs = timeout_msecs;
	} else {
		unsigned long remaining_jiffies;

		remaining_jiffies =
			wait_event_timeout(ctrl_info->block_requests_wait,
				!pqi_ctrl_blocked(ctrl_info),
				msecs_to_jiffies(timeout_msecs));
		remaining_msecs = jiffies_to_msecs(remaining_jiffies);
	}

	atomic_dec(&ctrl_info->num_blocked_threads);

	return remaining_msecs;
}

static inline void pqi_ctrl_busy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_inc(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_unbusy(struct pqi_ctrl_info *ctrl_info)
{
	atomic_dec(&ctrl_info->num_busy_threads);
}

static inline void pqi_ctrl_wait_until_quiesced(struct pqi_ctrl_info *ctrl_info)
{
	while (atomic_read(&ctrl_info->num_busy_threads) >
		atomic_read(&ctrl_info->num_blocked_threads))
		usleep_range(1000, 2000);
}

static inline void pqi_device_reset_start(struct pqi_scsi_dev *device)
{
	device->in_reset = true;
}

static inline void pqi_device_reset_done(struct pqi_scsi_dev *device)
{
	device->in_reset = false;
}

static inline bool pqi_device_in_reset(struct pqi_scsi_dev *device)
{
	return device->in_reset;
}

static inline void pqi_schedule_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	schedule_delayed_work(&ctrl_info->rescan_work,
		PQI_RESCAN_WORK_INTERVAL);
}

static inline void pqi_cancel_rescan_worker(struct pqi_ctrl_info *ctrl_info)
{
	cancel_delayed_work_sync(&ctrl_info->rescan_work);
}

static inline u32 pqi_read_heartbeat_counter(struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->heartbeat_counter)
		return 0;

	return readl(ctrl_info->heartbeat_counter);
}

static int pqi_map_single(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *sg_descriptor, void *buffer,
	size_t buffer_length, int data_direction)
{
	dma_addr_t bus_address;

	if (!buffer || buffer_length == 0 || data_direction == PCI_DMA_NONE)
		return 0;

	bus_address = pci_map_single(pci_dev, buffer, buffer_length,
		data_direction);
	if (pci_dma_mapping_error(pci_dev, bus_address))
		return -ENOMEM;

	put_unaligned_le64((u64)bus_address, &sg_descriptor->address);
	put_unaligned_le32(buffer_length, &sg_descriptor->length);
	put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);

	return 0;
}

static void pqi_pci_unmap(struct pci_dev *pci_dev,
	struct pqi_sg_descriptor *descriptors, int num_descriptors,
	int data_direction)
{
	int i;

	if (data_direction == PCI_DMA_NONE)
		return;

	for (i = 0; i < num_descriptors; i++)
		pci_unmap_single(pci_dev,
			(dma_addr_t)get_unaligned_le64(&descriptors[i].address),
			get_unaligned_le32(&descriptors[i].length),
			data_direction);
}

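/*
 * Build a RAID path request for an internal CISS/BMIC command: fill in the
 * IU header and CDB for the given command, set the data direction, and
 * DMA-map the caller's buffer into the single SG descriptor.  The chosen
 * PCI DMA direction is returned through *pci_direction so that the caller
 * can unmap the buffer with pqi_pci_unmap() after the request completes.
 */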
static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
	struct pqi_raid_path_request *request, u8 cmd,
	u8 *scsi3addr, void *buffer, size_t buffer_length,
	u16 vpd_page, int *pci_direction)
{
	u8 *cdb;
	int pci_dir;

	memset(request, 0, sizeof(*request));

	request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
	put_unaligned_le16(offsetof(struct pqi_raid_path_request,
		sg_descriptors[1]) - PQI_REQUEST_HEADER_LENGTH,
		&request->header.iu_length);
	put_unaligned_le32(buffer_length, &request->buffer_length);
	memcpy(request->lun_number, scsi3addr, sizeof(request->lun_number));
	request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
	request->additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;

	cdb = request->cdb;

	switch (cmd) {
	case INQUIRY:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = INQUIRY;
		if (vpd_page & VPD_PAGE) {
			cdb[1] = 0x1;
			cdb[2] = (u8)vpd_page;
		}
		cdb[4] = (u8)buffer_length;
		break;
	case CISS_REPORT_LOG:
	case CISS_REPORT_PHYS:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = cmd;
		if (cmd == CISS_REPORT_PHYS)
			cdb[1] = CISS_REPORT_PHYS_EXTENDED;
		else
			cdb[1] = CISS_REPORT_LOG_EXTENDED;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case CISS_GET_RAID_MAP:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = CISS_READ;
		cdb[1] = CISS_GET_RAID_MAP;
		put_unaligned_be32(buffer_length, &cdb[6]);
		break;
	case SA_CACHE_FLUSH:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = BMIC_CACHE_FLUSH;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_IDENTIFY_CONTROLLER:
	case BMIC_IDENTIFY_PHYSICAL_DEVICE:
		request->data_direction = SOP_READ_FLAG;
		cdb[0] = BMIC_READ;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	case BMIC_WRITE_HOST_WELLNESS:
		request->data_direction = SOP_WRITE_FLAG;
		cdb[0] = BMIC_WRITE;
		cdb[6] = cmd;
		put_unaligned_be16(buffer_length, &cdb[7]);
		break;
	default:
		dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%x\n",
			cmd);
		break;
	}

	switch (request->data_direction) {
	case SOP_READ_FLAG:
		pci_dir = PCI_DMA_FROMDEVICE;
		break;
	case SOP_WRITE_FLAG:
		pci_dir = PCI_DMA_TODEVICE;
		break;
	case SOP_NO_DIRECTION_FLAG:
		pci_dir = PCI_DMA_NONE;
		break;
	default:
		pci_dir = PCI_DMA_BIDIRECTIONAL;
		break;
	}

	*pci_direction = pci_dir;

	return pqi_map_single(ctrl_info->pci_dev, &request->sg_descriptors[0],
		buffer, buffer_length, pci_dir);
}

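/*
 * Claim a free element from the pre-allocated io_request_pool by scanning
 * round-robin from next_io_request_slot.  A slot is free when its refcount
 * transitions from 0 to 1; losing a race simply advances to the next slot.
 */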
static struct pqi_io_request *pqi_alloc_io_request(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_io_request *io_request;
	u16 i = ctrl_info->next_io_request_slot;	/* benignly racy */

	while (1) {
		io_request = &ctrl_info->io_request_pool[i];
		if (atomic_inc_return(&io_request->refcount) == 1)
			break;
		atomic_dec(&io_request->refcount);
		i = (i + 1) % ctrl_info->max_io_slots;
	}

	/* benignly racy */
	ctrl_info->next_io_request_slot = (i + 1) % ctrl_info->max_io_slots;

	io_request->scmd = NULL;
	io_request->status = 0;
	io_request->error_info = NULL;

	return io_request;
}

static void pqi_free_io_request(struct pqi_io_request *io_request)
{
	atomic_dec(&io_request->refcount);
}

static int pqi_identify_controller(struct pqi_ctrl_info *ctrl_info,
	struct bmic_identify_controller *buffer)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_CONTROLLER, RAID_CTLR_LUNID, buffer,
		sizeof(*buffer), 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_scsi_inquiry(struct pqi_ctrl_info *ctrl_info,
	u8 *scsi3addr, u16 vpd_page, void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		INQUIRY, scsi3addr, buffer, buffer_length, vpd_page,
		&pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

static int pqi_identify_physical_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *buffer,
	size_t buffer_length)
{
	int rc;
	int pci_direction;
	u16 bmic_device_index;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_IDENTIFY_PHYSICAL_DEVICE, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	bmic_device_index = CISS_GET_DRIVE_NUMBER(device->scsi3addr);
	request.cdb[2] = (u8)bmic_device_index;
	request.cdb[9] = (u8)(bmic_device_index >> 8);

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#define SA_CACHE_FLUSH_BUFFER_LENGTH	4

static int pqi_flush_cache(struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;
	u8 *buffer;

	/*
	 * Don't bother trying to flush the cache if the controller is
	 * locked up.
	 */
	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	buffer = kzalloc(SA_CACHE_FLUSH_BUFFER_LENGTH, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		SA_CACHE_FLUSH, RAID_CTLR_LUNID, buffer,
		SA_CACHE_FLUSH_BUFFER_LENGTH, 0, &pci_direction);
	if (rc)
		goto out;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

out:
	kfree(buffer);

	return rc;
}

static int pqi_write_host_wellness(struct pqi_ctrl_info *ctrl_info,
	void *buffer, size_t buffer_length)
{
	int rc;
	struct pqi_raid_path_request request;
	int pci_direction;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		BMIC_WRITE_HOST_WELLNESS, RAID_CTLR_LUNID, buffer,
		buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
		0, NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_driver_version {
	u8	start_tag[4];
	u8	driver_version_tag[2];
	__le16	driver_version_length;
	char	driver_version[32];
	u8	end_tag[2];
};

#pragma pack()

static int pqi_write_driver_version_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_driver_version *buffer;
	size_t buffer_length;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->driver_version_tag[0] = 'D';
	buffer->driver_version_tag[1] = 'V';
	put_unaligned_le16(sizeof(buffer->driver_version),
		&buffer->driver_version_length);
	strncpy(buffer->driver_version, "Linux " DRIVER_VERSION,
		sizeof(buffer->driver_version) - 1);
	buffer->driver_version[sizeof(buffer->driver_version) - 1] = '\0';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#pragma pack(1)

struct bmic_host_wellness_time {
	u8	start_tag[4];
	u8	time_tag[2];
	__le16	time_length;
	u8	time[8];
	u8	dont_write_tag[2];
	u8	end_tag[2];
};

#pragma pack()

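/*
 * Send the current local time to the controller as a BMIC host wellness
 * write.  The time field is BCD-encoded as hour, minute, second, a zero
 * byte, month, day, century, and year within the century.
 */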
static int pqi_write_current_time_to_host_wellness(
	struct pqi_ctrl_info *ctrl_info)
{
	int rc;
	struct bmic_host_wellness_time *buffer;
	size_t buffer_length;
	time64_t local_time;
	unsigned int year;
	struct tm tm;

	buffer_length = sizeof(*buffer);

	buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	buffer->start_tag[0] = '<';
	buffer->start_tag[1] = 'H';
	buffer->start_tag[2] = 'W';
	buffer->start_tag[3] = '>';
	buffer->time_tag[0] = 'T';
	buffer->time_tag[1] = 'D';
	put_unaligned_le16(sizeof(buffer->time),
		&buffer->time_length);

	local_time = ktime_get_real_seconds();
	time64_to_tm(local_time, -sys_tz.tz_minuteswest * 60, &tm);
	year = tm.tm_year + 1900;

	buffer->time[0] = bin2bcd(tm.tm_hour);
	buffer->time[1] = bin2bcd(tm.tm_min);
	buffer->time[2] = bin2bcd(tm.tm_sec);
	buffer->time[3] = 0;
	buffer->time[4] = bin2bcd(tm.tm_mon + 1);
	buffer->time[5] = bin2bcd(tm.tm_mday);
	buffer->time[6] = bin2bcd(year / 100);
	buffer->time[7] = bin2bcd(year % 100);

	buffer->dont_write_tag[0] = 'D';
	buffer->dont_write_tag[1] = 'W';
	buffer->end_tag[0] = 'Z';
	buffer->end_tag[1] = 'Z';

	rc = pqi_write_host_wellness(ctrl_info, buffer, buffer_length);

	kfree(buffer);

	return rc;
}

#define PQI_UPDATE_TIME_WORK_INTERVAL	(24UL * 60 * 60 * HZ)

static void pqi_update_time_worker(struct work_struct *work)
{
	int rc;
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		update_time_work);

	rc = pqi_write_current_time_to_host_wellness(ctrl_info);
	if (rc)
		dev_warn(&ctrl_info->pci_dev->dev,
			"error updating time on controller\n");

	schedule_delayed_work(&ctrl_info->update_time_work,
		PQI_UPDATE_TIME_WORK_INTERVAL);
}

static inline void pqi_schedule_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	if (ctrl_info->update_time_worker_scheduled)
		return;

	schedule_delayed_work(&ctrl_info->update_time_work, 0);
	ctrl_info->update_time_worker_scheduled = true;
}

static inline void pqi_cancel_update_time_worker(
	struct pqi_ctrl_info *ctrl_info)
{
	if (!ctrl_info->update_time_worker_scheduled)
		return;

	cancel_delayed_work_sync(&ctrl_info->update_time_work);
	ctrl_info->update_time_worker_scheduled = false;
}

static int pqi_report_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void *buffer, size_t buffer_length)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		cmd, RAID_CTLR_LUNID, buffer, buffer_length, 0, &pci_direction);
	if (rc)
		return rc;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	return rc;
}

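/*
 * Issue CISS_REPORT_PHYS or CISS_REPORT_LOG twice: first a header-only read
 * to learn the list length, then a full read.  If the list grew between the
 * two commands, retry with the larger length so a complete list is returned.
 */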
static int pqi_report_phys_logical_luns(struct pqi_ctrl_info *ctrl_info, u8 cmd,
	void **buffer)
{
	int rc;
	size_t lun_list_length;
	size_t lun_data_length;
	size_t new_lun_list_length;
	void *lun_data = NULL;
	struct report_lun_header *report_lun_header;

	report_lun_header = kmalloc(sizeof(*report_lun_header), GFP_KERNEL);
	if (!report_lun_header) {
		rc = -ENOMEM;
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, report_lun_header,
		sizeof(*report_lun_header));
	if (rc)
		goto out;

	lun_list_length = get_unaligned_be32(&report_lun_header->list_length);

again:
	lun_data_length = sizeof(struct report_lun_header) + lun_list_length;

	lun_data = kmalloc(lun_data_length, GFP_KERNEL);
	if (!lun_data) {
		rc = -ENOMEM;
		goto out;
	}

	if (lun_list_length == 0) {
		memcpy(lun_data, report_lun_header, sizeof(*report_lun_header));
		goto out;
	}

	rc = pqi_report_luns(ctrl_info, cmd, lun_data, lun_data_length);
	if (rc)
		goto out;

	new_lun_list_length = get_unaligned_be32(
		&((struct report_lun_header *)lun_data)->list_length);

	if (new_lun_list_length > lun_list_length) {
		lun_list_length = new_lun_list_length;
		kfree(lun_data);
		goto again;
	}

out:
	kfree(report_lun_header);

	if (rc) {
		kfree(lun_data);
		lun_data = NULL;
	}

	*buffer = lun_data;

	return rc;
}

static inline int pqi_report_phys_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_PHYS,
		buffer);
}

static inline int pqi_report_logical_luns(struct pqi_ctrl_info *ctrl_info,
	void **buffer)
{
	return pqi_report_phys_logical_luns(ctrl_info, CISS_REPORT_LOG, buffer);
}

static int pqi_get_device_lists(struct pqi_ctrl_info *ctrl_info,
	struct report_phys_lun_extended **physdev_list,
	struct report_log_lun_extended **logdev_list)
{
	int rc;
	size_t logdev_list_length;
	size_t logdev_data_length;
	struct report_log_lun_extended *internal_logdev_list;
	struct report_log_lun_extended *logdev_data;
	struct report_lun_header report_lun_header;

	rc = pqi_report_phys_luns(ctrl_info, (void **)physdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report physical LUNs failed\n");

	rc = pqi_report_logical_luns(ctrl_info, (void **)logdev_list);
	if (rc)
		dev_err(&ctrl_info->pci_dev->dev,
			"report logical LUNs failed\n");

	/*
	 * Tack the controller itself onto the end of the logical device list.
	 */

	logdev_data = *logdev_list;

	if (logdev_data) {
		logdev_list_length =
			get_unaligned_be32(&logdev_data->header.list_length);
	} else {
		memset(&report_lun_header, 0, sizeof(report_lun_header));
		logdev_data =
			(struct report_log_lun_extended *)&report_lun_header;
		logdev_list_length = 0;
	}

	logdev_data_length = sizeof(struct report_lun_header) +
		logdev_list_length;

	internal_logdev_list = kmalloc(logdev_data_length +
		sizeof(struct report_log_lun_extended), GFP_KERNEL);
	if (!internal_logdev_list) {
		kfree(*logdev_list);
		*logdev_list = NULL;
		return -ENOMEM;
	}

	memcpy(internal_logdev_list, logdev_data, logdev_data_length);
	memset((u8 *)internal_logdev_list + logdev_data_length, 0,
		sizeof(struct report_log_lun_extended_entry));
	put_unaligned_be32(logdev_list_length +
		sizeof(struct report_log_lun_extended_entry),
		&internal_logdev_list->header.list_length);

	kfree(*logdev_list);
	*logdev_list = internal_logdev_list;

	return 0;
}

static inline void pqi_set_bus_target_lun(struct pqi_scsi_dev *device,
	int bus, int target, int lun)
{
	device->bus = bus;
	device->target = target;
	device->lun = lun;
}

static void pqi_assign_bus_target_lun(struct pqi_scsi_dev *device)
{
	u8 *scsi3addr;
	u32 lunid;

	scsi3addr = device->scsi3addr;
	lunid = get_unaligned_le32(scsi3addr);

	if (pqi_is_hba_lunid(scsi3addr)) {
		/* The specified device is the controller. */
		pqi_set_bus_target_lun(device, PQI_HBA_BUS, 0, lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	if (pqi_is_logical_device(device)) {
		pqi_set_bus_target_lun(device, PQI_RAID_VOLUME_BUS, 0,
			lunid & 0x3fff);
		device->target_lun_valid = true;
		return;
	}

	/*
	 * Defer target and LUN assignment for non-controller physical devices
	 * because the SAS transport layer will make these assignments later.
	 */
	pqi_set_bus_target_lun(device, PQI_PHYSICAL_DEVICE_BUS, 0, 0);
}

static void pqi_get_raid_level(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 raid_level;
	u8 *buffer;

	raid_level = SA_RAID_UNKNOWN;

	buffer = kmalloc(64, GFP_KERNEL);
	if (buffer) {
		rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
			VPD_PAGE | CISS_VPD_LV_DEVICE_GEOMETRY, buffer, 64);
		if (rc == 0) {
			raid_level = buffer[8];
			if (raid_level > SA_RAID_MAX)
				raid_level = SA_RAID_UNKNOWN;
		}
		kfree(buffer);
	}

	device->raid_level = raid_level;
}

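/*
 * Sanity-check a RAID map returned by the controller before it is used for
 * accelerated I/O: verify the structure size, the number of map entries,
 * and that the layout map count is consistent with the volume's RAID level.
 */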
static int pqi_validate_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device, struct raid_map *raid_map)
{
	char *err_msg;
	u32 raid_map_size;
	u32 r5or6_blocks_per_row;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;

	raid_map_size = get_unaligned_le32(&raid_map->structure_size);

	if (raid_map_size < offsetof(struct raid_map, disk_data)) {
		err_msg = "RAID map too small";
		goto bad_raid_map;
	}

	if (raid_map_size > sizeof(*raid_map)) {
		err_msg = "RAID map too large";
		goto bad_raid_map;
	}

	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	if (num_raid_map_entries > RAID_MAP_MAX_ENTRIES) {
		err_msg = "invalid number of map entries in RAID map";
		goto bad_raid_map;
	}

	if (device->raid_level == SA_RAID_1) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 2) {
			err_msg = "invalid RAID-1 map";
			goto bad_raid_map;
		}
	} else if (device->raid_level == SA_RAID_ADM) {
		if (get_unaligned_le16(&raid_map->layout_map_count) != 3) {
			err_msg = "invalid RAID-1(ADM) map";
			goto bad_raid_map;
		}
	} else if ((device->raid_level == SA_RAID_5 ||
		device->raid_level == SA_RAID_6) &&
		get_unaligned_le16(&raid_map->layout_map_count) > 1) {
		/* RAID 50/60 */
		r5or6_blocks_per_row =
			get_unaligned_le16(&raid_map->strip_size) *
			get_unaligned_le16(&raid_map->data_disks_per_row);
		if (r5or6_blocks_per_row == 0) {
			err_msg = "invalid RAID-5 or RAID-6 map";
			goto bad_raid_map;
		}
	}

	return 0;

bad_raid_map:
	dev_warn(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, err_msg);

	return -EINVAL;
}

static int pqi_get_raid_map(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	int pci_direction;
	struct pqi_raid_path_request request;
	struct raid_map *raid_map;

	raid_map = kmalloc(sizeof(*raid_map), GFP_KERNEL);
	if (!raid_map)
		return -ENOMEM;

	rc = pqi_build_raid_path_request(ctrl_info, &request,
		CISS_GET_RAID_MAP, device->scsi3addr, raid_map,
		sizeof(*raid_map), 0, &pci_direction);
	if (rc)
		goto error;

	rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
		NULL, NO_TIMEOUT);

	pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
		pci_direction);

	if (rc)
		goto error;

	rc = pqi_validate_raid_map(ctrl_info, device, raid_map);
	if (rc)
		goto error;

	device->raid_map = raid_map;

	return 0;

error:
	kfree(raid_map);

	return rc;
}

static void pqi_get_offload_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;
	u8 offload_status;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_OFFLOAD_STATUS, buffer, 64);
	if (rc)
		goto out;

#define OFFLOAD_STATUS_BYTE	4
#define OFFLOAD_CONFIGURED_BIT	0x1
#define OFFLOAD_ENABLED_BIT	0x2

	offload_status = buffer[OFFLOAD_STATUS_BYTE];
	device->offload_configured =
		!!(offload_status & OFFLOAD_CONFIGURED_BIT);
	if (device->offload_configured) {
		device->offload_enabled_pending =
			!!(offload_status & OFFLOAD_ENABLED_BIT);
		if (pqi_get_raid_map(ctrl_info, device))
			device->offload_enabled_pending = false;
	}

out:
	kfree(buffer);
}

/*
 * Use vendor-specific VPD to determine online/offline status of a volume.
 */

static void pqi_get_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	size_t page_length;
	u8 volume_status = CISS_LV_STATUS_UNAVAILABLE;
	bool volume_offline = true;
	u32 volume_flags;
	struct ciss_vpd_logical_volume_status *vpd;

	vpd = kmalloc(sizeof(*vpd), GFP_KERNEL);
	if (!vpd)
		goto no_buffer;

	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr,
		VPD_PAGE | CISS_VPD_LV_STATUS, vpd, sizeof(*vpd));
	if (rc)
		goto out;

	page_length = offsetof(struct ciss_vpd_logical_volume_status,
		volume_status) + vpd->page_length;
	if (page_length < sizeof(*vpd))
		goto out;

	volume_status = vpd->volume_status;
	volume_flags = get_unaligned_be32(&vpd->flags);
	volume_offline = (volume_flags & CISS_LV_FLAGS_NO_HOST_IO) != 0;

out:
	kfree(vpd);
no_buffer:
	device->volume_status = volume_status;
	device->volume_offline = volume_offline;
}

static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;
	u8 *buffer;

	buffer = kmalloc(64, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* Send an inquiry to the device to see what it is. */
	rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
	if (rc)
		goto out;

	scsi_sanitize_inquiry_string(&buffer[8], 8);
	scsi_sanitize_inquiry_string(&buffer[16], 16);

	device->devtype = buffer[0] & 0x1f;
	memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
	memcpy(device->model, &buffer[16], sizeof(device->model));

	if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
		pqi_get_raid_level(ctrl_info, device);
		pqi_get_offload_status(ctrl_info, device);
		pqi_get_volume_status(ctrl_info, device);
	}

out:
	kfree(buffer);

	return rc;
}

static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device,
	struct bmic_identify_physical_device *id_phys)
{
	int rc;

	memset(id_phys, 0, sizeof(*id_phys));

	rc = pqi_identify_physical_device(ctrl_info, device,
		id_phys, sizeof(*id_phys));
	if (rc) {
		device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
		return;
	}

	device->queue_depth =
		get_unaligned_le16(&id_phys->current_queue_depth_limit);
	device->device_type = id_phys->device_type;
	device->active_path_index = id_phys->active_path_number;
	device->path_map = id_phys->redundant_path_present_map;
	memcpy(&device->box,
		&id_phys->alternate_paths_phys_box_on_port,
		sizeof(device->box));
	memcpy(&device->phys_connector,
		&id_phys->alternate_paths_phys_connector,
		sizeof(device->phys_connector));
	device->bay = id_phys->phys_bay_in_box;
}

static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	char *status;
	static const char unknown_state_str[] =
		"Volume is in an unknown state (%u)";
	char unknown_state_buffer[sizeof(unknown_state_str) + 10];

	switch (device->volume_status) {
	case CISS_LV_OK:
		status = "Volume online";
		break;
	case CISS_LV_FAILED:
		status = "Volume failed";
		break;
	case CISS_LV_NOT_CONFIGURED:
		status = "Volume not configured";
		break;
	case CISS_LV_DEGRADED:
		status = "Volume degraded";
		break;
	case CISS_LV_READY_FOR_RECOVERY:
		status = "Volume ready for recovery operation";
		break;
	case CISS_LV_UNDERGOING_RECOVERY:
		status = "Volume undergoing recovery";
		break;
	case CISS_LV_WRONG_PHYSICAL_DRIVE_REPLACED:
		status = "Wrong physical drive was replaced";
		break;
	case CISS_LV_PHYSICAL_DRIVE_CONNECTION_PROBLEM:
		status = "A physical drive not properly connected";
		break;
	case CISS_LV_HARDWARE_OVERHEATING:
		status = "Hardware is overheating";
		break;
	case CISS_LV_HARDWARE_HAS_OVERHEATED:
		status = "Hardware has overheated";
		break;
	case CISS_LV_UNDERGOING_EXPANSION:
		status = "Volume undergoing expansion";
		break;
	case CISS_LV_NOT_AVAILABLE:
		status = "Volume waiting for transforming volume";
		break;
	case CISS_LV_QUEUED_FOR_EXPANSION:
		status = "Volume queued for expansion";
		break;
	case CISS_LV_DISABLED_SCSI_ID_CONFLICT:
		status = "Volume disabled due to SCSI ID conflict";
		break;
	case CISS_LV_EJECTED:
		status = "Volume has been ejected";
		break;
	case CISS_LV_UNDERGOING_ERASE:
		status = "Volume undergoing background erase";
		break;
	case CISS_LV_READY_FOR_PREDICTIVE_SPARE_REBUILD:
		status = "Volume ready for predictive spare rebuild";
		break;
	case CISS_LV_UNDERGOING_RPI:
		status = "Volume undergoing rapid parity initialization";
		break;
	case CISS_LV_PENDING_RPI:
		status = "Volume queued for rapid parity initialization";
		break;
	case CISS_LV_ENCRYPTED_NO_KEY:
		status = "Encrypted volume inaccessible - key not present";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION:
		status = "Volume undergoing encryption process";
		break;
	case CISS_LV_UNDERGOING_ENCRYPTION_REKEYING:
		status = "Volume undergoing encryption re-keying process";
		break;
	case CISS_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER:
		status = "Volume encrypted but encryption is disabled";
		break;
	case CISS_LV_PENDING_ENCRYPTION:
		status = "Volume pending migration to encrypted state";
		break;
	case CISS_LV_PENDING_ENCRYPTION_REKEYING:
		status = "Volume pending encryption rekeying";
		break;
	case CISS_LV_NOT_SUPPORTED:
		status = "Volume not supported on this controller";
		break;
	case CISS_LV_STATUS_UNAVAILABLE:
		status = "Volume status not available";
		break;
	default:
		snprintf(unknown_state_buffer, sizeof(unknown_state_buffer),
			unknown_state_str, device->volume_status);
		status = unknown_state_buffer;
		break;
	}

	dev_info(&ctrl_info->pci_dev->dev,
		"scsi %d:%d:%d:%d %s\n",
		ctrl_info->scsi_host->host_no,
		device->bus, device->target, device->lun, status);
}

static struct pqi_scsi_dev *pqi_find_disk_by_aio_handle(
	struct pqi_ctrl_info *ctrl_info, u32 aio_handle)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
			continue;
		if (pqi_is_logical_device(device))
			continue;
		if (device->aio_handle == aio_handle)
			return device;
	}

	return NULL;
}

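/*
 * Derive a logical drive's queue depth by summing the queue depths of the
 * physical disks referenced by its RAID map.  If any member disk cannot be
 * found, fall back to the default depth and discard the RAID map so that
 * offload stays disabled for this volume.
 */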
static void pqi_update_logical_drive_queue_depth(
	struct pqi_ctrl_info *ctrl_info, struct pqi_scsi_dev *logical_drive)
{
	unsigned int i;
	struct raid_map *raid_map;
	struct raid_map_disk_data *disk_data;
	struct pqi_scsi_dev *phys_disk;
	unsigned int num_phys_disks;
	unsigned int num_raid_map_entries;
	unsigned int queue_depth;

	logical_drive->queue_depth = PQI_LOGICAL_DRIVE_DEFAULT_MAX_QUEUE_DEPTH;

	raid_map = logical_drive->raid_map;
	if (!raid_map)
		return;

	disk_data = raid_map->disk_data;
	num_phys_disks = get_unaligned_le16(&raid_map->layout_map_count) *
		(get_unaligned_le16(&raid_map->data_disks_per_row) +
		get_unaligned_le16(&raid_map->metadata_disks_per_row));
	num_raid_map_entries = num_phys_disks *
		get_unaligned_le16(&raid_map->row_cnt);

	queue_depth = 0;
	for (i = 0; i < num_raid_map_entries; i++) {
		phys_disk = pqi_find_disk_by_aio_handle(ctrl_info,
			disk_data[i].aio_handle);

		if (!phys_disk) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"failed to find physical disk for logical drive %016llx\n",
				get_unaligned_be64(logical_drive->scsi3addr));
			logical_drive->offload_enabled = false;
			logical_drive->offload_enabled_pending = false;
			kfree(raid_map);
			logical_drive->raid_map = NULL;
			return;
		}

		queue_depth += phys_disk->queue_depth;
	}

	logical_drive->queue_depth = queue_depth;
}

static void pqi_update_all_logical_drive_queue_depths(
	struct pqi_ctrl_info *ctrl_info)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->devtype != TYPE_DISK && device->devtype != TYPE_ZBC)
			continue;
		if (!pqi_is_logical_device(device))
			continue;
		pqi_update_logical_drive_queue_depth(ctrl_info, device);
	}
}

static void pqi_rescan_worker(struct work_struct *work)
{
	struct pqi_ctrl_info *ctrl_info;

	ctrl_info = container_of(to_delayed_work(work), struct pqi_ctrl_info,
		rescan_work);

	pqi_scan_scsi_devices(ctrl_info);
}

static int pqi_add_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	int rc;

	if (pqi_is_logical_device(device))
		rc = scsi_add_device(ctrl_info->scsi_host, device->bus,
			device->target, device->lun);
	else
		rc = pqi_add_sas_device(ctrl_info->sas_host, device);

	return rc;
}

static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	if (pqi_is_logical_device(device))
		scsi_remove_device(device->sdev);
	else
		pqi_remove_sas_device(device);
}

/* Assumes the SCSI device list lock is held. */

static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
	int bus, int target, int lun)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		if (device->bus == bus && device->target == target &&
			device->lun == lun)
			return device;

	return NULL;
}

static inline bool pqi_device_equal(struct pqi_scsi_dev *dev1,
	struct pqi_scsi_dev *dev2)
{
	if (dev1->is_physical_device != dev2->is_physical_device)
		return false;

	if (dev1->is_physical_device)
		return dev1->wwid == dev2->wwid;

	return memcmp(dev1->volume_id, dev2->volume_id,
		sizeof(dev1->volume_id)) == 0;
}

enum pqi_find_result {
	DEVICE_NOT_FOUND,
	DEVICE_CHANGED,
	DEVICE_SAME,
};

static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device_to_find,
	struct pqi_scsi_dev **matching_device)
{
	struct pqi_scsi_dev *device;

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
			device->scsi3addr)) {
			*matching_device = device;
			if (pqi_device_equal(device_to_find, device)) {
				if (device_to_find->volume_offline)
					return DEVICE_CHANGED;
				return DEVICE_SAME;
			}
			return DEVICE_CHANGED;
		}
	}

	return DEVICE_NOT_FOUND;
}

static void pqi_dev_info(struct pqi_ctrl_info *ctrl_info,
	char *action, struct pqi_scsi_dev *device)
{
	dev_info(&ctrl_info->pci_dev->dev,
		"%s scsi %d:%d:%d:%d: %s %.8s %.16s %-12s SSDSmartPathCap%c En%c Exp%c qd=%d\n",
		action,
		ctrl_info->scsi_host->host_no,
		device->bus,
		device->target,
		device->lun,
		scsi_device_type(device->devtype),
		device->vendor,
		device->model,
		pqi_raid_level_to_string(device->raid_level),
		device->offload_configured ? '+' : '-',
		device->offload_enabled_pending ? '+' : '-',
		device->expose_device ? '+' : '-',
		device->queue_depth);
}

/* Assumes the SCSI device list lock is held. */

static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
	struct pqi_scsi_dev *new_device)
{
	existing_device->devtype = new_device->devtype;
	existing_device->device_type = new_device->device_type;
	existing_device->bus = new_device->bus;
	if (new_device->target_lun_valid) {
		existing_device->target = new_device->target;
		existing_device->lun = new_device->lun;
		existing_device->target_lun_valid = true;
	}

	/* By definition, the scsi3addr and wwid fields are already the same. */

	existing_device->is_physical_device = new_device->is_physical_device;
	existing_device->expose_device = new_device->expose_device;
	existing_device->no_uld_attach = new_device->no_uld_attach;
	existing_device->aio_enabled = new_device->aio_enabled;
	memcpy(existing_device->vendor, new_device->vendor,
		sizeof(existing_device->vendor));
	memcpy(existing_device->model, new_device->model,
		sizeof(existing_device->model));
	existing_device->sas_address = new_device->sas_address;
	existing_device->raid_level = new_device->raid_level;
	existing_device->queue_depth = new_device->queue_depth;
	existing_device->aio_handle = new_device->aio_handle;
	existing_device->volume_status = new_device->volume_status;
	existing_device->active_path_index = new_device->active_path_index;
	existing_device->path_map = new_device->path_map;
	existing_device->bay = new_device->bay;
	memcpy(existing_device->box, new_device->box,
		sizeof(existing_device->box));
	memcpy(existing_device->phys_connector, new_device->phys_connector,
		sizeof(existing_device->phys_connector));
	existing_device->offload_configured = new_device->offload_configured;
	existing_device->offload_enabled = false;
	existing_device->offload_enabled_pending =
		new_device->offload_enabled_pending;
	existing_device->offload_to_mirror = 0;
	kfree(existing_device->raid_map);
	existing_device->raid_map = new_device->raid_map;

	/* To prevent this from being freed later. */
	new_device->raid_map = NULL;
}

static inline void pqi_free_device(struct pqi_scsi_dev *device)
{
	if (device) {
		kfree(device->raid_map);
		kfree(device);
	}
}

/*
 * Called when exposing a new device to the OS fails in order to re-adjust
 * our internal SCSI device list to match the SCSI ML's view.
 */

static inline void pqi_fixup_botched_add(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *device)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
	list_del(&device->scsi_device_list_entry);
	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Allow the device structure to be freed later. */
	device->keep_device = false;
}

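/*
 * Reconcile the freshly discovered device list with the driver's internal
 * list: mark existing entries gone, match new entries against them, then
 * defer the actual SCSI ML remove/add work until after the device list
 * spinlock has been dropped.
 */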
static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
	struct pqi_scsi_dev *new_device_list[], unsigned int num_new_devices)
{
	int rc;
	unsigned int i;
	unsigned long flags;
	enum pqi_find_result find_result;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	struct pqi_scsi_dev *matching_device;
	struct list_head add_list;
	struct list_head delete_list;

	INIT_LIST_HEAD(&add_list);
	INIT_LIST_HEAD(&delete_list);

	/*
	 * The idea here is to do as little work as possible while holding the
	 * spinlock.  That's why we go to great pains to defer anything other
	 * than updating the internal device list until after we release the
	 * spinlock.
	 */

	spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

	/* Assume that all devices in the existing list have gone away. */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->device_gone = true;

	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];

		find_result = pqi_scsi_find_entry(ctrl_info, device,
			&matching_device);

		switch (find_result) {
		case DEVICE_SAME:
			/*
			 * The newly found device is already in the existing
			 * device list.
			 */
			device->new_device = false;
			matching_device->device_gone = false;
			pqi_scsi_update_device(matching_device, device);
			break;
		case DEVICE_NOT_FOUND:
			/*
			 * The newly found device is NOT in the existing device
			 * list.
			 */
			device->new_device = true;
			break;
		case DEVICE_CHANGED:
			/*
			 * The original device has gone away and we need to add
			 * the new device.
			 */
			device->new_device = true;
			break;
		}
	}

	/* Process all devices that have gone away. */
	list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->device_gone) {
			list_del(&device->scsi_device_list_entry);
			list_add_tail(&device->delete_list_entry, &delete_list);
		}
	}

	/* Process all new devices. */
	for (i = 0; i < num_new_devices; i++) {
		device = new_device_list[i];
		if (!device->new_device)
			continue;
		if (device->volume_offline)
			continue;
		list_add_tail(&device->scsi_device_list_entry,
			&ctrl_info->scsi_device_list);
		list_add_tail(&device->add_list_entry, &add_list);
		/* To prevent this device structure from being freed later. */
		device->keep_device = true;
	}

	pqi_update_all_logical_drive_queue_depths(ctrl_info);

	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry)
		device->offload_enabled =
			device->offload_enabled_pending;

	spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);

	/* Remove all devices that have gone away. */
	list_for_each_entry_safe(device, next, &delete_list,
		delete_list_entry) {
		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		if (device->volume_offline) {
			pqi_dev_info(ctrl_info, "offline", device);
			pqi_show_volume_status(ctrl_info, device);
		} else {
			pqi_dev_info(ctrl_info, "removed", device);
		}
		list_del(&device->delete_list_entry);
		pqi_free_device(device);
	}

	/*
	 * Notify the SCSI ML if the queue depth of any existing device has
	 * changed.
	 */
	list_for_each_entry(device, &ctrl_info->scsi_device_list,
		scsi_device_list_entry) {
		if (device->sdev && device->queue_depth !=
			device->advertised_queue_depth) {
			device->advertised_queue_depth = device->queue_depth;
			scsi_change_queue_depth(device->sdev,
				device->advertised_queue_depth);
		}
	}

	/* Expose any new devices. */
	list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
		if (device->expose_device && !device->sdev) {
			rc = pqi_add_device(ctrl_info, device);
			if (rc) {
				dev_warn(&ctrl_info->pci_dev->dev,
					"scsi %d:%d:%d:%d addition failed, device not added\n",
					ctrl_info->scsi_host->host_no,
					device->bus, device->target,
					device->lun);
				pqi_fixup_botched_add(ctrl_info, device);
				continue;
			}
		}
		pqi_dev_info(ctrl_info, "added", device);
	}
}

static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
	bool is_supported = false;

	switch (device->devtype) {
	case TYPE_DISK:
	case TYPE_ZBC:
	case TYPE_TAPE:
	case TYPE_MEDIUM_CHANGER:
	case TYPE_ENCLOSURE:
		is_supported = true;
		break;
	case TYPE_RAID:
		/*
		 * Only support the HBA controller itself as a RAID
		 * controller.  If it's a RAID controller other than
		 * the HBA itself (an external RAID controller, MSA500
		 * or similar), we don't support it.
		 */
		if (pqi_is_hba_lunid(device->scsi3addr))
			is_supported = true;
		break;
	}

	return is_supported;
}

static inline bool pqi_skip_device(u8 *scsi3addr,
	struct report_phys_lun_extended_entry *phys_lun_ext_entry)
{
	u8 device_flags;

	if (!MASKED_DEVICE(scsi3addr))
		return false;

	/* The device is masked. */

	device_flags = phys_lun_ext_entry->device_flags;

	if (device_flags & REPORT_PHYS_LUN_DEV_FLAG_NON_DISK) {
		/*
		 * It's a non-disk device.  We ignore all devices of this type
		 * when they're masked.
		 */
		return true;
	}

	return false;
}

static inline bool pqi_ok_to_expose_device(struct pqi_scsi_dev *device)
{
	/* Expose all devices except for physical devices that are masked. */
	if (device->is_physical_device && MASKED_DEVICE(device->scsi3addr))
		return false;

	return true;
}

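/*
 * Build candidate pqi_scsi_dev entries from the physical and logical
 * REPORT LUNS data, fill in device details via INQUIRY and BMIC identify
 * commands, and hand the resulting list to pqi_update_device_list().
 */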
static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int i;
	int rc;
	struct list_head new_device_list_head;
	struct report_phys_lun_extended *physdev_list = NULL;
	struct report_log_lun_extended *logdev_list = NULL;
	struct report_phys_lun_extended_entry *phys_lun_ext_entry;
	struct report_log_lun_extended_entry *log_lun_ext_entry;
	struct bmic_identify_physical_device *id_phys = NULL;
	u32 num_physicals;
	u32 num_logicals;
	struct pqi_scsi_dev **new_device_list = NULL;
	struct pqi_scsi_dev *device;
	struct pqi_scsi_dev *next;
	unsigned int num_new_devices;
	unsigned int num_valid_devices;
	bool is_physical_device;
	u8 *scsi3addr;
	static char *out_of_memory_msg =
		"out of memory, device discovery stopped";

	INIT_LIST_HEAD(&new_device_list_head);

	rc = pqi_get_device_lists(ctrl_info, &physdev_list, &logdev_list);
	if (rc)
		goto out;

	if (physdev_list)
		num_physicals =
			get_unaligned_be32(&physdev_list->header.list_length)
				/ sizeof(physdev_list->lun_entries[0]);
	else
		num_physicals = 0;

	if (logdev_list)
		num_logicals =
			get_unaligned_be32(&logdev_list->header.list_length)
				/ sizeof(logdev_list->lun_entries[0]);
	else
		num_logicals = 0;

	if (num_physicals) {
		/*
		 * We need this buffer for calls to pqi_get_physical_disk_info()
		 * below.  We allocate it here instead of inside
		 * pqi_get_physical_disk_info() because it's a fairly large
		 * buffer.
		 */
		id_phys = kmalloc(sizeof(*id_phys), GFP_KERNEL);
		if (!id_phys) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
	}

	num_new_devices = num_physicals + num_logicals;

	new_device_list = kmalloc(sizeof(*new_device_list) *
		num_new_devices, GFP_KERNEL);
	if (!new_device_list) {
		dev_warn(&ctrl_info->pci_dev->dev, "%s\n", out_of_memory_msg);
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_new_devices; i++) {
		device = kzalloc(sizeof(*device), GFP_KERNEL);
		if (!device) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			rc = -ENOMEM;
			goto out;
		}
		list_add_tail(&device->new_device_list_entry,
			&new_device_list_head);
	}

	device = NULL;
	num_valid_devices = 0;

	for (i = 0; i < num_new_devices; i++) {

		if (i < num_physicals) {
			is_physical_device = true;
			phys_lun_ext_entry = &physdev_list->lun_entries[i];
			log_lun_ext_entry = NULL;
			scsi3addr = phys_lun_ext_entry->lunid;
		} else {
			is_physical_device = false;
			phys_lun_ext_entry = NULL;
			log_lun_ext_entry =
				&logdev_list->lun_entries[i - num_physicals];
			scsi3addr = log_lun_ext_entry->lunid;
		}

		if (is_physical_device &&
			pqi_skip_device(scsi3addr, phys_lun_ext_entry))
			continue;

		if (device)
			device = list_next_entry(device, new_device_list_entry);
		else
			device = list_first_entry(&new_device_list_head,
				struct pqi_scsi_dev, new_device_list_entry);

		memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
		device->is_physical_device = is_physical_device;
		device->raid_level = SA_RAID_UNKNOWN;

		/* Gather information about the device. */
		rc = pqi_get_device_info(ctrl_info, device);
		if (rc == -ENOMEM) {
			dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
				out_of_memory_msg);
			goto out;
		}
		if (rc) {
			dev_warn(&ctrl_info->pci_dev->dev,
				"obtaining device info failed, skipping device %016llx\n",
				get_unaligned_be64(device->scsi3addr));
			rc = 0;
			continue;
		}

		if (!pqi_is_supported_device(device))
			continue;

		pqi_assign_bus_target_lun(device);

		device->expose_device = pqi_ok_to_expose_device(device);

		if (device->is_physical_device) {
			device->wwid = phys_lun_ext_entry->wwid;
			if ((phys_lun_ext_entry->device_flags &
				REPORT_PHYS_LUN_DEV_FLAG_AIO_ENABLED) &&
				phys_lun_ext_entry->aio_handle)
				device->aio_enabled = true;
		} else {
			memcpy(device->volume_id, log_lun_ext_entry->volume_id,
				sizeof(device->volume_id));
		}

		switch (device->devtype) {
		case TYPE_DISK:
		case TYPE_ZBC:
		case TYPE_ENCLOSURE:
			if (device->is_physical_device) {
				device->sas_address =
					get_unaligned_be64(&device->wwid);
				if (device->devtype == TYPE_DISK ||
					device->devtype == TYPE_ZBC) {
					device->aio_handle =
						phys_lun_ext_entry->aio_handle;
					pqi_get_physical_disk_info(ctrl_info,
						device, id_phys);
				}
			}
			break;
		}

		new_device_list[num_valid_devices++] = device;
	}

	pqi_update_device_list(ctrl_info, new_device_list, num_valid_devices);

out:
	list_for_each_entry_safe(device, next, &new_device_list_head,
		new_device_list_entry) {
		if (device->keep_device)
			continue;
		list_del(&device->new_device_list_entry);
		pqi_free_device(device);
	}

	kfree(new_device_list);
	kfree(physdev_list);
	kfree(logdev_list);
	kfree(id_phys);

	return rc;
}

static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	unsigned long flags;
	struct pqi_scsi_dev *device;

	while (1) {
		spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);

		device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
			struct pqi_scsi_dev, scsi_device_list_entry);
		if (device)
			list_del(&device->scsi_device_list_entry);

		spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
			flags);

		if (!device)
			break;

		if (device->sdev)
			pqi_remove_device(ctrl_info, device);
		pqi_free_device(device);
	}
}

static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
	int rc;

	if (pqi_ctrl_offline(ctrl_info))
		return -ENXIO;

	mutex_lock(&ctrl_info->scan_mutex);

	rc = pqi_update_scsi_devices(ctrl_info);
	if (rc)
		pqi_schedule_rescan_worker(ctrl_info);

	mutex_unlock(&ctrl_info->scan_mutex);

	return rc;
}

static void pqi_scan_start(struct Scsi_Host *shost)
{
	pqi_scan_scsi_devices(shost_to_hba(shost));
}

/* Returns TRUE if scan is finished. */

static int pqi_scan_finished(struct Scsi_Host *shost,
1984 unsigned long elapsed_time)
1985{
1986 struct pqi_ctrl_info *ctrl_info;
1987
1988 ctrl_info = shost_priv(shost);
1989
1990 return !mutex_is_locked(&ctrl_info->scan_mutex);
1991}
1992
Kevin Barnett061ef062017-05-03 18:53:05 -05001993static void pqi_wait_until_scan_finished(struct pqi_ctrl_info *ctrl_info)
1994{
1995 mutex_lock(&ctrl_info->scan_mutex);
1996 mutex_unlock(&ctrl_info->scan_mutex);
1997}
1998
1999static void pqi_wait_until_lun_reset_finished(struct pqi_ctrl_info *ctrl_info)
2000{
2001 mutex_lock(&ctrl_info->lun_reset_mutex);
2002 mutex_unlock(&ctrl_info->lun_reset_mutex);
2003}
2004
Kevin Barnett6c223762016-06-27 16:41:00 -05002005static inline void pqi_set_encryption_info(
2006 struct pqi_encryption_info *encryption_info, struct raid_map *raid_map,
2007 u64 first_block)
2008{
2009 u32 volume_blk_size;
2010
2011 /*
2012 * Set the encryption tweak values based on logical block address.
2013 * If the block size is 512, the tweak value is equal to the LBA.
2014 * For other block sizes, tweak value is (LBA * block size) / 512.
2015 */
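	/*
	 * Example with hypothetical values: a 4096-byte block size turns
	 * first_block 8 into a tweak of (8 * 4096) / 512 = 64.
	 */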
2016 volume_blk_size = get_unaligned_le32(&raid_map->volume_blk_size);
2017 if (volume_blk_size != 512)
2018 first_block = (first_block * volume_blk_size) / 512;
2019
2020 encryption_info->data_encryption_key_index =
2021 get_unaligned_le16(&raid_map->data_encryption_key_index);
2022 encryption_info->encrypt_tweak_lower = lower_32_bits(first_block);
2023 encryption_info->encrypt_tweak_upper = upper_32_bits(first_block);
2024}
2025
2026/*
2027 * Attempt to perform offload RAID mapping for a logical volume I/O.
2028 */
2029
2030#define PQI_RAID_BYPASS_INELIGIBLE 1
2031
2032static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
2033 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
2034 struct pqi_queue_group *queue_group)
2035{
2036 struct raid_map *raid_map;
2037 bool is_write = false;
2038 u32 map_index;
2039 u64 first_block;
2040 u64 last_block;
2041 u32 block_cnt;
2042 u32 blocks_per_row;
2043 u64 first_row;
2044 u64 last_row;
2045 u32 first_row_offset;
2046 u32 last_row_offset;
2047 u32 first_column;
2048 u32 last_column;
2049 u64 r0_first_row;
2050 u64 r0_last_row;
2051 u32 r5or6_blocks_per_row;
2052 u64 r5or6_first_row;
2053 u64 r5or6_last_row;
2054 u32 r5or6_first_row_offset;
2055 u32 r5or6_last_row_offset;
2056 u32 r5or6_first_column;
2057 u32 r5or6_last_column;
2058 u16 data_disks_per_row;
2059 u32 total_disks_per_row;
2060 u16 layout_map_count;
2061 u32 stripesize;
2062 u16 strip_size;
2063 u32 first_group;
2064 u32 last_group;
2065 u32 current_group;
2066 u32 map_row;
2067 u32 aio_handle;
2068 u64 disk_block;
2069 u32 disk_block_cnt;
2070 u8 cdb[16];
2071 u8 cdb_length;
2072 int offload_to_mirror;
2073 struct pqi_encryption_info *encryption_info_ptr;
2074 struct pqi_encryption_info encryption_info;
2075#if BITS_PER_LONG == 32
2076 u64 tmpdiv;
2077#endif
2078
2079 /* Check for valid opcode, get LBA and block count. */
2080 switch (scmd->cmnd[0]) {
2081 case WRITE_6:
2082 is_write = true;
2083 /* fall through */
2084 case READ_6:
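		/*
		 * READ(6)/WRITE(6) carry a 21-bit LBA, and a transfer length
		 * of zero means 256 blocks.
		 */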
kevin Barnette018ef52016-09-16 15:01:51 -05002085 first_block = (u64)(((scmd->cmnd[1] & 0x1f) << 16) |
2086 (scmd->cmnd[2] << 8) | scmd->cmnd[3]);
Kevin Barnett6c223762016-06-27 16:41:00 -05002087 block_cnt = (u32)scmd->cmnd[4];
2088 if (block_cnt == 0)
2089 block_cnt = 256;
2090 break;
2091 case WRITE_10:
2092 is_write = true;
2093 /* fall through */
2094 case READ_10:
2095 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2096 block_cnt = (u32)get_unaligned_be16(&scmd->cmnd[7]);
2097 break;
2098 case WRITE_12:
2099 is_write = true;
2100 /* fall through */
2101 case READ_12:
2102 first_block = (u64)get_unaligned_be32(&scmd->cmnd[2]);
2103 block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
2104 break;
2105 case WRITE_16:
2106 is_write = true;
2107 /* fall through */
2108 case READ_16:
2109 first_block = get_unaligned_be64(&scmd->cmnd[2]);
2110 block_cnt = get_unaligned_be32(&scmd->cmnd[10]);
2111 break;
2112 default:
2113 /* Process via normal I/O path. */
2114 return PQI_RAID_BYPASS_INELIGIBLE;
2115 }
2116
2117 /* Check for write to non-RAID-0. */
2118 if (is_write && device->raid_level != SA_RAID_0)
2119 return PQI_RAID_BYPASS_INELIGIBLE;
2120
2121 if (unlikely(block_cnt == 0))
2122 return PQI_RAID_BYPASS_INELIGIBLE;
2123
2124 last_block = first_block + block_cnt - 1;
2125 raid_map = device->raid_map;
2126
2127 /* Check for invalid block or wraparound. */
2128 if (last_block >= get_unaligned_le64(&raid_map->volume_blk_cnt) ||
2129 last_block < first_block)
2130 return PQI_RAID_BYPASS_INELIGIBLE;
2131
2132 data_disks_per_row = get_unaligned_le16(&raid_map->data_disks_per_row);
2133 strip_size = get_unaligned_le16(&raid_map->strip_size);
2134 layout_map_count = get_unaligned_le16(&raid_map->layout_map_count);
2135
2136 /* Calculate stripe information for the request. */
2137 blocks_per_row = data_disks_per_row * strip_size;
2138#if BITS_PER_LONG == 32
2139 tmpdiv = first_block;
2140 do_div(tmpdiv, blocks_per_row);
2141 first_row = tmpdiv;
2142 tmpdiv = last_block;
2143 do_div(tmpdiv, blocks_per_row);
2144 last_row = tmpdiv;
2145 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2146 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2147 tmpdiv = first_row_offset;
2148 do_div(tmpdiv, strip_size);
2149 first_column = tmpdiv;
2150 tmpdiv = last_row_offset;
2151 do_div(tmpdiv, strip_size);
2152 last_column = tmpdiv;
2153#else
2154 first_row = first_block / blocks_per_row;
2155 last_row = last_block / blocks_per_row;
2156 first_row_offset = (u32)(first_block - (first_row * blocks_per_row));
2157 last_row_offset = (u32)(last_block - (last_row * blocks_per_row));
2158 first_column = first_row_offset / strip_size;
2159 last_column = last_row_offset / strip_size;
2160#endif
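	/*
	 * Example with hypothetical geometry: strip_size = 128 and
	 * data_disks_per_row = 3 give blocks_per_row = 384, so
	 * first_block = 1000 lands in first_row = 2 at first_row_offset = 232
	 * and first_column = 232 / 128 = 1.
	 */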
2161
2162	/* If the I/O spans more than one row or strip, give it to the controller. */
2163 if (first_row != last_row || first_column != last_column)
2164 return PQI_RAID_BYPASS_INELIGIBLE;
2165
2166 /* Proceeding with driver mapping. */
2167 total_disks_per_row = data_disks_per_row +
2168 get_unaligned_le16(&raid_map->metadata_disks_per_row);
2169 map_row = ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2170 get_unaligned_le16(&raid_map->row_cnt);
2171 map_index = (map_row * total_disks_per_row) + first_column;
2172
2173 /* RAID 1 */
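	/* Alternate between the data drive and its mirror on each request. */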
2174 if (device->raid_level == SA_RAID_1) {
2175 if (device->offload_to_mirror)
2176 map_index += data_disks_per_row;
2177 device->offload_to_mirror = !device->offload_to_mirror;
2178 } else if (device->raid_level == SA_RAID_ADM) {
2179 /* RAID ADM */
2180 /*
2181 * Handles N-way mirrors (R1-ADM) and R10 with # of drives
2182 * divisible by 3.
2183		 * Handles N-way mirrors (R1-ADM) and RAID 10 with a drive
2184		 * count divisible by 3.
2185 if (offload_to_mirror == 0) {
2186 /* use physical disk in the first mirrored group. */
2187 map_index %= data_disks_per_row;
2188 } else {
2189 do {
2190 /*
2191 * Determine mirror group that map_index
2192 * indicates.
2193 */
2194 current_group = map_index / data_disks_per_row;
2195
2196 if (offload_to_mirror != current_group) {
2197 if (current_group <
2198 layout_map_count - 1) {
2199 /*
2200 * Select raid index from
2201 * next group.
2202 */
2203 map_index += data_disks_per_row;
2204 current_group++;
2205 } else {
2206 /*
2207 * Select raid index from first
2208 * group.
2209 */
2210 map_index %= data_disks_per_row;
2211 current_group = 0;
2212 }
2213 }
2214 } while (offload_to_mirror != current_group);
2215 }
2216
2217 /* Set mirror group to use next time. */
2218 offload_to_mirror =
2219 (offload_to_mirror >= layout_map_count - 1) ?
2220 0 : offload_to_mirror + 1;
2221 WARN_ON(offload_to_mirror >= layout_map_count);
2222 device->offload_to_mirror = offload_to_mirror;
2223		/*
2224		 * Avoid using device->offload_to_mirror directly in this
2225		 * function; multiple threads could otherwise increment it
2226		 * past layout_map_count - 1.
2227		 */
2228 } else if ((device->raid_level == SA_RAID_5 ||
2229 device->raid_level == SA_RAID_6) && layout_map_count > 1) {
2230 /* RAID 50/60 */
2231 /* Verify first and last block are in same RAID group */
2232 r5or6_blocks_per_row = strip_size * data_disks_per_row;
2233 stripesize = r5or6_blocks_per_row * layout_map_count;
2234#if BITS_PER_LONG == 32
2235 tmpdiv = first_block;
2236 first_group = do_div(tmpdiv, stripesize);
2237 tmpdiv = first_group;
2238 do_div(tmpdiv, r5or6_blocks_per_row);
2239 first_group = tmpdiv;
2240 tmpdiv = last_block;
2241 last_group = do_div(tmpdiv, stripesize);
2242 tmpdiv = last_group;
2243 do_div(tmpdiv, r5or6_blocks_per_row);
2244 last_group = tmpdiv;
2245#else
2246 first_group = (first_block % stripesize) / r5or6_blocks_per_row;
2247 last_group = (last_block % stripesize) / r5or6_blocks_per_row;
2248#endif
2249 if (first_group != last_group)
2250 return PQI_RAID_BYPASS_INELIGIBLE;
2251
2252 /* Verify request is in a single row of RAID 5/6 */
2253#if BITS_PER_LONG == 32
2254 tmpdiv = first_block;
2255 do_div(tmpdiv, stripesize);
2256 first_row = r5or6_first_row = r0_first_row = tmpdiv;
2257 tmpdiv = last_block;
2258 do_div(tmpdiv, stripesize);
2259 r5or6_last_row = r0_last_row = tmpdiv;
2260#else
2261 first_row = r5or6_first_row = r0_first_row =
2262 first_block / stripesize;
2263 r5or6_last_row = r0_last_row = last_block / stripesize;
2264#endif
2265 if (r5or6_first_row != r5or6_last_row)
2266 return PQI_RAID_BYPASS_INELIGIBLE;
2267
2268 /* Verify request is in a single column */
2269#if BITS_PER_LONG == 32
2270 tmpdiv = first_block;
2271 first_row_offset = do_div(tmpdiv, stripesize);
2272 tmpdiv = first_row_offset;
2273 first_row_offset = (u32)do_div(tmpdiv, r5or6_blocks_per_row);
2274 r5or6_first_row_offset = first_row_offset;
2275 tmpdiv = last_block;
2276 r5or6_last_row_offset = do_div(tmpdiv, stripesize);
2277 tmpdiv = r5or6_last_row_offset;
2278 r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row);
2279 tmpdiv = r5or6_first_row_offset;
2280 do_div(tmpdiv, strip_size);
2281 first_column = r5or6_first_column = tmpdiv;
2282 tmpdiv = r5or6_last_row_offset;
2283 do_div(tmpdiv, strip_size);
2284 r5or6_last_column = tmpdiv;
2285#else
2286 first_row_offset = r5or6_first_row_offset =
2287 (u32)((first_block % stripesize) %
2288 r5or6_blocks_per_row);
2289
2290 r5or6_last_row_offset =
2291 (u32)((last_block % stripesize) %
2292 r5or6_blocks_per_row);
2293
2294 first_column = r5or6_first_row_offset / strip_size;
2295 r5or6_first_column = first_column;
2296 r5or6_last_column = r5or6_last_row_offset / strip_size;
2297#endif
2298 if (r5or6_first_column != r5or6_last_column)
2299 return PQI_RAID_BYPASS_INELIGIBLE;
2300
2301 /* Request is eligible */
2302 map_row =
2303 ((u32)(first_row >> raid_map->parity_rotation_shift)) %
2304 get_unaligned_le16(&raid_map->row_cnt);
2305
2306 map_index = (first_group *
2307 (get_unaligned_le16(&raid_map->row_cnt) *
2308 total_disks_per_row)) +
2309 (map_row * total_disks_per_row) + first_column;
2310 }
2311
2312 if (unlikely(map_index >= RAID_MAP_MAX_ENTRIES))
2313 return PQI_RAID_BYPASS_INELIGIBLE;
2314
2315 aio_handle = raid_map->disk_data[map_index].aio_handle;
2316 disk_block = get_unaligned_le64(&raid_map->disk_starting_blk) +
2317 first_row * strip_size +
2318 (first_row_offset - first_column * strip_size);
2319 disk_block_cnt = block_cnt;
2320
2321 /* Handle differing logical/physical block sizes. */
2322 if (raid_map->phys_blk_shift) {
2323 disk_block <<= raid_map->phys_blk_shift;
2324 disk_block_cnt <<= raid_map->phys_blk_shift;
2325 }
2326
2327 if (unlikely(disk_block_cnt > 0xffff))
2328 return PQI_RAID_BYPASS_INELIGIBLE;
2329
2330 /* Build the new CDB for the physical disk I/O. */
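	/*
	 * A 10-byte CDB carries a 32-bit LBA and a 16-bit transfer length,
	 * so fall back to READ(16)/WRITE(16) when the starting block does
	 * not fit in 32 bits.
	 */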
2331 if (disk_block > 0xffffffff) {
2332 cdb[0] = is_write ? WRITE_16 : READ_16;
2333 cdb[1] = 0;
2334 put_unaligned_be64(disk_block, &cdb[2]);
2335 put_unaligned_be32(disk_block_cnt, &cdb[10]);
2336 cdb[14] = 0;
2337 cdb[15] = 0;
2338 cdb_length = 16;
2339 } else {
2340 cdb[0] = is_write ? WRITE_10 : READ_10;
2341 cdb[1] = 0;
2342 put_unaligned_be32((u32)disk_block, &cdb[2]);
2343 cdb[6] = 0;
2344 put_unaligned_be16((u16)disk_block_cnt, &cdb[7]);
2345 cdb[9] = 0;
2346 cdb_length = 10;
2347 }
2348
2349 if (get_unaligned_le16(&raid_map->flags) &
2350 RAID_MAP_ENCRYPTION_ENABLED) {
2351 pqi_set_encryption_info(&encryption_info, raid_map,
2352 first_block);
2353 encryption_info_ptr = &encryption_info;
2354 } else {
2355 encryption_info_ptr = NULL;
2356 }
2357
2358 return pqi_aio_submit_io(ctrl_info, scmd, aio_handle,
2359 cdb, cdb_length, queue_group, encryption_info_ptr);
2360}
2361
2362#define PQI_STATUS_IDLE 0x0
2363
2364#define PQI_CREATE_ADMIN_QUEUE_PAIR 1
2365#define PQI_DELETE_ADMIN_QUEUE_PAIR 2
2366
2367#define PQI_DEVICE_STATE_POWER_ON_AND_RESET 0x0
2368#define PQI_DEVICE_STATE_STATUS_AVAILABLE 0x1
2369#define PQI_DEVICE_STATE_ALL_REGISTERS_READY 0x2
2370#define PQI_DEVICE_STATE_ADMIN_QUEUE_PAIR_READY 0x3
2371#define PQI_DEVICE_STATE_ERROR 0x4
2372
2373#define PQI_MODE_READY_TIMEOUT_SECS 30
2374#define PQI_MODE_READY_POLL_INTERVAL_MSECS 1
2375
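/*
 * Poll until the controller is in PQI mode: the PQI signature is present,
 * the function and status code reads IDLE, and the device status reports
 * all registers ready.  All three stages share one
 * PQI_MODE_READY_TIMEOUT_SECS deadline.
 */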
2376static int pqi_wait_for_pqi_mode_ready(struct pqi_ctrl_info *ctrl_info)
2377{
2378 struct pqi_device_registers __iomem *pqi_registers;
2379 unsigned long timeout;
2380 u64 signature;
2381 u8 status;
2382
2383 pqi_registers = ctrl_info->pqi_registers;
2384 timeout = (PQI_MODE_READY_TIMEOUT_SECS * HZ) + jiffies;
2385
2386 while (1) {
2387 signature = readq(&pqi_registers->signature);
2388 if (memcmp(&signature, PQI_DEVICE_SIGNATURE,
2389 sizeof(signature)) == 0)
2390 break;
2391 if (time_after(jiffies, timeout)) {
2392 dev_err(&ctrl_info->pci_dev->dev,
2393 "timed out waiting for PQI signature\n");
2394 return -ETIMEDOUT;
2395 }
2396 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2397 }
2398
2399 while (1) {
2400 status = readb(&pqi_registers->function_and_status_code);
2401 if (status == PQI_STATUS_IDLE)
2402 break;
2403 if (time_after(jiffies, timeout)) {
2404 dev_err(&ctrl_info->pci_dev->dev,
2405 "timed out waiting for PQI IDLE\n");
2406 return -ETIMEDOUT;
2407 }
2408 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2409 }
2410
2411 while (1) {
2412 if (readl(&pqi_registers->device_status) ==
2413 PQI_DEVICE_STATE_ALL_REGISTERS_READY)
2414 break;
2415 if (time_after(jiffies, timeout)) {
2416 dev_err(&ctrl_info->pci_dev->dev,
2417 "timed out waiting for PQI all registers ready\n");
2418 return -ETIMEDOUT;
2419 }
2420 msleep(PQI_MODE_READY_POLL_INTERVAL_MSECS);
2421 }
2422
2423 return 0;
2424}
2425
2426static inline void pqi_aio_path_disabled(struct pqi_io_request *io_request)
2427{
2428 struct pqi_scsi_dev *device;
2429
2430 device = io_request->scmd->device->hostdata;
2431 device->offload_enabled = false;
2432}
2433
Kevin Barnettd87d5472017-05-03 18:54:00 -05002434static inline void pqi_take_device_offline(struct scsi_device *sdev, char *path)
Kevin Barnett6c223762016-06-27 16:41:00 -05002435{
2436 struct pqi_ctrl_info *ctrl_info;
Kevin Barnette58081a2016-08-31 14:54:29 -05002437 struct pqi_scsi_dev *device;
Kevin Barnett6c223762016-06-27 16:41:00 -05002438
2439 if (scsi_device_online(sdev)) {
2440 scsi_device_set_state(sdev, SDEV_OFFLINE);
2441 ctrl_info = shost_to_hba(sdev->host);
2442 schedule_delayed_work(&ctrl_info->rescan_work, 0);
Kevin Barnette58081a2016-08-31 14:54:29 -05002443 device = sdev->hostdata;
Kevin Barnettd87d5472017-05-03 18:54:00 -05002444 dev_err(&ctrl_info->pci_dev->dev, "offlined %s scsi %d:%d:%d:%d\n",
2445 path, ctrl_info->scsi_host->host_no, device->bus,
Kevin Barnette58081a2016-08-31 14:54:29 -05002446 device->target, device->lun);
Kevin Barnett6c223762016-06-27 16:41:00 -05002447 }
2448}
2449
2450static void pqi_process_raid_io_error(struct pqi_io_request *io_request)
2451{
2452 u8 scsi_status;
2453 u8 host_byte;
2454 struct scsi_cmnd *scmd;
2455 struct pqi_raid_error_info *error_info;
2456 size_t sense_data_length;
2457 int residual_count;
2458 int xfer_count;
2459 struct scsi_sense_hdr sshdr;
2460
2461 scmd = io_request->scmd;
2462 if (!scmd)
2463 return;
2464
2465 error_info = io_request->error_info;
2466 scsi_status = error_info->status;
2467 host_byte = DID_OK;
2468
2469 if (error_info->data_out_result == PQI_DATA_IN_OUT_UNDERFLOW) {
2470 xfer_count =
2471 get_unaligned_le32(&error_info->data_out_transferred);
2472 residual_count = scsi_bufflen(scmd) - xfer_count;
2473 scsi_set_resid(scmd, residual_count);
2474 if (xfer_count < scmd->underflow)
2475 host_byte = DID_SOFT_ERROR;
2476 }
2477
2478 sense_data_length = get_unaligned_le16(&error_info->sense_data_length);
2479 if (sense_data_length == 0)
2480 sense_data_length =
2481 get_unaligned_le16(&error_info->response_data_length);
2482 if (sense_data_length) {
2483 if (sense_data_length > sizeof(error_info->data))
2484 sense_data_length = sizeof(error_info->data);
2485
2486 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
2487 scsi_normalize_sense(error_info->data,
2488 sense_data_length, &sshdr) &&
2489 sshdr.sense_key == HARDWARE_ERROR &&
2490 sshdr.asc == 0x3e &&
2491 sshdr.ascq == 0x1) {
Kevin Barnettd87d5472017-05-03 18:54:00 -05002492 pqi_take_device_offline(scmd->device, "RAID");
Kevin Barnett6c223762016-06-27 16:41:00 -05002493 host_byte = DID_NO_CONNECT;
2494 }
2495
2496 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2497 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2498 memcpy(scmd->sense_buffer, error_info->data,
2499 sense_data_length);
2500 }
2501
2502 scmd->result = scsi_status;
2503 set_host_byte(scmd, host_byte);
2504}
2505
2506static void pqi_process_aio_io_error(struct pqi_io_request *io_request)
2507{
2508 u8 scsi_status;
2509 u8 host_byte;
2510 struct scsi_cmnd *scmd;
2511 struct pqi_aio_error_info *error_info;
2512 size_t sense_data_length;
2513 int residual_count;
2514 int xfer_count;
2515 bool device_offline;
2516
2517 scmd = io_request->scmd;
2518 error_info = io_request->error_info;
2519 host_byte = DID_OK;
2520 sense_data_length = 0;
2521 device_offline = false;
2522
2523 switch (error_info->service_response) {
2524 case PQI_AIO_SERV_RESPONSE_COMPLETE:
2525 scsi_status = error_info->status;
2526 break;
2527 case PQI_AIO_SERV_RESPONSE_FAILURE:
2528 switch (error_info->status) {
2529 case PQI_AIO_STATUS_IO_ABORTED:
2530 scsi_status = SAM_STAT_TASK_ABORTED;
2531 break;
2532 case PQI_AIO_STATUS_UNDERRUN:
2533 scsi_status = SAM_STAT_GOOD;
2534 residual_count = get_unaligned_le32(
2535 &error_info->residual_count);
2536 scsi_set_resid(scmd, residual_count);
2537 xfer_count = scsi_bufflen(scmd) - residual_count;
2538 if (xfer_count < scmd->underflow)
2539 host_byte = DID_SOFT_ERROR;
2540 break;
2541 case PQI_AIO_STATUS_OVERRUN:
2542 scsi_status = SAM_STAT_GOOD;
2543 break;
2544 case PQI_AIO_STATUS_AIO_PATH_DISABLED:
2545 pqi_aio_path_disabled(io_request);
2546 scsi_status = SAM_STAT_GOOD;
2547 io_request->status = -EAGAIN;
2548 break;
2549 case PQI_AIO_STATUS_NO_PATH_TO_DEVICE:
2550 case PQI_AIO_STATUS_INVALID_DEVICE:
2551 device_offline = true;
Kevin Barnettd87d5472017-05-03 18:54:00 -05002552 pqi_take_device_offline(scmd->device, "AIO");
Kevin Barnett6c223762016-06-27 16:41:00 -05002553 host_byte = DID_NO_CONNECT;
2554 scsi_status = SAM_STAT_CHECK_CONDITION;
2555 break;
2556 case PQI_AIO_STATUS_IO_ERROR:
2557 default:
2558 scsi_status = SAM_STAT_CHECK_CONDITION;
2559 break;
2560 }
2561 break;
2562 case PQI_AIO_SERV_RESPONSE_TMF_COMPLETE:
2563 case PQI_AIO_SERV_RESPONSE_TMF_SUCCEEDED:
2564 scsi_status = SAM_STAT_GOOD;
2565 break;
2566 case PQI_AIO_SERV_RESPONSE_TMF_REJECTED:
2567 case PQI_AIO_SERV_RESPONSE_TMF_INCORRECT_LUN:
2568 default:
2569 scsi_status = SAM_STAT_CHECK_CONDITION;
2570 break;
2571 }
2572
2573 if (error_info->data_present) {
2574 sense_data_length =
2575 get_unaligned_le16(&error_info->data_length);
2576 if (sense_data_length) {
2577 if (sense_data_length > sizeof(error_info->data))
2578 sense_data_length = sizeof(error_info->data);
2579 if (sense_data_length > SCSI_SENSE_BUFFERSIZE)
2580 sense_data_length = SCSI_SENSE_BUFFERSIZE;
2581 memcpy(scmd->sense_buffer, error_info->data,
2582 sense_data_length);
2583 }
2584 }
2585
2586 if (device_offline && sense_data_length == 0)
2587 scsi_build_sense_buffer(0, scmd->sense_buffer, HARDWARE_ERROR,
2588 0x3e, 0x1);
2589
2590 scmd->result = scsi_status;
2591 set_host_byte(scmd, host_byte);
2592}
2593
2594static void pqi_process_io_error(unsigned int iu_type,
2595 struct pqi_io_request *io_request)
2596{
2597 switch (iu_type) {
2598 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2599 pqi_process_raid_io_error(io_request);
2600 break;
2601 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2602 pqi_process_aio_io_error(io_request);
2603 break;
2604 }
2605}
2606
2607static int pqi_interpret_task_management_response(
2608 struct pqi_task_management_response *response)
2609{
2610 int rc;
2611
2612 switch (response->response_code) {
Kevin Barnettb17f0482016-08-31 14:54:17 -05002613 case SOP_TMF_COMPLETE:
2614 case SOP_TMF_FUNCTION_SUCCEEDED:
Kevin Barnett6c223762016-06-27 16:41:00 -05002615 rc = 0;
2616 break;
2617 default:
2618 rc = -EIO;
2619 break;
2620 }
2621
2622 return rc;
2623}
2624
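/*
 * Drain completed responses from one queue group's outbound queue: walk from
 * the copied consumer index to the controller's producer index, hand each
 * response to its io_request's completion callback, then write the new
 * consumer index back to the controller.
 */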
2625static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
2626 struct pqi_queue_group *queue_group)
2627{
2628 unsigned int num_responses;
2629 pqi_index_t oq_pi;
2630 pqi_index_t oq_ci;
2631 struct pqi_io_request *io_request;
2632 struct pqi_io_response *response;
2633 u16 request_id;
2634
2635 num_responses = 0;
2636 oq_ci = queue_group->oq_ci_copy;
2637
2638 while (1) {
2639 oq_pi = *queue_group->oq_pi;
2640 if (oq_pi == oq_ci)
2641 break;
2642
2643 num_responses++;
2644 response = queue_group->oq_element_array +
2645 (oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
2646
2647 request_id = get_unaligned_le16(&response->request_id);
2648 WARN_ON(request_id >= ctrl_info->max_io_slots);
2649
2650 io_request = &ctrl_info->io_request_pool[request_id];
2651 WARN_ON(atomic_read(&io_request->refcount) == 0);
2652
2653 switch (response->header.iu_type) {
2654 case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
2655 case PQI_RESPONSE_IU_AIO_PATH_IO_SUCCESS:
2656 case PQI_RESPONSE_IU_GENERAL_MANAGEMENT:
2657 break;
2658 case PQI_RESPONSE_IU_TASK_MANAGEMENT:
2659 io_request->status =
2660 pqi_interpret_task_management_response(
2661 (void *)response);
2662 break;
2663 case PQI_RESPONSE_IU_AIO_PATH_DISABLED:
2664 pqi_aio_path_disabled(io_request);
2665 io_request->status = -EAGAIN;
2666 break;
2667 case PQI_RESPONSE_IU_RAID_PATH_IO_ERROR:
2668 case PQI_RESPONSE_IU_AIO_PATH_IO_ERROR:
2669 io_request->error_info = ctrl_info->error_buffer +
2670 (get_unaligned_le16(&response->error_index) *
2671 PQI_ERROR_BUFFER_ELEMENT_LENGTH);
2672 pqi_process_io_error(response->header.iu_type,
2673 io_request);
2674 break;
2675 default:
2676 dev_err(&ctrl_info->pci_dev->dev,
2677 "unexpected IU type: 0x%x\n",
2678 response->header.iu_type);
Kevin Barnett6c223762016-06-27 16:41:00 -05002679 break;
2680 }
2681
2682 io_request->io_complete_callback(io_request,
2683 io_request->context);
2684
2685 /*
2686 * Note that the I/O request structure CANNOT BE TOUCHED after
2687 * returning from the I/O completion callback!
2688 */
2689
2690 oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
2691 }
2692
2693 if (num_responses) {
2694 queue_group->oq_ci_copy = oq_ci;
2695 writel(oq_ci, queue_group->oq_ci);
2696 }
2697
2698 return num_responses;
2699}
2700
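/*
 * One element is always left unused so that pi == ci unambiguously means
 * "empty".  For example, with 16 elements, pi = 5 and ci = 2 give 3 elements
 * in use and 12 free; with pi = 2 and ci = 5, wraparound gives 13 in use and
 * 2 free.
 */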
2701static inline unsigned int pqi_num_elements_free(unsigned int pi,
Kevin Barnettdf7a1fc2016-08-31 14:54:59 -05002702 unsigned int ci, unsigned int elements_in_queue)
Kevin Barnett6c223762016-06-27 16:41:00 -05002703{
2704 unsigned int num_elements_used;
2705
2706 if (pi >= ci)
2707 num_elements_used = pi - ci;
2708 else
2709 num_elements_used = elements_in_queue - ci + pi;
2710
2711 return elements_in_queue - num_elements_used - 1;
2712}
2713
Kevin Barnett98f87662017-05-03 18:53:11 -05002714static void pqi_send_event_ack(struct pqi_ctrl_info *ctrl_info,
Kevin Barnett6c223762016-06-27 16:41:00 -05002715 struct pqi_event_acknowledge_request *iu, size_t iu_length)
2716{
2717 pqi_index_t iq_pi;
2718 pqi_index_t iq_ci;
2719 unsigned long flags;
2720 void *next_element;
Kevin Barnett6c223762016-06-27 16:41:00 -05002721 struct pqi_queue_group *queue_group;
2722
2723 queue_group = &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP];
2724 put_unaligned_le16(queue_group->oq_id, &iu->header.response_queue_id);
2725
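	/*
	 * Wait for a free element in the RAID path inbound queue, dropping
	 * the submit lock between attempts; bail out if the controller goes
	 * offline while waiting.
	 */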
Kevin Barnett6c223762016-06-27 16:41:00 -05002726 while (1) {
2727 spin_lock_irqsave(&queue_group->submit_lock[RAID_PATH], flags);
2728
2729 iq_pi = queue_group->iq_pi_copy[RAID_PATH];
2730 iq_ci = *queue_group->iq_ci[RAID_PATH];
2731
2732 if (pqi_num_elements_free(iq_pi, iq_ci,
2733 ctrl_info->num_elements_per_iq))
2734 break;
2735
2736 spin_unlock_irqrestore(
2737 &queue_group->submit_lock[RAID_PATH], flags);
2738
Kevin Barnett98f87662017-05-03 18:53:11 -05002739 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05002740 return;
Kevin Barnett6c223762016-06-27 16:41:00 -05002741 }
2742
2743 next_element = queue_group->iq_element_array[RAID_PATH] +
2744 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
2745
2746 memcpy(next_element, iu, iu_length);
2747
2748 iq_pi = (iq_pi + 1) % ctrl_info->num_elements_per_iq;
Kevin Barnett6c223762016-06-27 16:41:00 -05002749 queue_group->iq_pi_copy[RAID_PATH] = iq_pi;
2750
2751 /*
2752 * This write notifies the controller that an IU is available to be
2753 * processed.
2754 */
2755 writel(iq_pi, queue_group->iq_pi[RAID_PATH]);
2756
2757 spin_unlock_irqrestore(&queue_group->submit_lock[RAID_PATH], flags);
Kevin Barnett6c223762016-06-27 16:41:00 -05002758}
2759
2760static void pqi_acknowledge_event(struct pqi_ctrl_info *ctrl_info,
2761 struct pqi_event *event)
2762{
2763 struct pqi_event_acknowledge_request request;
2764
2765 memset(&request, 0, sizeof(request));
2766
2767 request.header.iu_type = PQI_REQUEST_IU_ACKNOWLEDGE_VENDOR_EVENT;
2768 put_unaligned_le16(sizeof(request) - PQI_REQUEST_HEADER_LENGTH,
2769 &request.header.iu_length);
2770 request.event_type = event->event_type;
2771 request.event_id = event->event_id;
2772 request.additional_event_id = event->additional_event_id;
2773
Kevin Barnett98f87662017-05-03 18:53:11 -05002774 pqi_send_event_ack(ctrl_info, &request, sizeof(request));
Kevin Barnett6c223762016-06-27 16:41:00 -05002775}
2776
2777static void pqi_event_worker(struct work_struct *work)
2778{
2779 unsigned int i;
2780 struct pqi_ctrl_info *ctrl_info;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002781 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002782
2783 ctrl_info = container_of(work, struct pqi_ctrl_info, event_work);
2784
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002785 pqi_ctrl_busy(ctrl_info);
2786 pqi_wait_if_ctrl_blocked(ctrl_info, NO_TIMEOUT);
2787
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002788 event = ctrl_info->events;
Kevin Barnett6c223762016-06-27 16:41:00 -05002789 for (i = 0; i < PQI_NUM_SUPPORTED_EVENTS; i++) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002790 if (event->pending) {
2791 event->pending = false;
2792 pqi_acknowledge_event(ctrl_info, event);
Kevin Barnett6c223762016-06-27 16:41:00 -05002793 }
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002794 event++;
Kevin Barnett6c223762016-06-27 16:41:00 -05002795 }
2796
Kevin Barnett7561a7e2017-05-03 18:52:58 -05002797 pqi_ctrl_unbusy(ctrl_info);
2798
2799 pqi_schedule_rescan_worker(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002800}
2801
2802static void pqi_take_ctrl_offline(struct pqi_ctrl_info *ctrl_info)
2803{
2804 unsigned int i;
2805 unsigned int path;
2806 struct pqi_queue_group *queue_group;
2807 unsigned long flags;
2808 struct pqi_io_request *io_request;
2809 struct pqi_io_request *next;
2810 struct scsi_cmnd *scmd;
2811
2812 ctrl_info->controller_online = false;
2813 dev_err(&ctrl_info->pci_dev->dev, "controller offline\n");
Kevin Barnett5b0fba02017-05-03 18:52:40 -05002814 sis_shutdown_ctrl(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002815
2816 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
2817 queue_group = &ctrl_info->queue_groups[i];
2818
2819 for (path = 0; path < 2; path++) {
2820 spin_lock_irqsave(
2821 &queue_group->submit_lock[path], flags);
2822
2823 list_for_each_entry_safe(io_request, next,
2824 &queue_group->request_list[path],
2825 request_list_entry) {
2826
2827 scmd = io_request->scmd;
2828 if (scmd) {
2829 set_host_byte(scmd, DID_NO_CONNECT);
2830 pqi_scsi_done(scmd);
2831 }
2832
2833 list_del(&io_request->request_list_entry);
2834 }
2835
2836 spin_unlock_irqrestore(
2837 &queue_group->submit_lock[path], flags);
2838 }
2839 }
2840}
2841
Kevin Barnett98f87662017-05-03 18:53:11 -05002842#define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ)
Kevin Barnett6c223762016-06-27 16:41:00 -05002843
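/*
 * Runs every PQI_HEARTBEAT_TIMER_INTERVAL.  The controller is taken offline
 * only when neither the interrupt count nor the firmware heartbeat counter
 * has advanced since the previous run.
 */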
2844static void pqi_heartbeat_timer_handler(unsigned long data)
2845{
2846 int num_interrupts;
Kevin Barnett98f87662017-05-03 18:53:11 -05002847 u32 heartbeat_count;
Kevin Barnett6c223762016-06-27 16:41:00 -05002848 struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data;
2849
Kevin Barnett98f87662017-05-03 18:53:11 -05002850 pqi_check_ctrl_health(ctrl_info);
2851 if (pqi_ctrl_offline(ctrl_info))
Kevin Barnett061ef062017-05-03 18:53:05 -05002852 return;
2853
Kevin Barnett6c223762016-06-27 16:41:00 -05002854 num_interrupts = atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05002855 heartbeat_count = pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002856
2857 if (num_interrupts == ctrl_info->previous_num_interrupts) {
Kevin Barnett98f87662017-05-03 18:53:11 -05002858 if (heartbeat_count == ctrl_info->previous_heartbeat_count) {
2859 dev_err(&ctrl_info->pci_dev->dev,
2860 "no heartbeat detected - last heartbeat count: %u\n",
2861 heartbeat_count);
Kevin Barnett6c223762016-06-27 16:41:00 -05002862 pqi_take_ctrl_offline(ctrl_info);
2863 return;
2864 }
Kevin Barnett6c223762016-06-27 16:41:00 -05002865 } else {
Kevin Barnett98f87662017-05-03 18:53:11 -05002866 ctrl_info->previous_num_interrupts = num_interrupts;
Kevin Barnett6c223762016-06-27 16:41:00 -05002867 }
2868
Kevin Barnett98f87662017-05-03 18:53:11 -05002869 ctrl_info->previous_heartbeat_count = heartbeat_count;
Kevin Barnett6c223762016-06-27 16:41:00 -05002870 mod_timer(&ctrl_info->heartbeat_timer,
2871 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL);
2872}
2873
2874static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2875{
Kevin Barnett98f87662017-05-03 18:53:11 -05002876 if (!ctrl_info->heartbeat_counter)
2877 return;
2878
Kevin Barnett6c223762016-06-27 16:41:00 -05002879 ctrl_info->previous_num_interrupts =
2880 atomic_read(&ctrl_info->num_interrupts);
Kevin Barnett98f87662017-05-03 18:53:11 -05002881 ctrl_info->previous_heartbeat_count =
2882 pqi_read_heartbeat_counter(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05002883
Kevin Barnett6c223762016-06-27 16:41:00 -05002884 ctrl_info->heartbeat_timer.expires =
2885 jiffies + PQI_HEARTBEAT_TIMER_INTERVAL;
2886 ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info;
2887 ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler;
Kevin Barnett061ef062017-05-03 18:53:05 -05002888 add_timer(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05002889}
2890
2891static inline void pqi_stop_heartbeat_timer(struct pqi_ctrl_info *ctrl_info)
2892{
Kevin Barnett98f87662017-05-03 18:53:11 -05002893 del_timer_sync(&ctrl_info->heartbeat_timer);
Kevin Barnett6c223762016-06-27 16:41:00 -05002894}
2895
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002896static inline int pqi_event_type_to_event_index(unsigned int event_type)
Kevin Barnett6c223762016-06-27 16:41:00 -05002897{
2898 int index;
2899
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002900 for (index = 0; index < ARRAY_SIZE(pqi_supported_event_types); index++)
2901 if (event_type == pqi_supported_event_types[index])
2902 return index;
Kevin Barnett6c223762016-06-27 16:41:00 -05002903
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002904 return -1;
2905}
2906
2907static inline bool pqi_is_supported_event(unsigned int event_type)
2908{
2909 return pqi_event_type_to_event_index(event_type) != -1;
Kevin Barnett6c223762016-06-27 16:41:00 -05002910}
2911
2912static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
2913{
2914 unsigned int num_events;
2915 pqi_index_t oq_pi;
2916 pqi_index_t oq_ci;
2917 struct pqi_event_queue *event_queue;
2918 struct pqi_event_response *response;
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002919 struct pqi_event *event;
Kevin Barnett6c223762016-06-27 16:41:00 -05002920 int event_index;
2921
2922 event_queue = &ctrl_info->event_queue;
2923 num_events = 0;
Kevin Barnett6c223762016-06-27 16:41:00 -05002924 oq_ci = event_queue->oq_ci_copy;
2925
2926 while (1) {
2927 oq_pi = *event_queue->oq_pi;
2928 if (oq_pi == oq_ci)
2929 break;
2930
2931 num_events++;
2932 response = event_queue->oq_element_array +
2933 (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
2934
2935 event_index =
2936 pqi_event_type_to_event_index(response->event_type);
2937
2938 if (event_index >= 0) {
2939 if (response->request_acknowlege) {
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05002940 event = &ctrl_info->events[event_index];
2941 event->pending = true;
2942 event->event_type = response->event_type;
2943 event->event_id = response->event_id;
2944 event->additional_event_id =
Kevin Barnett6c223762016-06-27 16:41:00 -05002945 response->additional_event_id;
Kevin Barnett6c223762016-06-27 16:41:00 -05002946 }
2947 }
2948
2949 oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
2950 }
2951
2952 if (num_events) {
2953 event_queue->oq_ci_copy = oq_ci;
2954 writel(oq_ci, event_queue->oq_ci);
Kevin Barnett98f87662017-05-03 18:53:11 -05002955 schedule_work(&ctrl_info->event_work);
Kevin Barnett6c223762016-06-27 16:41:00 -05002956 }
2957
2958 return num_events;
2959}
2960
Kevin Barnett061ef062017-05-03 18:53:05 -05002961#define PQI_LEGACY_INTX_MASK 0x1
2962
2963static inline void pqi_configure_legacy_intx(struct pqi_ctrl_info *ctrl_info,
2964 bool enable_intx)
2965{
2966 u32 intx_mask;
2967 struct pqi_device_registers __iomem *pqi_registers;
2968 volatile void __iomem *register_addr;
2969
2970 pqi_registers = ctrl_info->pqi_registers;
2971
2972 if (enable_intx)
2973 register_addr = &pqi_registers->legacy_intx_mask_clear;
2974 else
2975 register_addr = &pqi_registers->legacy_intx_mask_set;
2976
2977 intx_mask = readl(register_addr);
2978 intx_mask |= PQI_LEGACY_INTX_MASK;
2979 writel(intx_mask, register_addr);
2980}
2981
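/*
 * Switch the controller between MSI-X, legacy INTx, and no-interrupt modes,
 * pairing the SIS enable/disable calls with the legacy INTx mask registers.
 */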
2982static void pqi_change_irq_mode(struct pqi_ctrl_info *ctrl_info,
2983 enum pqi_irq_mode new_mode)
2984{
2985 switch (ctrl_info->irq_mode) {
2986 case IRQ_MODE_MSIX:
2987 switch (new_mode) {
2988 case IRQ_MODE_MSIX:
2989 break;
2990 case IRQ_MODE_INTX:
2991 pqi_configure_legacy_intx(ctrl_info, true);
2992 sis_disable_msix(ctrl_info);
2993 sis_enable_intx(ctrl_info);
2994 break;
2995 case IRQ_MODE_NONE:
2996 sis_disable_msix(ctrl_info);
2997 break;
2998 }
2999 break;
3000 case IRQ_MODE_INTX:
3001 switch (new_mode) {
3002 case IRQ_MODE_MSIX:
3003 pqi_configure_legacy_intx(ctrl_info, false);
3004 sis_disable_intx(ctrl_info);
3005 sis_enable_msix(ctrl_info);
3006 break;
3007 case IRQ_MODE_INTX:
3008 break;
3009 case IRQ_MODE_NONE:
3010 pqi_configure_legacy_intx(ctrl_info, false);
3011 sis_disable_intx(ctrl_info);
3012 break;
3013 }
3014 break;
3015 case IRQ_MODE_NONE:
3016 switch (new_mode) {
3017 case IRQ_MODE_MSIX:
3018 sis_enable_msix(ctrl_info);
3019 break;
3020 case IRQ_MODE_INTX:
3021 pqi_configure_legacy_intx(ctrl_info, true);
3022 sis_enable_intx(ctrl_info);
3023 break;
3024 case IRQ_MODE_NONE:
3025 break;
3026 }
3027 break;
3028 }
3029
3030 ctrl_info->irq_mode = new_mode;
3031}
3032
3033#define PQI_LEGACY_INTX_PENDING 0x1
3034
3035static inline bool pqi_is_valid_irq(struct pqi_ctrl_info *ctrl_info)
3036{
3037 bool valid_irq;
3038 u32 intx_status;
3039
3040 switch (ctrl_info->irq_mode) {
3041 case IRQ_MODE_MSIX:
3042 valid_irq = true;
3043 break;
3044 case IRQ_MODE_INTX:
3045 intx_status =
3046 readl(&ctrl_info->pqi_registers->legacy_intx_status);
3047 if (intx_status & PQI_LEGACY_INTX_PENDING)
3048 valid_irq = true;
3049 else
3050 valid_irq = false;
3051 break;
3052 case IRQ_MODE_NONE:
3053 default:
3054 valid_irq = false;
3055 break;
3056 }
3057
3058 return valid_irq;
3059}
3060
Kevin Barnett6c223762016-06-27 16:41:00 -05003061static irqreturn_t pqi_irq_handler(int irq, void *data)
3062{
3063 struct pqi_ctrl_info *ctrl_info;
3064 struct pqi_queue_group *queue_group;
3065 unsigned int num_responses_handled;
3066
3067 queue_group = data;
3068 ctrl_info = queue_group->ctrl_info;
3069
Kevin Barnett061ef062017-05-03 18:53:05 -05003070 if (!pqi_is_valid_irq(ctrl_info))
Kevin Barnett6c223762016-06-27 16:41:00 -05003071 return IRQ_NONE;
3072
3073 num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
3074
3075 if (irq == ctrl_info->event_irq)
3076 num_responses_handled += pqi_process_event_intr(ctrl_info);
3077
3078 if (num_responses_handled)
3079 atomic_inc(&ctrl_info->num_interrupts);
3080
3081 pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
3082 pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
3083
3084 return IRQ_HANDLED;
3085}
3086
3087static int pqi_request_irqs(struct pqi_ctrl_info *ctrl_info)
3088{
Kevin Barnettd91d7822017-05-03 18:53:30 -05003089 struct pci_dev *pci_dev = ctrl_info->pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05003090 int i;
3091 int rc;
3092
Kevin Barnettd91d7822017-05-03 18:53:30 -05003093 ctrl_info->event_irq = pci_irq_vector(pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05003094
3095 for (i = 0; i < ctrl_info->num_msix_vectors_enabled; i++) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003096 rc = request_irq(pci_irq_vector(pci_dev, i), pqi_irq_handler, 0,
Christoph Hellwig52198222016-11-01 08:12:49 -06003097 DRIVER_NAME_SHORT, &ctrl_info->queue_groups[i]);
Kevin Barnett6c223762016-06-27 16:41:00 -05003098 if (rc) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05003099 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05003100 "irq %u init failed with error %d\n",
Kevin Barnettd91d7822017-05-03 18:53:30 -05003101 pci_irq_vector(pci_dev, i), rc);
Kevin Barnett6c223762016-06-27 16:41:00 -05003102 return rc;
3103 }
3104 ctrl_info->num_msix_vectors_initialized++;
3105 }
3106
3107 return 0;
3108}
3109
Kevin Barnett98bf0612017-05-03 18:52:28 -05003110static void pqi_free_irqs(struct pqi_ctrl_info *ctrl_info)
3111{
3112 int i;
3113
3114 for (i = 0; i < ctrl_info->num_msix_vectors_initialized; i++)
3115 free_irq(pci_irq_vector(ctrl_info->pci_dev, i),
3116 &ctrl_info->queue_groups[i]);
3117
3118 ctrl_info->num_msix_vectors_initialized = 0;
3119}
3120
Kevin Barnett6c223762016-06-27 16:41:00 -05003121static int pqi_enable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3122{
Kevin Barnett98bf0612017-05-03 18:52:28 -05003123 int num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003124
Kevin Barnett98bf0612017-05-03 18:52:28 -05003125 num_vectors_enabled = pci_alloc_irq_vectors(ctrl_info->pci_dev,
Christoph Hellwig52198222016-11-01 08:12:49 -06003126 PQI_MIN_MSIX_VECTORS, ctrl_info->num_queue_groups,
3127 PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
Kevin Barnett98bf0612017-05-03 18:52:28 -05003128 if (num_vectors_enabled < 0) {
Kevin Barnett6c223762016-06-27 16:41:00 -05003129 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett98bf0612017-05-03 18:52:28 -05003130 "MSI-X init failed with error %d\n",
3131 num_vectors_enabled);
3132 return num_vectors_enabled;
Kevin Barnett6c223762016-06-27 16:41:00 -05003133 }
3134
Kevin Barnett98bf0612017-05-03 18:52:28 -05003135 ctrl_info->num_msix_vectors_enabled = num_vectors_enabled;
Kevin Barnett061ef062017-05-03 18:53:05 -05003136 ctrl_info->irq_mode = IRQ_MODE_MSIX;
Kevin Barnett6c223762016-06-27 16:41:00 -05003137 return 0;
3138}
3139
Kevin Barnett98bf0612017-05-03 18:52:28 -05003140static void pqi_disable_msix_interrupts(struct pqi_ctrl_info *ctrl_info)
3141{
3142 if (ctrl_info->num_msix_vectors_enabled) {
3143 pci_free_irq_vectors(ctrl_info->pci_dev);
3144 ctrl_info->num_msix_vectors_enabled = 0;
3145 }
3146}
3147
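/*
 * All operational queue element arrays, the event queue, and the queue index
 * words are carved out of a single coherent DMA allocation.  The pointer walk
 * below, starting from NULL, only computes the total length to allocate,
 * including alignment padding; the same walk is then repeated against the
 * real allocation.
 */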
Kevin Barnett6c223762016-06-27 16:41:00 -05003148static int pqi_alloc_operational_queues(struct pqi_ctrl_info *ctrl_info)
3149{
3150 unsigned int i;
3151 size_t alloc_length;
3152 size_t element_array_length_per_iq;
3153 size_t element_array_length_per_oq;
3154 void *element_array;
3155 void *next_queue_index;
3156 void *aligned_pointer;
3157 unsigned int num_inbound_queues;
3158 unsigned int num_outbound_queues;
3159 unsigned int num_queue_indexes;
3160 struct pqi_queue_group *queue_group;
3161
3162 element_array_length_per_iq =
3163 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH *
3164 ctrl_info->num_elements_per_iq;
3165 element_array_length_per_oq =
3166 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH *
3167 ctrl_info->num_elements_per_oq;
3168 num_inbound_queues = ctrl_info->num_queue_groups * 2;
3169 num_outbound_queues = ctrl_info->num_queue_groups;
3170 num_queue_indexes = (ctrl_info->num_queue_groups * 3) + 1;
3171
3172 aligned_pointer = NULL;
3173
3174 for (i = 0; i < num_inbound_queues; i++) {
3175 aligned_pointer = PTR_ALIGN(aligned_pointer,
3176 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3177 aligned_pointer += element_array_length_per_iq;
3178 }
3179
3180 for (i = 0; i < num_outbound_queues; i++) {
3181 aligned_pointer = PTR_ALIGN(aligned_pointer,
3182 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3183 aligned_pointer += element_array_length_per_oq;
3184 }
3185
3186 aligned_pointer = PTR_ALIGN(aligned_pointer,
3187 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3188 aligned_pointer += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3189 PQI_EVENT_OQ_ELEMENT_LENGTH;
3190
3191 for (i = 0; i < num_queue_indexes; i++) {
3192 aligned_pointer = PTR_ALIGN(aligned_pointer,
3193 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3194 aligned_pointer += sizeof(pqi_index_t);
3195 }
3196
3197 alloc_length = (size_t)aligned_pointer +
3198 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3199
Kevin Barnette1d213b2017-05-03 18:53:18 -05003200 alloc_length += PQI_EXTRA_SGL_MEMORY;
3201
Kevin Barnett6c223762016-06-27 16:41:00 -05003202 ctrl_info->queue_memory_base =
3203 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3204 alloc_length,
3205 &ctrl_info->queue_memory_base_dma_handle, GFP_KERNEL);
3206
Kevin Barnettd87d5472017-05-03 18:54:00 -05003207 if (!ctrl_info->queue_memory_base)
Kevin Barnett6c223762016-06-27 16:41:00 -05003208 return -ENOMEM;
Kevin Barnett6c223762016-06-27 16:41:00 -05003209
3210 ctrl_info->queue_memory_length = alloc_length;
3211
3212 element_array = PTR_ALIGN(ctrl_info->queue_memory_base,
3213 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3214
3215 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3216 queue_group = &ctrl_info->queue_groups[i];
3217 queue_group->iq_element_array[RAID_PATH] = element_array;
3218 queue_group->iq_element_array_bus_addr[RAID_PATH] =
3219 ctrl_info->queue_memory_base_dma_handle +
3220 (element_array - ctrl_info->queue_memory_base);
3221 element_array += element_array_length_per_iq;
3222 element_array = PTR_ALIGN(element_array,
3223 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3224 queue_group->iq_element_array[AIO_PATH] = element_array;
3225 queue_group->iq_element_array_bus_addr[AIO_PATH] =
3226 ctrl_info->queue_memory_base_dma_handle +
3227 (element_array - ctrl_info->queue_memory_base);
3228 element_array += element_array_length_per_iq;
3229 element_array = PTR_ALIGN(element_array,
3230 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3231 }
3232
3233 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3234 queue_group = &ctrl_info->queue_groups[i];
3235 queue_group->oq_element_array = element_array;
3236 queue_group->oq_element_array_bus_addr =
3237 ctrl_info->queue_memory_base_dma_handle +
3238 (element_array - ctrl_info->queue_memory_base);
3239 element_array += element_array_length_per_oq;
3240 element_array = PTR_ALIGN(element_array,
3241 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3242 }
3243
3244 ctrl_info->event_queue.oq_element_array = element_array;
3245 ctrl_info->event_queue.oq_element_array_bus_addr =
3246 ctrl_info->queue_memory_base_dma_handle +
3247 (element_array - ctrl_info->queue_memory_base);
3248 element_array += PQI_NUM_EVENT_QUEUE_ELEMENTS *
3249 PQI_EVENT_OQ_ELEMENT_LENGTH;
3250
3251 next_queue_index = PTR_ALIGN(element_array,
3252 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3253
3254 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3255 queue_group = &ctrl_info->queue_groups[i];
3256 queue_group->iq_ci[RAID_PATH] = next_queue_index;
3257 queue_group->iq_ci_bus_addr[RAID_PATH] =
3258 ctrl_info->queue_memory_base_dma_handle +
3259 (next_queue_index - ctrl_info->queue_memory_base);
3260 next_queue_index += sizeof(pqi_index_t);
3261 next_queue_index = PTR_ALIGN(next_queue_index,
3262 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3263 queue_group->iq_ci[AIO_PATH] = next_queue_index;
3264 queue_group->iq_ci_bus_addr[AIO_PATH] =
3265 ctrl_info->queue_memory_base_dma_handle +
3266 (next_queue_index - ctrl_info->queue_memory_base);
3267 next_queue_index += sizeof(pqi_index_t);
3268 next_queue_index = PTR_ALIGN(next_queue_index,
3269 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3270 queue_group->oq_pi = next_queue_index;
3271 queue_group->oq_pi_bus_addr =
3272 ctrl_info->queue_memory_base_dma_handle +
3273 (next_queue_index - ctrl_info->queue_memory_base);
3274 next_queue_index += sizeof(pqi_index_t);
3275 next_queue_index = PTR_ALIGN(next_queue_index,
3276 PQI_OPERATIONAL_INDEX_ALIGNMENT);
3277 }
3278
3279 ctrl_info->event_queue.oq_pi = next_queue_index;
3280 ctrl_info->event_queue.oq_pi_bus_addr =
3281 ctrl_info->queue_memory_base_dma_handle +
3282 (next_queue_index - ctrl_info->queue_memory_base);
3283
3284 return 0;
3285}
3286
3287static void pqi_init_operational_queues(struct pqi_ctrl_info *ctrl_info)
3288{
3289 unsigned int i;
3290 u16 next_iq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3291 u16 next_oq_id = PQI_MIN_OPERATIONAL_QUEUE_ID;
3292
3293 /*
3294 * Initialize the backpointers to the controller structure in
3295 * each operational queue group structure.
3296 */
3297 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3298 ctrl_info->queue_groups[i].ctrl_info = ctrl_info;
3299
3300 /*
3301 * Assign IDs to all operational queues. Note that the IDs
3302 * assigned to operational IQs are independent of the IDs
3303 * assigned to operational OQs.
3304 */
3305 ctrl_info->event_queue.oq_id = next_oq_id++;
3306 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3307 ctrl_info->queue_groups[i].iq_id[RAID_PATH] = next_iq_id++;
3308 ctrl_info->queue_groups[i].iq_id[AIO_PATH] = next_iq_id++;
3309 ctrl_info->queue_groups[i].oq_id = next_oq_id++;
3310 }
3311
3312 /*
3313 * Assign MSI-X table entry indexes to all queues. Note that the
3314 * interrupt for the event queue is shared with the first queue group.
3315 */
3316 ctrl_info->event_queue.int_msg_num = 0;
3317 for (i = 0; i < ctrl_info->num_queue_groups; i++)
3318 ctrl_info->queue_groups[i].int_msg_num = i;
3319
3320 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
3321 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[0]);
3322 spin_lock_init(&ctrl_info->queue_groups[i].submit_lock[1]);
3323 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[0]);
3324 INIT_LIST_HEAD(&ctrl_info->queue_groups[i].request_list[1]);
3325 }
3326}
3327
3328static int pqi_alloc_admin_queues(struct pqi_ctrl_info *ctrl_info)
3329{
3330 size_t alloc_length;
3331 struct pqi_admin_queues_aligned *admin_queues_aligned;
3332 struct pqi_admin_queues *admin_queues;
3333
3334 alloc_length = sizeof(struct pqi_admin_queues_aligned) +
3335 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT;
3336
3337 ctrl_info->admin_queue_memory_base =
3338 dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
3339 alloc_length,
3340 &ctrl_info->admin_queue_memory_base_dma_handle,
3341 GFP_KERNEL);
3342
3343 if (!ctrl_info->admin_queue_memory_base)
3344 return -ENOMEM;
3345
3346 ctrl_info->admin_queue_memory_length = alloc_length;
3347
3348 admin_queues = &ctrl_info->admin_queues;
3349 admin_queues_aligned = PTR_ALIGN(ctrl_info->admin_queue_memory_base,
3350 PQI_QUEUE_ELEMENT_ARRAY_ALIGNMENT);
3351 admin_queues->iq_element_array =
3352 &admin_queues_aligned->iq_element_array;
3353 admin_queues->oq_element_array =
3354 &admin_queues_aligned->oq_element_array;
3355 admin_queues->iq_ci = &admin_queues_aligned->iq_ci;
3356 admin_queues->oq_pi = &admin_queues_aligned->oq_pi;
3357
3358 admin_queues->iq_element_array_bus_addr =
3359 ctrl_info->admin_queue_memory_base_dma_handle +
3360 (admin_queues->iq_element_array -
3361 ctrl_info->admin_queue_memory_base);
3362 admin_queues->oq_element_array_bus_addr =
3363 ctrl_info->admin_queue_memory_base_dma_handle +
3364 (admin_queues->oq_element_array -
3365 ctrl_info->admin_queue_memory_base);
3366 admin_queues->iq_ci_bus_addr =
3367 ctrl_info->admin_queue_memory_base_dma_handle +
3368 ((void *)admin_queues->iq_ci -
3369 ctrl_info->admin_queue_memory_base);
3370 admin_queues->oq_pi_bus_addr =
3371 ctrl_info->admin_queue_memory_base_dma_handle +
3372 ((void *)admin_queues->oq_pi -
3373 ctrl_info->admin_queue_memory_base);
3374
3375 return 0;
3376}
3377
3378#define PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES HZ
3379#define PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS 1
3380
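/*
 * Program the admin queue base addresses and element counts, issue the
 * "create admin queue pair" function code, and poll function_and_status_code
 * until the controller returns to IDLE.
 */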
3381static int pqi_create_admin_queues(struct pqi_ctrl_info *ctrl_info)
3382{
3383 struct pqi_device_registers __iomem *pqi_registers;
3384 struct pqi_admin_queues *admin_queues;
3385 unsigned long timeout;
3386 u8 status;
3387 u32 reg;
3388
3389 pqi_registers = ctrl_info->pqi_registers;
3390 admin_queues = &ctrl_info->admin_queues;
3391
3392 writeq((u64)admin_queues->iq_element_array_bus_addr,
3393 &pqi_registers->admin_iq_element_array_addr);
3394 writeq((u64)admin_queues->oq_element_array_bus_addr,
3395 &pqi_registers->admin_oq_element_array_addr);
3396 writeq((u64)admin_queues->iq_ci_bus_addr,
3397 &pqi_registers->admin_iq_ci_addr);
3398 writeq((u64)admin_queues->oq_pi_bus_addr,
3399 &pqi_registers->admin_oq_pi_addr);
3400
3401 reg = PQI_ADMIN_IQ_NUM_ELEMENTS |
3402 (PQI_ADMIN_OQ_NUM_ELEMENTS) << 8 |
3403 (admin_queues->int_msg_num << 16);
3404 writel(reg, &pqi_registers->admin_iq_num_elements);
3405 writel(PQI_CREATE_ADMIN_QUEUE_PAIR,
3406 &pqi_registers->function_and_status_code);
3407
3408 timeout = PQI_ADMIN_QUEUE_CREATE_TIMEOUT_JIFFIES + jiffies;
3409 while (1) {
3410 status = readb(&pqi_registers->function_and_status_code);
3411 if (status == PQI_STATUS_IDLE)
3412 break;
3413 if (time_after(jiffies, timeout))
3414 return -ETIMEDOUT;
3415 msleep(PQI_ADMIN_QUEUE_CREATE_POLL_INTERVAL_MSECS);
3416 }
3417
3418 /*
3419 * The offset registers are not initialized to the correct
3420 * offsets until *after* the create admin queue pair command
3421 * completes successfully.
3422 */
3423 admin_queues->iq_pi = ctrl_info->iomem_base +
3424 PQI_DEVICE_REGISTERS_OFFSET +
3425 readq(&pqi_registers->admin_iq_pi_offset);
3426 admin_queues->oq_ci = ctrl_info->iomem_base +
3427 PQI_DEVICE_REGISTERS_OFFSET +
3428 readq(&pqi_registers->admin_oq_ci_offset);
3429
3430 return 0;
3431}
3432
3433static void pqi_submit_admin_request(struct pqi_ctrl_info *ctrl_info,
3434 struct pqi_general_admin_request *request)
3435{
3436 struct pqi_admin_queues *admin_queues;
3437 void *next_element;
3438 pqi_index_t iq_pi;
3439
3440 admin_queues = &ctrl_info->admin_queues;
3441 iq_pi = admin_queues->iq_pi_copy;
3442
3443 next_element = admin_queues->iq_element_array +
3444 (iq_pi * PQI_ADMIN_IQ_ELEMENT_LENGTH);
3445
3446 memcpy(next_element, request, sizeof(*request));
3447
3448 iq_pi = (iq_pi + 1) % PQI_ADMIN_IQ_NUM_ELEMENTS;
3449 admin_queues->iq_pi_copy = iq_pi;
3450
3451 /*
3452 * This write notifies the controller that an IU is available to be
3453 * processed.
3454 */
3455 writel(iq_pi, admin_queues->iq_pi);
3456}
3457
3458static int pqi_poll_for_admin_response(struct pqi_ctrl_info *ctrl_info,
3459 struct pqi_general_admin_response *response)
3460{
3461 struct pqi_admin_queues *admin_queues;
3462 pqi_index_t oq_pi;
3463 pqi_index_t oq_ci;
3464 unsigned long timeout;
3465
3466 admin_queues = &ctrl_info->admin_queues;
3467 oq_ci = admin_queues->oq_ci_copy;
3468
3469 timeout = (3 * HZ) + jiffies;
3470
3471 while (1) {
3472 oq_pi = *admin_queues->oq_pi;
3473 if (oq_pi != oq_ci)
3474 break;
3475 if (time_after(jiffies, timeout)) {
3476 dev_err(&ctrl_info->pci_dev->dev,
3477 "timed out waiting for admin response\n");
3478 return -ETIMEDOUT;
3479 }
3480 usleep_range(1000, 2000);
3481 }
3482
3483 memcpy(response, admin_queues->oq_element_array +
3484 (oq_ci * PQI_ADMIN_OQ_ELEMENT_LENGTH), sizeof(*response));
3485
3486 oq_ci = (oq_ci + 1) % PQI_ADMIN_OQ_NUM_ELEMENTS;
3487 admin_queues->oq_ci_copy = oq_ci;
3488 writel(oq_ci, admin_queues->oq_ci);
3489
3490 return 0;
3491}
3492
3493static void pqi_start_io(struct pqi_ctrl_info *ctrl_info,
3494 struct pqi_queue_group *queue_group, enum pqi_io_path path,
3495 struct pqi_io_request *io_request)
3496{
3497 struct pqi_io_request *next;
3498 void *next_element;
3499 pqi_index_t iq_pi;
3500 pqi_index_t iq_ci;
3501 size_t iu_length;
3502 unsigned long flags;
3503 unsigned int num_elements_needed;
3504 unsigned int num_elements_to_end_of_queue;
3505 size_t copy_count;
3506 struct pqi_iu_header *request;
3507
3508 spin_lock_irqsave(&queue_group->submit_lock[path], flags);
3509
3510 if (io_request)
3511 list_add_tail(&io_request->request_list_entry,
3512 &queue_group->request_list[path]);
3513
3514 iq_pi = queue_group->iq_pi_copy[path];
3515
3516 list_for_each_entry_safe(io_request, next,
3517 &queue_group->request_list[path], request_list_entry) {
3518
3519 request = io_request->iu;
3520
3521 iu_length = get_unaligned_le16(&request->iu_length) +
3522 PQI_REQUEST_HEADER_LENGTH;
3523 num_elements_needed =
3524 DIV_ROUND_UP(iu_length,
3525 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3526
3527 iq_ci = *queue_group->iq_ci[path];
3528
3529 if (num_elements_needed > pqi_num_elements_free(iq_pi, iq_ci,
3530 ctrl_info->num_elements_per_iq))
3531 break;
3532
3533 put_unaligned_le16(queue_group->oq_id,
3534 &request->response_queue_id);
3535
3536 next_element = queue_group->iq_element_array[path] +
3537 (iq_pi * PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3538
3539 num_elements_to_end_of_queue =
3540 ctrl_info->num_elements_per_iq - iq_pi;
3541
3542 if (num_elements_needed <= num_elements_to_end_of_queue) {
3543 memcpy(next_element, request, iu_length);
3544 } else {
3545 copy_count = num_elements_to_end_of_queue *
3546 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
3547 memcpy(next_element, request, copy_count);
3548 memcpy(queue_group->iq_element_array[path],
3549 (u8 *)request + copy_count,
3550 iu_length - copy_count);
3551 }
3552
3553 iq_pi = (iq_pi + num_elements_needed) %
3554 ctrl_info->num_elements_per_iq;
3555
3556 list_del(&io_request->request_list_entry);
3557 }
3558
3559 if (iq_pi != queue_group->iq_pi_copy[path]) {
3560 queue_group->iq_pi_copy[path] = iq_pi;
3561 /*
3562 * This write notifies the controller that one or more IUs are
3563 * available to be processed.
3564 */
3565 writel(iq_pi, queue_group->iq_pi[path]);
3566 }
3567
3568 spin_unlock_irqrestore(&queue_group->submit_lock[path], flags);
3569}
3570
3571#define PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS 10
3572
3573static int pqi_wait_for_completion_io(struct pqi_ctrl_info *ctrl_info,
3574 struct completion *wait)
3575{
3576 int rc;
3577
3578 while (1) {
3579 if (wait_for_completion_io_timeout(wait,
3580 PQI_WAIT_FOR_COMPLETION_IO_TIMEOUT_SECS * HZ)) {
3581 rc = 0;
3582 break;
3583 }
3584
3585 pqi_check_ctrl_health(ctrl_info);
3586 if (pqi_ctrl_offline(ctrl_info)) {
3587 rc = -ENXIO;
3588 break;
3589 }
3590 }
3591
3592 return rc;
3593}
3594
3595static void pqi_raid_synchronous_complete(struct pqi_io_request *io_request,
3596 void *context)
3597{
3598 struct completion *waiting = context;
3599
3600 complete(waiting);
3601}
3602
3603static int pqi_submit_raid_request_synchronous_with_io_request(
3604 struct pqi_ctrl_info *ctrl_info, struct pqi_io_request *io_request,
3605 unsigned long timeout_msecs)
3606{
3607 int rc = 0;
3608 DECLARE_COMPLETION_ONSTACK(wait);
3609
3610 io_request->io_complete_callback = pqi_raid_synchronous_complete;
3611 io_request->context = &wait;
3612
3613 pqi_start_io(ctrl_info,
3614 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
3615 io_request);
3616
3617 if (timeout_msecs == NO_TIMEOUT) {
3618 pqi_wait_for_completion_io(ctrl_info, &wait);
3619 } else {
3620 if (!wait_for_completion_io_timeout(&wait,
3621 msecs_to_jiffies(timeout_msecs))) {
3622 dev_warn(&ctrl_info->pci_dev->dev,
3623 "command timed out\n");
3624 rc = -ETIMEDOUT;
3625 }
3626 }
3627
3628 return rc;
3629}
3630
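/*
 * Send a RAID-path IU synchronously.  The sync_request_sem semaphore
 * bounds the number of concurrent synchronous requests; when a timeout is
 * supplied, time spent waiting for the semaphore or for a blocked
 * controller is subtracted from the remaining budget before the request
 * is actually issued.
 */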
3631static int pqi_submit_raid_request_synchronous(struct pqi_ctrl_info *ctrl_info,
3632 struct pqi_iu_header *request, unsigned int flags,
3633 struct pqi_raid_error_info *error_info, unsigned long timeout_msecs)
3634{
3635 int rc;
3636 struct pqi_io_request *io_request;
3637 unsigned long start_jiffies;
3638 unsigned long msecs_blocked;
3639 size_t iu_length;
3640
3641 /*
3642 * Note that specifying PQI_SYNC_FLAGS_INTERRUPTABLE and a timeout value
3643 * are mutually exclusive.
3644 */
3645
3646 if (flags & PQI_SYNC_FLAGS_INTERRUPTABLE) {
3647 if (down_interruptible(&ctrl_info->sync_request_sem))
3648 return -ERESTARTSYS;
3649 } else {
3650 if (timeout_msecs == NO_TIMEOUT) {
3651 down(&ctrl_info->sync_request_sem);
3652 } else {
3653 start_jiffies = jiffies;
3654 if (down_timeout(&ctrl_info->sync_request_sem,
3655 msecs_to_jiffies(timeout_msecs)))
3656 return -ETIMEDOUT;
3657 msecs_blocked =
3658 jiffies_to_msecs(jiffies - start_jiffies);
3659 if (msecs_blocked >= timeout_msecs)
3660 return -ETIMEDOUT;
3661 timeout_msecs -= msecs_blocked;
3662 }
3663 }
3664
3665 pqi_ctrl_busy(ctrl_info);
3666 timeout_msecs = pqi_wait_if_ctrl_blocked(ctrl_info, timeout_msecs);
3667 if (timeout_msecs == 0) {
3668 rc = -ETIMEDOUT;
3669 goto out;
3670 }
3671
3672 io_request = pqi_alloc_io_request(ctrl_info);
3673
3674 put_unaligned_le16(io_request->index,
3675 &(((struct pqi_raid_path_request *)request)->request_id));
3676
3677 if (request->iu_type == PQI_REQUEST_IU_RAID_PATH_IO)
3678 ((struct pqi_raid_path_request *)request)->error_index =
3679 ((struct pqi_raid_path_request *)request)->request_id;
3680
3681 iu_length = get_unaligned_le16(&request->iu_length) +
3682 PQI_REQUEST_HEADER_LENGTH;
3683 memcpy(io_request->iu, request, iu_length);
3684
3685 rc = pqi_submit_raid_request_synchronous_with_io_request(ctrl_info,
3686 io_request, timeout_msecs);
3687
3688 if (error_info) {
3689 if (io_request->error_info)
3690 memcpy(error_info, io_request->error_info,
3691 sizeof(*error_info));
3692 else
3693 memset(error_info, 0, sizeof(*error_info));
3694 } else if (rc == 0 && io_request->error_info) {
3695 u8 scsi_status;
3696 struct pqi_raid_error_info *raid_error_info;
3697
3698 raid_error_info = io_request->error_info;
3699 scsi_status = raid_error_info->status;
3700
3701 if (scsi_status == SAM_STAT_CHECK_CONDITION &&
3702 raid_error_info->data_out_result ==
3703 PQI_DATA_IN_OUT_UNDERFLOW)
3704 scsi_status = SAM_STAT_GOOD;
3705
3706 if (scsi_status != SAM_STAT_GOOD)
3707 rc = -EIO;
3708 }
3709
3710 pqi_free_io_request(io_request);
3711
3712out:
3713 pqi_ctrl_unbusy(ctrl_info);
3714 up(&ctrl_info->sync_request_sem);
3715
3716 return rc;
3717}
3718
3719static int pqi_validate_admin_response(
3720 struct pqi_general_admin_response *response, u8 expected_function_code)
3721{
3722 if (response->header.iu_type != PQI_RESPONSE_IU_GENERAL_ADMIN)
3723 return -EINVAL;
3724
3725 if (get_unaligned_le16(&response->header.iu_length) !=
3726 PQI_GENERAL_ADMIN_IU_LENGTH)
3727 return -EINVAL;
3728
3729 if (response->function_code != expected_function_code)
3730 return -EINVAL;
3731
3732 if (response->status != PQI_GENERAL_ADMIN_STATUS_SUCCESS)
3733 return -EINVAL;
3734
3735 return 0;
3736}
3737
3738static int pqi_submit_admin_request_synchronous(
3739 struct pqi_ctrl_info *ctrl_info,
3740 struct pqi_general_admin_request *request,
3741 struct pqi_general_admin_response *response)
3742{
3743 int rc;
3744
3745 pqi_submit_admin_request(ctrl_info, request);
3746
3747 rc = pqi_poll_for_admin_response(ctrl_info, response);
3748
3749 if (rc == 0)
3750 rc = pqi_validate_admin_response(response,
3751 request->function_code);
3752
3753 return rc;
3754}
3755
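/*
 * Issue the REPORT DEVICE CAPABILITY admin command and cache the
 * controller's queue limits (queue counts, elements per queue, element
 * lengths) along with the SOP IU-layer limits that later resource
 * calculations are based on.
 */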
3756static int pqi_report_device_capability(struct pqi_ctrl_info *ctrl_info)
3757{
3758 int rc;
3759 struct pqi_general_admin_request request;
3760 struct pqi_general_admin_response response;
3761 struct pqi_device_capability *capability;
3762 struct pqi_iu_layer_descriptor *sop_iu_layer_descriptor;
3763
3764 capability = kmalloc(sizeof(*capability), GFP_KERNEL);
3765 if (!capability)
3766 return -ENOMEM;
3767
3768 memset(&request, 0, sizeof(request));
3769
3770 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3771 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3772 &request.header.iu_length);
3773 request.function_code =
3774 PQI_GENERAL_ADMIN_FUNCTION_REPORT_DEVICE_CAPABILITY;
3775 put_unaligned_le32(sizeof(*capability),
3776 &request.data.report_device_capability.buffer_length);
3777
3778 rc = pqi_map_single(ctrl_info->pci_dev,
3779 &request.data.report_device_capability.sg_descriptor,
3780 capability, sizeof(*capability),
3781 PCI_DMA_FROMDEVICE);
3782 if (rc)
3783 goto out;
3784
3785 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3786 &response);
3787
3788 pqi_pci_unmap(ctrl_info->pci_dev,
3789 &request.data.report_device_capability.sg_descriptor, 1,
3790 PCI_DMA_FROMDEVICE);
3791
3792 if (rc)
3793 goto out;
3794
3795 if (response.status != PQI_GENERAL_ADMIN_STATUS_SUCCESS) {
3796 rc = -EIO;
3797 goto out;
3798 }
3799
3800 ctrl_info->max_inbound_queues =
3801 get_unaligned_le16(&capability->max_inbound_queues);
3802 ctrl_info->max_elements_per_iq =
3803 get_unaligned_le16(&capability->max_elements_per_iq);
3804 ctrl_info->max_iq_element_length =
3805 get_unaligned_le16(&capability->max_iq_element_length)
3806 * 16;
3807 ctrl_info->max_outbound_queues =
3808 get_unaligned_le16(&capability->max_outbound_queues);
3809 ctrl_info->max_elements_per_oq =
3810 get_unaligned_le16(&capability->max_elements_per_oq);
3811 ctrl_info->max_oq_element_length =
3812 get_unaligned_le16(&capability->max_oq_element_length)
3813 * 16;
3814
3815 sop_iu_layer_descriptor =
3816 &capability->iu_layer_descriptors[PQI_PROTOCOL_SOP];
3817
3818 ctrl_info->max_inbound_iu_length_per_firmware =
3819 get_unaligned_le16(
3820 &sop_iu_layer_descriptor->max_inbound_iu_length);
3821 ctrl_info->inbound_spanning_supported =
3822 sop_iu_layer_descriptor->inbound_spanning_supported;
3823 ctrl_info->outbound_spanning_supported =
3824 sop_iu_layer_descriptor->outbound_spanning_supported;
3825
3826out:
3827 kfree(capability);
3828
3829 return rc;
3830}
3831
3832static int pqi_validate_device_capability(struct pqi_ctrl_info *ctrl_info)
3833{
3834 if (ctrl_info->max_iq_element_length <
3835 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3836 dev_err(&ctrl_info->pci_dev->dev,
3837 "max. inbound queue element length of %d is less than the required length of %d\n",
3838 ctrl_info->max_iq_element_length,
3839 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3840 return -EINVAL;
3841 }
3842
3843 if (ctrl_info->max_oq_element_length <
3844 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH) {
3845 dev_err(&ctrl_info->pci_dev->dev,
3846 "max. outbound queue element length of %d is less than the required length of %d\n",
3847 ctrl_info->max_oq_element_length,
3848 PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
3849 return -EINVAL;
3850 }
3851
3852 if (ctrl_info->max_inbound_iu_length_per_firmware <
3853 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) {
3854 dev_err(&ctrl_info->pci_dev->dev,
3855 "max. inbound IU length of %u is less than the min. required length of %d\n",
3856 ctrl_info->max_inbound_iu_length_per_firmware,
3857 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
3858 return -EINVAL;
3859 }
3860
3861 if (!ctrl_info->inbound_spanning_supported) {
3862 dev_err(&ctrl_info->pci_dev->dev,
3863 "the controller does not support inbound spanning\n");
3864 return -EINVAL;
3865 }
3866
3867 if (ctrl_info->outbound_spanning_supported) {
3868 dev_err(&ctrl_info->pci_dev->dev,
3869 "the controller supports outbound spanning but this driver does not\n");
3870 return -EINVAL;
3871 }
3872
3873 return 0;
3874}
3875
3876static int pqi_delete_operational_queue(struct pqi_ctrl_info *ctrl_info,
3877 bool inbound_queue, u16 queue_id)
3878{
3879 struct pqi_general_admin_request request;
3880 struct pqi_general_admin_response response;
3881
3882 memset(&request, 0, sizeof(request));
3883 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3884 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3885 &request.header.iu_length);
3886 if (inbound_queue)
3887 request.function_code =
3888 PQI_GENERAL_ADMIN_FUNCTION_DELETE_IQ;
3889 else
3890 request.function_code =
3891 PQI_GENERAL_ADMIN_FUNCTION_DELETE_OQ;
3892 put_unaligned_le16(queue_id,
3893 &request.data.delete_operational_queue.queue_id);
3894
3895 return pqi_submit_admin_request_synchronous(ctrl_info, &request,
3896 &response);
3897}
3898
3899static int pqi_create_event_queue(struct pqi_ctrl_info *ctrl_info)
3900{
3901 int rc;
3902 struct pqi_event_queue *event_queue;
3903 struct pqi_general_admin_request request;
3904 struct pqi_general_admin_response response;
3905
3906 event_queue = &ctrl_info->event_queue;
3907
3908 /*
3909 * Create OQ (Outbound Queue - device to host queue) dedicated
3910 * to event notifications.
3911 */
3912 memset(&request, 0, sizeof(request));
3913 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3914 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3915 &request.header.iu_length);
3916 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
3917 put_unaligned_le16(event_queue->oq_id,
3918 &request.data.create_operational_oq.queue_id);
3919 put_unaligned_le64((u64)event_queue->oq_element_array_bus_addr,
3920 &request.data.create_operational_oq.element_array_addr);
3921 put_unaligned_le64((u64)event_queue->oq_pi_bus_addr,
3922 &request.data.create_operational_oq.pi_addr);
3923 put_unaligned_le16(PQI_NUM_EVENT_QUEUE_ELEMENTS,
3924 &request.data.create_operational_oq.num_elements);
3925 put_unaligned_le16(PQI_EVENT_OQ_ELEMENT_LENGTH / 16,
3926 &request.data.create_operational_oq.element_length);
3927 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
3928 put_unaligned_le16(event_queue->int_msg_num,
3929 &request.data.create_operational_oq.int_msg_num);
3930
3931 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3932 &response);
3933 if (rc)
3934 return rc;
3935
3936 event_queue->oq_ci = ctrl_info->iomem_base +
3937 PQI_DEVICE_REGISTERS_OFFSET +
3938 get_unaligned_le64(
3939 &response.data.create_operational_oq.oq_ci_offset);
3940
3941 return 0;
3942}
3943
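/*
 * Bring up one queue group: a RAID-path inbound queue, an AIO-path
 * inbound queue (marked as such with the change-IQ-property command) and
 * a shared outbound queue bound to the group's interrupt message number.
 * On failure, any inbound queues already created are deleted again.
 */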
3944static int pqi_create_queue_group(struct pqi_ctrl_info *ctrl_info,
3945 unsigned int group_number)
3946{
3947 int rc;
3948 struct pqi_queue_group *queue_group;
3949 struct pqi_general_admin_request request;
3950 struct pqi_general_admin_response response;
3951
3952 queue_group = &ctrl_info->queue_groups[group_number];
3953
3954 /*
3955 * Create IQ (Inbound Queue - host to device queue) for
3956 * RAID path.
3957 */
3958 memset(&request, 0, sizeof(request));
3959 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3960 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3961 &request.header.iu_length);
3962 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3963 put_unaligned_le16(queue_group->iq_id[RAID_PATH],
3964 &request.data.create_operational_iq.queue_id);
3965 put_unaligned_le64(
3966 (u64)queue_group->iq_element_array_bus_addr[RAID_PATH],
3967 &request.data.create_operational_iq.element_array_addr);
3968 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[RAID_PATH],
3969 &request.data.create_operational_iq.ci_addr);
3970 put_unaligned_le16(ctrl_info->num_elements_per_iq,
3971 &request.data.create_operational_iq.num_elements);
3972 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
3973 &request.data.create_operational_iq.element_length);
3974 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
3975
3976 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
3977 &response);
3978 if (rc) {
3979 dev_err(&ctrl_info->pci_dev->dev,
3980 "error creating inbound RAID queue\n");
3981 return rc;
3982 }
3983
3984 queue_group->iq_pi[RAID_PATH] = ctrl_info->iomem_base +
3985 PQI_DEVICE_REGISTERS_OFFSET +
3986 get_unaligned_le64(
3987 &response.data.create_operational_iq.iq_pi_offset);
3988
3989 /*
3990 * Create IQ (Inbound Queue - host to device queue) for
3991 * Advanced I/O (AIO) path.
3992 */
3993 memset(&request, 0, sizeof(request));
3994 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
3995 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
3996 &request.header.iu_length);
3997 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_IQ;
3998 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
3999 &request.data.create_operational_iq.queue_id);
4000 put_unaligned_le64((u64)queue_group->
4001 iq_element_array_bus_addr[AIO_PATH],
4002 &request.data.create_operational_iq.element_array_addr);
4003 put_unaligned_le64((u64)queue_group->iq_ci_bus_addr[AIO_PATH],
4004 &request.data.create_operational_iq.ci_addr);
4005 put_unaligned_le16(ctrl_info->num_elements_per_iq,
4006 &request.data.create_operational_iq.num_elements);
4007 put_unaligned_le16(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH / 16,
4008 &request.data.create_operational_iq.element_length);
4009 request.data.create_operational_iq.queue_protocol = PQI_PROTOCOL_SOP;
4010
4011 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4012 &response);
4013 if (rc) {
4014 dev_err(&ctrl_info->pci_dev->dev,
4015 "error creating inbound AIO queue\n");
4016 goto delete_inbound_queue_raid;
4017 }
4018
4019 queue_group->iq_pi[AIO_PATH] = ctrl_info->iomem_base +
4020 PQI_DEVICE_REGISTERS_OFFSET +
4021 get_unaligned_le64(
4022 &response.data.create_operational_iq.iq_pi_offset);
4023
4024 /*
4025 * Designate the 2nd IQ as the AIO path. By default, all IQs are
4026 * assumed to be for RAID path I/O unless we change the queue's
4027 * property.
4028 */
4029 memset(&request, 0, sizeof(request));
4030 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4031 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4032 &request.header.iu_length);
4033 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CHANGE_IQ_PROPERTY;
4034 put_unaligned_le16(queue_group->iq_id[AIO_PATH],
4035 &request.data.change_operational_iq_properties.queue_id);
4036 put_unaligned_le32(PQI_IQ_PROPERTY_IS_AIO_QUEUE,
4037 &request.data.change_operational_iq_properties.vendor_specific);
4038
4039 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4040 &response);
4041 if (rc) {
4042 dev_err(&ctrl_info->pci_dev->dev,
4043 "error changing queue property\n");
4044 goto delete_inbound_queue_aio;
4045 }
4046
4047 /*
4048 * Create OQ (Outbound Queue - device to host queue).
4049 */
4050 memset(&request, 0, sizeof(request));
4051 request.header.iu_type = PQI_REQUEST_IU_GENERAL_ADMIN;
4052 put_unaligned_le16(PQI_GENERAL_ADMIN_IU_LENGTH,
4053 &request.header.iu_length);
4054 request.function_code = PQI_GENERAL_ADMIN_FUNCTION_CREATE_OQ;
4055 put_unaligned_le16(queue_group->oq_id,
4056 &request.data.create_operational_oq.queue_id);
4057 put_unaligned_le64((u64)queue_group->oq_element_array_bus_addr,
4058 &request.data.create_operational_oq.element_array_addr);
4059 put_unaligned_le64((u64)queue_group->oq_pi_bus_addr,
4060 &request.data.create_operational_oq.pi_addr);
4061 put_unaligned_le16(ctrl_info->num_elements_per_oq,
4062 &request.data.create_operational_oq.num_elements);
4063 put_unaligned_le16(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH / 16,
4064 &request.data.create_operational_oq.element_length);
4065 request.data.create_operational_oq.queue_protocol = PQI_PROTOCOL_SOP;
4066 put_unaligned_le16(queue_group->int_msg_num,
4067 &request.data.create_operational_oq.int_msg_num);
4068
4069 rc = pqi_submit_admin_request_synchronous(ctrl_info, &request,
4070 &response);
4071 if (rc) {
4072 dev_err(&ctrl_info->pci_dev->dev,
4073 "error creating outbound queue\n");
4074 goto delete_inbound_queue_aio;
4075 }
4076
4077 queue_group->oq_ci = ctrl_info->iomem_base +
4078 PQI_DEVICE_REGISTERS_OFFSET +
4079 get_unaligned_le64(
4080 &response.data.create_operational_oq.oq_ci_offset);
4081
4082 return 0;
4083
4084delete_inbound_queue_aio:
4085 pqi_delete_operational_queue(ctrl_info, true,
4086 queue_group->iq_id[AIO_PATH]);
4087
4088delete_inbound_queue_raid:
4089 pqi_delete_operational_queue(ctrl_info, true,
4090 queue_group->iq_id[RAID_PATH]);
4091
4092 return rc;
4093}
4094
4095static int pqi_create_queues(struct pqi_ctrl_info *ctrl_info)
4096{
4097 int rc;
4098 unsigned int i;
4099
4100 rc = pqi_create_event_queue(ctrl_info);
4101 if (rc) {
4102 dev_err(&ctrl_info->pci_dev->dev,
4103 "error creating event queue\n");
4104 return rc;
4105 }
4106
4107 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4108 rc = pqi_create_queue_group(ctrl_info, i);
4109 if (rc) {
4110 dev_err(&ctrl_info->pci_dev->dev,
4111 "error creating queue group number %u/%u\n",
4112 i, ctrl_info->num_queue_groups);
4113 return rc;
4114 }
4115 }
4116
4117 return 0;
4118}
4119
4120#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
4121 (offsetof(struct pqi_event_config, descriptors) + \
4122 (PQI_MAX_EVENT_DESCRIPTORS * sizeof(struct pqi_event_descriptor)))
4123
4124static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
4125 bool enable_events)
4126{
4127 int rc;
4128 unsigned int i;
4129 struct pqi_event_config *event_config;
4130 struct pqi_event_descriptor *event_descriptor;
4131 struct pqi_general_management_request request;
4132
4133 event_config = kmalloc(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4134 GFP_KERNEL);
4135 if (!event_config)
4136 return -ENOMEM;
4137
4138 memset(&request, 0, sizeof(request));
4139
4140 request.header.iu_type = PQI_REQUEST_IU_REPORT_VENDOR_EVENT_CONFIG;
4141 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4142 data.report_event_configuration.sg_descriptors[1]) -
4143 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4144 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4145 &request.data.report_event_configuration.buffer_length);
4146
4147 rc = pqi_map_single(ctrl_info->pci_dev,
4148 request.data.report_event_configuration.sg_descriptors,
4149 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4150 PCI_DMA_FROMDEVICE);
4151 if (rc)
4152 goto out;
4153
4154 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
4155 0, NULL, NO_TIMEOUT);
4156
4157 pqi_pci_unmap(ctrl_info->pci_dev,
4158 request.data.report_event_configuration.sg_descriptors, 1,
4159 PCI_DMA_FROMDEVICE);
4160
4161 if (rc)
4162 goto out;
4163
4164 for (i = 0; i < event_config->num_event_descriptors; i++) {
4165 event_descriptor = &event_config->descriptors[i];
4166 if (enable_events &&
4167 pqi_is_supported_event(event_descriptor->event_type))
4168 put_unaligned_le16(ctrl_info->event_queue.oq_id,
4169 &event_descriptor->oq_id);
4170 else
4171 put_unaligned_le16(0, &event_descriptor->oq_id);
4172 }
4173
4174 memset(&request, 0, sizeof(request));
4175
4176 request.header.iu_type = PQI_REQUEST_IU_SET_VENDOR_EVENT_CONFIG;
4177 put_unaligned_le16(offsetof(struct pqi_general_management_request,
4178 data.report_event_configuration.sg_descriptors[1]) -
4179 PQI_REQUEST_HEADER_LENGTH, &request.header.iu_length);
4180 put_unaligned_le32(PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4181 &request.data.report_event_configuration.buffer_length);
4182
4183 rc = pqi_map_single(ctrl_info->pci_dev,
4184 request.data.report_event_configuration.sg_descriptors,
4185 event_config, PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH,
4186 PCI_DMA_TODEVICE);
4187 if (rc)
4188 goto out;
4189
4190 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header, 0,
4191 NULL, NO_TIMEOUT);
4192
4193 pqi_pci_unmap(ctrl_info->pci_dev,
4194 request.data.report_event_configuration.sg_descriptors, 1,
4195 PCI_DMA_TODEVICE);
4196
4197out:
4198 kfree(event_config);
4199
4200 return rc;
4201}
4202
4203static inline int pqi_enable_events(struct pqi_ctrl_info *ctrl_info)
4204{
4205 return pqi_configure_events(ctrl_info, true);
4206}
4207
4208static inline int pqi_disable_events(struct pqi_ctrl_info *ctrl_info)
4209{
4210 return pqi_configure_events(ctrl_info, false);
4211}
4212
4213static void pqi_free_all_io_requests(struct pqi_ctrl_info *ctrl_info)
4214{
4215 unsigned int i;
4216 struct device *dev;
4217 size_t sg_chain_buffer_length;
4218 struct pqi_io_request *io_request;
4219
4220 if (!ctrl_info->io_request_pool)
4221 return;
4222
4223 dev = &ctrl_info->pci_dev->dev;
4224 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4225 io_request = ctrl_info->io_request_pool;
4226
4227 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4228 kfree(io_request->iu);
4229 if (!io_request->sg_chain_buffer)
4230 break;
4231 dma_free_coherent(dev, sg_chain_buffer_length,
4232 io_request->sg_chain_buffer,
4233 io_request->sg_chain_buffer_dma_handle);
4234 io_request++;
4235 }
4236
4237 kfree(ctrl_info->io_request_pool);
4238 ctrl_info->io_request_pool = NULL;
4239}
4240
4241static inline int pqi_alloc_error_buffer(struct pqi_ctrl_info *ctrl_info)
4242{
4243 ctrl_info->error_buffer = dma_zalloc_coherent(&ctrl_info->pci_dev->dev,
4244 ctrl_info->error_buffer_length,
4245 &ctrl_info->error_buffer_dma_handle, GFP_KERNEL);
4246
4247 if (!ctrl_info->error_buffer)
4248 return -ENOMEM;
4249
4250 return 0;
4251}
4252
4253static int pqi_alloc_io_resources(struct pqi_ctrl_info *ctrl_info)
4254{
4255 unsigned int i;
4256 void *sg_chain_buffer;
4257 size_t sg_chain_buffer_length;
4258 dma_addr_t sg_chain_buffer_dma_handle;
4259 struct device *dev;
4260 struct pqi_io_request *io_request;
4261
4262 ctrl_info->io_request_pool = kzalloc(ctrl_info->max_io_slots *
4263 sizeof(ctrl_info->io_request_pool[0]), GFP_KERNEL);
4264
4265 if (!ctrl_info->io_request_pool) {
4266 dev_err(&ctrl_info->pci_dev->dev,
4267 "failed to allocate I/O request pool\n");
4268 goto error;
4269 }
4270
4271 dev = &ctrl_info->pci_dev->dev;
4272 sg_chain_buffer_length = ctrl_info->sg_chain_buffer_length;
4273 io_request = ctrl_info->io_request_pool;
4274
4275 for (i = 0; i < ctrl_info->max_io_slots; i++) {
4276 io_request->iu =
4277 kmalloc(ctrl_info->max_inbound_iu_length, GFP_KERNEL);
4278
4279 if (!io_request->iu) {
4280 dev_err(&ctrl_info->pci_dev->dev,
4281 "failed to allocate IU buffers\n");
4282 goto error;
4283 }
4284
4285 sg_chain_buffer = dma_alloc_coherent(dev,
4286 sg_chain_buffer_length, &sg_chain_buffer_dma_handle,
4287 GFP_KERNEL);
4288
4289 if (!sg_chain_buffer) {
4290 dev_err(&ctrl_info->pci_dev->dev,
4291 "failed to allocate PQI scatter-gather chain buffers\n");
4292 goto error;
4293 }
4294
4295 io_request->index = i;
4296 io_request->sg_chain_buffer = sg_chain_buffer;
4297 io_request->sg_chain_buffer_dma_handle =
4298 sg_chain_buffer_dma_handle;
4299 io_request++;
4300 }
4301
4302 return 0;
4303
4304error:
4305 pqi_free_all_io_requests(ctrl_info);
4306
4307 return -ENOMEM;
4308}
4309
4310/*
4311 * Calculate required resources that are sized based on max. outstanding
4312 * requests and max. transfer size.
4313 */
4314
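/*
 * Worked example with hypothetical values (4 KiB pages, 1 MiB max
 * transfer): 1 MiB / 4 KiB = 256 SG entries, plus 1 for a buffer that is
 * not page aligned = 257; after clamping to the controller's
 * max_sg_entries, the usable transfer size is recomputed as
 * (max_sg_entries - 1) * PAGE_SIZE and max_sectors is derived from that.
 */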
4315static void pqi_calculate_io_resources(struct pqi_ctrl_info *ctrl_info)
4316{
4317 u32 max_transfer_size;
4318 u32 max_sg_entries;
4319
4320 ctrl_info->scsi_ml_can_queue =
4321 ctrl_info->max_outstanding_requests - PQI_RESERVED_IO_SLOTS;
4322 ctrl_info->max_io_slots = ctrl_info->max_outstanding_requests;
4323
4324 ctrl_info->error_buffer_length =
4325 ctrl_info->max_io_slots * PQI_ERROR_BUFFER_ELEMENT_LENGTH;
4326
4327 max_transfer_size =
4328 min(ctrl_info->max_transfer_size, PQI_MAX_TRANSFER_SIZE);
4329
4330 max_sg_entries = max_transfer_size / PAGE_SIZE;
4331
4332 /* +1 to cover when the buffer is not page-aligned. */
4333 max_sg_entries++;
4334
4335 max_sg_entries = min(ctrl_info->max_sg_entries, max_sg_entries);
4336
4337 max_transfer_size = (max_sg_entries - 1) * PAGE_SIZE;
4338
4339 ctrl_info->sg_chain_buffer_length =
4340 (max_sg_entries * sizeof(struct pqi_sg_descriptor)) +
4341 PQI_EXTRA_SGL_MEMORY;
4342 ctrl_info->sg_tablesize = max_sg_entries;
4343 ctrl_info->max_sectors = max_transfer_size / 512;
4344}
4345
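/*
 * Choose the number of queue groups (bounded by the online CPU count,
 * the available MSI-X vectors and the controller's queue limits) and
 * derive the per-queue element counts.  The maximum inbound IU length is
 * rounded down to a whole number of inbound elements so that spanned IUs
 * always end on an element boundary.
 */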
4346static void pqi_calculate_queue_resources(struct pqi_ctrl_info *ctrl_info)
4347{
4348 int num_cpus;
4349 int max_queue_groups;
4350 int num_queue_groups;
4351 u16 num_elements_per_iq;
4352 u16 num_elements_per_oq;
4353
4354 max_queue_groups = min(ctrl_info->max_inbound_queues / 2,
4355 ctrl_info->max_outbound_queues - 1);
4356 max_queue_groups = min(max_queue_groups, PQI_MAX_QUEUE_GROUPS);
4357
4358 num_cpus = num_online_cpus();
4359 num_queue_groups = min(num_cpus, ctrl_info->max_msix_vectors);
4360 num_queue_groups = min(num_queue_groups, max_queue_groups);
4361
4362 ctrl_info->num_queue_groups = num_queue_groups;
4363 ctrl_info->max_hw_queue_index = num_queue_groups - 1;
4364
4365 /*
4366 * Make sure that the max. inbound IU length is an even multiple
4367 * of our inbound element length.
4368 */
4369 ctrl_info->max_inbound_iu_length =
4370 (ctrl_info->max_inbound_iu_length_per_firmware /
4371 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) *
4372 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH;
4373
4374 num_elements_per_iq =
4375 (ctrl_info->max_inbound_iu_length /
4376 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
4377
4378 /* Add one because one element in each queue is unusable. */
4379 num_elements_per_iq++;
4380
4381 num_elements_per_iq = min(num_elements_per_iq,
4382 ctrl_info->max_elements_per_iq);
4383
4384 num_elements_per_oq = ((num_elements_per_iq - 1) * 2) + 1;
4385 num_elements_per_oq = min(num_elements_per_oq,
4386 ctrl_info->max_elements_per_oq);
4387
4388 ctrl_info->num_elements_per_iq = num_elements_per_iq;
4389 ctrl_info->num_elements_per_oq = num_elements_per_oq;
4390
4391 ctrl_info->max_sg_per_iu =
4392 ((ctrl_info->max_inbound_iu_length -
4393 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH) /
4394 sizeof(struct pqi_sg_descriptor)) +
4395 PQI_MAX_EMBEDDED_SG_DESCRIPTORS;
4396}
4397
4398static inline void pqi_set_sg_descriptor(
4399 struct pqi_sg_descriptor *sg_descriptor, struct scatterlist *sg)
4400{
4401 u64 address = (u64)sg_dma_address(sg);
4402 unsigned int length = sg_dma_len(sg);
4403
4404 put_unaligned_le64(address, &sg_descriptor->address);
4405 put_unaligned_le32(length, &sg_descriptor->length);
4406 put_unaligned_le32(0, &sg_descriptor->flags);
4407}
4408
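/*
 * Translate a SCSI command's scatter-gather list into PQI SG descriptors.
 * Up to max_sg_per_iu - 1 descriptors are embedded in the IU itself; if
 * more are needed, the last embedded slot is turned into a CISS_SG_CHAIN
 * descriptor pointing at the request's pre-allocated chain buffer and the
 * remaining entries are written there.  The final descriptor is tagged
 * CISS_SG_LAST.
 */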
4409static int pqi_build_raid_sg_list(struct pqi_ctrl_info *ctrl_info,
4410 struct pqi_raid_path_request *request, struct scsi_cmnd *scmd,
4411 struct pqi_io_request *io_request)
4412{
4413 int i;
4414 u16 iu_length;
4415 int sg_count;
4416 bool chained;
4417 unsigned int num_sg_in_iu;
4418 unsigned int max_sg_per_iu;
4419 struct scatterlist *sg;
4420 struct pqi_sg_descriptor *sg_descriptor;
4421
4422 sg_count = scsi_dma_map(scmd);
4423 if (sg_count < 0)
4424 return sg_count;
4425
4426 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
4427 PQI_REQUEST_HEADER_LENGTH;
4428
4429 if (sg_count == 0)
4430 goto out;
4431
4432 sg = scsi_sglist(scmd);
4433 sg_descriptor = request->sg_descriptors;
4434 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4435 chained = false;
4436 num_sg_in_iu = 0;
4437 i = 0;
4438
4439 while (1) {
4440 pqi_set_sg_descriptor(sg_descriptor, sg);
4441 if (!chained)
4442 num_sg_in_iu++;
4443 i++;
4444 if (i == sg_count)
4445 break;
4446 sg_descriptor++;
4447 if (i == max_sg_per_iu) {
4448 put_unaligned_le64(
4449 (u64)io_request->sg_chain_buffer_dma_handle,
4450 &sg_descriptor->address);
4451 put_unaligned_le32((sg_count - num_sg_in_iu)
4452 * sizeof(*sg_descriptor),
4453 &sg_descriptor->length);
4454 put_unaligned_le32(CISS_SG_CHAIN,
4455 &sg_descriptor->flags);
4456 chained = true;
4457 num_sg_in_iu++;
4458 sg_descriptor = io_request->sg_chain_buffer;
4459 }
4460 sg = sg_next(sg);
4461 }
4462
4463 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4464 request->partial = chained;
4465 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4466
4467out:
4468 put_unaligned_le16(iu_length, &request->header.iu_length);
4469
4470 return 0;
4471}
4472
4473static int pqi_build_aio_sg_list(struct pqi_ctrl_info *ctrl_info,
4474 struct pqi_aio_path_request *request, struct scsi_cmnd *scmd,
4475 struct pqi_io_request *io_request)
4476{
4477 int i;
4478 u16 iu_length;
4479 int sg_count;
4480 bool chained;
4481 unsigned int num_sg_in_iu;
4482 unsigned int max_sg_per_iu;
4483 struct scatterlist *sg;
4484 struct pqi_sg_descriptor *sg_descriptor;
4485
4486 sg_count = scsi_dma_map(scmd);
4487 if (sg_count < 0)
4488 return sg_count;
4489
4490 iu_length = offsetof(struct pqi_aio_path_request, sg_descriptors) -
4491 PQI_REQUEST_HEADER_LENGTH;
4492 num_sg_in_iu = 0;
4493
4494 if (sg_count == 0)
4495 goto out;
4496
4497 sg = scsi_sglist(scmd);
4498 sg_descriptor = request->sg_descriptors;
4499 max_sg_per_iu = ctrl_info->max_sg_per_iu - 1;
4500 chained = false;
4501 i = 0;
4502
4503 while (1) {
4504 pqi_set_sg_descriptor(sg_descriptor, sg);
4505 if (!chained)
4506 num_sg_in_iu++;
4507 i++;
4508 if (i == sg_count)
4509 break;
4510 sg_descriptor++;
4511 if (i == max_sg_per_iu) {
4512 put_unaligned_le64(
4513 (u64)io_request->sg_chain_buffer_dma_handle,
4514 &sg_descriptor->address);
4515 put_unaligned_le32((sg_count - num_sg_in_iu)
4516 * sizeof(*sg_descriptor),
4517 &sg_descriptor->length);
4518 put_unaligned_le32(CISS_SG_CHAIN,
4519 &sg_descriptor->flags);
4520 chained = true;
4521 num_sg_in_iu++;
4522 sg_descriptor = io_request->sg_chain_buffer;
4523 }
4524 sg = sg_next(sg);
4525 }
4526
4527 put_unaligned_le32(CISS_SG_LAST, &sg_descriptor->flags);
4528 request->partial = chained;
4529 iu_length += num_sg_in_iu * sizeof(*sg_descriptor);
4530
4531out:
4532 put_unaligned_le16(iu_length, &request->header.iu_length);
4533 request->num_sg_descriptors = num_sg_in_iu;
4534
4535 return 0;
4536}
4537
4538static void pqi_raid_io_complete(struct pqi_io_request *io_request,
4539 void *context)
4540{
4541 struct scsi_cmnd *scmd;
4542
4543 scmd = io_request->scmd;
4544 pqi_free_io_request(io_request);
4545 scsi_dma_unmap(scmd);
4546 pqi_scsi_done(scmd);
4547}
4548
4549static int pqi_raid_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4550 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4551 struct pqi_queue_group *queue_group)
4552{
4553 int rc;
4554 size_t cdb_length;
4555 struct pqi_io_request *io_request;
4556 struct pqi_raid_path_request *request;
4557
4558 io_request = pqi_alloc_io_request(ctrl_info);
4559 io_request->io_complete_callback = pqi_raid_io_complete;
4560 io_request->scmd = scmd;
4561
4562 scmd->host_scribble = (unsigned char *)io_request;
4563
4564 request = io_request->iu;
4565 memset(request, 0,
4566 offsetof(struct pqi_raid_path_request, sg_descriptors));
4567
4568 request->header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
4569 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4570 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4571 put_unaligned_le16(io_request->index, &request->request_id);
4572 request->error_index = request->request_id;
4573 memcpy(request->lun_number, device->scsi3addr,
4574 sizeof(request->lun_number));
4575
4576 cdb_length = min_t(size_t, scmd->cmd_len, sizeof(request->cdb));
4577 memcpy(request->cdb, scmd->cmnd, cdb_length);
4578
4579 switch (cdb_length) {
4580 case 6:
4581 case 10:
4582 case 12:
4583 case 16:
4584 /* No bytes in the Additional CDB bytes field */
4585 request->additional_cdb_bytes_usage =
4586 SOP_ADDITIONAL_CDB_BYTES_0;
4587 break;
4588 case 20:
4589 /* 4 bytes in the Additional cdb field */
4590 request->additional_cdb_bytes_usage =
4591 SOP_ADDITIONAL_CDB_BYTES_4;
4592 break;
4593 case 24:
4594 /* 8 bytes in the Additional cdb field */
4595 request->additional_cdb_bytes_usage =
4596 SOP_ADDITIONAL_CDB_BYTES_8;
4597 break;
4598 case 28:
4599 /* 12 bytes in the Additional cdb field */
4600 request->additional_cdb_bytes_usage =
4601 SOP_ADDITIONAL_CDB_BYTES_12;
4602 break;
4603 case 32:
4604 default:
4605 /* 16 bytes in the Additional cdb field */
4606 request->additional_cdb_bytes_usage =
4607 SOP_ADDITIONAL_CDB_BYTES_16;
4608 break;
4609 }
4610
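	/*
	 * The SOP data-direction flags are expressed from the controller's
	 * point of view: a host write (DMA_TO_DEVICE) becomes SOP_READ_FLAG
	 * because the controller reads the data from host memory, and a
	 * host read becomes SOP_WRITE_FLAG.
	 */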
4611 switch (scmd->sc_data_direction) {
4612 case DMA_TO_DEVICE:
4613 request->data_direction = SOP_READ_FLAG;
4614 break;
4615 case DMA_FROM_DEVICE:
4616 request->data_direction = SOP_WRITE_FLAG;
4617 break;
4618 case DMA_NONE:
4619 request->data_direction = SOP_NO_DIRECTION_FLAG;
4620 break;
4621 case DMA_BIDIRECTIONAL:
4622 request->data_direction = SOP_BIDIRECTIONAL;
4623 break;
4624 default:
4625 dev_err(&ctrl_info->pci_dev->dev,
4626 "unknown data direction: %d\n",
4627 scmd->sc_data_direction);
4628 break;
4629 }
4630
4631 rc = pqi_build_raid_sg_list(ctrl_info, request, scmd, io_request);
4632 if (rc) {
4633 pqi_free_io_request(io_request);
4634 return SCSI_MLQUEUE_HOST_BUSY;
4635 }
4636
4637 pqi_start_io(ctrl_info, queue_group, RAID_PATH, io_request);
4638
4639 return 0;
4640}
4641
4642static void pqi_aio_io_complete(struct pqi_io_request *io_request,
4643 void *context)
4644{
4645 struct scsi_cmnd *scmd;
4646
4647 scmd = io_request->scmd;
4648 scsi_dma_unmap(scmd);
4649 if (io_request->status == -EAGAIN)
4650 set_host_byte(scmd, DID_IMM_RETRY);
4651 pqi_free_io_request(io_request);
4652 pqi_scsi_done(scmd);
4653}
4654
4655static inline int pqi_aio_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
4656 struct pqi_scsi_dev *device, struct scsi_cmnd *scmd,
4657 struct pqi_queue_group *queue_group)
4658{
4659 return pqi_aio_submit_io(ctrl_info, scmd, device->aio_handle,
4660 scmd->cmnd, scmd->cmd_len, queue_group, NULL);
4661}
4662
4663static int pqi_aio_submit_io(struct pqi_ctrl_info *ctrl_info,
4664 struct scsi_cmnd *scmd, u32 aio_handle, u8 *cdb,
4665 unsigned int cdb_length, struct pqi_queue_group *queue_group,
4666 struct pqi_encryption_info *encryption_info)
4667{
4668 int rc;
4669 struct pqi_io_request *io_request;
4670 struct pqi_aio_path_request *request;
4671
4672 io_request = pqi_alloc_io_request(ctrl_info);
4673 io_request->io_complete_callback = pqi_aio_io_complete;
4674 io_request->scmd = scmd;
4675
4676 scmd->host_scribble = (unsigned char *)io_request;
4677
4678 request = io_request->iu;
4679 memset(request, 0,
4680 offsetof(struct pqi_raid_path_request, sg_descriptors));
4681
4682 request->header.iu_type = PQI_REQUEST_IU_AIO_PATH_IO;
4683 put_unaligned_le32(aio_handle, &request->nexus_id);
4684 put_unaligned_le32(scsi_bufflen(scmd), &request->buffer_length);
4685 request->task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
4686 put_unaligned_le16(io_request->index, &request->request_id);
4687 request->error_index = request->request_id;
4688 if (cdb_length > sizeof(request->cdb))
4689 cdb_length = sizeof(request->cdb);
4690 request->cdb_length = cdb_length;
4691 memcpy(request->cdb, cdb, cdb_length);
4692
4693 switch (scmd->sc_data_direction) {
4694 case DMA_TO_DEVICE:
4695 request->data_direction = SOP_READ_FLAG;
4696 break;
4697 case DMA_FROM_DEVICE:
4698 request->data_direction = SOP_WRITE_FLAG;
4699 break;
4700 case DMA_NONE:
4701 request->data_direction = SOP_NO_DIRECTION_FLAG;
4702 break;
4703 case DMA_BIDIRECTIONAL:
4704 request->data_direction = SOP_BIDIRECTIONAL;
4705 break;
4706 default:
4707 dev_err(&ctrl_info->pci_dev->dev,
4708 "unknown data direction: %d\n",
4709 scmd->sc_data_direction);
4710 break;
4711 }
4712
4713 if (encryption_info) {
4714 request->encryption_enable = true;
4715 put_unaligned_le16(encryption_info->data_encryption_key_index,
4716 &request->data_encryption_key_index);
4717 put_unaligned_le32(encryption_info->encrypt_tweak_lower,
4718 &request->encrypt_tweak_lower);
4719 put_unaligned_le32(encryption_info->encrypt_tweak_upper,
4720 &request->encrypt_tweak_upper);
4721 }
4722
4723 rc = pqi_build_aio_sg_list(ctrl_info, request, scmd, io_request);
4724 if (rc) {
4725 pqi_free_io_request(io_request);
4726 return SCSI_MLQUEUE_HOST_BUSY;
4727 }
4728
4729 pqi_start_io(ctrl_info, queue_group, AIO_PATH, io_request);
4730
4731 return 0;
4732}
4733
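/*
 * Map a SCSI command to a queue group.  blk_mq_unique_tag() carries the
 * block layer's hardware-queue index in its upper 16 bits, and
 * pqi_map_queues() below ties those hardware queues to the controller's
 * PCI MSI-X vectors, so the index lines up with the driver's queue
 * groups; anything out of range falls back to group 0.
 */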
4734static inline u16 pqi_get_hw_queue(struct pqi_ctrl_info *ctrl_info,
4735 struct scsi_cmnd *scmd)
4736{
4737 u16 hw_queue;
4738
4739 hw_queue = blk_mq_unique_tag_to_hwq(blk_mq_unique_tag(scmd->request));
4740 if (hw_queue > ctrl_info->max_hw_queue_index)
4741 hw_queue = 0;
4742
4743 return hw_queue;
4744}
4745
4746/*
4747 * This function gets called just before we hand the completed SCSI request
4748 * back to the SML.
4749 */
4750
4751void pqi_prep_for_scsi_done(struct scsi_cmnd *scmd)
4752{
4753 struct pqi_scsi_dev *device;
4754
4755 device = scmd->device->hostdata;
4756 atomic_dec(&device->scsi_cmds_outstanding);
4757}
4758
4759static int pqi_scsi_queue_command(struct Scsi_Host *shost,
4760 struct scsi_cmnd *scmd)
4761{
4762 int rc;
4763 struct pqi_ctrl_info *ctrl_info;
4764 struct pqi_scsi_dev *device;
4765 u16 hw_queue;
4766 struct pqi_queue_group *queue_group;
4767 bool raid_bypassed;
4768
4769 device = scmd->device->hostdata;
4770 ctrl_info = shost_to_hba(shost);
4771
4772 atomic_inc(&device->scsi_cmds_outstanding);
4773
4774 if (pqi_ctrl_offline(ctrl_info)) {
4775 set_host_byte(scmd, DID_NO_CONNECT);
4776 pqi_scsi_done(scmd);
4777 return 0;
4778 }
4779
4780 pqi_ctrl_busy(ctrl_info);
4781 if (pqi_ctrl_blocked(ctrl_info) || pqi_device_in_reset(device)) {
4782 rc = SCSI_MLQUEUE_HOST_BUSY;
4783 goto out;
4784 }
4785
4786 /*
4787 * This is necessary because the SML doesn't zero out this field during
4788 * error recovery.
4789 */
4790 scmd->result = 0;
4791
4792 hw_queue = pqi_get_hw_queue(ctrl_info, scmd);
4793 queue_group = &ctrl_info->queue_groups[hw_queue];
4794
4795 if (pqi_is_logical_device(device)) {
4796 raid_bypassed = false;
4797 if (device->offload_enabled &&
4798 !blk_rq_is_passthrough(scmd->request)) {
4799 rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
4800 scmd, queue_group);
4801 if (rc == 0 ||
4802 rc == SCSI_MLQUEUE_HOST_BUSY ||
4803 rc == SAM_STAT_CHECK_CONDITION ||
4804 rc == SAM_STAT_RESERVATION_CONFLICT)
4805 raid_bypassed = true;
4806 }
4807 if (!raid_bypassed)
4808 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4809 queue_group);
4810 } else {
4811 if (device->aio_enabled)
4812 rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
4813 queue_group);
4814 else
4815 rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
4816 queue_group);
4817 }
4818
4819out:
4820 pqi_ctrl_unbusy(ctrl_info);
4821 if (rc)
4822 atomic_dec(&device->scsi_cmds_outstanding);
4823
4824 return rc;
4825}
4826
4827static int pqi_wait_until_queued_io_drained(struct pqi_ctrl_info *ctrl_info,
4828 struct pqi_queue_group *queue_group)
4829{
4830 unsigned int path;
4831 unsigned long flags;
4832 bool list_is_empty;
4833
4834 for (path = 0; path < 2; path++) {
4835 while (1) {
4836 spin_lock_irqsave(
4837 &queue_group->submit_lock[path], flags);
4838 list_is_empty =
4839 list_empty(&queue_group->request_list[path]);
4840 spin_unlock_irqrestore(
4841 &queue_group->submit_lock[path], flags);
4842 if (list_is_empty)
4843 break;
4844 pqi_check_ctrl_health(ctrl_info);
4845 if (pqi_ctrl_offline(ctrl_info))
4846 return -ENXIO;
4847 usleep_range(1000, 2000);
4848 }
4849 }
4850
4851 return 0;
4852}
4853
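/*
 * Wait until the controller has consumed every operational inbound queue:
 * first drain the driver's per-path submission lists, then poll each
 * queue until its consumer index catches up with the cached producer
 * index, bailing out if the controller goes offline.
 */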
4854static int pqi_wait_until_inbound_queues_empty(struct pqi_ctrl_info *ctrl_info)
4855{
4856 int rc;
4857 unsigned int i;
4858 unsigned int path;
4859 struct pqi_queue_group *queue_group;
4860 pqi_index_t iq_pi;
4861 pqi_index_t iq_ci;
4862
4863 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4864 queue_group = &ctrl_info->queue_groups[i];
4865
4866 rc = pqi_wait_until_queued_io_drained(ctrl_info, queue_group);
4867 if (rc)
4868 return rc;
4869
4870 for (path = 0; path < 2; path++) {
4871 iq_pi = queue_group->iq_pi_copy[path];
4872
4873 while (1) {
4874 iq_ci = *queue_group->iq_ci[path];
4875 if (iq_ci == iq_pi)
4876 break;
4877 pqi_check_ctrl_health(ctrl_info);
4878 if (pqi_ctrl_offline(ctrl_info))
4879 return -ENXIO;
4880 usleep_range(1000, 2000);
4881 }
4882 }
4883 }
4884
4885 return 0;
4886}
4887
4888static void pqi_fail_io_queued_for_device(struct pqi_ctrl_info *ctrl_info,
4889 struct pqi_scsi_dev *device)
4890{
4891 unsigned int i;
4892 unsigned int path;
4893 struct pqi_queue_group *queue_group;
4894 unsigned long flags;
4895 struct pqi_io_request *io_request;
4896 struct pqi_io_request *next;
4897 struct scsi_cmnd *scmd;
4898 struct pqi_scsi_dev *scsi_device;
4899
4900 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
4901 queue_group = &ctrl_info->queue_groups[i];
4902
4903 for (path = 0; path < 2; path++) {
4904 spin_lock_irqsave(
4905 &queue_group->submit_lock[path], flags);
4906
4907 list_for_each_entry_safe(io_request, next,
4908 &queue_group->request_list[path],
4909 request_list_entry) {
4910 scmd = io_request->scmd;
4911 if (!scmd)
4912 continue;
4913
4914 scsi_device = scmd->device->hostdata;
4915 if (scsi_device != device)
4916 continue;
4917
4918 list_del(&io_request->request_list_entry);
4919 set_host_byte(scmd, DID_RESET);
4920 pqi_scsi_done(scmd);
4921 }
4922
4923 spin_unlock_irqrestore(
4924 &queue_group->submit_lock[path], flags);
4925 }
4926 }
4927}
4928
4929static int pqi_device_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info,
4930 struct pqi_scsi_dev *device)
4931{
4932 while (atomic_read(&device->scsi_cmds_outstanding)) {
4933 pqi_check_ctrl_health(ctrl_info);
4934 if (pqi_ctrl_offline(ctrl_info))
4935 return -ENXIO;
4936 usleep_range(1000, 2000);
4937 }
4938
4939 return 0;
4940}
4941
4942static int pqi_ctrl_wait_for_pending_io(struct pqi_ctrl_info *ctrl_info)
4943{
4944 bool io_pending;
4945 unsigned long flags;
4946 struct pqi_scsi_dev *device;
4947
4948 while (1) {
4949 io_pending = false;
4950
4951 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
4952 list_for_each_entry(device, &ctrl_info->scsi_device_list,
4953 scsi_device_list_entry) {
4954 if (atomic_read(&device->scsi_cmds_outstanding)) {
4955 io_pending = true;
4956 break;
4957 }
4958 }
4959 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
4960 flags);
4961
4962 if (!io_pending)
4963 break;
4964
4965 pqi_check_ctrl_health(ctrl_info);
4966 if (pqi_ctrl_offline(ctrl_info))
4967 return -ENXIO;
4968
4969 usleep_range(1000, 2000);
4970 }
4971
4972 return 0;
4973}
4974
4975static void pqi_lun_reset_complete(struct pqi_io_request *io_request,
4976 void *context)
4977{
4978 struct completion *waiting = context;
4979
4980 complete(waiting);
4981}
4982
4983#define PQI_LUN_RESET_TIMEOUT_SECS 10
4984
4985static int pqi_wait_for_lun_reset_completion(struct pqi_ctrl_info *ctrl_info,
4986 struct pqi_scsi_dev *device, struct completion *wait)
4987{
4988 int rc;
4989
4990 while (1) {
4991 if (wait_for_completion_io_timeout(wait,
4992 PQI_LUN_RESET_TIMEOUT_SECS * HZ)) {
4993 rc = 0;
4994 break;
4995 }
4996
4997 pqi_check_ctrl_health(ctrl_info);
4998 if (pqi_ctrl_offline(ctrl_info)) {
4999 rc = -ETIMEDOUT;
5000 break;
5001 }
5002 }
5003
5004 return rc;
5005}
5006
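/*
 * Build a SOP task-management IU requesting a LUN reset, submit it on the
 * RAID path of the default queue group and wait for the controller to
 * complete it, checking controller health while waiting.
 */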
5007static int pqi_lun_reset(struct pqi_ctrl_info *ctrl_info,
5008 struct pqi_scsi_dev *device)
5009{
5010 int rc;
5011 struct pqi_io_request *io_request;
5012 DECLARE_COMPLETION_ONSTACK(wait);
5013 struct pqi_task_management_request *request;
5014
5015 io_request = pqi_alloc_io_request(ctrl_info);
5016 io_request->io_complete_callback = pqi_lun_reset_complete;
5017 io_request->context = &wait;
5018
5019 request = io_request->iu;
5020 memset(request, 0, sizeof(*request));
5021
5022 request->header.iu_type = PQI_REQUEST_IU_TASK_MANAGEMENT;
5023 put_unaligned_le16(sizeof(*request) - PQI_REQUEST_HEADER_LENGTH,
5024 &request->header.iu_length);
5025 put_unaligned_le16(io_request->index, &request->request_id);
5026 memcpy(request->lun_number, device->scsi3addr,
5027 sizeof(request->lun_number));
5028 request->task_management_function = SOP_TASK_MANAGEMENT_LUN_RESET;
5029
5030 pqi_start_io(ctrl_info,
5031 &ctrl_info->queue_groups[PQI_DEFAULT_QUEUE_GROUP], RAID_PATH,
5032 io_request);
5033
5034 rc = pqi_wait_for_lun_reset_completion(ctrl_info, device, &wait);
5035 if (rc == 0)
5036 rc = io_request->status;
5037
5038 pqi_free_io_request(io_request);
5039
5040 return rc;
5041}
5042
5043/* Performs a reset at the LUN level. */
5044
5045static int pqi_device_reset(struct pqi_ctrl_info *ctrl_info,
5046 struct pqi_scsi_dev *device)
5047{
5048 int rc;
5049
5050 rc = pqi_lun_reset(ctrl_info, device);
5051 if (rc == 0)
5052 rc = pqi_device_wait_for_pending_io(ctrl_info, device);
5053
5054 return rc == 0 ? SUCCESS : FAILED;
5055}
5056
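/*
 * SCSI error-handler device-reset entry point: block and quiesce new
 * requests, fail any I/O still queued inside the driver for this device
 * with DID_RESET, wait for the inbound queues to drain, then issue a LUN
 * reset and wait for commands the controller already owns to finish.
 */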
5057static int pqi_eh_device_reset_handler(struct scsi_cmnd *scmd)
5058{
5059 int rc;
5060 struct Scsi_Host *shost;
5061 struct pqi_ctrl_info *ctrl_info;
5062 struct pqi_scsi_dev *device;
5063
5064 shost = scmd->device->host;
5065 ctrl_info = shost_to_hba(shost);
5066 device = scmd->device->hostdata;
5067
5068 dev_err(&ctrl_info->pci_dev->dev,
5069 "resetting scsi %d:%d:%d:%d\n",
5070 shost->host_no, device->bus, device->target, device->lun);
5071
5072 pqi_check_ctrl_health(ctrl_info);
5073 if (pqi_ctrl_offline(ctrl_info)) {
5074 rc = FAILED;
5075 goto out;
5076 }
5077
5078 mutex_lock(&ctrl_info->lun_reset_mutex);
5079
5080 pqi_ctrl_block_requests(ctrl_info);
5081 pqi_ctrl_wait_until_quiesced(ctrl_info);
5082 pqi_fail_io_queued_for_device(ctrl_info, device);
5083 rc = pqi_wait_until_inbound_queues_empty(ctrl_info);
5084 pqi_device_reset_start(device);
5085 pqi_ctrl_unblock_requests(ctrl_info);
5086
5087 if (rc)
5088 rc = FAILED;
5089 else
5090 rc = pqi_device_reset(ctrl_info, device);
5091
5092 pqi_device_reset_done(device);
5093
5094 mutex_unlock(&ctrl_info->lun_reset_mutex);
5095
5096out:
5097 dev_err(&ctrl_info->pci_dev->dev,
5098 "reset of scsi %d:%d:%d:%d: %s\n",
5099 shost->host_no, device->bus, device->target, device->lun,
5100 rc == SUCCESS ? "SUCCESS" : "FAILED");
5101
5102 return rc;
5103}
5104
5105static int pqi_slave_alloc(struct scsi_device *sdev)
5106{
5107 struct pqi_scsi_dev *device;
5108 unsigned long flags;
5109 struct pqi_ctrl_info *ctrl_info;
5110 struct scsi_target *starget;
5111 struct sas_rphy *rphy;
5112
5113 ctrl_info = shost_to_hba(sdev->host);
5114
5115 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5116
5117 if (sdev_channel(sdev) == PQI_PHYSICAL_DEVICE_BUS) {
5118 starget = scsi_target(sdev);
5119 rphy = target_to_rphy(starget);
5120 device = pqi_find_device_by_sas_rphy(ctrl_info, rphy);
5121 if (device) {
5122 device->target = sdev_id(sdev);
5123 device->lun = sdev->lun;
5124 device->target_lun_valid = true;
5125 }
5126 } else {
5127 device = pqi_find_scsi_dev(ctrl_info, sdev_channel(sdev),
5128 sdev_id(sdev), sdev->lun);
5129 }
5130
5131 if (device && device->expose_device) {
5132 sdev->hostdata = device;
5133 device->sdev = sdev;
5134 if (device->queue_depth) {
5135 device->advertised_queue_depth = device->queue_depth;
5136 scsi_change_queue_depth(sdev,
5137 device->advertised_queue_depth);
5138 }
5139 }
5140
5141 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5142
5143 return 0;
5144}
5145
5146static int pqi_slave_configure(struct scsi_device *sdev)
5147{
5148 struct pqi_scsi_dev *device;
5149
5150 device = sdev->hostdata;
5151 if (!device->expose_device)
5152 sdev->no_uld_attach = true;
5153
5154 return 0;
5155}
5156
5157static int pqi_map_queues(struct Scsi_Host *shost)
5158{
5159 struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
5160
5161 return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
5162}
5163
5164static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
5165 void __user *arg)
5166{
5167 struct pci_dev *pci_dev;
5168 u32 subsystem_vendor;
5169 u32 subsystem_device;
5170 cciss_pci_info_struct pciinfo;
5171
5172 if (!arg)
5173 return -EINVAL;
5174
5175 pci_dev = ctrl_info->pci_dev;
5176
5177 pciinfo.domain = pci_domain_nr(pci_dev->bus);
5178 pciinfo.bus = pci_dev->bus->number;
5179 pciinfo.dev_fn = pci_dev->devfn;
5180 subsystem_vendor = pci_dev->subsystem_vendor;
5181 subsystem_device = pci_dev->subsystem_device;
5182 pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
5183 subsystem_vendor;
5184
5185 if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
5186 return -EFAULT;
5187
5188 return 0;
5189}
5190
5191static int pqi_getdrivver_ioctl(void __user *arg)
5192{
5193 u32 version;
5194
5195 if (!arg)
5196 return -EINVAL;
5197
5198 version = (DRIVER_MAJOR << 28) | (DRIVER_MINOR << 24) |
5199 (DRIVER_RELEASE << 16) | DRIVER_REVISION;
5200
5201 if (copy_to_user(arg, &version, sizeof(version)))
5202 return -EFAULT;
5203
5204 return 0;
5205}
5206
5207struct ciss_error_info {
5208 u8 scsi_status;
5209 int command_status;
5210 size_t sense_data_length;
5211};
5212
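/*
 * Convert PQI error information into the legacy CISS form used by the
 * cciss passthrough ioctl: map data_out_result onto a CISS command status
 * and work out how much sense (or response) data can be copied back to
 * user space.
 */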
5213static void pqi_error_info_to_ciss(struct pqi_raid_error_info *pqi_error_info,
5214 struct ciss_error_info *ciss_error_info)
5215{
5216 int ciss_cmd_status;
5217 size_t sense_data_length;
5218
5219 switch (pqi_error_info->data_out_result) {
5220 case PQI_DATA_IN_OUT_GOOD:
5221 ciss_cmd_status = CISS_CMD_STATUS_SUCCESS;
5222 break;
5223 case PQI_DATA_IN_OUT_UNDERFLOW:
5224 ciss_cmd_status = CISS_CMD_STATUS_DATA_UNDERRUN;
5225 break;
5226 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW:
5227 ciss_cmd_status = CISS_CMD_STATUS_DATA_OVERRUN;
5228 break;
5229 case PQI_DATA_IN_OUT_PROTOCOL_ERROR:
5230 case PQI_DATA_IN_OUT_BUFFER_ERROR:
5231 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_DESCRIPTOR_AREA:
5232 case PQI_DATA_IN_OUT_BUFFER_OVERFLOW_BRIDGE:
5233 case PQI_DATA_IN_OUT_ERROR:
5234 ciss_cmd_status = CISS_CMD_STATUS_PROTOCOL_ERROR;
5235 break;
5236 case PQI_DATA_IN_OUT_HARDWARE_ERROR:
5237 case PQI_DATA_IN_OUT_PCIE_FABRIC_ERROR:
5238 case PQI_DATA_IN_OUT_PCIE_COMPLETION_TIMEOUT:
5239 case PQI_DATA_IN_OUT_PCIE_COMPLETER_ABORT_RECEIVED:
5240 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST_RECEIVED:
5241 case PQI_DATA_IN_OUT_PCIE_ECRC_CHECK_FAILED:
5242 case PQI_DATA_IN_OUT_PCIE_UNSUPPORTED_REQUEST:
5243 case PQI_DATA_IN_OUT_PCIE_ACS_VIOLATION:
5244 case PQI_DATA_IN_OUT_PCIE_TLP_PREFIX_BLOCKED:
5245 case PQI_DATA_IN_OUT_PCIE_POISONED_MEMORY_READ:
5246 ciss_cmd_status = CISS_CMD_STATUS_HARDWARE_ERROR;
5247 break;
5248 case PQI_DATA_IN_OUT_UNSOLICITED_ABORT:
5249 ciss_cmd_status = CISS_CMD_STATUS_UNSOLICITED_ABORT;
5250 break;
5251 case PQI_DATA_IN_OUT_ABORTED:
5252 ciss_cmd_status = CISS_CMD_STATUS_ABORTED;
5253 break;
5254 case PQI_DATA_IN_OUT_TIMEOUT:
5255 ciss_cmd_status = CISS_CMD_STATUS_TIMEOUT;
5256 break;
5257 default:
5258 ciss_cmd_status = CISS_CMD_STATUS_TARGET_STATUS;
5259 break;
5260 }
5261
5262 sense_data_length =
5263 get_unaligned_le16(&pqi_error_info->sense_data_length);
5264 if (sense_data_length == 0)
5265 sense_data_length =
5266 get_unaligned_le16(&pqi_error_info->response_data_length);
5267 if (sense_data_length)
5268 if (sense_data_length > sizeof(pqi_error_info->data))
5269 sense_data_length = sizeof(pqi_error_info->data);
5270
5271 ciss_error_info->scsi_status = pqi_error_info->status;
5272 ciss_error_info->command_status = ciss_cmd_status;
5273 ciss_error_info->sense_data_length = sense_data_length;
5274}
5275
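/*
 * CCISS_PASSTHRU: validate the user-supplied command, map an optional
 * data buffer, issue the CDB as a synchronous RAID-path request, and
 * copy status, sense data, and any read data back to user space.
 */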
5276static int pqi_passthru_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
5277{
5278 int rc;
5279 char *kernel_buffer = NULL;
5280 u16 iu_length;
5281 size_t sense_data_length;
5282 IOCTL_Command_struct iocommand;
5283 struct pqi_raid_path_request request;
5284 struct pqi_raid_error_info pqi_error_info;
5285 struct ciss_error_info ciss_error_info;
5286
5287 if (pqi_ctrl_offline(ctrl_info))
5288 return -ENXIO;
5289 if (!arg)
5290 return -EINVAL;
5291 if (!capable(CAP_SYS_RAWIO))
5292 return -EPERM;
5293 if (copy_from_user(&iocommand, arg, sizeof(iocommand)))
5294 return -EFAULT;
5295 if (iocommand.buf_size < 1 &&
5296 iocommand.Request.Type.Direction != XFER_NONE)
5297 return -EINVAL;
5298 if (iocommand.Request.CDBLen > sizeof(request.cdb))
5299 return -EINVAL;
5300 if (iocommand.Request.Type.Type != TYPE_CMD)
5301 return -EINVAL;
5302
5303 switch (iocommand.Request.Type.Direction) {
5304 case XFER_NONE:
5305 case XFER_WRITE:
5306 case XFER_READ:
5307 break;
5308 default:
5309 return -EINVAL;
5310 }
5311
5312 if (iocommand.buf_size > 0) {
5313 kernel_buffer = kmalloc(iocommand.buf_size, GFP_KERNEL);
5314 if (!kernel_buffer)
5315 return -ENOMEM;
5316 if (iocommand.Request.Type.Direction & XFER_WRITE) {
5317 if (copy_from_user(kernel_buffer, iocommand.buf,
5318 iocommand.buf_size)) {
5319 rc = -EFAULT;
5320 goto out;
5321 }
5322 } else {
5323 memset(kernel_buffer, 0, iocommand.buf_size);
5324 }
5325 }
5326
5327 memset(&request, 0, sizeof(request));
5328
5329 request.header.iu_type = PQI_REQUEST_IU_RAID_PATH_IO;
5330 iu_length = offsetof(struct pqi_raid_path_request, sg_descriptors) -
5331 PQI_REQUEST_HEADER_LENGTH;
5332 memcpy(request.lun_number, iocommand.LUN_info.LunAddrBytes,
5333 sizeof(request.lun_number));
5334 memcpy(request.cdb, iocommand.Request.CDB, iocommand.Request.CDBLen);
5335 request.additional_cdb_bytes_usage = SOP_ADDITIONAL_CDB_BYTES_0;
5336
5337 switch (iocommand.Request.Type.Direction) {
5338 case XFER_NONE:
5339 request.data_direction = SOP_NO_DIRECTION_FLAG;
5340 break;
5341 case XFER_WRITE:
5342 request.data_direction = SOP_WRITE_FLAG;
5343 break;
5344 case XFER_READ:
5345 request.data_direction = SOP_READ_FLAG;
5346 break;
5347 }
5348
5349 request.task_attribute = SOP_TASK_ATTRIBUTE_SIMPLE;
5350
5351 if (iocommand.buf_size > 0) {
5352 put_unaligned_le32(iocommand.buf_size, &request.buffer_length);
5353
5354 rc = pqi_map_single(ctrl_info->pci_dev,
5355 &request.sg_descriptors[0], kernel_buffer,
5356 iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
5357 if (rc)
5358 goto out;
5359
5360 iu_length += sizeof(request.sg_descriptors[0]);
5361 }
5362
5363 put_unaligned_le16(iu_length, &request.header.iu_length);
5364
5365 rc = pqi_submit_raid_request_synchronous(ctrl_info, &request.header,
5366 PQI_SYNC_FLAGS_INTERRUPTABLE, &pqi_error_info, NO_TIMEOUT);
5367
5368 if (iocommand.buf_size > 0)
5369 pqi_pci_unmap(ctrl_info->pci_dev, request.sg_descriptors, 1,
5370 PCI_DMA_BIDIRECTIONAL);
5371
5372 memset(&iocommand.error_info, 0, sizeof(iocommand.error_info));
5373
5374 if (rc == 0) {
5375 pqi_error_info_to_ciss(&pqi_error_info, &ciss_error_info);
5376 iocommand.error_info.ScsiStatus = ciss_error_info.scsi_status;
5377 iocommand.error_info.CommandStatus =
5378 ciss_error_info.command_status;
5379 sense_data_length = ciss_error_info.sense_data_length;
5380 if (sense_data_length) {
5381 if (sense_data_length >
5382 sizeof(iocommand.error_info.SenseInfo))
5383 sense_data_length =
5384 sizeof(iocommand.error_info.SenseInfo);
5385 memcpy(iocommand.error_info.SenseInfo,
5386 pqi_error_info.data, sense_data_length);
5387 iocommand.error_info.SenseLen = sense_data_length;
5388 }
5389 }
5390
5391 if (copy_to_user(arg, &iocommand, sizeof(iocommand))) {
5392 rc = -EFAULT;
5393 goto out;
5394 }
5395
5396 if (rc == 0 && iocommand.buf_size > 0 &&
5397 (iocommand.Request.Type.Direction & XFER_READ)) {
5398 if (copy_to_user(iocommand.buf, kernel_buffer,
5399 iocommand.buf_size)) {
5400 rc = -EFAULT;
5401 }
5402 }
5403
5404out:
5405 kfree(kernel_buffer);
5406
5407 return rc;
5408}
5409
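/*
 * Entry point for the cciss-compatible ioctls; rescan requests and the
 * info/passthrough commands are dispatched to the helpers above.
 */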
5410static int pqi_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5411{
5412 int rc;
5413 struct pqi_ctrl_info *ctrl_info;
5414
5415 ctrl_info = shost_to_hba(sdev->host);
5416
5417 switch (cmd) {
5418 case CCISS_DEREGDISK:
5419 case CCISS_REGNEWDISK:
5420 case CCISS_REGNEWD:
5421 rc = pqi_scan_scsi_devices(ctrl_info);
5422 break;
5423 case CCISS_GETPCIINFO:
5424 rc = pqi_getpciinfo_ioctl(ctrl_info, arg);
5425 break;
5426 case CCISS_GETDRIVVER:
5427 rc = pqi_getdrivver_ioctl(arg);
5428 break;
5429 case CCISS_PASSTHRU:
5430 rc = pqi_passthru_ioctl(ctrl_info, arg);
5431 break;
5432 default:
5433 rc = -EINVAL;
5434 break;
5435 }
5436
5437 return rc;
5438}
5439
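/*
 * sysfs 'version' host attribute: reports the driver version (with
 * build timestamp) and the controller firmware version.
 */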
5440static ssize_t pqi_version_show(struct device *dev,
5441 struct device_attribute *attr, char *buffer)
5442{
5443 ssize_t count = 0;
5444 struct Scsi_Host *shost;
5445 struct pqi_ctrl_info *ctrl_info;
5446
5447 shost = class_to_shost(dev);
5448 ctrl_info = shost_to_hba(shost);
5449
5450 count += snprintf(buffer + count, PAGE_SIZE - count,
5451 " driver: %s\n", DRIVER_VERSION BUILD_TIMESTAMP);
5452
5453 count += snprintf(buffer + count, PAGE_SIZE - count,
5454 "firmware: %s\n", ctrl_info->firmware_version);
5455
5456 return count;
5457}
5458
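/* sysfs 'rescan' host attribute: any write starts a new device scan. */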
5459static ssize_t pqi_host_rescan_store(struct device *dev,
5460 struct device_attribute *attr, const char *buffer, size_t count)
5461{
5462 struct Scsi_Host *shost = class_to_shost(dev);
5463
5464 pqi_scan_start(shost);
5465
5466 return count;
5467}
5468
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05005469static DEVICE_ATTR(version, 0444, pqi_version_show, NULL);
5470static DEVICE_ATTR(rescan, 0200, NULL, pqi_host_rescan_store);
Kevin Barnett6c223762016-06-27 16:41:00 -05005471
5472static struct device_attribute *pqi_shost_attrs[] = {
5473 &dev_attr_version,
5474 &dev_attr_rescan,
5475 NULL
5476};
5477
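/*
 * sysfs 'sas_address' device attribute: valid only for physical
 * devices; logical volumes return -ENODEV.
 */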
5478static ssize_t pqi_sas_address_show(struct device *dev,
5479 struct device_attribute *attr, char *buffer)
5480{
5481 struct pqi_ctrl_info *ctrl_info;
5482 struct scsi_device *sdev;
5483 struct pqi_scsi_dev *device;
5484 unsigned long flags;
5485 u64 sas_address;
5486
5487 sdev = to_scsi_device(dev);
5488 ctrl_info = shost_to_hba(sdev->host);
5489
5490 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5491
5492 device = sdev->hostdata;
5493 if (pqi_is_logical_device(device)) {
5494 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
5495 flags);
5496 return -ENODEV;
5497 }
5498 sas_address = device->sas_address;
5499
5500 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5501
5502 return snprintf(buffer, PAGE_SIZE, "0x%016llx\n", sas_address);
5503}
5504
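/*
 * sysfs 'ssd_smart_path_enabled' device attribute: reports the device's
 * offload_enabled flag.
 */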
5505static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
5506 struct device_attribute *attr, char *buffer)
5507{
5508 struct pqi_ctrl_info *ctrl_info;
5509 struct scsi_device *sdev;
5510 struct pqi_scsi_dev *device;
5511 unsigned long flags;
5512
5513 sdev = to_scsi_device(dev);
5514 ctrl_info = shost_to_hba(sdev->host);
5515
5516 spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
5517
5518 device = sdev->hostdata;
5519 buffer[0] = device->offload_enabled ? '1' : '0';
5520 buffer[1] = '\n';
5521 buffer[2] = '\0';
5522
5523 spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
5524
5525 return 2;
5526}
5527
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05005528static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
5529static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
Kevin Barnett6c223762016-06-27 16:41:00 -05005530 pqi_ssd_smart_path_enabled_show, NULL);
5531
5532static struct device_attribute *pqi_sdev_attrs[] = {
5533 &dev_attr_sas_address,
5534 &dev_attr_ssd_smart_path_enabled,
5535 NULL
5536};
5537
5538static struct scsi_host_template pqi_driver_template = {
5539 .module = THIS_MODULE,
5540 .name = DRIVER_NAME_SHORT,
5541 .proc_name = DRIVER_NAME_SHORT,
5542 .queuecommand = pqi_scsi_queue_command,
5543 .scan_start = pqi_scan_start,
5544 .scan_finished = pqi_scan_finished,
5545 .this_id = -1,
5546 .use_clustering = ENABLE_CLUSTERING,
5547 .eh_device_reset_handler = pqi_eh_device_reset_handler,
5548 .ioctl = pqi_ioctl,
5549 .slave_alloc = pqi_slave_alloc,
5550 .slave_configure = pqi_slave_configure,
Christoph Hellwig52198222016-11-01 08:12:49 -06005551 .map_queues = pqi_map_queues,
Kevin Barnett6c223762016-06-27 16:41:00 -05005552 .sdev_attrs = pqi_sdev_attrs,
5553 .shost_attrs = pqi_shost_attrs,
5554};
5555
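/*
 * Allocate a Scsi_Host, size it from the controller's capabilities
 * (queue depth, SG table size, hardware queue count), and register it
 * with the SCSI midlayer and the SAS transport layer.
 */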
5556static int pqi_register_scsi(struct pqi_ctrl_info *ctrl_info)
5557{
5558 int rc;
5559 struct Scsi_Host *shost;
5560
5561 shost = scsi_host_alloc(&pqi_driver_template, sizeof(ctrl_info));
5562 if (!shost) {
5563 dev_err(&ctrl_info->pci_dev->dev,
5564 "scsi_host_alloc failed for controller %u\n",
5565 ctrl_info->ctrl_id);
5566 return -ENOMEM;
5567 }
5568
5569 shost->io_port = 0;
5570 shost->n_io_port = 0;
5571 shost->this_id = -1;
5572 shost->max_channel = PQI_MAX_BUS;
5573 shost->max_cmd_len = MAX_COMMAND_SIZE;
5574 shost->max_lun = ~0;
5575 shost->max_id = ~0;
5576 shost->max_sectors = ctrl_info->max_sectors;
5577 shost->can_queue = ctrl_info->scsi_ml_can_queue;
5578 shost->cmd_per_lun = shost->can_queue;
5579 shost->sg_tablesize = ctrl_info->sg_tablesize;
5580 shost->transportt = pqi_sas_transport_template;
Christoph Hellwig52198222016-11-01 08:12:49 -06005581 shost->irq = pci_irq_vector(ctrl_info->pci_dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05005582 shost->unique_id = shost->irq;
5583 shost->nr_hw_queues = ctrl_info->num_queue_groups;
5584 shost->hostdata[0] = (unsigned long)ctrl_info;
5585
5586 rc = scsi_add_host(shost, &ctrl_info->pci_dev->dev);
5587 if (rc) {
5588 dev_err(&ctrl_info->pci_dev->dev,
5589 "scsi_add_host failed for controller %u\n",
5590 ctrl_info->ctrl_id);
5591 goto free_host;
5592 }
5593
5594 rc = pqi_add_sas_host(shost, ctrl_info);
5595 if (rc) {
5596 dev_err(&ctrl_info->pci_dev->dev,
5597 "add SAS host failed for controller %u\n",
5598 ctrl_info->ctrl_id);
5599 goto remove_host;
5600 }
5601
5602 ctrl_info->scsi_host = shost;
5603
5604 return 0;
5605
5606remove_host:
5607 scsi_remove_host(shost);
5608free_host:
5609 scsi_host_put(shost);
5610
5611 return rc;
5612}
5613
5614static void pqi_unregister_scsi(struct pqi_ctrl_info *ctrl_info)
5615{
5616 struct Scsi_Host *shost;
5617
5618 pqi_delete_sas_host(ctrl_info);
5619
5620 shost = ctrl_info->scsi_host;
5621 if (!shost)
5622 return;
5623
5624 scsi_remove_host(shost);
5625 scsi_host_put(shost);
5626}
5627
5628#define PQI_RESET_ACTION_RESET 0x1
5629
5630#define PQI_RESET_TYPE_NO_RESET 0x0
5631#define PQI_RESET_TYPE_SOFT_RESET 0x1
5632#define PQI_RESET_TYPE_FIRM_RESET 0x2
5633#define PQI_RESET_TYPE_HARD_RESET 0x3
5634
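/*
 * Request a PQI hard reset through the device_reset register and wait
 * for the controller to become ready again.
 */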
5635static int pqi_reset(struct pqi_ctrl_info *ctrl_info)
5636{
5637 int rc;
5638 u32 reset_params;
5639
5640 reset_params = (PQI_RESET_ACTION_RESET << 5) |
5641 PQI_RESET_TYPE_HARD_RESET;
5642
5643 writel(reset_params,
5644 &ctrl_info->pqi_registers->device_reset);
5645
5646 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5647 if (rc)
5648 dev_err(&ctrl_info->pci_dev->dev,
5649 "PQI reset failed\n");
5650
5651 return rc;
5652}
5653
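/*
 * Retrieve the firmware version string and build number via a
 * controller identify command and cache them in ctrl_info.
 */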
5654static int pqi_get_ctrl_firmware_version(struct pqi_ctrl_info *ctrl_info)
5655{
5656 int rc;
5657 struct bmic_identify_controller *identify;
5658
5659 identify = kmalloc(sizeof(*identify), GFP_KERNEL);
5660 if (!identify)
5661 return -ENOMEM;
5662
5663 rc = pqi_identify_controller(ctrl_info, identify);
5664 if (rc)
5665 goto out;
5666
5667 memcpy(ctrl_info->firmware_version, identify->firmware_version,
5668 sizeof(identify->firmware_version));
5669 ctrl_info->firmware_version[sizeof(identify->firmware_version)] = '\0';
5670 snprintf(ctrl_info->firmware_version +
5671 strlen(ctrl_info->firmware_version),
5672 sizeof(ctrl_info->firmware_version),
5673 "-%u", get_unaligned_le16(&identify->firmware_build_number));
5674
5675out:
5676 kfree(identify);
5677
5678 return rc;
5679}
5680
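/*
 * Walk the sections of the PQI configuration table; currently only the
 * heartbeat section is consumed, to locate the controller's heartbeat
 * counter in register space.
 */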
Kevin Barnett98f87662017-05-03 18:53:11 -05005681static int pqi_process_config_table(struct pqi_ctrl_info *ctrl_info)
5682{
5683 u32 table_length;
5684 u32 section_offset;
5685 void __iomem *table_iomem_addr;
5686 struct pqi_config_table *config_table;
5687 struct pqi_config_table_section_header *section;
5688
5689 table_length = ctrl_info->config_table_length;
5690
5691 config_table = kmalloc(table_length, GFP_KERNEL);
5692 if (!config_table) {
5693 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05005694 "failed to allocate memory for PQI configuration table\n");
Kevin Barnett98f87662017-05-03 18:53:11 -05005695 return -ENOMEM;
5696 }
5697
5698 /*
5699 * Copy the config table contents from I/O memory space into the
5700 * temporary buffer.
5701 */
5702 table_iomem_addr = ctrl_info->iomem_base +
5703 ctrl_info->config_table_offset;
5704 memcpy_fromio(config_table, table_iomem_addr, table_length);
5705
5706 section_offset =
5707 get_unaligned_le32(&config_table->first_section_offset);
5708
5709 while (section_offset) {
5710 section = (void *)config_table + section_offset;
5711
5712 switch (get_unaligned_le16(&section->section_id)) {
5713 case PQI_CONFIG_TABLE_SECTION_HEARTBEAT:
5714 ctrl_info->heartbeat_counter = table_iomem_addr +
5715 section_offset +
5716 offsetof(struct pqi_config_table_heartbeat,
5717 heartbeat_counter);
5718 break;
5719 }
5720
5721 section_offset =
5722 get_unaligned_le16(&section->next_section_offset);
5723 }
5724
5725 kfree(config_table);
5726
5727 return 0;
5728}
5729
Kevin Barnett162d7752017-05-03 18:52:46 -05005730/* Switches the controller from PQI mode back into SIS mode. */
5731
5732static int pqi_revert_to_sis_mode(struct pqi_ctrl_info *ctrl_info)
5733{
5734 int rc;
5735
Kevin Barnett061ef062017-05-03 18:53:05 -05005736 pqi_change_irq_mode(ctrl_info, IRQ_MODE_NONE);
Kevin Barnett162d7752017-05-03 18:52:46 -05005737 rc = pqi_reset(ctrl_info);
5738 if (rc)
5739 return rc;
5740 sis_reenable_sis_mode(ctrl_info);
5741 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5742
5743 return 0;
5744}
5745
5746/*
5747 * If the controller isn't already in SIS mode, this function forces it into
5748 * SIS mode.
5749 */
5750
5751static int pqi_force_sis_mode(struct pqi_ctrl_info *ctrl_info)
Kevin Barnettff6abb72016-08-31 14:54:41 -05005752{
5753 if (!sis_is_firmware_running(ctrl_info))
5754 return -ENXIO;
5755
Kevin Barnett162d7752017-05-03 18:52:46 -05005756 if (pqi_get_ctrl_mode(ctrl_info) == SIS_MODE)
5757 return 0;
5758
5759 if (sis_is_kernel_up(ctrl_info)) {
5760 pqi_save_ctrl_mode(ctrl_info, SIS_MODE);
5761 return 0;
Kevin Barnettff6abb72016-08-31 14:54:41 -05005762 }
5763
Kevin Barnett162d7752017-05-03 18:52:46 -05005764 return pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnettff6abb72016-08-31 14:54:41 -05005765}
5766
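/*
 * One-time controller initialization: transition from SIS to PQI mode,
 * create the admin and operational queues, enable MSI-X interrupts and
 * events, then register with the SCSI subsystem and start scanning.
 */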
Kevin Barnett6c223762016-06-27 16:41:00 -05005767static int pqi_ctrl_init(struct pqi_ctrl_info *ctrl_info)
5768{
5769 int rc;
5770
Kevin Barnett162d7752017-05-03 18:52:46 -05005771 rc = pqi_force_sis_mode(ctrl_info);
5772 if (rc)
5773 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05005774
5775 /*
5776 * Wait until the controller is ready to start accepting SIS
5777 * commands.
5778 */
5779 rc = sis_wait_for_ctrl_ready(ctrl_info);
Kevin Barnett8845fdf2017-05-03 18:53:36 -05005780 if (rc)
Kevin Barnett6c223762016-06-27 16:41:00 -05005781 return rc;
Kevin Barnett6c223762016-06-27 16:41:00 -05005782
5783 /*
5784 * Get the controller properties. This allows us to determine
5785 * whether or not it supports PQI mode.
5786 */
5787 rc = sis_get_ctrl_properties(ctrl_info);
5788 if (rc) {
5789 dev_err(&ctrl_info->pci_dev->dev,
5790 "error obtaining controller properties\n");
5791 return rc;
5792 }
5793
5794 rc = sis_get_pqi_capabilities(ctrl_info);
5795 if (rc) {
5796 dev_err(&ctrl_info->pci_dev->dev,
5797 "error obtaining controller capabilities\n");
5798 return rc;
5799 }
5800
5801 if (ctrl_info->max_outstanding_requests > PQI_MAX_OUTSTANDING_REQUESTS)
5802 ctrl_info->max_outstanding_requests =
5803 PQI_MAX_OUTSTANDING_REQUESTS;
5804
5805 pqi_calculate_io_resources(ctrl_info);
5806
5807 rc = pqi_alloc_error_buffer(ctrl_info);
5808 if (rc) {
5809 dev_err(&ctrl_info->pci_dev->dev,
5810 "failed to allocate PQI error buffer\n");
5811 return rc;
5812 }
5813
5814 /*
5815 * If the function we are about to call succeeds, the
5816 * controller will transition from legacy SIS mode
5817 * into PQI mode.
5818 */
5819 rc = sis_init_base_struct_addr(ctrl_info);
5820 if (rc) {
5821 dev_err(&ctrl_info->pci_dev->dev,
5822 "error initializing PQI mode\n");
5823 return rc;
5824 }
5825
5826 /* Wait for the controller to complete the SIS -> PQI transition. */
5827 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5828 if (rc) {
5829 dev_err(&ctrl_info->pci_dev->dev,
5830 "transition to PQI mode failed\n");
5831 return rc;
5832 }
5833
5834 /* From here on, we are running in PQI mode. */
5835 ctrl_info->pqi_mode_enabled = true;
Kevin Barnettff6abb72016-08-31 14:54:41 -05005836 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
Kevin Barnett6c223762016-06-27 16:41:00 -05005837
Kevin Barnett98f87662017-05-03 18:53:11 -05005838 rc = pqi_process_config_table(ctrl_info);
5839 if (rc)
5840 return rc;
5841
Kevin Barnett6c223762016-06-27 16:41:00 -05005842 rc = pqi_alloc_admin_queues(ctrl_info);
5843 if (rc) {
5844 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05005845 "failed to allocate admin queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05005846 return rc;
5847 }
5848
5849 rc = pqi_create_admin_queues(ctrl_info);
5850 if (rc) {
5851 dev_err(&ctrl_info->pci_dev->dev,
5852 "error creating admin queues\n");
5853 return rc;
5854 }
5855
5856 rc = pqi_report_device_capability(ctrl_info);
5857 if (rc) {
5858 dev_err(&ctrl_info->pci_dev->dev,
5859 "obtaining device capability failed\n");
5860 return rc;
5861 }
5862
5863 rc = pqi_validate_device_capability(ctrl_info);
5864 if (rc)
5865 return rc;
5866
5867 pqi_calculate_queue_resources(ctrl_info);
5868
5869 rc = pqi_enable_msix_interrupts(ctrl_info);
5870 if (rc)
5871 return rc;
5872
5873 if (ctrl_info->num_msix_vectors_enabled < ctrl_info->num_queue_groups) {
5874 ctrl_info->max_msix_vectors =
5875 ctrl_info->num_msix_vectors_enabled;
5876 pqi_calculate_queue_resources(ctrl_info);
5877 }
5878
5879 rc = pqi_alloc_io_resources(ctrl_info);
5880 if (rc)
5881 return rc;
5882
5883 rc = pqi_alloc_operational_queues(ctrl_info);
Kevin Barnettd87d5472017-05-03 18:54:00 -05005884 if (rc) {
5885 dev_err(&ctrl_info->pci_dev->dev,
5886 "failed to allocate operational queues\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05005887 return rc;
Kevin Barnettd87d5472017-05-03 18:54:00 -05005888 }
Kevin Barnett6c223762016-06-27 16:41:00 -05005889
5890 pqi_init_operational_queues(ctrl_info);
5891
5892 rc = pqi_request_irqs(ctrl_info);
5893 if (rc)
5894 return rc;
5895
Kevin Barnett6c223762016-06-27 16:41:00 -05005896 rc = pqi_create_queues(ctrl_info);
5897 if (rc)
5898 return rc;
5899
Kevin Barnett061ef062017-05-03 18:53:05 -05005900 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
5901
5902 ctrl_info->controller_online = true;
5903 pqi_start_heartbeat_timer(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05005904
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05005905 rc = pqi_enable_events(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05005906 if (rc) {
5907 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnett6a50d6a2017-05-03 18:52:52 -05005908 "error enabling events\n");
Kevin Barnett6c223762016-06-27 16:41:00 -05005909 return rc;
5910 }
5911
Kevin Barnett6c223762016-06-27 16:41:00 -05005912 /* Register with the SCSI subsystem. */
5913 rc = pqi_register_scsi(ctrl_info);
5914 if (rc)
5915 return rc;
5916
5917 rc = pqi_get_ctrl_firmware_version(ctrl_info);
5918 if (rc) {
5919 dev_err(&ctrl_info->pci_dev->dev,
5920 "error obtaining firmware version\n");
5921 return rc;
5922 }
5923
5924 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
5925 if (rc) {
5926 dev_err(&ctrl_info->pci_dev->dev,
5927 "error updating host wellness\n");
5928 return rc;
5929 }
5930
5931 pqi_schedule_update_time_worker(ctrl_info);
5932
5933 pqi_scan_scsi_devices(ctrl_info);
5934
5935 return 0;
5936}
5937
Kevin Barnett061ef062017-05-03 18:53:05 -05005938#if defined(CONFIG_PM)
5939
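/*
 * Reset the cached and in-memory producer/consumer indices of the
 * admin, event, and operational queues before they are re-created on
 * resume.
 */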
5940static void pqi_reinit_queues(struct pqi_ctrl_info *ctrl_info)
5941{
5942 unsigned int i;
5943 struct pqi_admin_queues *admin_queues;
5944 struct pqi_event_queue *event_queue;
5945
5946 admin_queues = &ctrl_info->admin_queues;
5947 admin_queues->iq_pi_copy = 0;
5948 admin_queues->oq_ci_copy = 0;
5949 *admin_queues->oq_pi = 0;
5950
5951 for (i = 0; i < ctrl_info->num_queue_groups; i++) {
5952 ctrl_info->queue_groups[i].iq_pi_copy[RAID_PATH] = 0;
5953 ctrl_info->queue_groups[i].iq_pi_copy[AIO_PATH] = 0;
5954 ctrl_info->queue_groups[i].oq_ci_copy = 0;
5955
5956 *ctrl_info->queue_groups[i].iq_ci[RAID_PATH] = 0;
5957 *ctrl_info->queue_groups[i].iq_ci[AIO_PATH] = 0;
5958 *ctrl_info->queue_groups[i].oq_pi = 0;
5959 }
5960
5961 event_queue = &ctrl_info->event_queue;
5962 *event_queue->oq_pi = 0;
5963 event_queue->oq_ci_copy = 0;
5964}
5965
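/*
 * Re-initialization path used on resume from suspend: puts the
 * controller back into PQI mode and rebuilds the queues, reusing the
 * memory allocated at probe time.
 */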
5966static int pqi_ctrl_init_resume(struct pqi_ctrl_info *ctrl_info)
5967{
5968 int rc;
5969
5970 rc = pqi_force_sis_mode(ctrl_info);
5971 if (rc)
5972 return rc;
5973
5974 /*
5975 * Wait until the controller is ready to start accepting SIS
5976 * commands.
5977 */
5978 rc = sis_wait_for_ctrl_ready_resume(ctrl_info);
5979 if (rc)
5980 return rc;
5981
5982 /*
5983 * If the function we are about to call succeeds, the
5984 * controller will transition from legacy SIS mode
5985 * into PQI mode.
5986 */
5987 rc = sis_init_base_struct_addr(ctrl_info);
5988 if (rc) {
5989 dev_err(&ctrl_info->pci_dev->dev,
5990 "error initializing PQI mode\n");
5991 return rc;
5992 }
5993
5994 /* Wait for the controller to complete the SIS -> PQI transition. */
5995 rc = pqi_wait_for_pqi_mode_ready(ctrl_info);
5996 if (rc) {
5997 dev_err(&ctrl_info->pci_dev->dev,
5998 "transition to PQI mode failed\n");
5999 return rc;
6000 }
6001
6002 /* From here on, we are running in PQI mode. */
6003 ctrl_info->pqi_mode_enabled = true;
6004 pqi_save_ctrl_mode(ctrl_info, PQI_MODE);
6005
6006 pqi_reinit_queues(ctrl_info);
6007
6008 rc = pqi_create_admin_queues(ctrl_info);
6009 if (rc) {
6010 dev_err(&ctrl_info->pci_dev->dev,
6011 "error creating admin queues\n");
6012 return rc;
6013 }
6014
6015 rc = pqi_create_queues(ctrl_info);
6016 if (rc)
6017 return rc;
6018
6019 pqi_change_irq_mode(ctrl_info, IRQ_MODE_MSIX);
6020
6021 ctrl_info->controller_online = true;
6022 pqi_start_heartbeat_timer(ctrl_info);
6023 pqi_ctrl_unblock_requests(ctrl_info);
6024
6025 rc = pqi_enable_events(ctrl_info);
6026 if (rc) {
6027 dev_err(&ctrl_info->pci_dev->dev,
Kevin Barnettd87d5472017-05-03 18:54:00 -05006028 "error enabling events\n");
Kevin Barnett061ef062017-05-03 18:53:05 -05006029 return rc;
6030 }
6031
6032 rc = pqi_write_driver_version_to_host_wellness(ctrl_info);
6033 if (rc) {
6034 dev_err(&ctrl_info->pci_dev->dev,
6035 "error updating host wellness\n");
6036 return rc;
6037 }
6038
6039 pqi_schedule_update_time_worker(ctrl_info);
6040
6041 pqi_scan_scsi_devices(ctrl_info);
6042
6043 return 0;
6044}
6045
6046#endif /* CONFIG_PM */
6047
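/*
 * Helper to program the PCIe completion timeout range in the Device
 * Control 2 capability register.
 */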
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006048static inline int pqi_set_pcie_completion_timeout(struct pci_dev *pci_dev,
6049 u16 timeout)
6050{
6051 return pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL2,
6052 PCI_EXP_DEVCTL2_COMP_TIMEOUT, timeout);
6053}
6054
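/*
 * PCI-level setup: enable the device, set a 64- or 32-bit DMA mask, map
 * the controller registers, raise the PCIe completion timeout, and
 * enable bus mastering.
 */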
Kevin Barnett6c223762016-06-27 16:41:00 -05006055static int pqi_pci_init(struct pqi_ctrl_info *ctrl_info)
6056{
6057 int rc;
6058 u64 mask;
6059
6060 rc = pci_enable_device(ctrl_info->pci_dev);
6061 if (rc) {
6062 dev_err(&ctrl_info->pci_dev->dev,
6063 "failed to enable PCI device\n");
6064 return rc;
6065 }
6066
6067 if (sizeof(dma_addr_t) > 4)
6068 mask = DMA_BIT_MASK(64);
6069 else
6070 mask = DMA_BIT_MASK(32);
6071
6072 rc = dma_set_mask(&ctrl_info->pci_dev->dev, mask);
6073 if (rc) {
6074 dev_err(&ctrl_info->pci_dev->dev, "failed to set DMA mask\n");
6075 goto disable_device;
6076 }
6077
6078 rc = pci_request_regions(ctrl_info->pci_dev, DRIVER_NAME_SHORT);
6079 if (rc) {
6080 dev_err(&ctrl_info->pci_dev->dev,
6081 "failed to obtain PCI resources\n");
6082 goto disable_device;
6083 }
6084
6085 ctrl_info->iomem_base = ioremap_nocache(pci_resource_start(
6086 ctrl_info->pci_dev, 0),
6087 sizeof(struct pqi_ctrl_registers));
6088 if (!ctrl_info->iomem_base) {
6089 dev_err(&ctrl_info->pci_dev->dev,
6090 "failed to map memory for controller registers\n");
6091 rc = -ENOMEM;
6092 goto release_regions;
6093 }
6094
Kevin Barnetta81ed5f32017-05-03 18:52:34 -05006095#define PCI_EXP_COMP_TIMEOUT_65_TO_210_MS 0x6
6096
6097 /* Increase the PCIe completion timeout. */
6098 rc = pqi_set_pcie_completion_timeout(ctrl_info->pci_dev,
6099 PCI_EXP_COMP_TIMEOUT_65_TO_210_MS);
6100 if (rc) {
6101 dev_err(&ctrl_info->pci_dev->dev,
6102 "failed to set PCIe completion timeout\n");
6103 goto release_regions;
6104 }
6105
Kevin Barnett6c223762016-06-27 16:41:00 -05006106 /* Enable bus mastering. */
6107 pci_set_master(ctrl_info->pci_dev);
6108
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006109 ctrl_info->registers = ctrl_info->iomem_base;
6110 ctrl_info->pqi_registers = &ctrl_info->registers->pqi_registers;
6111
Kevin Barnett6c223762016-06-27 16:41:00 -05006112 pci_set_drvdata(ctrl_info->pci_dev, ctrl_info);
6113
6114 return 0;
6115
6116release_regions:
6117 pci_release_regions(ctrl_info->pci_dev);
6118disable_device:
6119 pci_disable_device(ctrl_info->pci_dev);
6120
6121 return rc;
6122}
6123
6124static void pqi_cleanup_pci_init(struct pqi_ctrl_info *ctrl_info)
6125{
6126 iounmap(ctrl_info->iomem_base);
6127 pci_release_regions(ctrl_info->pci_dev);
Kevin Barnettcbe0c7b2017-05-03 18:53:48 -05006128 if (pci_is_enabled(ctrl_info->pci_dev))
6129 pci_disable_device(ctrl_info->pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006130 pci_set_drvdata(ctrl_info->pci_dev, NULL);
6131}
6132
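/*
 * Allocate and initialize the per-controller context (locks, work
 * items, timers) on the requested NUMA node.
 */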
6133static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node)
6134{
6135 struct pqi_ctrl_info *ctrl_info;
6136
6137 ctrl_info = kzalloc_node(sizeof(struct pqi_ctrl_info),
6138 GFP_KERNEL, numa_node);
6139 if (!ctrl_info)
6140 return NULL;
6141
6142 mutex_init(&ctrl_info->scan_mutex);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05006143 mutex_init(&ctrl_info->lun_reset_mutex);
Kevin Barnett6c223762016-06-27 16:41:00 -05006144
6145 INIT_LIST_HEAD(&ctrl_info->scsi_device_list);
6146 spin_lock_init(&ctrl_info->scsi_device_list_lock);
6147
6148 INIT_WORK(&ctrl_info->event_work, pqi_event_worker);
6149 atomic_set(&ctrl_info->num_interrupts, 0);
6150
6151 INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker);
6152 INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker);
6153
Kevin Barnett98f87662017-05-03 18:53:11 -05006154 init_timer(&ctrl_info->heartbeat_timer);
6155
Kevin Barnett6c223762016-06-27 16:41:00 -05006156 sema_init(&ctrl_info->sync_request_sem,
6157 PQI_RESERVED_IO_SLOTS_SYNCHRONOUS_REQUESTS);
Kevin Barnett7561a7e2017-05-03 18:52:58 -05006158 init_waitqueue_head(&ctrl_info->block_requests_wait);
Kevin Barnett6c223762016-06-27 16:41:00 -05006159
6160 ctrl_info->ctrl_id = atomic_inc_return(&pqi_controller_count) - 1;
Kevin Barnett061ef062017-05-03 18:53:05 -05006161 ctrl_info->irq_mode = IRQ_MODE_NONE;
Kevin Barnett6c223762016-06-27 16:41:00 -05006162 ctrl_info->max_msix_vectors = PQI_MAX_MSIX_VECTORS;
6163
6164 return ctrl_info;
6165}
6166
6167static inline void pqi_free_ctrl_info(struct pqi_ctrl_info *ctrl_info)
6168{
6169 kfree(ctrl_info);
6170}
6171
6172static void pqi_free_interrupts(struct pqi_ctrl_info *ctrl_info)
6173{
Kevin Barnett98bf0612017-05-03 18:52:28 -05006174 pqi_free_irqs(ctrl_info);
6175 pqi_disable_msix_interrupts(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006176}
6177
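/*
 * Release everything owned by the controller context: interrupts,
 * DMA-coherent queue and error buffers, the register mapping, and the
 * context itself.
 */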
6178static void pqi_free_ctrl_resources(struct pqi_ctrl_info *ctrl_info)
6179{
6180 pqi_stop_heartbeat_timer(ctrl_info);
6181 pqi_free_interrupts(ctrl_info);
6182 if (ctrl_info->queue_memory_base)
6183 dma_free_coherent(&ctrl_info->pci_dev->dev,
6184 ctrl_info->queue_memory_length,
6185 ctrl_info->queue_memory_base,
6186 ctrl_info->queue_memory_base_dma_handle);
6187 if (ctrl_info->admin_queue_memory_base)
6188 dma_free_coherent(&ctrl_info->pci_dev->dev,
6189 ctrl_info->admin_queue_memory_length,
6190 ctrl_info->admin_queue_memory_base,
6191 ctrl_info->admin_queue_memory_base_dma_handle);
6192 pqi_free_all_io_requests(ctrl_info);
6193 if (ctrl_info->error_buffer)
6194 dma_free_coherent(&ctrl_info->pci_dev->dev,
6195 ctrl_info->error_buffer_length,
6196 ctrl_info->error_buffer,
6197 ctrl_info->error_buffer_dma_handle);
6198 if (ctrl_info->iomem_base)
6199 pqi_cleanup_pci_init(ctrl_info);
6200 pqi_free_ctrl_info(ctrl_info);
6201}
6202
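/*
 * Tear down a controller: cancel background workers, remove all SCSI
 * devices, unregister from the SCSI subsystem, revert to SIS mode if
 * needed, and free its resources.
 */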
6203static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
6204{
Kevin Barnett061ef062017-05-03 18:53:05 -05006205 pqi_cancel_rescan_worker(ctrl_info);
6206 pqi_cancel_update_time_worker(ctrl_info);
Kevin Barnette57a1f92016-08-31 14:54:47 -05006207 pqi_remove_all_scsi_devices(ctrl_info);
6208 pqi_unregister_scsi(ctrl_info);
Kevin Barnett162d7752017-05-03 18:52:46 -05006209 if (ctrl_info->pqi_mode_enabled)
6210 pqi_revert_to_sis_mode(ctrl_info);
Kevin Barnett6c223762016-06-27 16:41:00 -05006211 pqi_free_ctrl_resources(ctrl_info);
6212}
6213
Kevin Barnettd91d7822017-05-03 18:53:30 -05006214static void pqi_print_ctrl_info(struct pci_dev *pci_dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006215 const struct pci_device_id *id)
6216{
6217 char *ctrl_description;
6218
6219 if (id->driver_data) {
6220 ctrl_description = (char *)id->driver_data;
6221 } else {
6222 switch (id->subvendor) {
6223 case PCI_VENDOR_ID_HP:
6224 ctrl_description = hpe_branded_controller;
6225 break;
6226 case PCI_VENDOR_ID_ADAPTEC2:
6227 default:
6228 ctrl_description = microsemi_branded_controller;
6229 break;
6230 }
6231 }
6232
Kevin Barnettd91d7822017-05-03 18:53:30 -05006233 dev_info(&pci_dev->dev, "%s found\n", ctrl_description);
Kevin Barnett6c223762016-06-27 16:41:00 -05006234}
6235
Kevin Barnettd91d7822017-05-03 18:53:30 -05006236static int pqi_pci_probe(struct pci_dev *pci_dev,
6237 const struct pci_device_id *id)
Kevin Barnett6c223762016-06-27 16:41:00 -05006238{
6239 int rc;
6240 int node;
6241 struct pqi_ctrl_info *ctrl_info;
6242
Kevin Barnettd91d7822017-05-03 18:53:30 -05006243 pqi_print_ctrl_info(pci_dev, id);
Kevin Barnett6c223762016-06-27 16:41:00 -05006244
6245 if (pqi_disable_device_id_wildcards &&
6246 id->subvendor == PCI_ANY_ID &&
6247 id->subdevice == PCI_ANY_ID) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05006248 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006249 "controller not probed because device ID wildcards are disabled\n");
6250 return -ENODEV;
6251 }
6252
6253 if (id->subvendor == PCI_ANY_ID || id->subdevice == PCI_ANY_ID)
Kevin Barnettd91d7822017-05-03 18:53:30 -05006254 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006255 "controller device ID matched using wildcards\n");
6256
Kevin Barnettd91d7822017-05-03 18:53:30 -05006257 node = dev_to_node(&pci_dev->dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006258 if (node == NUMA_NO_NODE)
Kevin Barnettd91d7822017-05-03 18:53:30 -05006259 set_dev_node(&pci_dev->dev, 0);
Kevin Barnett6c223762016-06-27 16:41:00 -05006260
6261 ctrl_info = pqi_alloc_ctrl_info(node);
6262 if (!ctrl_info) {
Kevin Barnettd91d7822017-05-03 18:53:30 -05006263 dev_err(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006264 "failed to allocate controller info block\n");
6265 return -ENOMEM;
6266 }
6267
Kevin Barnettd91d7822017-05-03 18:53:30 -05006268 ctrl_info->pci_dev = pci_dev;
Kevin Barnett6c223762016-06-27 16:41:00 -05006269
6270 rc = pqi_pci_init(ctrl_info);
6271 if (rc)
6272 goto error;
6273
6274 rc = pqi_ctrl_init(ctrl_info);
6275 if (rc)
6276 goto error;
6277
6278 return 0;
6279
6280error:
6281 pqi_remove_ctrl(ctrl_info);
6282
6283 return rc;
6284}
6285
Kevin Barnettd91d7822017-05-03 18:53:30 -05006286static void pqi_pci_remove(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05006287{
6288 struct pqi_ctrl_info *ctrl_info;
6289
Kevin Barnettd91d7822017-05-03 18:53:30 -05006290 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006291 if (!ctrl_info)
6292 return;
6293
6294 pqi_remove_ctrl(ctrl_info);
6295}
6296
Kevin Barnettd91d7822017-05-03 18:53:30 -05006297static void pqi_shutdown(struct pci_dev *pci_dev)
Kevin Barnett6c223762016-06-27 16:41:00 -05006298{
6299 int rc;
6300 struct pqi_ctrl_info *ctrl_info;
6301
Kevin Barnettd91d7822017-05-03 18:53:30 -05006302 ctrl_info = pci_get_drvdata(pci_dev);
Kevin Barnett6c223762016-06-27 16:41:00 -05006303 if (!ctrl_info)
6304 goto error;
6305
6306 /*
6307 * Write all data in the controller's battery-backed cache to
6308 * storage.
6309 */
6310 rc = pqi_flush_cache(ctrl_info);
6311 if (rc == 0)
6312 return;
6313
6314error:
Kevin Barnettd91d7822017-05-03 18:53:30 -05006315 dev_warn(&pci_dev->dev,
Kevin Barnett6c223762016-06-27 16:41:00 -05006316 "unable to flush controller cache\n");
6317}
6318
Kevin Barnett061ef062017-05-03 18:53:05 -05006319#if defined(CONFIG_PM)
6320
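/*
 * Quiesce the controller for suspend: disable events, drain outstanding
 * I/O, flush the write cache, and, unless this is a freeze, save PCI
 * state and enter the requested low-power state.
 */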
6321static int pqi_suspend(struct pci_dev *pci_dev, pm_message_t state)
6322{
6323 struct pqi_ctrl_info *ctrl_info;
6324
6325 ctrl_info = pci_get_drvdata(pci_dev);
6326
6327 pqi_disable_events(ctrl_info);
6328 pqi_cancel_update_time_worker(ctrl_info);
6329 pqi_cancel_rescan_worker(ctrl_info);
6330 pqi_wait_until_scan_finished(ctrl_info);
6331 pqi_wait_until_lun_reset_finished(ctrl_info);
6332 pqi_flush_cache(ctrl_info);
6333 pqi_ctrl_block_requests(ctrl_info);
6334 pqi_ctrl_wait_until_quiesced(ctrl_info);
6335 pqi_wait_until_inbound_queues_empty(ctrl_info);
6336 pqi_ctrl_wait_for_pending_io(ctrl_info);
6337 pqi_stop_heartbeat_timer(ctrl_info);
6338
6339 if (state.event == PM_EVENT_FREEZE)
6340 return 0;
6341
6342 pci_save_state(pci_dev);
6343 pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state));
6344
6345 ctrl_info->controller_online = false;
6346 ctrl_info->pqi_mode_enabled = false;
6347
6348 return 0;
6349}
6350
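/*
 * Resume handler: depending on the power state the device comes back
 * in, either re-arm a single legacy interrupt and unblock requests, or
 * restore PCI state and fully re-initialize the controller.
 */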
6351static int pqi_resume(struct pci_dev *pci_dev)
6352{
6353 int rc;
6354 struct pqi_ctrl_info *ctrl_info;
6355
6356 ctrl_info = pci_get_drvdata(pci_dev);
6357
6358 if (pci_dev->current_state != PCI_D0) {
6359 ctrl_info->max_hw_queue_index = 0;
6360 pqi_free_interrupts(ctrl_info);
6361 pqi_change_irq_mode(ctrl_info, IRQ_MODE_INTX);
6362 rc = request_irq(pci_irq_vector(pci_dev, 0), pqi_irq_handler,
6363 IRQF_SHARED, DRIVER_NAME_SHORT,
6364 &ctrl_info->queue_groups[0]);
6365 if (rc) {
6366 dev_err(&ctrl_info->pci_dev->dev,
6367 "irq %u init failed with error %d\n",
6368 pci_dev->irq, rc);
6369 return rc;
6370 }
6371 pqi_start_heartbeat_timer(ctrl_info);
6372 pqi_ctrl_unblock_requests(ctrl_info);
6373 return 0;
6374 }
6375
6376 pci_set_power_state(pci_dev, PCI_D0);
6377 pci_restore_state(pci_dev);
6378
6379 return pqi_ctrl_init_resume(ctrl_info);
6380}
6381
6382#endif /* CONFIG_PM */
6383
Kevin Barnett6c223762016-06-27 16:41:00 -05006384/* Define the PCI IDs for the controllers that we support. */
6385static const struct pci_device_id pqi_pci_id_table[] = {
6386 {
6387 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05006388 0x152d, 0x8a22)
6389 },
6390 {
6391 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6392 0x152d, 0x8a23)
6393 },
6394 {
6395 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6396 0x152d, 0x8a24)
6397 },
6398 {
6399 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6400 0x152d, 0x8a36)
6401 },
6402 {
6403 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6404 0x152d, 0x8a37)
6405 },
6406 {
6407 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05006408 PCI_VENDOR_ID_ADAPTEC2, 0x0110)
6409 },
6410 {
6411 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05006412 PCI_VENDOR_ID_ADAPTEC2, 0x0605)
Kevin Barnett6c223762016-06-27 16:41:00 -05006413 },
6414 {
6415 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6416 PCI_VENDOR_ID_ADAPTEC2, 0x0800)
6417 },
6418 {
6419 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6420 PCI_VENDOR_ID_ADAPTEC2, 0x0801)
6421 },
6422 {
6423 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6424 PCI_VENDOR_ID_ADAPTEC2, 0x0802)
6425 },
6426 {
6427 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6428 PCI_VENDOR_ID_ADAPTEC2, 0x0803)
6429 },
6430 {
6431 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6432 PCI_VENDOR_ID_ADAPTEC2, 0x0804)
6433 },
6434 {
6435 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6436 PCI_VENDOR_ID_ADAPTEC2, 0x0805)
6437 },
6438 {
6439 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05006440 PCI_VENDOR_ID_ADAPTEC2, 0x0806)
6441 },
6442 {
6443 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05006444 PCI_VENDOR_ID_ADAPTEC2, 0x0900)
6445 },
6446 {
6447 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6448 PCI_VENDOR_ID_ADAPTEC2, 0x0901)
6449 },
6450 {
6451 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6452 PCI_VENDOR_ID_ADAPTEC2, 0x0902)
6453 },
6454 {
6455 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6456 PCI_VENDOR_ID_ADAPTEC2, 0x0903)
6457 },
6458 {
6459 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6460 PCI_VENDOR_ID_ADAPTEC2, 0x0904)
6461 },
6462 {
6463 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6464 PCI_VENDOR_ID_ADAPTEC2, 0x0905)
6465 },
6466 {
6467 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6468 PCI_VENDOR_ID_ADAPTEC2, 0x0906)
6469 },
6470 {
6471 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett7eddabf2017-05-03 18:53:54 -05006472 PCI_VENDOR_ID_ADAPTEC2, 0x0907)
6473 },
6474 {
6475 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6476 PCI_VENDOR_ID_ADAPTEC2, 0x0908)
6477 },
6478 {
6479 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6480 PCI_VENDOR_ID_ADAPTEC2, 0x1200)
6481 },
6482 {
6483 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6484 PCI_VENDOR_ID_ADAPTEC2, 0x1201)
6485 },
6486 {
6487 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6488 PCI_VENDOR_ID_ADAPTEC2, 0x1202)
6489 },
6490 {
6491 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6492 PCI_VENDOR_ID_ADAPTEC2, 0x1280)
6493 },
6494 {
6495 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6496 PCI_VENDOR_ID_ADAPTEC2, 0x1281)
6497 },
6498 {
6499 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6500 PCI_VENDOR_ID_ADAPTEC2, 0x1300)
6501 },
6502 {
6503 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6504 PCI_VENDOR_ID_ADAPTEC2, 0x1301)
6505 },
6506 {
6507 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6508 PCI_VENDOR_ID_ADAPTEC2, 0x1380)
6509 },
6510 {
6511 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6512 PCI_VENDOR_ID_HP, 0x0600)
6513 },
6514 {
6515 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6516 PCI_VENDOR_ID_HP, 0x0601)
6517 },
6518 {
6519 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6520 PCI_VENDOR_ID_HP, 0x0602)
6521 },
6522 {
6523 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6524 PCI_VENDOR_ID_HP, 0x0603)
6525 },
6526 {
6527 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6528 PCI_VENDOR_ID_HP, 0x0604)
6529 },
6530 {
6531 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6532 PCI_VENDOR_ID_HP, 0x0606)
6533 },
6534 {
6535 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6536 PCI_VENDOR_ID_HP, 0x0650)
6537 },
6538 {
6539 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6540 PCI_VENDOR_ID_HP, 0x0651)
6541 },
6542 {
6543 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6544 PCI_VENDOR_ID_HP, 0x0652)
6545 },
6546 {
6547 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6548 PCI_VENDOR_ID_HP, 0x0653)
6549 },
6550 {
6551 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6552 PCI_VENDOR_ID_HP, 0x0654)
6553 },
6554 {
6555 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6556 PCI_VENDOR_ID_HP, 0x0655)
6557 },
6558 {
6559 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6560 PCI_VENDOR_ID_HP, 0x0656)
6561 },
6562 {
6563 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6564 PCI_VENDOR_ID_HP, 0x0657)
6565 },
6566 {
6567 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6568 PCI_VENDOR_ID_HP, 0x0700)
6569 },
6570 {
6571 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6572 PCI_VENDOR_ID_HP, 0x0701)
6573 },
6574 {
6575 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
Kevin Barnett6c223762016-06-27 16:41:00 -05006576 PCI_VENDOR_ID_HP, 0x1001)
6577 },
6578 {
6579 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6580 PCI_VENDOR_ID_HP, 0x1100)
6581 },
6582 {
6583 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6584 PCI_VENDOR_ID_HP, 0x1101)
6585 },
6586 {
6587 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6588 PCI_VENDOR_ID_HP, 0x1102)
6589 },
6590 {
6591 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6592 PCI_VENDOR_ID_HP, 0x1150)
6593 },
6594 {
6595 PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
6596 PCI_ANY_ID, PCI_ANY_ID)
6597 },
6598 { 0 }
6599};
6600
6601MODULE_DEVICE_TABLE(pci, pqi_pci_id_table);
6602
6603static struct pci_driver pqi_pci_driver = {
6604 .name = DRIVER_NAME_SHORT,
6605 .id_table = pqi_pci_id_table,
6606 .probe = pqi_pci_probe,
6607 .remove = pqi_pci_remove,
6608 .shutdown = pqi_shutdown,
Kevin Barnett061ef062017-05-03 18:53:05 -05006609#if defined(CONFIG_PM)
6610 .suspend = pqi_suspend,
6611 .resume = pqi_resume,
6612#endif
Kevin Barnett6c223762016-06-27 16:41:00 -05006613};
6614
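/*
 * Module entry point: attach the SAS transport template and register
 * the PCI driver.
 */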
6615static int __init pqi_init(void)
6616{
6617 int rc;
6618
6619 pr_info(DRIVER_NAME "\n");
6620
6621 pqi_sas_transport_template =
6622 sas_attach_transport(&pqi_sas_transport_functions);
6623 if (!pqi_sas_transport_template)
6624 return -ENODEV;
6625
6626 rc = pci_register_driver(&pqi_pci_driver);
6627 if (rc)
6628 sas_release_transport(pqi_sas_transport_template);
6629
6630 return rc;
6631}
6632
6633static void __exit pqi_cleanup(void)
6634{
6635 pci_unregister_driver(&pqi_pci_driver);
6636 sas_release_transport(pqi_sas_transport_template);
6637}
6638
6639module_init(pqi_init);
6640module_exit(pqi_cleanup);
6641
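/*
 * Never called; exists only so that the BUILD_BUG_ON() checks below
 * verify the PQI/SIS structure layouts at compile time.
 */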
6642static void __attribute__((unused)) verify_structures(void)
6643{
6644 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6645 sis_host_to_ctrl_doorbell) != 0x20);
6646 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6647 sis_interrupt_mask) != 0x34);
6648 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6649 sis_ctrl_to_host_doorbell) != 0x9c);
6650 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6651 sis_ctrl_to_host_doorbell_clear) != 0xa0);
6652 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnettff6abb72016-08-31 14:54:41 -05006653 sis_driver_scratch) != 0xb0);
6654 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
Kevin Barnett6c223762016-06-27 16:41:00 -05006655 sis_firmware_status) != 0xbc);
6656 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6657 sis_mailbox) != 0x1000);
6658 BUILD_BUG_ON(offsetof(struct pqi_ctrl_registers,
6659 pqi_registers) != 0x4000);
6660
6661 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6662 iu_type) != 0x0);
6663 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6664 iu_length) != 0x2);
6665 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6666 response_queue_id) != 0x4);
6667 BUILD_BUG_ON(offsetof(struct pqi_iu_header,
6668 work_area) != 0x6);
6669 BUILD_BUG_ON(sizeof(struct pqi_iu_header) != 0x8);
6670
6671 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6672 status) != 0x0);
6673 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6674 service_response) != 0x1);
6675 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6676 data_present) != 0x2);
6677 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6678 reserved) != 0x3);
6679 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6680 residual_count) != 0x4);
6681 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6682 data_length) != 0x8);
6683 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6684 reserved1) != 0xa);
6685 BUILD_BUG_ON(offsetof(struct pqi_aio_error_info,
6686 data) != 0xc);
6687 BUILD_BUG_ON(sizeof(struct pqi_aio_error_info) != 0x10c);
6688
6689 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6690 data_in_result) != 0x0);
6691 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6692 data_out_result) != 0x1);
6693 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6694 reserved) != 0x2);
6695 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6696 status) != 0x5);
6697 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6698 status_qualifier) != 0x6);
6699 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6700 sense_data_length) != 0x8);
6701 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6702 response_data_length) != 0xa);
6703 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6704 data_in_transferred) != 0xc);
6705 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6706 data_out_transferred) != 0x10);
6707 BUILD_BUG_ON(offsetof(struct pqi_raid_error_info,
6708 data) != 0x14);
6709 BUILD_BUG_ON(sizeof(struct pqi_raid_error_info) != 0x114);
6710
6711 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6712 signature) != 0x0);
6713 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6714 function_and_status_code) != 0x8);
6715 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6716 max_admin_iq_elements) != 0x10);
6717 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6718 max_admin_oq_elements) != 0x11);
6719 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6720 admin_iq_element_length) != 0x12);
6721 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6722 admin_oq_element_length) != 0x13);
6723 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6724 max_reset_timeout) != 0x14);
6725 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6726 legacy_intx_status) != 0x18);
6727 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6728 legacy_intx_mask_set) != 0x1c);
6729 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6730 legacy_intx_mask_clear) != 0x20);
6731 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6732 device_status) != 0x40);
6733 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6734 admin_iq_pi_offset) != 0x48);
6735 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6736 admin_oq_ci_offset) != 0x50);
6737 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6738 admin_iq_element_array_addr) != 0x58);
6739 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6740 admin_oq_element_array_addr) != 0x60);
6741 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6742 admin_iq_ci_addr) != 0x68);
6743 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6744 admin_oq_pi_addr) != 0x70);
6745 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6746 admin_iq_num_elements) != 0x78);
6747 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6748 admin_oq_num_elements) != 0x79);
6749 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6750 admin_queue_int_msg_num) != 0x7a);
6751 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6752 device_error) != 0x80);
6753 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6754 error_details) != 0x88);
6755 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6756 device_reset) != 0x90);
6757 BUILD_BUG_ON(offsetof(struct pqi_device_registers,
6758 power_action) != 0x94);
6759 BUILD_BUG_ON(sizeof(struct pqi_device_registers) != 0x100);
6760
6761 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6762 header.iu_type) != 0);
6763 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6764 header.iu_length) != 2);
6765 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6766 header.work_area) != 6);
6767 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6768 request_id) != 8);
6769 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6770 function_code) != 10);
6771 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6772 data.report_device_capability.buffer_length) != 44);
6773 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6774 data.report_device_capability.sg_descriptor) != 48);
6775 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6776 data.create_operational_iq.queue_id) != 12);
6777 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6778 data.create_operational_iq.element_array_addr) != 16);
6779 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6780 data.create_operational_iq.ci_addr) != 24);
6781 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6782 data.create_operational_iq.num_elements) != 32);
6783 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6784 data.create_operational_iq.element_length) != 34);
6785 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6786 data.create_operational_iq.queue_protocol) != 36);
6787 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6788 data.create_operational_oq.queue_id) != 12);
6789 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6790 data.create_operational_oq.element_array_addr) != 16);
6791 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6792 data.create_operational_oq.pi_addr) != 24);
6793 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6794 data.create_operational_oq.num_elements) != 32);
6795 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6796 data.create_operational_oq.element_length) != 34);
6797 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6798 data.create_operational_oq.queue_protocol) != 36);
6799 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6800 data.create_operational_oq.int_msg_num) != 40);
6801 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6802 data.create_operational_oq.coalescing_count) != 42);
6803 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6804 data.create_operational_oq.min_coalescing_time) != 44);
6805 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6806 data.create_operational_oq.max_coalescing_time) != 48);
6807 BUILD_BUG_ON(offsetof(struct pqi_general_admin_request,
6808 data.delete_operational_queue.queue_id) != 12);
6809 BUILD_BUG_ON(sizeof(struct pqi_general_admin_request) != 64);
6810 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6811 data.create_operational_iq) != 64 - 11);
6812 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6813 data.create_operational_oq) != 64 - 11);
6814 BUILD_BUG_ON(FIELD_SIZEOF(struct pqi_general_admin_request,
6815 data.delete_operational_queue) != 64 - 11);
6816
6817 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6818 header.iu_type) != 0);
6819 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6820 header.iu_length) != 2);
6821 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6822 header.work_area) != 6);
6823 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6824 request_id) != 8);
6825 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6826 function_code) != 10);
6827 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6828 status) != 11);
6829 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6830 data.create_operational_iq.status_descriptor) != 12);
6831 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6832 data.create_operational_iq.iq_pi_offset) != 16);
6833 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6834 data.create_operational_oq.status_descriptor) != 12);
6835 BUILD_BUG_ON(offsetof(struct pqi_general_admin_response,
6836 data.create_operational_oq.oq_ci_offset) != 16);
6837 BUILD_BUG_ON(sizeof(struct pqi_general_admin_response) != 64);
6838
6839 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6840 header.iu_type) != 0);
6841 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6842 header.iu_length) != 2);
6843 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6844 header.response_queue_id) != 4);
6845 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6846 header.work_area) != 6);
6847 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6848 request_id) != 8);
6849 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6850 nexus_id) != 10);
6851 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6852 buffer_length) != 12);
6853 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6854 lun_number) != 16);
6855 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6856 protocol_specific) != 24);
6857 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6858 error_index) != 27);
6859 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6860 cdb) != 32);
6861 BUILD_BUG_ON(offsetof(struct pqi_raid_path_request,
6862 sg_descriptors) != 64);
6863 BUILD_BUG_ON(sizeof(struct pqi_raid_path_request) !=
6864 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6865
6866 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6867 header.iu_type) != 0);
6868 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6869 header.iu_length) != 2);
6870 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6871 header.response_queue_id) != 4);
6872 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6873 header.work_area) != 6);
6874 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6875 request_id) != 8);
6876 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6877 nexus_id) != 12);
6878 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6879 buffer_length) != 16);
6880 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6881 data_encryption_key_index) != 22);
6882 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6883 encrypt_tweak_lower) != 24);
6884 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6885 encrypt_tweak_upper) != 28);
6886 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6887 cdb) != 32);
6888 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6889 error_index) != 48);
6890 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6891 num_sg_descriptors) != 50);
6892 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6893 cdb_length) != 51);
6894 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6895 lun_number) != 52);
6896 BUILD_BUG_ON(offsetof(struct pqi_aio_path_request,
6897 sg_descriptors) != 64);
6898 BUILD_BUG_ON(sizeof(struct pqi_aio_path_request) !=
6899 PQI_OPERATIONAL_IQ_ELEMENT_LENGTH);
6900
6901 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6902 header.iu_type) != 0);
6903 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6904 header.iu_length) != 2);
6905 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6906 request_id) != 8);
6907 BUILD_BUG_ON(offsetof(struct pqi_io_response,
6908 error_index) != 10);
6909
6910 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6911 header.iu_type) != 0);
6912 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6913 header.iu_length) != 2);
6914 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6915 header.response_queue_id) != 4);
6916 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6917 request_id) != 8);
6918 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6919 data.report_event_configuration.buffer_length) != 12);
6920 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6921 data.report_event_configuration.sg_descriptors) != 16);
6922 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6923 data.set_event_configuration.global_event_oq_id) != 10);
6924 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6925 data.set_event_configuration.buffer_length) != 12);
6926 BUILD_BUG_ON(offsetof(struct pqi_general_management_request,
6927 data.set_event_configuration.sg_descriptors) != 16);
6928
6929 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6930 max_inbound_iu_length) != 6);
6931 BUILD_BUG_ON(offsetof(struct pqi_iu_layer_descriptor,
6932 max_outbound_iu_length) != 14);
6933 BUILD_BUG_ON(sizeof(struct pqi_iu_layer_descriptor) != 16);
6934
6935 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6936 data_length) != 0);
6937 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6938 iq_arbitration_priority_support_bitmask) != 8);
6939 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6940 maximum_aw_a) != 9);
6941 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6942 maximum_aw_b) != 10);
6943 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6944 maximum_aw_c) != 11);
6945 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6946 max_inbound_queues) != 16);
6947 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6948 max_elements_per_iq) != 18);
6949 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6950 max_iq_element_length) != 24);
6951 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6952 min_iq_element_length) != 26);
6953 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6954 max_outbound_queues) != 30);
6955 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6956 max_elements_per_oq) != 32);
6957 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6958 intr_coalescing_time_granularity) != 34);
6959 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6960 max_oq_element_length) != 36);
6961 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6962 min_oq_element_length) != 38);
6963 BUILD_BUG_ON(offsetof(struct pqi_device_capability,
6964 iu_layer_descriptors) != 64);
6965 BUILD_BUG_ON(sizeof(struct pqi_device_capability) != 576);
6966
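	/* An event descriptor is a 16-bit event type plus a 16-bit OQ id. */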
6967 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6968 event_type) != 0);
6969 BUILD_BUG_ON(offsetof(struct pqi_event_descriptor,
6970 oq_id) != 2);
6971 BUILD_BUG_ON(sizeof(struct pqi_event_descriptor) != 4);
6972
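	/*
	 * The event configuration buffer stores its descriptor count at
	 * byte 2 and the packed descriptor array at byte 4.
	 */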
6973 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6974 num_event_descriptors) != 2);
6975 BUILD_BUG_ON(offsetof(struct pqi_event_config,
6976 descriptors) != 4);
6977
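	/* The supported-event table and its compile-time count must match. */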
Kevin Barnett061ef062017-05-03 18:53:05 -05006978 BUILD_BUG_ON(PQI_NUM_SUPPORTED_EVENTS !=
6979 ARRAY_SIZE(pqi_supported_event_types));
6980
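	/*
	 * Event responses and the acknowledgements the driver sends back
	 * share the same event_type/event_id/additional_event_id offsets.
	 */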
Kevin Barnett6c223762016-06-27 16:41:00 -05006981 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6982 header.iu_type) != 0);
6983 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6984 header.iu_length) != 2);
6985 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6986 event_type) != 8);
6987 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6988 event_id) != 10);
6989 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6990 additional_event_id) != 12);
6991 BUILD_BUG_ON(offsetof(struct pqi_event_response,
6992 data) != 16);
6993 BUILD_BUG_ON(sizeof(struct pqi_event_response) != 32);
6994
6995 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6996 header.iu_type) != 0);
6997 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
6998 header.iu_length) != 2);
6999 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7000 event_type) != 8);
7001 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7002 event_id) != 10);
7003 BUILD_BUG_ON(offsetof(struct pqi_event_acknowledge_request,
7004 additional_event_id) != 12);
7005 BUILD_BUG_ON(sizeof(struct pqi_event_acknowledge_request) != 16);
7006
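	/*
	 * Task management IUs: a fixed 32-byte request and a 16-byte
	 * response, with the response code in the final byte.
	 */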
7007 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7008 header.iu_type) != 0);
7009 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7010 header.iu_length) != 2);
7011 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7012 request_id) != 8);
7013 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7014 nexus_id) != 10);
7015 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7016 lun_number) != 16);
7017 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7018 protocol_specific) != 24);
7019 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7020 outbound_queue_id_to_manage) != 26);
7021 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7022 request_id_to_manage) != 28);
7023 BUILD_BUG_ON(offsetof(struct pqi_task_management_request,
7024 task_management_function) != 30);
7025 BUILD_BUG_ON(sizeof(struct pqi_task_management_request) != 32);
7026
7027 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7028 header.iu_type) != 0);
7029 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7030 header.iu_length) != 2);
7031 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7032 request_id) != 8);
7033 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7034 nexus_id) != 10);
7035 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7036 additional_response_info) != 12);
7037 BUILD_BUG_ON(offsetof(struct pqi_task_management_response,
7038 response_code) != 15);
7039 BUILD_BUG_ON(sizeof(struct pqi_task_management_response) != 16);
7040
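	/*
	 * BMIC IDENTIFY CONTROLLER is a legacy (pre-PQI) structure; only the
	 * fields the driver actually reads are pinned to their expected
	 * offsets here.
	 */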
7041 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7042 configured_logical_drive_count) != 0);
7043 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7044 configuration_signature) != 1);
7045 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7046 firmware_version) != 5);
7047 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7048 extended_logical_unit_count) != 154);
7049 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7050 firmware_build_number) != 190);
7051 BUILD_BUG_ON(offsetof(struct bmic_identify_controller,
7052 controller_mode) != 292);
7053
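	/*
	 * BMIC IDENTIFY PHYSICAL DEVICE is 2560 bytes; spot-check the fields
	 * used for bay/path reporting and queue depth limits.
	 */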
Kevin Barnett1be42f42017-05-03 18:53:42 -05007054 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7055 phys_bay_in_box) != 115);
7056 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7057 device_type) != 120);
7058 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7059 redundant_path_present_map) != 1736);
7060 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7061 active_path_number) != 1738);
7062 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7063 alternate_paths_phys_connector) != 1739);
7064 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7065 alternate_paths_phys_box_on_port) != 1755);
7066 BUILD_BUG_ON(offsetof(struct bmic_identify_physical_device,
7067 current_queue_depth_limit) != 1796);
7068 BUILD_BUG_ON(sizeof(struct bmic_identify_physical_device) != 2560);
7069
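	/*
	 * Admin queues are capped at 255 elements, every element length must
	 * be a multiple of the PQI alignment, and operational queue elements
	 * may not exceed the spec maximum of 1048560 bytes.
	 */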
Kevin Barnett6c223762016-06-27 16:41:00 -05007070 BUILD_BUG_ON(PQI_ADMIN_IQ_NUM_ELEMENTS > 255);
7071 BUILD_BUG_ON(PQI_ADMIN_OQ_NUM_ELEMENTS > 255);
7072 BUILD_BUG_ON(PQI_ADMIN_IQ_ELEMENT_LENGTH %
7073 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7074 BUILD_BUG_ON(PQI_ADMIN_OQ_ELEMENT_LENGTH %
7075 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7076 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH > 1048560);
7077 BUILD_BUG_ON(PQI_OPERATIONAL_IQ_ELEMENT_LENGTH %
7078 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7079 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH > 1048560);
7080 BUILD_BUG_ON(PQI_OPERATIONAL_OQ_ELEMENT_LENGTH %
7081 PQI_QUEUE_ELEMENT_LENGTH_ALIGNMENT != 0);
7082
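	/*
	 * The slots reserved for internal requests must leave at least one
	 * slot free for normal I/O.
	 */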
7083 BUILD_BUG_ON(PQI_RESERVED_IO_SLOTS >= PQI_MAX_OUTSTANDING_REQUESTS);
7084}