/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *	  by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
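
/*
 * Note (inferred from the initializers above; field meanings are assumed
 * from the struct ipr_chip_t definition in ipr.h): each ipr_chip[] entry
 * maps a PCI vendor/device ID pair to a SIS type (IPR_SIS32 vs. IPR_SIS64)
 * and to one of the register layouts defined in ipr_chip_cfg[].
 */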

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
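
/*
 * Usage example (for illustration only; values are arbitrary): the
 * parameters above are set at load time using their module_param_named()
 * names, e.g.
 *
 *	modprobe ipr debug=1 fastfail=1 max_devs=1024
 *
 * Defaults and ranges are documented in the MODULE_PARM_DESC() strings.
 */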

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1 ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St V1S2 ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
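
/*
 * Note on ipr_trc_hook() above (a sketch based on the code; the trace[]
 * array and IPR_TRACE_INDEX_MASK come from ipr.h): the index is taken from
 * an atomic counter masked to the buffer size, so the trace behaves as a
 * lockless circular buffer whose oldest entries are overwritten, and the
 * trailing wmb() orders the entry fields before later stores.
 */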

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}
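
/*
 * Note (based on the code above): the trailing readl() of the sense
 * interrupt register forces the posted MMIO writes that mask and clear
 * the interrupts to reach the adapter before the caller proceeds.
 */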

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}
1009
1010/**
1011 * ipr_internal_cmd_done - Op done function for an internally generated op.
1012 * @ipr_cmd: ipr command struct
1013 *
1014 * This function is the op done function for an internally generated,
1015 * blocking op. It simply wakes the sleeping thread.
1016 *
1017 * Return value:
1018 * none
1019 **/
1020static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1021{
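	/*
	 * Commands chained to a sibling simply clear the back-pointer;
	 * standalone blocking commands wake the thread sleeping in
	 * ipr_send_blocking_cmd().
	 */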
1022 if (ipr_cmd->sibling)
1023 ipr_cmd->sibling = NULL;
1024 else
1025 complete(&ipr_cmd->completion);
1026}
1027
1028/**
Wayne Boyera32c0552010-02-19 13:23:36 -08001029 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1030 * @ipr_cmd: ipr command struct
1031 * @dma_addr: dma address
1032 * @len: transfer length
1033 * @flags: ioadl flag value
1034 *
1035 * This function initializes an ioadl in the case where there is only a single
1036 * descriptor.
1037 *
1038 * Return value:
1039 * nothing
1040 **/
1041static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1042 u32 len, int flags)
1043{
1044 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1045 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1046
1047 ipr_cmd->dma_use_sg = 1;
1048
1049 if (ipr_cmd->ioa_cfg->sis64) {
1050 ioadl64->flags = cpu_to_be32(flags);
1051 ioadl64->data_len = cpu_to_be32(len);
1052 ioadl64->address = cpu_to_be64(dma_addr);
1053
1054 ipr_cmd->ioarcb.ioadl_len =
1055 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1056 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1057 } else {
1058 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1059 ioadl->address = cpu_to_be32(dma_addr);
1060
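		/*
		 * 32-bit IOAs describe read buffers with the separate read
		 * ioadl/length fields; everything else uses the common
		 * ioadl_len/data_transfer_length fields.
		 */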
1061 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1062 ipr_cmd->ioarcb.read_ioadl_len =
1063 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1064 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1065 } else {
1066 ipr_cmd->ioarcb.ioadl_len =
1067 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1068 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1069 }
1070 }
1071}
1072
1073/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1075 * @ipr_cmd: ipr command struct
1076 * @timeout_func: function to invoke if command times out
1077 * @timeout: timeout
1078 *
1079 * Return value:
1080 * none
1081 **/
1082static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
Kees Cook738c6ec2017-08-18 16:53:24 -07001083 void (*timeout_func) (struct timer_list *),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084 u32 timeout)
1085{
1086 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1087
1088 init_completion(&ipr_cmd->completion);
1089 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1090
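	/*
	 * The caller holds host_lock. Drop it while sleeping so the
	 * interrupt path can run the done routine, then re-acquire it
	 * before returning.
	 */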
1091 spin_unlock_irq(ioa_cfg->host->host_lock);
1092 wait_for_completion(&ipr_cmd->completion);
1093 spin_lock_irq(ioa_cfg->host->host_lock);
1094}
1095
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001096static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1097{
Brian King3f1c0582015-07-14 11:41:33 -05001098 unsigned int hrrq;
1099
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001100 if (ioa_cfg->hrrq_num == 1)
Brian King3f1c0582015-07-14 11:41:33 -05001101 hrrq = 0;
1102 else {
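		/*
		 * HRRQ 0 (IPR_INIT_HRRQ) is reserved for internal commands,
		 * so spread the remaining work round-robin across queues
		 * 1..hrrq_num-1.
		 */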
1103 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1104 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1105 }
1106 return hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001107}
1108
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109/**
1110 * ipr_send_hcam - Send an HCAM to the adapter.
1111 * @ioa_cfg: ioa config struct
1112 * @type: HCAM type
1113 * @hostrcb: hostrcb struct
1114 *
1115 * This function will send a Host Controlled Async command to the adapter.
1116 * If HCAMs are currently not allowed to be issued to the adapter, it will
1117 * place the hostrcb on the free queue.
1118 *
1119 * Return value:
1120 * none
1121 **/
1122static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1123 struct ipr_hostrcb *hostrcb)
1124{
1125 struct ipr_cmnd *ipr_cmd;
1126 struct ipr_ioarcb *ioarcb;
1127
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001128 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001130 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1132
1133 ipr_cmd->u.hostrcb = hostrcb;
1134 ioarcb = &ipr_cmd->ioarcb;
1135
1136 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1137 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1138 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1139 ioarcb->cmd_pkt.cdb[1] = type;
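		/* CDB bytes 7-8 carry the hostrcb buffer length, MSB first. */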
1140 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1141 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1142
Wayne Boyera32c0552010-02-19 13:23:36 -08001143 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1144 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145
1146 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1147 ipr_cmd->done = ipr_process_ccn;
1148 else
1149 ipr_cmd->done = ipr_process_error;
1150
1151 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1152
Wayne Boyera32c0552010-02-19 13:23:36 -08001153 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154 } else {
1155 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1156 }
1157}
1158
1159/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001160 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001162 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 *
1164 * Return value:
1165 * none
1166 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001167static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001169 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001170 case IPR_PROTO_SATA:
1171 case IPR_PROTO_SAS_STP:
1172 res->ata_class = ATA_DEV_ATA;
1173 break;
1174 case IPR_PROTO_SATA_ATAPI:
1175 case IPR_PROTO_SAS_STP_ATAPI:
1176 res->ata_class = ATA_DEV_ATAPI;
1177 break;
1178 default:
1179 res->ata_class = ATA_DEV_UNKNOWN;
1180 break;
	}
1182}
1183
1184/**
1185 * ipr_init_res_entry - Initialize a resource entry struct.
1186 * @res: resource entry struct
1187 * @cfgtew: config table entry wrapper struct
1188 *
1189 * Return value:
1190 * none
1191 **/
1192static void ipr_init_res_entry(struct ipr_resource_entry *res,
1193 struct ipr_config_table_entry_wrapper *cfgtew)
1194{
1195 int found = 0;
1196 unsigned int proto;
1197 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1198 struct ipr_resource_entry *gscsi_res = NULL;
1199
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001200 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 res->in_erp = 0;
1202 res->add_to_ml = 0;
1203 res->del_from_ml = 0;
1204 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06001205 res->reset_occurred = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001207 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001208
1209 if (ioa_cfg->sis64) {
1210 proto = cfgtew->u.cfgte64->proto;
Brian King359d96e2015-06-11 20:45:20 -05001211 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1212 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001213 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001214 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001215
1216 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1217 sizeof(res->res_path));
1218
1219 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001220 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1221 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001222 res->lun = scsilun_to_int(&res->dev_lun);
1223
1224 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1225 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1226 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1227 found = 1;
1228 res->target = gscsi_res->target;
1229 break;
1230 }
1231 }
1232 if (!found) {
1233 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1234 ioa_cfg->max_devs_supported);
1235 set_bit(res->target, ioa_cfg->target_ids);
1236 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001237 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1238 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1239 res->target = 0;
1240 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1241 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1242 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1243 ioa_cfg->max_devs_supported);
1244 set_bit(res->target, ioa_cfg->array_ids);
1245 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1246 res->bus = IPR_VSET_VIRTUAL_BUS;
1247 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1248 ioa_cfg->max_devs_supported);
1249 set_bit(res->target, ioa_cfg->vset_ids);
1250 } else {
1251 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1252 ioa_cfg->max_devs_supported);
1253 set_bit(res->target, ioa_cfg->target_ids);
1254 }
1255 } else {
1256 proto = cfgtew->u.cfgte->proto;
1257 res->qmodel = IPR_QUEUEING_MODEL(res);
1258 res->flags = cfgtew->u.cfgte->flags;
1259 if (res->flags & IPR_IS_IOA_RESOURCE)
1260 res->type = IPR_RES_TYPE_IOAFP;
1261 else
1262 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1263
1264 res->bus = cfgtew->u.cfgte->res_addr.bus;
1265 res->target = cfgtew->u.cfgte->res_addr.target;
1266 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001267 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001268 }
1269
1270 ipr_update_ata_class(res, proto);
1271}
1272
1273/**
1274 * ipr_is_same_device - Determine if two devices are the same.
1275 * @res: resource entry struct
1276 * @cfgtew: config table entry wrapper struct
1277 *
1278 * Return value:
1279 * 1 if the devices are the same / 0 otherwise
1280 **/
1281static int ipr_is_same_device(struct ipr_resource_entry *res,
1282 struct ipr_config_table_entry_wrapper *cfgtew)
1283{
1284 if (res->ioa_cfg->sis64) {
1285 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1286 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001287 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001288 sizeof(cfgtew->u.cfgte64->lun))) {
1289 return 1;
1290 }
1291 } else {
1292 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1293 res->target == cfgtew->u.cfgte->res_addr.target &&
1294 res->lun == cfgtew->u.cfgte->res_addr.lun)
1295 return 1;
1296 }
1297
1298 return 0;
1299}
1300
1301/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001302 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001303 * @res_path: resource path
 * @buffer: buffer
Brian Kingb3b3b402013-01-11 17:43:49 -06001305 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001306 *
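 * Example (illustrative): a resource path beginning {0x00, 0x01, 0xff, ...}
 * is formatted as "00-01".
 *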
1307 * Return value:
1308 * pointer to buffer
1309 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001310static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001311{
1312 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001313 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001314
Wayne Boyer46d74562010-08-11 07:15:17 -07001315 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001316 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1317 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1318 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001319
1320 return buffer;
1321}
1322
1323/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001324 * ipr_format_res_path - Format the resource path for printing.
1325 * @ioa_cfg: ioa config struct
1326 * @res_path: resource path
 * @buffer: buffer
1328 * @len: length of buffer provided
1329 *
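 * Example (illustrative): on SCSI host 2, a path of {0x00, 0x01, 0xff, ...}
 * is formatted as "2/00-01".
 *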
1330 * Return value:
1331 * pointer to buffer
1332 **/
1333static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1334 u8 *res_path, char *buffer, int len)
1335{
1336 char *p = buffer;
1337
1338 *p = '\0';
1339 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1340 __ipr_format_res_path(res_path, p, len - (buffer - p));
1341 return buffer;
1342}
1343
1344/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001345 * ipr_update_res_entry - Update the resource entry.
1346 * @res: resource entry struct
1347 * @cfgtew: config table entry wrapper struct
1348 *
1349 * Return value:
1350 * none
1351 **/
1352static void ipr_update_res_entry(struct ipr_resource_entry *res,
1353 struct ipr_config_table_entry_wrapper *cfgtew)
1354{
1355 char buffer[IPR_MAX_RES_PATH_LENGTH];
1356 unsigned int proto;
1357 int new_path = 0;
1358
1359 if (res->ioa_cfg->sis64) {
Brian King359d96e2015-06-11 20:45:20 -05001360 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1361 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer75576bb2010-07-14 10:50:14 -07001362 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001363
1364 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1365 sizeof(struct ipr_std_inq_data));
1366
1367 res->qmodel = IPR_QUEUEING_MODEL64(res);
1368 proto = cfgtew->u.cfgte64->proto;
1369 res->res_handle = cfgtew->u.cfgte64->res_handle;
1370 res->dev_id = cfgtew->u.cfgte64->dev_id;
1371
1372 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1373 sizeof(res->dev_lun.scsi_lun));
1374
1375 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1376 sizeof(res->res_path))) {
1377 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1378 sizeof(res->res_path));
1379 new_path = 1;
1380 }
1381
1382 if (res->sdev && new_path)
1383 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001384 ipr_format_res_path(res->ioa_cfg,
1385 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001386 } else {
1387 res->flags = cfgtew->u.cfgte->flags;
1388 if (res->flags & IPR_IS_IOA_RESOURCE)
1389 res->type = IPR_RES_TYPE_IOAFP;
1390 else
1391 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1392
1393 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1394 sizeof(struct ipr_std_inq_data));
1395
1396 res->qmodel = IPR_QUEUEING_MODEL(res);
1397 proto = cfgtew->u.cfgte->proto;
1398 res->res_handle = cfgtew->u.cfgte->res_handle;
1399 }
1400
1401 ipr_update_ata_class(res, proto);
1402}
1403
1404/**
1405 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1406 * for the resource.
1407 * @res: resource entry struct
1409 *
1410 * Return value:
1411 * none
1412 **/
1413static void ipr_clear_res_target(struct ipr_resource_entry *res)
1414{
1415 struct ipr_resource_entry *gscsi_res = NULL;
1416 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1417
1418 if (!ioa_cfg->sis64)
1419 return;
1420
1421 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1422 clear_bit(res->target, ioa_cfg->array_ids);
1423 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1424 clear_bit(res->target, ioa_cfg->vset_ids);
1425 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1426 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1427 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1428 return;
1429 clear_bit(res->target, ioa_cfg->target_ids);
1430
1431 } else if (res->bus == 0)
1432 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433}
1434
1435/**
1436 * ipr_handle_config_change - Handle a config change from the adapter
1437 * @ioa_cfg: ioa config struct
1438 * @hostrcb: hostrcb
1439 *
1440 * Return value:
1441 * none
1442 **/
1443static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001444 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445{
1446 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001447 struct ipr_config_table_entry_wrapper cfgtew;
1448 __be32 cc_res_handle;
1449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 u32 is_ndn = 1;
1451
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001452 if (ioa_cfg->sis64) {
1453 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1454 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1455 } else {
1456 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1457 cc_res_handle = cfgtew.u.cfgte->res_handle;
1458 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459
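	/*
	 * is_ndn stays set if the handle is not found below, i.e. the
	 * notification is for a device we have not seen before.
	 */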
1460 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001461 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 is_ndn = 0;
1463 break;
1464 }
1465 }
1466
1467 if (is_ndn) {
1468 if (list_empty(&ioa_cfg->free_res_q)) {
1469 ipr_send_hcam(ioa_cfg,
1470 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1471 hostrcb);
1472 return;
1473 }
1474
1475 res = list_entry(ioa_cfg->free_res_q.next,
1476 struct ipr_resource_entry, queue);
1477
1478 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001479 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1481 }
1482
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001483 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
1485 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1486 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001488 res->res_handle = IPR_INVALID_RES_HANDLE;
Brian Kingf688f962014-12-02 12:47:37 -06001489 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001490 } else {
1491 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001493 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001494 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 res->add_to_ml = 1;
Brian Kingf688f962014-12-02 12:47:37 -06001496 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 }
1498
1499 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1500}
1501
1502/**
1503 * ipr_process_ccn - Op done function for a CCN.
1504 * @ipr_cmd: ipr command struct
1505 *
1506 * This function is the op done function for a configuration
1507 * change notification host controlled async from the adapter.
1508 *
1509 * Return value:
1510 * none
1511 **/
1512static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1513{
1514 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1515 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001516 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
Brian Kingafc3f832016-08-24 12:56:51 -05001518 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001519 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520
1521 if (ioasc) {
Brian King4fdd7c72015-03-26 11:23:50 -05001522 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1523 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 dev_err(&ioa_cfg->pdev->dev,
1525 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1526
1527 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1528 } else {
1529 ipr_handle_config_change(ioa_cfg, hostrcb);
1530 }
1531}
1532
1533/**
Brian King8cf093e2007-04-26 16:00:14 -05001534 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1535 * @i: index into buffer
1536 * @buf: string to modify
1537 *
1538 * This function will strip all trailing whitespace, pad the end
1539 * of the string with a single space, and NULL terminate the string.
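 *
 * Example (illustrative): for an 8 byte field containing "IBM     " and
 * i indexing its last byte, the buffer becomes "IBM " (one trailing
 * space, NUL terminated) and the index just past that space is returned.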
1540 *
1541 * Return value:
1542 * new length of string
1543 **/
1544static int strip_and_pad_whitespace(int i, char *buf)
1545{
1546 while (i && buf[i] == ' ')
1547 i--;
1548 buf[i+1] = ' ';
1549 buf[i+2] = '\0';
1550 return i + 2;
1551}
1552
1553/**
1554 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1555 * @prefix: string to print at start of printk
1556 * @hostrcb: hostrcb pointer
1557 * @vpd: vendor/product id/sn struct
1558 *
1559 * Return value:
1560 * none
1561 **/
1562static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1563 struct ipr_vpd *vpd)
1564{
1565 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1566 int i = 0;
1567
1568 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1569 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1570
1571 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1572 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1573
1574 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1575 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1576
1577 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1578}
1579
1580/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001582 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 *
1584 * Return value:
1585 * none
1586 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001587static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588{
1589 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1590 + IPR_SERIAL_NUM_LEN];
1591
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001592 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1593 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 IPR_PROD_ID_LEN);
1595 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1596 ipr_err("Vendor/Product ID: %s\n", buffer);
1597
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001598 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1600 ipr_err(" Serial Number: %s\n", buffer);
1601}
1602
1603/**
Brian King8cf093e2007-04-26 16:00:14 -05001604 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1605 * @prefix: string to print at start of printk
1606 * @hostrcb: hostrcb pointer
1607 * @vpd: vendor/product id/sn/wwn struct
1608 *
1609 * Return value:
1610 * none
1611 **/
1612static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1613 struct ipr_ext_vpd *vpd)
1614{
1615 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1616 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1617 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1618}
1619
1620/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001621 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1622 * @vpd: vendor/product id/sn/wwn struct
1623 *
1624 * Return value:
1625 * none
1626 **/
1627static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1628{
1629 ipr_log_vpd(&vpd->vpd);
1630 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1631 be32_to_cpu(vpd->wwid[1]));
1632}
1633
1634/**
1635 * ipr_log_enhanced_cache_error - Log a cache error.
1636 * @ioa_cfg: ioa config struct
1637 * @hostrcb: hostrcb struct
1638 *
1639 * Return value:
1640 * none
1641 **/
1642static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1643 struct ipr_hostrcb *hostrcb)
1644{
Wayne Boyer4565e372010-02-19 13:24:07 -08001645 struct ipr_hostrcb_type_12_error *error;
1646
1647 if (ioa_cfg->sis64)
1648 error = &hostrcb->hcam.u.error64.u.type_12_error;
1649 else
1650 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001651
1652 ipr_err("-----Current Configuration-----\n");
1653 ipr_err("Cache Directory Card Information:\n");
1654 ipr_log_ext_vpd(&error->ioa_vpd);
1655 ipr_err("Adapter Card Information:\n");
1656 ipr_log_ext_vpd(&error->cfc_vpd);
1657
1658 ipr_err("-----Expected Configuration-----\n");
1659 ipr_err("Cache Directory Card Information:\n");
1660 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1661 ipr_err("Adapter Card Information:\n");
1662 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1663
1664 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1665 be32_to_cpu(error->ioa_data[0]),
1666 be32_to_cpu(error->ioa_data[1]),
1667 be32_to_cpu(error->ioa_data[2]));
1668}
1669
1670/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001671 * ipr_log_cache_error - Log a cache error.
1672 * @ioa_cfg: ioa config struct
1673 * @hostrcb: hostrcb struct
1674 *
1675 * Return value:
1676 * none
1677 **/
1678static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1679 struct ipr_hostrcb *hostrcb)
1680{
1681 struct ipr_hostrcb_type_02_error *error =
1682 &hostrcb->hcam.u.error.u.type_02_error;
1683
1684 ipr_err("-----Current Configuration-----\n");
1685 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001686 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001688 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
1690 ipr_err("-----Expected Configuration-----\n");
1691 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001692 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001694 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695
1696 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1697 be32_to_cpu(error->ioa_data[0]),
1698 be32_to_cpu(error->ioa_data[1]),
1699 be32_to_cpu(error->ioa_data[2]));
1700}
1701
1702/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001703 * ipr_log_enhanced_config_error - Log a configuration error.
1704 * @ioa_cfg: ioa config struct
1705 * @hostrcb: hostrcb struct
1706 *
1707 * Return value:
1708 * none
1709 **/
1710static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1711 struct ipr_hostrcb *hostrcb)
1712{
1713 int errors_logged, i;
1714 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1715 struct ipr_hostrcb_type_13_error *error;
1716
1717 error = &hostrcb->hcam.u.error.u.type_13_error;
1718 errors_logged = be32_to_cpu(error->errors_logged);
1719
1720 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1721 be32_to_cpu(error->errors_detected), errors_logged);
1722
1723 dev_entry = error->dev;
1724
1725 for (i = 0; i < errors_logged; i++, dev_entry++) {
1726 ipr_err_separator;
1727
1728 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1729 ipr_log_ext_vpd(&dev_entry->vpd);
1730
1731 ipr_err("-----New Device Information-----\n");
1732 ipr_log_ext_vpd(&dev_entry->new_vpd);
1733
1734 ipr_err("Cache Directory Card Information:\n");
1735 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1736
1737 ipr_err("Adapter Card Information:\n");
1738 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1739 }
1740}
1741
1742/**
 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1744 * @ioa_cfg: ioa config struct
1745 * @hostrcb: hostrcb struct
1746 *
1747 * Return value:
1748 * none
1749 **/
1750static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1751 struct ipr_hostrcb *hostrcb)
1752{
1753 int errors_logged, i;
1754 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1755 struct ipr_hostrcb_type_23_error *error;
1756 char buffer[IPR_MAX_RES_PATH_LENGTH];
1757
1758 error = &hostrcb->hcam.u.error64.u.type_23_error;
1759 errors_logged = be32_to_cpu(error->errors_logged);
1760
1761 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1762 be32_to_cpu(error->errors_detected), errors_logged);
1763
1764 dev_entry = error->dev;
1765
1766 for (i = 0; i < errors_logged; i++, dev_entry++) {
1767 ipr_err_separator;
1768
1769 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001770 __ipr_format_res_path(dev_entry->res_path,
1771 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001772 ipr_log_ext_vpd(&dev_entry->vpd);
1773
1774 ipr_err("-----New Device Information-----\n");
1775 ipr_log_ext_vpd(&dev_entry->new_vpd);
1776
1777 ipr_err("Cache Directory Card Information:\n");
1778 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1779
1780 ipr_err("Adapter Card Information:\n");
1781 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1782 }
1783}
1784
1785/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786 * ipr_log_config_error - Log a configuration error.
1787 * @ioa_cfg: ioa config struct
1788 * @hostrcb: hostrcb struct
1789 *
1790 * Return value:
1791 * none
1792 **/
1793static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1794 struct ipr_hostrcb *hostrcb)
1795{
1796 int errors_logged, i;
1797 struct ipr_hostrcb_device_data_entry *dev_entry;
1798 struct ipr_hostrcb_type_03_error *error;
1799
1800 error = &hostrcb->hcam.u.error.u.type_03_error;
1801 errors_logged = be32_to_cpu(error->errors_logged);
1802
1803 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1804 be32_to_cpu(error->errors_detected), errors_logged);
1805
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001806 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807
1808 for (i = 0; i < errors_logged; i++, dev_entry++) {
1809 ipr_err_separator;
1810
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001811 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001812 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813
1814 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001815 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816
1817 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001818 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819
1820 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001821 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
1823 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1824 be32_to_cpu(dev_entry->ioa_data[0]),
1825 be32_to_cpu(dev_entry->ioa_data[1]),
1826 be32_to_cpu(dev_entry->ioa_data[2]),
1827 be32_to_cpu(dev_entry->ioa_data[3]),
1828 be32_to_cpu(dev_entry->ioa_data[4]));
1829 }
1830}
1831
1832/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001833 * ipr_log_enhanced_array_error - Log an array configuration error.
1834 * @ioa_cfg: ioa config struct
1835 * @hostrcb: hostrcb struct
1836 *
1837 * Return value:
1838 * none
1839 **/
1840static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1841 struct ipr_hostrcb *hostrcb)
1842{
1843 int i, num_entries;
1844 struct ipr_hostrcb_type_14_error *error;
1845 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1846 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1847
1848 error = &hostrcb->hcam.u.error.u.type_14_error;
1849
1850 ipr_err_separator;
1851
1852 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1853 error->protection_level,
1854 ioa_cfg->host->host_no,
1855 error->last_func_vset_res_addr.bus,
1856 error->last_func_vset_res_addr.target,
1857 error->last_func_vset_res_addr.lun);
1858
1859 ipr_err_separator;
1860
1861 array_entry = error->array_member;
1862 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001863 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001864
1865 for (i = 0; i < num_entries; i++, array_entry++) {
1866 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1867 continue;
1868
1869 if (be32_to_cpu(error->exposed_mode_adn) == i)
1870 ipr_err("Exposed Array Member %d:\n", i);
1871 else
1872 ipr_err("Array Member %d:\n", i);
1873
1874 ipr_log_ext_vpd(&array_entry->vpd);
1875 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1876 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1877 "Expected Location");
1878
1879 ipr_err_separator;
1880 }
1881}
1882
1883/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001884 * ipr_log_array_error - Log an array configuration error.
1885 * @ioa_cfg: ioa config struct
1886 * @hostrcb: hostrcb struct
1887 *
1888 * Return value:
1889 * none
1890 **/
1891static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1892 struct ipr_hostrcb *hostrcb)
1893{
1894 int i;
1895 struct ipr_hostrcb_type_04_error *error;
1896 struct ipr_hostrcb_array_data_entry *array_entry;
1897 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1898
1899 error = &hostrcb->hcam.u.error.u.type_04_error;
1900
1901 ipr_err_separator;
1902
1903 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1904 error->protection_level,
1905 ioa_cfg->host->host_no,
1906 error->last_func_vset_res_addr.bus,
1907 error->last_func_vset_res_addr.target,
1908 error->last_func_vset_res_addr.lun);
1909
1910 ipr_err_separator;
1911
1912 array_entry = error->array_member;
1913
1914 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001915 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 continue;
1917
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001918 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001920 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001923 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001925 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1926 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1927 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928
1929 ipr_err_separator;
1930
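		/*
		 * The 18 members are split across two fixed arrays in the
		 * hostrcb; after the tenth entry, continue in array_member2.
		 */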
1931 if (i == 9)
1932 array_entry = error->array_member2;
1933 else
1934 array_entry++;
1935 }
1936}
1937
1938/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001939 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001940 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001941 * @data: IOA error data
1942 * @len: data length
1943 *
1944 * Return value:
1945 * none
1946 **/
Brian King359d96e2015-06-11 20:45:20 -05001947static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001948{
1949 int i;
1950
1951 if (len == 0)
1952 return;
1953
Brian Kingac719ab2006-11-21 10:28:42 -06001954 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1955 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1956
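	/*
	 * Each line shows a byte offset followed by four big-endian
	 * 32-bit words (16 bytes of data per line).
	 */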
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001957 for (i = 0; i < len / 4; i += 4) {
1958 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1959 be32_to_cpu(data[i]),
1960 be32_to_cpu(data[i+1]),
1961 be32_to_cpu(data[i+2]),
1962 be32_to_cpu(data[i+3]));
1963 }
1964}
1965
1966/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001967 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1968 * @ioa_cfg: ioa config struct
1969 * @hostrcb: hostrcb struct
1970 *
1971 * Return value:
1972 * none
1973 **/
1974static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1975 struct ipr_hostrcb *hostrcb)
1976{
1977 struct ipr_hostrcb_type_17_error *error;
1978
Wayne Boyer4565e372010-02-19 13:24:07 -08001979 if (ioa_cfg->sis64)
1980 error = &hostrcb->hcam.u.error64.u.type_17_error;
1981 else
1982 error = &hostrcb->hcam.u.error.u.type_17_error;
1983
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001984 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001985 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001986
Brian King8cf093e2007-04-26 16:00:14 -05001987 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1988 be32_to_cpu(hostrcb->hcam.u.error.prc));
1989 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001990 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001991 be32_to_cpu(hostrcb->hcam.length) -
1992 (offsetof(struct ipr_hostrcb_error, u) +
1993 offsetof(struct ipr_hostrcb_type_17_error, data)));
1994}
1995
1996/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001997 * ipr_log_dual_ioa_error - Log a dual adapter error.
1998 * @ioa_cfg: ioa config struct
1999 * @hostrcb: hostrcb struct
2000 *
2001 * Return value:
2002 * none
2003 **/
2004static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2005 struct ipr_hostrcb *hostrcb)
2006{
2007 struct ipr_hostrcb_type_07_error *error;
2008
2009 error = &hostrcb->hcam.u.error.u.type_07_error;
2010 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08002011 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002012
Brian King8cf093e2007-04-26 16:00:14 -05002013 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2014 be32_to_cpu(hostrcb->hcam.u.error.prc));
2015 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06002016 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002017 be32_to_cpu(hostrcb->hcam.length) -
2018 (offsetof(struct ipr_hostrcb_error, u) +
2019 offsetof(struct ipr_hostrcb_type_07_error, data)));
2020}
2021
Brian King49dc6a12006-11-21 10:28:35 -06002022static const struct {
2023 u8 active;
2024 char *desc;
2025} path_active_desc[] = {
2026 { IPR_PATH_NO_INFO, "Path" },
2027 { IPR_PATH_ACTIVE, "Active path" },
2028 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2029};
2030
2031static const struct {
2032 u8 state;
2033 char *desc;
2034} path_state_desc[] = {
2035 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2036 { IPR_PATH_HEALTHY, "is healthy" },
2037 { IPR_PATH_DEGRADED, "is degraded" },
2038 { IPR_PATH_FAILED, "is failed" }
2039};
2040
2041/**
2042 * ipr_log_fabric_path - Log a fabric path error
2043 * @hostrcb: hostrcb struct
2044 * @fabric: fabric descriptor
2045 *
2046 * Return value:
2047 * none
2048 **/
2049static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2050 struct ipr_hostrcb_fabric_desc *fabric)
2051{
2052 int i, j;
2053 u8 path_state = fabric->path_state;
2054 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2055 u8 state = path_state & IPR_PATH_STATE_MASK;
2056
2057 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2058 if (path_active_desc[i].active != active)
2059 continue;
2060
2061 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2062 if (path_state_desc[j].state != state)
2063 continue;
2064
2065 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2066 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2067 path_active_desc[i].desc, path_state_desc[j].desc,
2068 fabric->ioa_port);
2069 } else if (fabric->cascaded_expander == 0xff) {
2070 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2071 path_active_desc[i].desc, path_state_desc[j].desc,
2072 fabric->ioa_port, fabric->phy);
2073 } else if (fabric->phy == 0xff) {
2074 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2075 path_active_desc[i].desc, path_state_desc[j].desc,
2076 fabric->ioa_port, fabric->cascaded_expander);
2077 } else {
2078 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2079 path_active_desc[i].desc, path_state_desc[j].desc,
2080 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2081 }
2082 return;
2083 }
2084 }
2085
2086 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2087 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2088}
2089
Wayne Boyer4565e372010-02-19 13:24:07 -08002090/**
2091 * ipr_log64_fabric_path - Log a fabric path error
2092 * @hostrcb: hostrcb struct
2093 * @fabric: fabric descriptor
2094 *
2095 * Return value:
2096 * none
2097 **/
2098static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2099 struct ipr_hostrcb64_fabric_desc *fabric)
2100{
2101 int i, j;
2102 u8 path_state = fabric->path_state;
2103 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2104 u8 state = path_state & IPR_PATH_STATE_MASK;
2105 char buffer[IPR_MAX_RES_PATH_LENGTH];
2106
2107 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2108 if (path_active_desc[i].active != active)
2109 continue;
2110
2111 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2112 if (path_state_desc[j].state != state)
2113 continue;
2114
2115 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2116 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002117 ipr_format_res_path(hostrcb->ioa_cfg,
2118 fabric->res_path,
2119 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002120 return;
2121 }
2122 }
2123
2124 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002125 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2126 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002127}
2128
Brian King49dc6a12006-11-21 10:28:35 -06002129static const struct {
2130 u8 type;
2131 char *desc;
2132} path_type_desc[] = {
2133 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2134 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2135 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2136 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2137};
2138
2139static const struct {
2140 u8 status;
2141 char *desc;
2142} path_status_desc[] = {
2143 { IPR_PATH_CFG_NO_PROB, "Functional" },
2144 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2145 { IPR_PATH_CFG_FAILED, "Failed" },
2146 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2147 { IPR_PATH_NOT_DETECTED, "Missing" },
2148 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2149};
2150
2151static const char *link_rate[] = {
2152 "unknown",
2153 "disabled",
2154 "phy reset problem",
2155 "spinup hold",
2156 "port selector",
2157 "unknown",
2158 "unknown",
2159 "unknown",
2160 "1.5Gbps",
2161 "3.0Gbps",
2162 "unknown",
2163 "unknown",
2164 "unknown",
2165 "unknown",
2166 "unknown",
2167 "unknown"
2168};
2169
2170/**
2171 * ipr_log_path_elem - Log a fabric path element.
2172 * @hostrcb: hostrcb struct
2173 * @cfg: fabric path element struct
2174 *
2175 * Return value:
2176 * none
2177 **/
2178static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2179 struct ipr_hostrcb_config_element *cfg)
2180{
2181 int i, j;
2182 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2183 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2184
2185 if (type == IPR_PATH_CFG_NOT_EXIST)
2186 return;
2187
2188 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2189 if (path_type_desc[i].type != type)
2190 continue;
2191
2192 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2193 if (path_status_desc[j].status != status)
2194 continue;
2195
2196 if (type == IPR_PATH_CFG_IOA_PORT) {
2197 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2198 path_status_desc[j].desc, path_type_desc[i].desc,
2199 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2200 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2201 } else {
2202 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2203 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2204 path_status_desc[j].desc, path_type_desc[i].desc,
2205 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2206 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2207 } else if (cfg->cascaded_expander == 0xff) {
2208 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2209 "WWN=%08X%08X\n", path_status_desc[j].desc,
2210 path_type_desc[i].desc, cfg->phy,
2211 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2212 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2213 } else if (cfg->phy == 0xff) {
2214 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2215 "WWN=%08X%08X\n", path_status_desc[j].desc,
2216 path_type_desc[i].desc, cfg->cascaded_expander,
2217 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2218 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2219 } else {
2220 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2221 "WWN=%08X%08X\n", path_status_desc[j].desc,
2222 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2223 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2224 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2225 }
2226 }
2227 return;
2228 }
2229 }
2230
2231 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2232 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2233 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2234 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2235}
2236
2237/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002238 * ipr_log64_path_elem - Log a fabric path element.
2239 * @hostrcb: hostrcb struct
2240 * @cfg: fabric path element struct
2241 *
2242 * Return value:
2243 * none
2244 **/
2245static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2246 struct ipr_hostrcb64_config_element *cfg)
2247{
2248 int i, j;
2249 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2250 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2251 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2252 char buffer[IPR_MAX_RES_PATH_LENGTH];
2253
2254 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2255 return;
2256
2257 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2258 if (path_type_desc[i].type != type)
2259 continue;
2260
2261 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2262 if (path_status_desc[j].status != status)
2263 continue;
2264
2265 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2266 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002267 ipr_format_res_path(hostrcb->ioa_cfg,
2268 cfg->res_path, buffer, sizeof(buffer)),
2269 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2270 be32_to_cpu(cfg->wwid[0]),
2271 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002272 return;
2273 }
2274 }
2275 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2276 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002277 ipr_format_res_path(hostrcb->ioa_cfg,
2278 cfg->res_path, buffer, sizeof(buffer)),
2279 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2280 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002281}
2282
2283/**
Brian King49dc6a12006-11-21 10:28:35 -06002284 * ipr_log_fabric_error - Log a fabric error.
2285 * @ioa_cfg: ioa config struct
2286 * @hostrcb: hostrcb struct
2287 *
2288 * Return value:
2289 * none
2290 **/
2291static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2292 struct ipr_hostrcb *hostrcb)
2293{
2294 struct ipr_hostrcb_type_20_error *error;
2295 struct ipr_hostrcb_fabric_desc *fabric;
2296 struct ipr_hostrcb_config_element *cfg;
2297 int i, add_len;
2298
2299 error = &hostrcb->hcam.u.error.u.type_20_error;
2300 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2301 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2302
2303 add_len = be32_to_cpu(hostrcb->hcam.length) -
2304 (offsetof(struct ipr_hostrcb_error, u) +
2305 offsetof(struct ipr_hostrcb_type_20_error, desc));
2306
2307 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2308 ipr_log_fabric_path(hostrcb, fabric);
2309 for_each_fabric_cfg(fabric, cfg)
2310 ipr_log_path_elem(hostrcb, cfg);
2311
2312 add_len -= be16_to_cpu(fabric->length);
2313 fabric = (struct ipr_hostrcb_fabric_desc *)
2314 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2315 }
2316
Brian King359d96e2015-06-11 20:45:20 -05002317 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002318}
2319
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002320/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002321 * ipr_log_sis64_array_error - Log a sis64 array error.
2322 * @ioa_cfg: ioa config struct
2323 * @hostrcb: hostrcb struct
2324 *
2325 * Return value:
2326 * none
2327 **/
2328static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2329 struct ipr_hostrcb *hostrcb)
2330{
2331 int i, num_entries;
2332 struct ipr_hostrcb_type_24_error *error;
2333 struct ipr_hostrcb64_array_data_entry *array_entry;
2334 char buffer[IPR_MAX_RES_PATH_LENGTH];
2335 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2336
2337 error = &hostrcb->hcam.u.error64.u.type_24_error;
2338
2339 ipr_err_separator;
2340
2341 ipr_err("RAID %s Array Configuration: %s\n",
2342 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002343 ipr_format_res_path(ioa_cfg, error->last_res_path,
2344 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002345
2346 ipr_err_separator;
2347
2348 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002349 num_entries = min_t(u32, error->num_entries,
2350 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002351
2352 for (i = 0; i < num_entries; i++, array_entry++) {
2353
2354 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2355 continue;
2356
2357 if (error->exposed_mode_adn == i)
2358 ipr_err("Exposed Array Member %d:\n", i);
2359 else
2360 ipr_err("Array Member %d:\n", i);
2361
2362 ipr_err("Array Member %d:\n", i);
2363 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002364 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002365 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2366 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002367 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002368 ipr_format_res_path(ioa_cfg,
2369 array_entry->expected_res_path,
2370 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002371
2372 ipr_err_separator;
2373 }
2374}
2375
2376/**
2377 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2378 * @ioa_cfg: ioa config struct
2379 * @hostrcb: hostrcb struct
2380 *
2381 * Return value:
2382 * none
2383 **/
2384static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2385 struct ipr_hostrcb *hostrcb)
2386{
2387 struct ipr_hostrcb_type_30_error *error;
2388 struct ipr_hostrcb64_fabric_desc *fabric;
2389 struct ipr_hostrcb64_config_element *cfg;
2390 int i, add_len;
2391
2392 error = &hostrcb->hcam.u.error64.u.type_30_error;
2393
2394 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2395 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2396
2397 add_len = be32_to_cpu(hostrcb->hcam.length) -
2398 (offsetof(struct ipr_hostrcb64_error, u) +
2399 offsetof(struct ipr_hostrcb_type_30_error, desc));
2400
2401 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2402 ipr_log64_fabric_path(hostrcb, fabric);
2403 for_each_fabric_cfg(fabric, cfg)
2404 ipr_log64_path_elem(hostrcb, cfg);
2405
2406 add_len -= be16_to_cpu(fabric->length);
2407 fabric = (struct ipr_hostrcb64_fabric_desc *)
2408 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2409 }
2410
Brian King359d96e2015-06-11 20:45:20 -05002411 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Wayne Boyer4565e372010-02-19 13:24:07 -08002412}
2413
2414/**
Wen Xiong15c5a5e2018-06-06 10:01:36 -05002415 * ipr_log_sis64_service_required_error - Log a sis64 service required error.
2416 * @ioa_cfg: ioa config struct
2417 * @hostrcb: hostrcb struct
2418 *
2419 * Return value:
2420 * none
2421 **/
2422static void ipr_log_sis64_service_required_error(struct ipr_ioa_cfg *ioa_cfg,
2423 struct ipr_hostrcb *hostrcb)
2424{
2425 struct ipr_hostrcb_type_41_error *error;
2426
2427 error = &hostrcb->hcam.u.error64.u.type_41_error;
2428
2429 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2430 ipr_err("Primary Failure Reason: %s\n", error->failure_reason);
2431 ipr_log_hex_data(ioa_cfg, error->data,
2432 be32_to_cpu(hostrcb->hcam.length) -
2433 (offsetof(struct ipr_hostrcb_error, u) +
2434 offsetof(struct ipr_hostrcb_type_41_error, data)));
2435}

/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 * ipr_log_generic_error - Log an adapter error.
2438 * @ioa_cfg: ioa config struct
2439 * @hostrcb: hostrcb struct
2440 *
2441 * Return value:
2442 * none
2443 **/
2444static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2445 struct ipr_hostrcb *hostrcb)
2446{
Brian Kingac719ab2006-11-21 10:28:42 -06002447 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002448 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449}
2450
2451/**
 * ipr_log_sis64_device_error - Log a sis64 device error.
2453 * @ioa_cfg: ioa config struct
2454 * @hostrcb: hostrcb struct
2455 *
2456 * Return value:
2457 * none
2458 **/
2459static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2460 struct ipr_hostrcb *hostrcb)
2461{
2462 struct ipr_hostrcb_type_21_error *error;
2463 char buffer[IPR_MAX_RES_PATH_LENGTH];
2464
2465 error = &hostrcb->hcam.u.error64.u.type_21_error;
2466
2467 ipr_err("-----Failing Device Information-----\n");
2468 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2469 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2470 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2471 ipr_err("Device Resource Path: %s\n",
2472 __ipr_format_res_path(error->res_path,
2473 buffer, sizeof(buffer)));
2474 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2475 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2476 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2477 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2478 ipr_err("SCSI Sense Data:\n");
2479 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2480 ipr_err("SCSI Command Descriptor Block: \n");
2481 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2482
2483 ipr_err("Additional IOA Data:\n");
2484 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2485}
2486
2487/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2489 * @ioasc: IOASC
2490 *
 * This function will return the index into the ipr_error_table
2492 * for the specified IOASC. If the IOASC is not in the table,
2493 * 0 will be returned, which points to the entry used for unknown errors.
2494 *
2495 * Return value:
2496 * index into the ipr_error_table
2497 **/
2498static u32 ipr_get_error(u32 ioasc)
2499{
2500 int i;
2501
2502 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002503 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504 return i;
2505
2506 return 0;
2507}
2508
2509/**
2510 * ipr_handle_log_data - Log an adapter error.
2511 * @ioa_cfg: ioa config struct
2512 * @hostrcb: hostrcb struct
2513 *
2514 * This function logs an adapter error to the system.
2515 *
2516 * Return value:
2517 * none
2518 **/
2519static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2520 struct ipr_hostrcb *hostrcb)
2521{
2522 u32 ioasc;
2523 int error_index;
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002524 struct ipr_hostrcb_type_21_error *error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525
2526 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2527 return;
2528
2529 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2530 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2531
Wayne Boyer4565e372010-02-19 13:24:07 -08002532 if (ioa_cfg->sis64)
2533 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2534 else
2535 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536
Wayne Boyer4565e372010-02-19 13:24:07 -08002537 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2538 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2540 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002541 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 }
2543
2544 error_index = ipr_get_error(ioasc);
2545
2546 if (!ipr_error_table[error_index].log_hcam)
2547 return;
2548
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002549 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2550 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2551 error = &hostrcb->hcam.u.error64.u.type_21_error;
2552
2553 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2554 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2555 return;
2556 }
2557
Brian King49dc6a12006-11-21 10:28:35 -06002558 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559
2560 /* Set indication we have logged an error */
2561 ioa_cfg->errors_logged++;
2562
Brian King933916f2007-03-29 12:43:30 -05002563 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002565 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2566 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567
2568 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 case IPR_HOST_RCB_OVERLAY_ID_2:
2570 ipr_log_cache_error(ioa_cfg, hostrcb);
2571 break;
2572 case IPR_HOST_RCB_OVERLAY_ID_3:
2573 ipr_log_config_error(ioa_cfg, hostrcb);
2574 break;
2575 case IPR_HOST_RCB_OVERLAY_ID_4:
2576 case IPR_HOST_RCB_OVERLAY_ID_6:
2577 ipr_log_array_error(ioa_cfg, hostrcb);
2578 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002579 case IPR_HOST_RCB_OVERLAY_ID_7:
2580 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2581 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002582 case IPR_HOST_RCB_OVERLAY_ID_12:
2583 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2584 break;
2585 case IPR_HOST_RCB_OVERLAY_ID_13:
2586 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2587 break;
2588 case IPR_HOST_RCB_OVERLAY_ID_14:
2589 case IPR_HOST_RCB_OVERLAY_ID_16:
2590 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2591 break;
2592 case IPR_HOST_RCB_OVERLAY_ID_17:
2593 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2594 break;
Brian King49dc6a12006-11-21 10:28:35 -06002595 case IPR_HOST_RCB_OVERLAY_ID_20:
2596 ipr_log_fabric_error(ioa_cfg, hostrcb);
2597 break;
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002598 case IPR_HOST_RCB_OVERLAY_ID_21:
2599 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2600 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002601 case IPR_HOST_RCB_OVERLAY_ID_23:
2602 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2603 break;
2604 case IPR_HOST_RCB_OVERLAY_ID_24:
2605 case IPR_HOST_RCB_OVERLAY_ID_26:
2606 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2607 break;
2608 case IPR_HOST_RCB_OVERLAY_ID_30:
2609 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2610 break;
Wen Xiong15c5a5e2018-06-06 10:01:36 -05002611 case IPR_HOST_RCB_OVERLAY_ID_41:
2612 ipr_log_sis64_service_required_error(ioa_cfg, hostrcb);
2613 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002614 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002617 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 break;
2619 }
2620}
2621
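/**
 * ipr_get_free_hostrcb - Get a free hostrcb from the free queue
 * @ioa:	ioa config struct
 *
 * If the free queue is empty, a buffer still waiting to be reported to
 * userspace is reclaimed from the report queue instead.
 *
 * Return value:
 * 	pointer to hostrcb
 **/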
Brian Kingafc3f832016-08-24 12:56:51 -05002622static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2623{
2624 struct ipr_hostrcb *hostrcb;
2625
2626 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2627 struct ipr_hostrcb, queue);
2628
2629 if (unlikely(!hostrcb)) {
2630 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2631 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2632 struct ipr_hostrcb, queue);
2633 }
2634
2635 list_del_init(&hostrcb->queue);
2636 return hostrcb;
2637}
2638
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639/**
2640 * ipr_process_error - Op done function for an adapter error log.
2641 * @ipr_cmd: ipr command struct
2642 *
2643 * This function is the op done function for an error log host
2644 * controlled async from the adapter. It will log the error and
2645 * send the HCAM back to the adapter.
2646 *
2647 * Return value:
2648 * none
2649 **/
2650static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2651{
2652 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2653 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002654 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002655 u32 fd_ioasc;
2656
2657 if (ioa_cfg->sis64)
2658 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2659 else
2660 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661
Brian Kingafc3f832016-08-24 12:56:51 -05002662 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002663 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664
2665 if (!ioasc) {
2666 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002667 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2668 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Brian King4fdd7c72015-03-26 11:23:50 -05002669 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2670 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 dev_err(&ioa_cfg->pdev->dev,
2672 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2673 }
2674
Brian Kingafc3f832016-08-24 12:56:51 -05002675 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
Brian King8a4236a2016-10-13 14:45:24 -05002676 schedule_work(&ioa_cfg->work_q);
Brian Kingafc3f832016-08-24 12:56:51 -05002677 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
Brian Kingafc3f832016-08-24 12:56:51 -05002678
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2680}
2681
2682/**
2683 * ipr_timeout - An internally generated op has timed out.
2684 * @t: Timer context used to fetch ipr command struct
2685 *
2686 * This function blocks host requests and initiates an
2687 * adapter reset.
2688 *
2689 * Return value:
2690 * none
2691 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002692static void ipr_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693{
Kees Cook738c6ec2017-08-18 16:53:24 -07002694 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 unsigned long lock_flags = 0;
2696 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2697
2698 ENTER;
2699 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2700
2701 ioa_cfg->errors_logged++;
2702 dev_err(&ioa_cfg->pdev->dev,
2703 "Adapter being reset due to command timeout.\n");
2704
2705 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2706 ioa_cfg->sdt_state = GET_DUMP;
2707
2708 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2709 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2710
2711 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2712 LEAVE;
2713}
2714
2715/**
2716 * ipr_oper_timeout - Adapter timed out transitioning to operational
2717 * @t: Timer context used to fetch ipr command struct
2718 *
2719 * This function blocks host requests and initiates an
2720 * adapter reset.
2721 *
2722 * Return value:
2723 * none
2724 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002725static void ipr_oper_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726{
Kees Cook738c6ec2017-08-18 16:53:24 -07002727 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 unsigned long lock_flags = 0;
2729 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2730
2731 ENTER;
2732 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2733
2734 ioa_cfg->errors_logged++;
2735 dev_err(&ioa_cfg->pdev->dev,
2736 "Adapter timed out transitioning to operational.\n");
2737
2738 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2739 ioa_cfg->sdt_state = GET_DUMP;
2740
2741 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2742 if (ipr_fastfail)
2743 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2744 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2745 }
2746
2747 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2748 LEAVE;
2749}
2750
2751/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 * ipr_find_ses_entry - Find matching SES in SES table
2753 * @res: resource entry struct of SES
2754 *
2755 * Return value:
2756 * pointer to SES table entry / NULL on failure
2757 **/
2758static const struct ipr_ses_table_entry *
2759ipr_find_ses_entry(struct ipr_resource_entry *res)
2760{
2761 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002762 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2764
2765 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2766 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2767 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002768 vpids = &res->std_inq_data.vpids;
2769 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 matches++;
2771 else
2772 break;
2773 } else
2774 matches++;
2775 }
2776
2777 if (matches == IPR_PROD_ID_LEN)
2778 return ste;
2779 }
2780
2781 return NULL;
2782}
2783
2784/**
2785 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2786 * @ioa_cfg: ioa config struct
2787 * @bus: SCSI bus
2788 * @bus_width: bus width
2789 *
2790 * Return value:
2791 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2792 * For a 2-byte (16-bit) wide SCSI bus, the maximum transfer rate in
2793 * MB/sec is twice the clock rate (e.g. a wide bus clocked at 160 MHz
2794 * moves at most 320 MB/sec).
2795 **/
2796static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2797{
2798 struct ipr_resource_entry *res;
2799 const struct ipr_ses_table_entry *ste;
2800 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2801
2802 /* Loop through each config table entry in the config table buffer */
2803 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002804 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 continue;
2806
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002807 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 continue;
2809
2810 if (!(ste = ipr_find_ses_entry(res)))
2811 continue;
2812
2813 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2814 }
2815
2816 return max_xfer_rate;
2817}
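/*
 * Worked example (assuming max_bus_speed_limit is expressed in MB/sec):
 * an SES entry limited to 160 MB/sec on a 16-bit (wide) bus gives
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100 KHz units used
 * above; with no matching SES entry the IPR_MAX_SCSI_RATE(bus_width)
 * default is returned unchanged (1600, i.e. 160 MHz, for a wide bus).
 */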
2818
2819/**
2820 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2821 * @ioa_cfg: ioa config struct
2822 * @max_delay: max delay in micro-seconds to wait
2823 *
2824 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2825 *
2826 * Return value:
2827 * 0 on success / other on failure
2828 **/
2829static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2830{
2831 volatile u32 pcii_reg;
2832 int delay = 1;
2833
2834 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2835 while (delay < max_delay) {
2836 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2837
2838 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2839 return 0;
2840
2841 /* udelay cannot be used if delay is more than a few milliseconds */
2842 if ((delay / 1000) > MAX_UDELAY_MS)
2843 mdelay(delay / 1000);
2844 else
2845 udelay(delay);
2846
2847 delay += delay;
2848 }
2849 return -EIO;
2850}
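/*
 * Note: the polling interval doubles on each pass (1, 2, 4, ...
 * microseconds), so the loop issues on the order of log2(max_delay)
 * register reads and the total busy-wait is bounded by roughly twice
 * max_delay.
 */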
2851
2852/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002853 * ipr_get_sis64_dump_data_section - Dump IOA memory
2854 * @ioa_cfg: ioa config struct
2855 * @start_addr: adapter address to dump
2856 * @dest: destination kernel buffer
2857 * @length_in_words: length to dump in 4 byte words
2858 *
2859 * Return value:
2860 * 0 on success
2861 **/
2862static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2863 u32 start_addr,
2864 __be32 *dest, u32 length_in_words)
2865{
2866 int i;
2867
2868 for (i = 0; i < length_in_words; i++) {
2869 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2870 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2871 dest++;
2872 }
2873
2874 return 0;
2875}
2876
2877/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878 * ipr_get_ldump_data_section - Dump IOA memory
2879 * @ioa_cfg: ioa config struct
2880 * @start_addr: adapter address to dump
2881 * @dest: destination kernel buffer
2882 * @length_in_words: length to dump in 4 byte words
2883 *
2884 * Return value:
2885 * 0 on success / -EIO on failure
2886 **/
2887static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2888 u32 start_addr,
2889 __be32 *dest, u32 length_in_words)
2890{
2891 volatile u32 temp_pcii_reg;
2892 int i, delay = 0;
2893
Wayne Boyerdcbad002010-02-19 13:24:14 -08002894 if (ioa_cfg->sis64)
2895 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2896 dest, length_in_words);
2897
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 /* Write IOA interrupt reg starting LDUMP state */
2899 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002900 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901
2902 /* Wait for IO debug acknowledge */
2903 if (ipr_wait_iodbg_ack(ioa_cfg,
2904 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2905 dev_err(&ioa_cfg->pdev->dev,
2906 "IOA dump long data transfer timeout\n");
2907 return -EIO;
2908 }
2909
2910 /* Signal LDUMP interlocked - clear IO debug ack */
2911 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912 ioa_cfg->regs.clr_interrupt_reg);
2913
2914 /* Write Mailbox with starting address */
2915 writel(start_addr, ioa_cfg->ioa_mailbox);
2916
2917 /* Signal address valid - clear IOA Reset alert */
2918 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002919 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002920
2921 for (i = 0; i < length_in_words; i++) {
2922 /* Wait for IO debug acknowledge */
2923 if (ipr_wait_iodbg_ack(ioa_cfg,
2924 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2925 dev_err(&ioa_cfg->pdev->dev,
2926 "IOA dump short data transfer timeout\n");
2927 return -EIO;
2928 }
2929
2930 /* Read data from mailbox and increment destination pointer */
2931 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2932 dest++;
2933
2934 /* For all but the last word of data, signal data received */
2935 if (i < (length_in_words - 1)) {
2936 /* Signal dump data received - Clear IO debug Ack */
2937 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2938 ioa_cfg->regs.clr_interrupt_reg);
2939 }
2940 }
2941
2942 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2943 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002944 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945
2946 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002947 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
2949 /* Signal dump data received - Clear IO debug Ack */
2950 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2951 ioa_cfg->regs.clr_interrupt_reg);
2952
2953 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2954 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2955 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002956 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002957
2958 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2959 return 0;
2960
2961 udelay(10);
2962 delay += 10;
2963 }
2964
2965 return 0;
2966}
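/*
 * Summary of the LDUMP handshake driven above: raise RESET_ALERT and
 * IO_DEBUG_ALERT to enter LDUMP state, write the start address to the
 * mailbox and clear RESET_ALERT to mark it valid, then for each word
 * wait for the IO debug acknowledge, read the mailbox and ack back;
 * finally re-raise RESET_ALERT, clear IO_DEBUG_ALERT and poll until
 * the IOA drops RESET_ALERT.
 */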
2967
2968#ifdef CONFIG_SCSI_IPR_DUMP
2969/**
2970 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2971 * @ioa_cfg: ioa config struct
2972 * @pci_address: adapter address
2973 * @length: length of data to copy
2974 *
2975 * Copy data from PCI adapter to kernel buffer.
2976 * Note: length MUST be a 4 byte multiple
2977 * Return value:
2978 * 0 on success / other on failure
2979 **/
2980static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2981 unsigned long pci_address, u32 length)
2982{
2983 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002984 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 __be32 *page;
2986 unsigned long lock_flags = 0;
2987 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2988
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002989 if (ioa_cfg->sis64)
2990 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2991 else
2992 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2993
Linus Torvalds1da177e2005-04-16 15:20:36 -07002994 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002995 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002996 if (ioa_dump->page_offset >= PAGE_SIZE ||
2997 ioa_dump->page_offset == 0) {
2998 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2999
3000 if (!page) {
3001 ipr_trace;
3002 return bytes_copied;
3003 }
3004
3005 ioa_dump->page_offset = 0;
3006 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
3007 ioa_dump->next_page_index++;
3008 } else
3009 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
3010
3011 rem_len = length - bytes_copied;
3012 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
3013 cur_len = min(rem_len, rem_page_len);
3014
3015 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3016 if (ioa_cfg->sdt_state == ABORT_DUMP) {
3017 rc = -EIO;
3018 } else {
3019 rc = ipr_get_ldump_data_section(ioa_cfg,
3020 pci_address + bytes_copied,
3021 &page[ioa_dump->page_offset / 4],
3022 (cur_len / sizeof(u32)));
3023 }
3024 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3025
3026 if (!rc) {
3027 ioa_dump->page_offset += cur_len;
3028 bytes_copied += cur_len;
3029 } else {
3030 ipr_trace;
3031 break;
3032 }
3033 schedule();
3034 }
3035
3036 return bytes_copied;
3037}
3038
3039/**
3040 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3041 * @hdr: dump entry header struct
3042 *
3043 * Return value:
3044 * nothing
3045 **/
3046static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3047{
3048 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3049 hdr->num_elems = 1;
3050 hdr->offset = sizeof(*hdr);
3051 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3052}
3053
3054/**
3055 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3056 * @ioa_cfg: ioa config struct
3057 * @driver_dump: driver dump struct
3058 *
3059 * Return value:
3060 * nothing
3061 **/
3062static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3063 struct ipr_driver_dump *driver_dump)
3064{
3065 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3066
3067 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3068 driver_dump->ioa_type_entry.hdr.len =
3069 sizeof(struct ipr_dump_ioa_type_entry) -
3070 sizeof(struct ipr_dump_entry_header);
3071 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3072 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3073 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3074 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3075 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3076 ucode_vpd->minor_release[1];
3077 driver_dump->hdr.num_entries++;
3078}
3079
3080/**
3081 * ipr_dump_version_data - Fill in the driver version in the dump.
3082 * @ioa_cfg: ioa config struct
3083 * @driver_dump: driver dump struct
3084 *
3085 * Return value:
3086 * nothing
3087 **/
3088static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3089 struct ipr_driver_dump *driver_dump)
3090{
3091 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3092 driver_dump->version_entry.hdr.len =
3093 sizeof(struct ipr_dump_version_entry) -
3094 sizeof(struct ipr_dump_entry_header);
3095 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3096 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3097 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3098 driver_dump->hdr.num_entries++;
3099}
3100
3101/**
3102 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3103 * @ioa_cfg: ioa config struct
3104 * @driver_dump: driver dump struct
3105 *
3106 * Return value:
3107 * nothing
3108 **/
3109static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3110 struct ipr_driver_dump *driver_dump)
3111{
3112 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3113 driver_dump->trace_entry.hdr.len =
3114 sizeof(struct ipr_dump_trace_entry) -
3115 sizeof(struct ipr_dump_entry_header);
3116 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3117 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3118 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3119 driver_dump->hdr.num_entries++;
3120}
3121
3122/**
3123 * ipr_dump_location_data - Fill in the IOA location in the dump.
3124 * @ioa_cfg: ioa config struct
3125 * @driver_dump: driver dump struct
3126 *
3127 * Return value:
3128 * nothing
3129 **/
3130static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3131 struct ipr_driver_dump *driver_dump)
3132{
3133 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3134 driver_dump->location_entry.hdr.len =
3135 sizeof(struct ipr_dump_location_entry) -
3136 sizeof(struct ipr_dump_entry_header);
3137 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3138 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01003139 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003140 driver_dump->hdr.num_entries++;
3141}
3142
3143/**
3144 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3145 * @ioa_cfg: ioa config struct
3146 * @dump: dump struct
3147 *
3148 * Return value:
3149 * nothing
3150 **/
3151static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3152{
3153 unsigned long start_addr, sdt_word;
3154 unsigned long lock_flags = 0;
3155 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3156 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003157 u32 num_entries, max_num_entries, start_off, end_off;
3158 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08003160 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003161 int i;
3162
3163 ENTER;
3164
3165 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3166
Brian King41e9a692011-09-21 08:51:11 -05003167 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3169 return;
3170 }
3171
Wayne Boyer110def82010-11-04 09:36:16 -07003172 if (ioa_cfg->sis64) {
3173 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3174 ssleep(IPR_DUMP_DELAY_SECONDS);
3175 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3176 }
3177
Linus Torvalds1da177e2005-04-16 15:20:36 -07003178 start_addr = readl(ioa_cfg->ioa_mailbox);
3179
Wayne Boyerdcbad002010-02-19 13:24:14 -08003180 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003181 dev_err(&ioa_cfg->pdev->dev,
3182 "Invalid dump table format: %lx\n", start_addr);
3183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3184 return;
3185 }
3186
3187 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3188
3189 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3190
3191 /* Initialize the overall dump header */
3192 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3193 driver_dump->hdr.num_entries = 1;
3194 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3195 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3196 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3197 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3198
3199 ipr_dump_version_data(ioa_cfg, driver_dump);
3200 ipr_dump_location_data(ioa_cfg, driver_dump);
3201 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3202 ipr_dump_trace_data(ioa_cfg, driver_dump);
3203
3204 /* Update dump_header */
3205 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3206
3207 /* IOA Dump entry */
3208 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 ioa_dump->hdr.len = 0;
3210 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3211 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3212
3213 /* First entries in sdt are actually a list of dump addresses and
3214 lengths to gather the real dump data. sdt represents the pointer
3215 to the ioa generated dump table. Dump data will be extracted based
3216 on entries in this table */
3217 sdt = &ioa_dump->sdt;
3218
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003219 if (ioa_cfg->sis64) {
3220 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3221 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3222 } else {
3223 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3224 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3225 }
3226
3227 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3228 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003230 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231
3232 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003233 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3234 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003235 dev_err(&ioa_cfg->pdev->dev,
3236 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3237 rc, be32_to_cpu(sdt->hdr.state));
3238 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3239 ioa_cfg->sdt_state = DUMP_OBTAINED;
3240 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3241 return;
3242 }
3243
3244 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3245
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003246 if (num_entries > max_num_entries)
3247 num_entries = max_num_entries;
3248
3249 /* Update dump length to the actual data to be copied */
3250 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3251 if (ioa_cfg->sis64)
3252 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3253 else
3254 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255
3256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3257
3258 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003259 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3261 break;
3262 }
3263
3264 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003265 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3266 if (ioa_cfg->sis64)
3267 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3268 else {
3269 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3270 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271
Wayne Boyerdcbad002010-02-19 13:24:14 -08003272 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3273 bytes_to_copy = end_off - start_off;
3274 else
3275 valid = 0;
3276 }
3277 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003278 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003279 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3280 continue;
3281 }
3282
3283 /* Copy data from adapter to driver buffers */
3284 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3285 bytes_to_copy);
3286
3287 ioa_dump->hdr.len += bytes_copied;
3288
3289 if (bytes_copied != bytes_to_copy) {
3290 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3291 break;
3292 }
3293 }
3294 }
3295 }
3296
3297 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3298
3299 /* Update dump_header */
3300 driver_dump->hdr.len += ioa_dump->hdr.len;
3301 wmb();
3302 ioa_cfg->sdt_state = DUMP_OBTAINED;
3303 LEAVE;
3304}
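/*
 * The dump image assembled above is laid out as: driver dump header,
 * followed by version, location, adapter type and trace entries, then
 * the smart dump table fetched from the mailbox address, and finally
 * the IOA data sections referenced by the valid SDT entries (clamped
 * to max_dump_size).
 */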
3305
3306#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003307#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308#endif
3309
3310/**
3311 * ipr_release_dump - Free adapter dump memory
3312 * @kref: kref struct
3313 *
3314 * Return value:
3315 * nothing
3316 **/
3317static void ipr_release_dump(struct kref *kref)
3318{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003319 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003320 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3321 unsigned long lock_flags = 0;
3322 int i;
3323
3324 ENTER;
3325 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3326 ioa_cfg->dump = NULL;
3327 ioa_cfg->sdt_state = INACTIVE;
3328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3329
3330 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3331 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3332
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003333 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 kfree(dump);
3335 LEAVE;
3336}
3337
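/**
 * ipr_add_remove_thread - Add/remove devices on the mid-layer
 * @work:	work struct embedded in the ioa config struct
 *
 * Called at task level from a work thread. Removes resources flagged
 * del_from_ml from the SCSI mid-layer, adds resources flagged
 * add_to_ml, then marks the scan done and emits a change uevent.
 *
 * Return value:
 * 	nothing
 **/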
Wen Xiong318ddb32018-09-20 19:32:12 -05003338static void ipr_add_remove_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339{
3340 unsigned long lock_flags;
3341 struct ipr_resource_entry *res;
3342 struct scsi_device *sdev;
David Howellsc4028952006-11-22 14:57:56 +00003343 struct ipr_ioa_cfg *ioa_cfg =
Wen Xiong318ddb32018-09-20 19:32:12 -05003344 container_of(work, struct ipr_ioa_cfg, scsi_add_work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003345 u8 bus, target, lun;
3346 int did_work;
3347
3348 ENTER;
3349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3350
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351restart:
3352 do {
3353 did_work = 0;
Brian Kingf688f962014-12-02 12:47:37 -06003354 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3356 return;
3357 }
3358
3359 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3360 if (res->del_from_ml && res->sdev) {
3361 did_work = 1;
3362 sdev = res->sdev;
3363 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003364 if (!res->add_to_ml)
3365 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3366 else
3367 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369 scsi_remove_device(sdev);
3370 scsi_device_put(sdev);
3371 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3372 }
3373 break;
3374 }
3375 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003376 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003377
3378 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003380 bus = res->bus;
3381 target = res->target;
3382 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003383 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3385 scsi_add_device(ioa_cfg->host, bus, target, lun);
3386 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3387 goto restart;
3388 }
3389 }
3390
Brian Kingf688f962014-12-02 12:47:37 -06003391 ioa_cfg->scan_done = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003393 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 LEAVE;
3395}
3396
Wen Xiong318ddb32018-09-20 19:32:12 -05003397/**
3398 * ipr_worker_thread - Worker thread
3399 * @work: ioa config struct
3400 *
3401 * Called at task level from a work thread. This function takes care
3402 * of adding and removing devices from the mid-layer as configuration
3403 * changes are detected by the adapter.
3404 *
3405 * Return value:
3406 * nothing
3407 **/
3408static void ipr_worker_thread(struct work_struct *work)
3409{
3410 unsigned long lock_flags;
3411 struct ipr_dump *dump;
3412 struct ipr_ioa_cfg *ioa_cfg =
3413 container_of(work, struct ipr_ioa_cfg, work_q);
3414
3415 ENTER;
3416 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3417
3418 if (ioa_cfg->sdt_state == READ_DUMP) {
3419 dump = ioa_cfg->dump;
3420 if (!dump) {
3421 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3422 return;
3423 }
3424 kref_get(&dump->kref);
3425 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3426 ipr_get_ioa_dump(ioa_cfg, dump);
3427 kref_put(&dump->kref, ipr_release_dump);
3428
3429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3430 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
3431 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433 return;
3434 }
3435
3436 if (ioa_cfg->scsi_unblock) {
3437 ioa_cfg->scsi_unblock = 0;
3438 ioa_cfg->scsi_blocked = 0;
3439 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3440 scsi_unblock_requests(ioa_cfg->host);
3441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3442 if (ioa_cfg->scsi_blocked)
3443 scsi_block_requests(ioa_cfg->host);
3444 }
3445
3446 if (!ioa_cfg->scan_enabled) {
3447 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3448 return;
3449 }
3450
3451 schedule_work(&ioa_cfg->scsi_add_work_q);
3452
3453 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3454 LEAVE;
3455}
3456
Linus Torvalds1da177e2005-04-16 15:20:36 -07003457#ifdef CONFIG_SCSI_IPR_TRACE
3458/**
3459 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003460 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003462 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 * @buf: buffer
3464 * @off: offset
3465 * @count: buffer size
3466 *
3467 * Return value:
3468 * number of bytes printed to buffer
3469 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003470static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003471 struct bin_attribute *bin_attr,
3472 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473{
Tony Jonesee959b02008-02-22 00:13:36 +01003474 struct device *dev = container_of(kobj, struct device, kobj);
3475 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3477 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003478 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003479
3480 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003481 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3482 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003483 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003484
3485 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486}
3487
3488static struct bin_attribute ipr_trace_attr = {
3489 .attr = {
3490 .name = "trace",
3491 .mode = S_IRUGO,
3492 },
3493 .size = 0,
3494 .read = ipr_read_trace,
3495};
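/*
 * Illustrative userspace sketch (not part of the driver; the sysfs path
 * is an assumption based on the scsi_host class device the attribute is
 * created on, typically /sys/class/scsi_host/host<N>/trace). It simply
 * drains up to IPR_TRACE_SIZE bytes of raw trace data:
 *
 *	int fd = open("/sys/class/scsi_host/host0/trace", O_RDONLY);
 *	char buf[4096];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		;
 *	close(fd);
 */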
3496#endif
3497
3498/**
3499 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003500 * @dev: class device struct
3501 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 *
3503 * Return value:
3504 * number of bytes printed to buffer
3505 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003506static ssize_t ipr_show_fw_version(struct device *dev,
3507 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003508{
Tony Jonesee959b02008-02-22 00:13:36 +01003509 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003510 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3511 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3512 unsigned long lock_flags = 0;
3513 int len;
3514
3515 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3516 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3517 ucode_vpd->major_release, ucode_vpd->card_type,
3518 ucode_vpd->minor_release[0],
3519 ucode_vpd->minor_release[1]);
3520 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3521 return len;
3522}
3523
Tony Jonesee959b02008-02-22 00:13:36 +01003524static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 .attr = {
3526 .name = "fw_version",
3527 .mode = S_IRUGO,
3528 },
3529 .show = ipr_show_fw_version,
3530};
3531
3532/**
3533 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003534 * @dev: class device struct
3535 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536 *
3537 * Return value:
3538 * number of bytes printed to buffer
3539 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003540static ssize_t ipr_show_log_level(struct device *dev,
3541 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542{
Tony Jonesee959b02008-02-22 00:13:36 +01003543 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003544 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3545 unsigned long lock_flags = 0;
3546 int len;
3547
3548 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3549 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3550 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3551 return len;
3552}
3553
3554/**
3555 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003556 * @dev: class device struct
3557 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558 *
3559 * Return value:
3560 * number of bytes consumed from buffer
3561 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003562static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003563 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003564 const char *buf, size_t count)
3565{
Tony Jonesee959b02008-02-22 00:13:36 +01003566 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003567 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3568 unsigned long lock_flags = 0;
3569
3570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3571 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3573 return strlen(buf);
3574}
3575
Tony Jonesee959b02008-02-22 00:13:36 +01003576static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 .attr = {
3578 .name = "log_level",
3579 .mode = S_IRUGO | S_IWUSR,
3580 },
3581 .show = ipr_show_log_level,
3582 .store = ipr_store_log_level
3583};
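/*
 * Note: log_level, like the other host attributes defined here, is a
 * scsi_host class device attribute, so it is normally visible under
 * /sys/class/scsi_host/host<N>/log_level (path assumed, not mandated
 * by this file); writes take effect for subsequent error logging.
 */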
3584
3585/**
3586 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003587 * @dev: device struct
3588 * @buf: buffer
3589 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003590 *
3591 * This function will reset the adapter and wait a reasonable
3592 * amount of time for any errors that the adapter might log.
3593 *
3594 * Return value:
3595 * count on success / other on failure
3596 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003597static ssize_t ipr_store_diagnostics(struct device *dev,
3598 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599 const char *buf, size_t count)
3600{
Tony Jonesee959b02008-02-22 00:13:36 +01003601 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3603 unsigned long lock_flags = 0;
3604 int rc = count;
3605
3606 if (!capable(CAP_SYS_ADMIN))
3607 return -EACCES;
3608
Linus Torvalds1da177e2005-04-16 15:20:36 -07003609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003610 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003611 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3612 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3613 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3614 }
3615
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 ioa_cfg->errors_logged = 0;
3617 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3618
3619 if (ioa_cfg->in_reset_reload) {
3620 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3621 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3622
3623 /* Wait for a second for any errors to be logged */
3624 msleep(1000);
3625 } else {
3626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3627 return -EIO;
3628 }
3629
3630 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3631 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3632 rc = -EIO;
3633 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3634
3635 return rc;
3636}
3637
Tony Jonesee959b02008-02-22 00:13:36 +01003638static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 .attr = {
3640 .name = "run_diagnostics",
3641 .mode = S_IWUSR,
3642 },
3643 .store = ipr_store_diagnostics
3644};
3645
3646/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003647 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003648 * @dev: device struct
3649 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003650 *
3651 * Return value:
3652 * number of bytes printed to buffer
3653 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003654static ssize_t ipr_show_adapter_state(struct device *dev,
3655 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003656{
Tony Jonesee959b02008-02-22 00:13:36 +01003657 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003658 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3659 unsigned long lock_flags = 0;
3660 int len;
3661
3662 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003663 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003664 len = snprintf(buf, PAGE_SIZE, "offline\n");
3665 else
3666 len = snprintf(buf, PAGE_SIZE, "online\n");
3667 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3668 return len;
3669}
3670
3671/**
3672 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003673 * @dev: device struct
3674 * @buf: buffer
3675 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003676 *
3677 * This function will change the adapter's state.
3678 *
3679 * Return value:
3680 * count on success / other on failure
3681 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003682static ssize_t ipr_store_adapter_state(struct device *dev,
3683 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003684 const char *buf, size_t count)
3685{
Tony Jonesee959b02008-02-22 00:13:36 +01003686 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003687 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3688 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003689 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003690
3691 if (!capable(CAP_SYS_ADMIN))
3692 return -EACCES;
3693
3694 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003695 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3696 !strncmp(buf, "online", 6)) {
3697 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3698 spin_lock(&ioa_cfg->hrrq[i]._lock);
3699 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3700 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3701 }
3702 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003703 ioa_cfg->reset_retries = 0;
3704 ioa_cfg->in_ioa_bringdown = 0;
3705 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3706 }
3707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3709
3710 return result;
3711}
3712
Tony Jonesee959b02008-02-22 00:13:36 +01003713static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003714 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003715 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003716 .mode = S_IRUGO | S_IWUSR,
3717 },
3718 .show = ipr_show_adapter_state,
3719 .store = ipr_store_adapter_state
3720};
3721
3722/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003723 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003724 * @dev: device struct
3725 * @buf: buffer
3726 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003727 *
3728 * This function will reset the adapter.
3729 *
3730 * Return value:
3731 * count on success / other on failure
3732 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003733static ssize_t ipr_store_reset_adapter(struct device *dev,
3734 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735 const char *buf, size_t count)
3736{
Tony Jonesee959b02008-02-22 00:13:36 +01003737 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003738 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3739 unsigned long lock_flags;
3740 int result = count;
3741
3742 if (!capable(CAP_SYS_ADMIN))
3743 return -EACCES;
3744
3745 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3746 if (!ioa_cfg->in_reset_reload)
3747 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3748 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3749 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3750
3751 return result;
3752}
3753
Tony Jonesee959b02008-02-22 00:13:36 +01003754static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003755 .attr = {
3756 .name = "reset_host",
3757 .mode = S_IWUSR,
3758 },
3759 .store = ipr_store_reset_adapter
3760};
3761
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003762static int ipr_iopoll(struct irq_poll *iop, int budget);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003763/**
3764 * ipr_show_iopoll_weight - Show ipr polling mode
3765 * @dev: class device struct
3766 * @buf: buffer
3767 *
3768 * Return value:
3769 * number of bytes printed to buffer
3770 **/
3771static ssize_t ipr_show_iopoll_weight(struct device *dev,
3772 struct device_attribute *attr, char *buf)
3773{
3774 struct Scsi_Host *shost = class_to_shost(dev);
3775 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3776 unsigned long lock_flags = 0;
3777 int len;
3778
3779 spin_lock_irqsave(shost->host_lock, lock_flags);
3780 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3781 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3782
3783 return len;
3784}
3785
3786/**
3787 * ipr_store_iopoll_weight - Change the adapter's polling mode
3788 * @dev: class device struct
3789 * @buf: buffer
3790 *
3791 * Return value:
3792 * number of bytes consumed from buffer
3793 **/
3794static ssize_t ipr_store_iopoll_weight(struct device *dev,
3795 struct device_attribute *attr,
3796 const char *buf, size_t count)
3797{
3798 struct Scsi_Host *shost = class_to_shost(dev);
3799 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3800 unsigned long user_iopoll_weight;
3801 unsigned long lock_flags = 0;
3802 int i;
3803
3804 if (!ioa_cfg->sis64) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003805 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003806 return -EINVAL;
3807 }
3808 if (kstrtoul(buf, 10, &user_iopoll_weight))
3809 return -EINVAL;
3810
3811 if (user_iopoll_weight > 256) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003812 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be 256 or less\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003813 return -EINVAL;
3814 }
3815
3816 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003817 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight already set to that value\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003818 return strlen(buf);
3819 }
3820
Jens Axboe89f8b332014-03-13 09:38:42 -06003821 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003822 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003823 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003824 }
3825
3826 spin_lock_irqsave(shost->host_lock, lock_flags);
3827 ioa_cfg->iopoll_weight = user_iopoll_weight;
Jens Axboe89f8b332014-03-13 09:38:42 -06003828 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003829 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003830 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003831 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003832 }
3833 }
3834 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3835
3836 return strlen(buf);
3837}
3838
3839static struct device_attribute ipr_iopoll_weight_attr = {
3840 .attr = {
3841 .name = "iopoll_weight",
3842 .mode = S_IRUGO | S_IWUSR,
3843 },
3844 .show = ipr_show_iopoll_weight,
3845 .store = ipr_store_iopoll_weight
3846};
3847
Linus Torvalds1da177e2005-04-16 15:20:36 -07003848/**
3849 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3850 * @buf_len: buffer length
3851 *
3852 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3853 * list to use for microcode download
3854 *
3855 * Return value:
3856 * pointer to sglist / NULL on failure
3857 **/
3858static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3859{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003860 int sg_size, order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003861 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003862
3863 /* Get the minimum size per scatter/gather element */
3864 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3865
3866 /* Get the actual size per element */
3867 order = get_order(sg_size);
3868
Linus Torvalds1da177e2005-04-16 15:20:36 -07003869 /* Allocate a scatter/gather list for the DMA */
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003870 sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003871 if (sglist == NULL) {
3872 ipr_trace;
3873 return NULL;
3874 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003875 sglist->order = order;
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003876 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3877 &sglist->num_sg);
3878 if (!sglist->scatterlist) {
3879 kfree(sglist);
3880 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003881 }
3882
3883 return sglist;
3884}
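/*
 * Sizing sketch (illustrative, assuming IPR_MAX_SGLIST is 64 and 4 KB
 * pages): a 4 MB microcode image gives sg_size = 4194304 / 63 = 66576
 * bytes, get_order() rounds that up to order 5, and sgl_alloc_order()
 * therefore builds the scatterlist out of 128 KB (32-page) chunks.
 */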
3885
3886/**
3887 * ipr_free_ucode_buffer - Frees a microcode download buffer
3888 * @p_dnld: scatter/gather list pointer
3889 *
3890 * Free a DMA'able ucode download buffer previously allocated with
3891 * ipr_alloc_ucode_buffer
3892 *
3893 * Return value:
3894 * nothing
3895 **/
3896static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3897{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003898 sgl_free_order(sglist->scatterlist, sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 kfree(sglist);
3900}
3901
3902/**
3903 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3904 * @sglist: scatter/gather list pointer
3905 * @buffer: buffer pointer
3906 * @len: buffer length
3907 *
3908 * Copy a microcode image from a user buffer into a buffer allocated by
3909 * ipr_alloc_ucode_buffer
3910 *
3911 * Return value:
3912 * 0 on success / other on failure
3913 **/
3914static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3915 u8 *buffer, u32 len)
3916{
3917 int bsize_elem, i, result = 0;
3918 struct scatterlist *scatterlist;
3919 void *kaddr;
3920
3921 /* Determine the actual number of bytes per element */
3922 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3923
3924 scatterlist = sglist->scatterlist;
3925
3926 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003927 struct page *page = sg_page(&scatterlist[i]);
3928
3929 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003930 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003931 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003932
3933 scatterlist[i].length = bsize_elem;
3934
3935 if (result != 0) {
3936 ipr_trace;
3937 return result;
3938 }
3939 }
3940
3941 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003942 struct page *page = sg_page(&scatterlist[i]);
3943
3944 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003946 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947
3948 scatterlist[i].length = len % bsize_elem;
3949 }
3950
3951 sglist->buffer_len = len;
3952 return result;
3953}
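/*
 * Continuing the sizing sketch above (order 5, 4 KB pages): bsize_elem
 * is 4096 << 5 = 128 KB, so a 4 MB image is copied as exactly 32 full
 * elements; an image that is not a multiple of 128 KB would also fill
 * one final, shorter element via the tail path above.
 */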
3954
3955/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003956 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3957 * @ipr_cmd: ipr command struct
3958 * @sglist: scatter/gather list
3959 *
3960 * Builds a microcode download IOA data list (IOADL).
3961 *
3962 **/
3963static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3964 struct ipr_sglist *sglist)
3965{
3966 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3967 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3968 struct scatterlist *scatterlist = sglist->scatterlist;
3969 int i;
3970
3971 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3972 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3973 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3974
3975 ioarcb->ioadl_len =
3976 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3977 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3978 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3979 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3980 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3981 }
3982
3983 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3984}
3985
3986/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003987 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003988 * @ipr_cmd: ipr command struct
3989 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003990 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003991 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003993 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003994static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3995 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003996{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003998 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003999 struct scatterlist *scatterlist = sglist->scatterlist;
4000 int i;
4001
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004002 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08004004 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
4005
4006 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4008
4009 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
4010 ioadl[i].flags_and_data_len =
4011 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
4012 ioadl[i].address =
4013 cpu_to_be32(sg_dma_address(&scatterlist[i]));
4014 }
4015
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004016 ioadl[i-1].flags_and_data_len |=
4017 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4018}
4019
4020/**
4021 * ipr_update_ioa_ucode - Update IOA's microcode
4022 * @ioa_cfg: ioa config struct
4023 * @sglist: scatter/gather list
4024 *
4025 * Initiate an adapter reset to update the IOA's microcode
4026 *
4027 * Return value:
4028 * 0 on success / -EIO on failure
4029 **/
4030static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
4031 struct ipr_sglist *sglist)
4032{
4033 unsigned long lock_flags;
4034
4035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004036 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05004037 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4038 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4040 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004041
4042 if (ioa_cfg->ucode_sglist) {
4043 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4044 dev_err(&ioa_cfg->pdev->dev,
4045 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 return -EIO;
4047 }
4048
Anton Blanchardd73341b2014-10-30 17:27:08 -05004049 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4050 sglist->scatterlist, sglist->num_sg,
4051 DMA_TO_DEVICE);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004052
4053 if (!sglist->num_dma_sg) {
4054 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4055 dev_err(&ioa_cfg->pdev->dev,
4056 "Failed to map microcode download buffer!\n");
4057 return -EIO;
4058 }
4059
4060 ioa_cfg->ucode_sglist = sglist;
4061 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4062 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4063 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4064
4065 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066 ioa_cfg->ucode_sglist = NULL;
4067 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 return 0;
4069}
4070
4071/**
4072 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01004073 * @dev: device struct
4074 * @buf: buffer
4075 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076 *
4077 * This function will update the firmware on the adapter.
4078 *
4079 * Return value:
4080 * count on success / other on failure
4081 **/
Tony Jonesee959b02008-02-22 00:13:36 +01004082static ssize_t ipr_store_update_fw(struct device *dev,
4083 struct device_attribute *attr,
4084 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085{
Tony Jonesee959b02008-02-22 00:13:36 +01004086 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4088 struct ipr_ucode_image_header *image_hdr;
4089 const struct firmware *fw_entry;
4090 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004091 char fname[100];
4092 char *src;
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004093 char *endline;
Insu Yund63c7dd2016-01-06 12:44:01 -05004094 int result, dnld_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095
4096 if (!capable(CAP_SYS_ADMIN))
4097 return -EACCES;
4098
Insu Yund63c7dd2016-01-06 12:44:01 -05004099 snprintf(fname, sizeof(fname), "%s", buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004100
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004101 endline = strchr(fname, '\n');
4102 if (endline)
4103 *endline = '\0';
4104
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004105 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4107 return -EIO;
4108 }
4109
4110 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4111
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4113 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4114 sglist = ipr_alloc_ucode_buffer(dnld_size);
4115
4116 if (!sglist) {
4117 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4118 release_firmware(fw_entry);
4119 return -ENOMEM;
4120 }
4121
4122 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4123
4124 if (result) {
4125 dev_err(&ioa_cfg->pdev->dev,
4126 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004127 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 }
4129
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07004130 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4131
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004132 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004133
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004134 if (!result)
4135 result = count;
4136out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004137 ipr_free_ucode_buffer(sglist);
4138 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004139 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140}
4141
Tony Jonesee959b02008-02-22 00:13:36 +01004142static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004143 .attr = {
4144 .name = "update_fw",
4145 .mode = S_IWUSR,
4146 },
4147 .store = ipr_store_update_fw
4148};
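
/*
 * Usage sketch, assuming the attribute is exposed under the host's
 * scsi_host class directory (hypothetical host number and file name).
 * The image is loaded by name via request_firmware(), so it must live
 * in the firmware search path, typically /lib/firmware; writing the
 * name starts the download and the adapter reset that applies it:
 *
 *   echo ibm-adapter-ucode.bin > /sys/class/scsi_host/host0/update_fw
 */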
4149
Wayne Boyer75576bb2010-07-14 10:50:14 -07004150/**
4151 * ipr_show_fw_type - Show the adapter's firmware type.
4152 * @dev: class device struct
4153 * @buf: buffer
4154 *
4155 * Return value:
4156 * number of bytes printed to buffer
4157 **/
4158static ssize_t ipr_show_fw_type(struct device *dev,
4159 struct device_attribute *attr, char *buf)
4160{
4161 struct Scsi_Host *shost = class_to_shost(dev);
4162 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4163 unsigned long lock_flags = 0;
4164 int len;
4165
4166 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4167 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4169 return len;
4170}
4171
4172static struct device_attribute ipr_ioa_fw_type_attr = {
4173 .attr = {
4174 .name = "fw_type",
4175 .mode = S_IRUGO,
4176 },
4177 .show = ipr_show_fw_type
4178};
4179
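/**
 * ipr_read_async_err_log - Read the oldest reported async error
 * @filep: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Copies the oldest host controlled async message (HCAM) queued on
 * hostrcb_report_q into the sysfs buffer, or returns 0 if none is pending.
 *
 * Return value:
 * number of bytes read on success / 0 if no error is pending
 **/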
Brian Kingafc3f832016-08-24 12:56:51 -05004180static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4181 struct bin_attribute *bin_attr, char *buf,
4182 loff_t off, size_t count)
4183{
4184 struct device *cdev = container_of(kobj, struct device, kobj);
4185 struct Scsi_Host *shost = class_to_shost(cdev);
4186 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4187 struct ipr_hostrcb *hostrcb;
4188 unsigned long lock_flags = 0;
4189 int ret;
4190
4191 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4192 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4193 struct ipr_hostrcb, queue);
4194 if (!hostrcb) {
4195 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4196 return 0;
4197 }
4198 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4199 sizeof(hostrcb->hcam));
4200 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4201 return ret;
4202}
4203
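/**
 * ipr_next_async_err_log - Retire the oldest reported async error
 * @filep: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * A write to this attribute moves the oldest entry on hostrcb_report_q
 * back to the free queue so the next error, if any, can be read.
 *
 * Return value:
 * count
 **/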
4204static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4205 struct bin_attribute *bin_attr, char *buf,
4206 loff_t off, size_t count)
4207{
4208 struct device *cdev = container_of(kobj, struct device, kobj);
4209 struct Scsi_Host *shost = class_to_shost(cdev);
4210 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4211 struct ipr_hostrcb *hostrcb;
4212 unsigned long lock_flags = 0;
4213
4214 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4215 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4216 struct ipr_hostrcb, queue);
4217 if (!hostrcb) {
4218 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4219 return count;
4220 }
4221
4222 /* Reclaim hostrcb before exit */
4223 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4224 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4225 return count;
4226}
4227
4228static struct bin_attribute ipr_ioa_async_err_log = {
4229 .attr = {
4230 .name = "async_err_log",
4231 .mode = S_IRUGO | S_IWUSR,
4232 },
4233 .size = 0,
4234 .read = ipr_read_async_err_log,
4235 .write = ipr_next_async_err_log
4236};
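
/*
 * Usage sketch, assuming the attribute is registered on the Scsi_Host
 * device (hypothetical host number): read async_err_log to fetch the
 * oldest error buffer, then write any value back to retire it and
 * expose the next one:
 *
 *   cat /sys/class/scsi_host/host0/async_err_log > hcam.bin
 *   echo 1 > /sys/class/scsi_host/host0/async_err_log
 */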
4237
Tony Jonesee959b02008-02-22 00:13:36 +01004238static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239 &ipr_fw_version_attr,
4240 &ipr_log_level_attr,
4241 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004242 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243 &ipr_ioa_reset_attr,
4244 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004245 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004246 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 NULL,
4248};
4249
4250#ifdef CONFIG_SCSI_IPR_DUMP
4251/**
4252 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004253 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004255 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256 * @buf: buffer
4257 * @off: offset
4258 * @count: buffer size
4259 *
4260 * Return value:
4261 * number of bytes printed to buffer
4262 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004263static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004264 struct bin_attribute *bin_attr,
4265 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266{
Tony Jonesee959b02008-02-22 00:13:36 +01004267 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268 struct Scsi_Host *shost = class_to_shost(cdev);
4269 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4270 struct ipr_dump *dump;
4271 unsigned long lock_flags = 0;
4272 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004273 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274 size_t rc = count;
4275
4276 if (!capable(CAP_SYS_ADMIN))
4277 return -EACCES;
4278
4279 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4280 dump = ioa_cfg->dump;
4281
4282 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4283 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4284 return 0;
4285 }
4286 kref_get(&dump->kref);
4287 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4288
4289 if (off > dump->driver_dump.hdr.len) {
4290 kref_put(&dump->kref, ipr_release_dump);
4291 return 0;
4292 }
4293
4294 if (off + count > dump->driver_dump.hdr.len) {
4295 count = dump->driver_dump.hdr.len - off;
4296 rc = count;
4297 }
4298
4299 if (count && off < sizeof(dump->driver_dump)) {
4300 if (off + count > sizeof(dump->driver_dump))
4301 len = sizeof(dump->driver_dump) - off;
4302 else
4303 len = count;
4304 src = (u8 *)&dump->driver_dump + off;
4305 memcpy(buf, src, len);
4306 buf += len;
4307 off += len;
4308 count -= len;
4309 }
4310
4311 off -= sizeof(dump->driver_dump);
4312
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004313 if (ioa_cfg->sis64)
4314 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4315 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4316 sizeof(struct ipr_sdt_entry));
4317 else
4318 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4319 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4320
4321 if (count && off < sdt_end) {
4322 if (off + count > sdt_end)
4323 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 else
4325 len = count;
4326 src = (u8 *)&dump->ioa_dump + off;
4327 memcpy(buf, src, len);
4328 buf += len;
4329 off += len;
4330 count -= len;
4331 }
4332
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004333 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334
4335 while (count) {
4336 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4337 len = PAGE_ALIGN(off) - off;
4338 else
4339 len = count;
4340 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4341 src += off & ~PAGE_MASK;
4342 memcpy(buf, src, len);
4343 buf += len;
4344 off += len;
4345 count -= len;
4346 }
4347
4348 kref_put(&dump->kref, ipr_release_dump);
4349 return rc;
4350}
4351
4352/**
4353 * ipr_alloc_dump - Prepare for adapter dump
4354 * @ioa_cfg: ioa config struct
4355 *
4356 * Return value:
4357 * 0 on success / other on failure
4358 **/
4359static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4360{
4361 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004362 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363 unsigned long lock_flags = 0;
4364
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004365 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366
4367 if (!dump) {
4368 ipr_err("Dump memory allocation failed\n");
4369 return -ENOMEM;
4370 }
4371
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004372 if (ioa_cfg->sis64)
Kees Cook42bc47b2018-06-12 14:27:11 -07004373 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4374 sizeof(__be32 *)));
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004375 else
Kees Cook42bc47b2018-06-12 14:27:11 -07004376 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4377 sizeof(__be32 *)));
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004378
4379 if (!ioa_data) {
4380 ipr_err("Dump memory allocation failed\n");
4381 kfree(dump);
4382 return -ENOMEM;
4383 }
4384
4385 dump->ioa_dump.ioa_data = ioa_data;
4386
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387 kref_init(&dump->kref);
4388 dump->ioa_cfg = ioa_cfg;
4389
4390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4391
4392 if (INACTIVE != ioa_cfg->sdt_state) {
4393 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004394 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004395 kfree(dump);
4396 return 0;
4397 }
4398
4399 ioa_cfg->dump = dump;
4400 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004401 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 ioa_cfg->dump_taken = 1;
4403 schedule_work(&ioa_cfg->work_q);
4404 }
4405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4406
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407 return 0;
4408}
4409
4410/**
4411 * ipr_free_dump - Free adapter dump memory
4412 * @ioa_cfg: ioa config struct
4413 *
4414 * Return value:
4415 * 0 on success / other on failure
4416 **/
4417static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4418{
4419 struct ipr_dump *dump;
4420 unsigned long lock_flags = 0;
4421
4422 ENTER;
4423
4424 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4425 dump = ioa_cfg->dump;
4426 if (!dump) {
4427 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4428 return 0;
4429 }
4430
4431 ioa_cfg->dump = NULL;
4432 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4433
4434 kref_put(&dump->kref, ipr_release_dump);
4435
4436 LEAVE;
4437 return 0;
4438}
4439
4440/**
4441 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004442 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004444 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004445 * @buf: buffer
4446 * @off: offset
4447 * @count: buffer size
4448 *
4449 * Return value:
 4450 * number of bytes written on success / negative errno on failure
4451 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004452static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004453 struct bin_attribute *bin_attr,
4454 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004455{
Tony Jonesee959b02008-02-22 00:13:36 +01004456 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 struct Scsi_Host *shost = class_to_shost(cdev);
4458 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4459 int rc;
4460
4461 if (!capable(CAP_SYS_ADMIN))
4462 return -EACCES;
4463
4464 if (buf[0] == '1')
4465 rc = ipr_alloc_dump(ioa_cfg);
4466 else if (buf[0] == '0')
4467 rc = ipr_free_dump(ioa_cfg);
4468 else
4469 return -EINVAL;
4470
4471 if (rc)
4472 return rc;
4473 else
4474 return count;
4475}
4476
4477static struct bin_attribute ipr_dump_attr = {
4478 .attr = {
4479 .name = "dump",
4480 .mode = S_IRUSR | S_IWUSR,
4481 },
4482 .size = 0,
4483 .read = ipr_read_dump,
4484 .write = ipr_write_dump
4485};
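
/*
 * Usage sketch, assuming the attribute is registered on the Scsi_Host
 * device (hypothetical host number): writing '1' prepares dump memory
 * and schedules collection, the dump is then read back, and writing
 * '0' releases the buffers:
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump
 *   cat /sys/class/scsi_host/host0/dump > ioa_dump.bin
 *   echo 0 > /sys/class/scsi_host/host0/dump
 */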
4486#else
4487static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4488#endif
4489
4490/**
4491 * ipr_change_queue_depth - Change the device's queue depth
4492 * @sdev: scsi device struct
4493 * @qdepth: depth to set
Linus Torvalds1da177e2005-04-16 15:20:36 -07004495 *
4496 * Return value:
4497 * actual depth set
4498 **/
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004499static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004500{
Brian King35a39692006-09-25 12:39:20 -05004501 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4502 struct ipr_resource_entry *res;
4503 unsigned long lock_flags = 0;
4504
4505 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4506 res = (struct ipr_resource_entry *)sdev->hostdata;
4507
4508 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4509 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4510 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4511
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004512 scsi_change_queue_depth(sdev, qdepth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004513 return sdev->queue_depth;
4514}
4515
4516/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004517 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4518 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004519 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 * @buf: buffer
4521 *
4522 * Return value:
4523 * number of bytes printed to buffer
4524 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004525static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004526{
4527 struct scsi_device *sdev = to_scsi_device(dev);
4528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4529 struct ipr_resource_entry *res;
4530 unsigned long lock_flags = 0;
4531 ssize_t len = -ENXIO;
4532
4533 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4534 res = (struct ipr_resource_entry *)sdev->hostdata;
4535 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004536 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4538 return len;
4539}
4540
4541static struct device_attribute ipr_adapter_handle_attr = {
4542 .attr = {
4543 .name = "adapter_handle",
4544 .mode = S_IRUSR,
4545 },
4546 .show = ipr_show_adapter_handle
4547};
4548
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004549/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004550 * ipr_show_resource_path - Show the resource path or the resource address for
4551 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004552 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004553 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004554 * @buf: buffer
4555 *
4556 * Return value:
4557 * number of bytes printed to buffer
4558 **/
4559static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4560{
4561 struct scsi_device *sdev = to_scsi_device(dev);
4562 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4563 struct ipr_resource_entry *res;
4564 unsigned long lock_flags = 0;
4565 ssize_t len = -ENXIO;
4566 char buffer[IPR_MAX_RES_PATH_LENGTH];
4567
4568 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4569 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004570 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004571 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004572 __ipr_format_res_path(res->res_path, buffer,
4573 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004574 else if (res)
4575 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4576 res->bus, res->target, res->lun);
4577
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004578 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4579 return len;
4580}
4581
4582static struct device_attribute ipr_resource_path_attr = {
4583 .attr = {
4584 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004585 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004586 },
4587 .show = ipr_show_resource_path
4588};
4589
Wayne Boyer75576bb2010-07-14 10:50:14 -07004590/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004591 * ipr_show_device_id - Show the device_id for this device.
4592 * @dev: device struct
4593 * @attr: device attribute structure
4594 * @buf: buffer
4595 *
4596 * Return value:
4597 * number of bytes printed to buffer
4598 **/
4599static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4600{
4601 struct scsi_device *sdev = to_scsi_device(dev);
4602 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4603 struct ipr_resource_entry *res;
4604 unsigned long lock_flags = 0;
4605 ssize_t len = -ENXIO;
4606
4607 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4608 res = (struct ipr_resource_entry *)sdev->hostdata;
4609 if (res && ioa_cfg->sis64)
Wen Xiongbb8647e2015-06-11 20:45:18 -05004610 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
Wayne Boyer46d74562010-08-11 07:15:17 -07004611 else if (res)
4612 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4613
4614 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4615 return len;
4616}
4617
4618static struct device_attribute ipr_device_id_attr = {
4619 .attr = {
4620 .name = "device_id",
4621 .mode = S_IRUGO,
4622 },
4623 .show = ipr_show_device_id
4624};
4625
4626/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004627 * ipr_show_resource_type - Show the resource type for this device.
4628 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004629 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004630 * @buf: buffer
4631 *
4632 * Return value:
4633 * number of bytes printed to buffer
4634 **/
4635static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4636{
4637 struct scsi_device *sdev = to_scsi_device(dev);
4638 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4639 struct ipr_resource_entry *res;
4640 unsigned long lock_flags = 0;
4641 ssize_t len = -ENXIO;
4642
4643 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4644 res = (struct ipr_resource_entry *)sdev->hostdata;
4645
4646 if (res)
4647 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4648
4649 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4650 return len;
4651}
4652
4653static struct device_attribute ipr_resource_type_attr = {
4654 .attr = {
4655 .name = "resource_type",
4656 .mode = S_IRUGO,
4657 },
4658 .show = ipr_show_resource_type
4659};
4660
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004661/**
 4662 * ipr_show_raw_mode - Show the device's raw mode
 4663 * @dev: device struct
4664 * @buf: buffer
4665 *
4666 * Return value:
4667 * number of bytes printed to buffer
4668 **/
4669static ssize_t ipr_show_raw_mode(struct device *dev,
4670 struct device_attribute *attr, char *buf)
4671{
4672 struct scsi_device *sdev = to_scsi_device(dev);
4673 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4674 struct ipr_resource_entry *res;
4675 unsigned long lock_flags = 0;
4676 ssize_t len;
4677
4678 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4679 res = (struct ipr_resource_entry *)sdev->hostdata;
4680 if (res)
4681 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4682 else
4683 len = -ENXIO;
4684 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4685 return len;
4686}
4687
4688/**
 4689 * ipr_store_raw_mode - Change the device's raw mode
 4690 * @dev: device struct
4691 * @buf: buffer
4692 *
4693 * Return value:
 4694 * number of bytes written on success / negative errno on failure
4695 **/
4696static ssize_t ipr_store_raw_mode(struct device *dev,
4697 struct device_attribute *attr,
4698 const char *buf, size_t count)
4699{
4700 struct scsi_device *sdev = to_scsi_device(dev);
4701 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4702 struct ipr_resource_entry *res;
4703 unsigned long lock_flags = 0;
4704 ssize_t len;
4705
4706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4707 res = (struct ipr_resource_entry *)sdev->hostdata;
4708 if (res) {
Gabriel Krisman Bertazie35d7f272015-08-19 11:47:06 -03004709 if (ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004710 res->raw_mode = simple_strtoul(buf, NULL, 10);
4711 len = strlen(buf);
4712 if (res->sdev)
4713 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4714 res->raw_mode ? "enabled" : "disabled");
4715 } else
4716 len = -EINVAL;
4717 } else
4718 len = -ENXIO;
4719 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4720 return len;
4721}
4722
4723static struct device_attribute ipr_raw_mode_attr = {
4724 .attr = {
4725 .name = "raw_mode",
4726 .mode = S_IRUGO | S_IWUSR,
4727 },
4728 .show = ipr_show_raw_mode,
4729 .store = ipr_store_raw_mode
4730};
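
/*
 * Usage sketch (hypothetical device address, assuming the attribute
 * appears in the SCSI device's sysfs directory): raw mode can only be
 * toggled on AF DASD devices and takes a simple 0/1 value:
 *
 *   echo 1 > /sys/bus/scsi/devices/0:0:1:0/raw_mode
 */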
4731
Linus Torvalds1da177e2005-04-16 15:20:36 -07004732static struct device_attribute *ipr_dev_attrs[] = {
4733 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004734 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004735 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004736 &ipr_resource_type_attr,
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004737 &ipr_raw_mode_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004738 NULL,
4739};
4740
4741/**
4742 * ipr_biosparam - Return the HSC mapping
4743 * @sdev: scsi device struct
4744 * @block_device: block device pointer
4745 * @capacity: capacity of the device
4746 * @parm: Array containing returned HSC values.
4747 *
4748 * This function generates the HSC parms that fdisk uses.
4749 * We want to make sure we return something that places partitions
4750 * on 4k boundaries for best performance with the IOA.
4751 *
4752 * Return value:
4753 * 0 on success
4754 **/
4755static int ipr_biosparam(struct scsi_device *sdev,
4756 struct block_device *block_device,
4757 sector_t capacity, int *parm)
4758{
4759 int heads, sectors;
4760 sector_t cylinders;
4761
4762 heads = 128;
4763 sectors = 32;
4764
4765 cylinders = capacity;
4766 sector_div(cylinders, (128 * 32));
4767
4768 /* return result */
4769 parm[0] = heads;
4770 parm[1] = sectors;
4771 parm[2] = cylinders;
4772
4773 return 0;
4774}
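
/*
 * Worked example of the geometry above: 128 heads * 32 sectors/track is
 * 4096 sectors per cylinder, i.e. 2 MiB with 512-byte sectors, so tools
 * that align partitions to cylinder boundaries land on 4k-aligned
 * offsets, which is what the IOA prefers.
 */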
4775
4776/**
Brian King35a39692006-09-25 12:39:20 -05004777 * ipr_find_starget - Find target based on bus/target.
4778 * @starget: scsi target struct
4779 *
4780 * Return value:
4781 * resource entry pointer if found / NULL if not found
4782 **/
4783static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4784{
4785 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4786 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4787 struct ipr_resource_entry *res;
4788
4789 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004790 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004791 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004792 return res;
4793 }
4794 }
4795
4796 return NULL;
4797}
4798
4799static struct ata_port_info sata_port_info;
4800
4801/**
4802 * ipr_target_alloc - Prepare for commands to a SCSI target
4803 * @starget: scsi target struct
4804 *
4805 * If the device is a SATA device, this function allocates an
4806 * ATA port with libata, else it does nothing.
4807 *
4808 * Return value:
4809 * 0 on success / non-0 on failure
4810 **/
4811static int ipr_target_alloc(struct scsi_target *starget)
4812{
4813 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4814 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4815 struct ipr_sata_port *sata_port;
4816 struct ata_port *ap;
4817 struct ipr_resource_entry *res;
4818 unsigned long lock_flags;
4819
4820 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4821 res = ipr_find_starget(starget);
4822 starget->hostdata = NULL;
4823
4824 if (res && ipr_is_gata(res)) {
4825 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4826 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4827 if (!sata_port)
4828 return -ENOMEM;
4829
4830 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4831 if (ap) {
4832 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4833 sata_port->ioa_cfg = ioa_cfg;
4834 sata_port->ap = ap;
4835 sata_port->res = res;
4836
4837 res->sata_port = sata_port;
4838 ap->private_data = sata_port;
4839 starget->hostdata = sata_port;
4840 } else {
4841 kfree(sata_port);
4842 return -ENOMEM;
4843 }
4844 }
4845 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4846
4847 return 0;
4848}
4849
4850/**
4851 * ipr_target_destroy - Destroy a SCSI target
4852 * @starget: scsi target struct
4853 *
4854 * If the device was a SATA device, this function frees the libata
4855 * ATA port, else it does nothing.
4856 *
4857 **/
4858static void ipr_target_destroy(struct scsi_target *starget)
4859{
4860 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004861 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4862 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4863
4864 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004865 if (!ipr_find_starget(starget)) {
4866 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4867 clear_bit(starget->id, ioa_cfg->array_ids);
4868 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4869 clear_bit(starget->id, ioa_cfg->vset_ids);
4870 else if (starget->channel == 0)
4871 clear_bit(starget->id, ioa_cfg->target_ids);
4872 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004873 }
Brian King35a39692006-09-25 12:39:20 -05004874
4875 if (sata_port) {
4876 starget->hostdata = NULL;
4877 ata_sas_port_destroy(sata_port->ap);
4878 kfree(sata_port);
4879 }
4880}
4881
4882/**
4883 * ipr_find_sdev - Find device based on bus/target/lun.
4884 * @sdev: scsi device struct
4885 *
4886 * Return value:
4887 * resource entry pointer if found / NULL if not found
4888 **/
4889static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4890{
4891 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4892 struct ipr_resource_entry *res;
4893
4894 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004895 if ((res->bus == sdev->channel) &&
4896 (res->target == sdev->id) &&
4897 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004898 return res;
4899 }
4900
4901 return NULL;
4902}
4903
4904/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004905 * ipr_slave_destroy - Unconfigure a SCSI device
4906 * @sdev: scsi device struct
4907 *
4908 * Return value:
4909 * nothing
4910 **/
4911static void ipr_slave_destroy(struct scsi_device *sdev)
4912{
4913 struct ipr_resource_entry *res;
4914 struct ipr_ioa_cfg *ioa_cfg;
4915 unsigned long lock_flags = 0;
4916
4917 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4918
4919 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4920 res = (struct ipr_resource_entry *) sdev->hostdata;
4921 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004922 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004923 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 sdev->hostdata = NULL;
4925 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004926 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004927 }
4928 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4929}
4930
4931/**
4932 * ipr_slave_configure - Configure a SCSI device
4933 * @sdev: scsi device struct
4934 *
4935 * This function configures the specified scsi device.
4936 *
4937 * Return value:
4938 * 0 on success
4939 **/
4940static int ipr_slave_configure(struct scsi_device *sdev)
4941{
4942 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4943 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004944 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004945 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004946 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004947
4948 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4949 res = sdev->hostdata;
4950 if (res) {
4951 if (ipr_is_af_dasd_device(res))
4952 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004953 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004954 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004955 sdev->no_uld_attach = 1;
4956 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004957 if (ipr_is_vset_device(res)) {
Brian King60654e22014-12-02 12:47:46 -06004958 sdev->scsi_level = SCSI_SPC_3;
Brian King723cd772017-08-18 16:17:32 -05004959 sdev->no_report_opcodes = 1;
Jens Axboe242f9dc2008-09-14 05:55:09 -07004960 blk_queue_rq_timeout(sdev->request_queue,
4961 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004962 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004963 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004964 if (ipr_is_gata(res) && res->sata_port)
4965 ap = res->sata_port->ap;
4966 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4967
4968 if (ap) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004969 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004970 ata_sas_slave_configure(sdev, ap);
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004971 }
4972
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004973 if (ioa_cfg->sis64)
4974 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004975 ipr_format_res_path(ioa_cfg,
4976 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004977 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004978 }
4979 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4980 return 0;
4981}
4982
4983/**
Brian King35a39692006-09-25 12:39:20 -05004984 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4985 * @sdev: scsi device struct
4986 *
4987 * This function initializes an ATA port so that future commands
4988 * sent through queuecommand will work.
4989 *
4990 * Return value:
4991 * 0 on success
4992 **/
4993static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4994{
4995 struct ipr_sata_port *sata_port = NULL;
4996 int rc = -ENXIO;
4997
4998 ENTER;
4999 if (sdev->sdev_target)
5000 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07005001 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05005002 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07005003 if (rc == 0)
5004 rc = ata_sas_sync_probe(sata_port->ap);
5005 }
5006
Brian King35a39692006-09-25 12:39:20 -05005007 if (rc)
5008 ipr_slave_destroy(sdev);
5009
5010 LEAVE;
5011 return rc;
5012}
5013
5014/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005015 * ipr_slave_alloc - Prepare for commands to a device.
5016 * @sdev: scsi device struct
5017 *
5018 * This function saves a pointer to the resource entry
5019 * in the scsi device struct if the device exists. We
5020 * can then use this pointer in ipr_queuecommand when
5021 * handling new commands.
5022 *
5023 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005024 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07005025 **/
5026static int ipr_slave_alloc(struct scsi_device *sdev)
5027{
5028 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
5029 struct ipr_resource_entry *res;
5030 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005031 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005032
5033 sdev->hostdata = NULL;
5034
5035 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5036
Brian King35a39692006-09-25 12:39:20 -05005037 res = ipr_find_sdev(sdev);
5038 if (res) {
5039 res->sdev = sdev;
5040 res->add_to_ml = 0;
5041 res->in_erp = 0;
5042 sdev->hostdata = res;
5043 if (!ipr_is_naca_model(res))
5044 res->needs_sync_complete = 1;
5045 rc = 0;
5046 if (ipr_is_gata(res)) {
5047 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5048 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005049 }
5050 }
5051
5052 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5053
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005054 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005055}
5056
Brian King6cdb0812014-10-30 17:27:10 -05005057/**
5058 * ipr_match_lun - Match function for specified LUN
5059 * @ipr_cmd: ipr command struct
5060 * @device: device to match (sdev)
5061 *
5062 * Returns:
5063 * 1 if command matches sdev / 0 if command does not match sdev
5064 **/
5065static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5066{
5067 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5068 return 1;
5069 return 0;
5070}
5071
5072/**
Brian King439ae282017-03-15 16:58:39 -05005073 * ipr_cmnd_is_free - Check if a command is free or not
 5074 * @ipr_cmd: ipr command struct
5075 *
5076 * Returns:
5077 * true / false
5078 **/
5079static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5080{
5081 struct ipr_cmnd *loop_cmd;
5082
5083 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5084 if (loop_cmd == ipr_cmd)
5085 return true;
5086 }
5087
5088 return false;
5089}
5090
5091/**
Brian Kingef97d8a2017-03-15 16:58:41 -05005092 * ipr_match_res - Match function for specified resource entry
5093 * @ipr_cmd: ipr command struct
5094 * @resource: resource entry to match
5095 *
5096 * Returns:
 5097 * 1 if command matches the resource entry / 0 if it does not
5098 **/
5099static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5100{
5101 struct ipr_resource_entry *res = resource;
5102
5103 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5104 return 1;
5105 return 0;
5106}
5107
5108/**
Brian King6cdb0812014-10-30 17:27:10 -05005109 * ipr_wait_for_ops - Wait for matching commands to complete
 5110 * @ioa_cfg: ioa config struct
5111 * @device: device to match (sdev)
5112 * @match: match function to use
5113 *
5114 * Returns:
5115 * SUCCESS / FAILED
5116 **/
5117static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5118 int (*match)(struct ipr_cmnd *, void *))
5119{
5120 struct ipr_cmnd *ipr_cmd;
Brian King439ae282017-03-15 16:58:39 -05005121 int wait, i;
Brian King6cdb0812014-10-30 17:27:10 -05005122 unsigned long flags;
5123 struct ipr_hrr_queue *hrrq;
5124 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5125 DECLARE_COMPLETION_ONSTACK(comp);
5126
5127 ENTER;
5128 do {
5129 wait = 0;
5130
5131 for_each_hrrq(hrrq, ioa_cfg) {
5132 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005133 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5134 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5135 if (!ipr_cmnd_is_free(ipr_cmd)) {
5136 if (match(ipr_cmd, device)) {
5137 ipr_cmd->eh_comp = &comp;
5138 wait++;
5139 }
Brian King6cdb0812014-10-30 17:27:10 -05005140 }
5141 }
5142 spin_unlock_irqrestore(hrrq->lock, flags);
5143 }
5144
5145 if (wait) {
5146 timeout = wait_for_completion_timeout(&comp, timeout);
5147
5148 if (!timeout) {
5149 wait = 0;
5150
5151 for_each_hrrq(hrrq, ioa_cfg) {
5152 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005153 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5154 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5155 if (!ipr_cmnd_is_free(ipr_cmd)) {
5156 if (match(ipr_cmd, device)) {
5157 ipr_cmd->eh_comp = NULL;
5158 wait++;
5159 }
Brian King6cdb0812014-10-30 17:27:10 -05005160 }
5161 }
5162 spin_unlock_irqrestore(hrrq->lock, flags);
5163 }
5164
5165 if (wait)
5166 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5167 LEAVE;
5168 return wait ? FAILED : SUCCESS;
5169 }
5170 }
5171 } while (wait);
5172
5173 LEAVE;
5174 return SUCCESS;
5175}
5176
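/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd: scsi command struct
 *
 * Initiates an abbreviated adapter shutdown/reset if one is not already
 * in progress, waits for the reset to complete, and fails the host
 * reset if the adapter ends up dead.
 *
 * Return value:
 * SUCCESS / FAILED
 **/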
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005177static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178{
5179 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005180 unsigned long lock_flags = 0;
5181 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005182
5183 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005184 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5185 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005186
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05005187 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005188 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005189 dev_err(&ioa_cfg->pdev->dev,
5190 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005191
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005192 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5193 ioa_cfg->sdt_state = GET_DUMP;
5194 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005195
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005196 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5197 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5198 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005199
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005200	/* If we got hit with a host reset while we were already resetting
 5201	   the adapter for some reason and that reset failed, fail this host reset too. */
5202 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5203 ipr_trace;
5204 rc = FAILED;
5205 }
5206
5207 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005208 LEAVE;
5209 return rc;
5210}
5211
5212/**
Brian Kingc6513092006-03-29 09:37:43 -06005213 * ipr_device_reset - Reset the device
5214 * @ioa_cfg: ioa config struct
5215 * @res: resource entry struct
5216 *
5217 * This function issues a device reset to the affected device.
5218 * If the device is a SCSI device, a LUN reset will be sent
5219 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05005220 * will be sent. If the device is a SATA device, a PHY reset will
5221 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06005222 *
5223 * Return value:
5224 * 0 on success / non-zero on failure
5225 **/
5226static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5227 struct ipr_resource_entry *res)
5228{
5229 struct ipr_cmnd *ipr_cmd;
5230 struct ipr_ioarcb *ioarcb;
5231 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05005232 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06005233 u32 ioasc;
5234
5235 ENTER;
5236 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5237 ioarcb = &ipr_cmd->ioarcb;
5238 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08005239
5240 if (ipr_cmd->ioa_cfg->sis64) {
5241 regs = &ipr_cmd->i.ata_ioadl.regs;
5242 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5243 } else
5244 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06005245
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005246 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06005247 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5248 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05005249 if (ipr_is_gata(res)) {
5250 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08005251 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05005252 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5253 }
Brian Kingc6513092006-03-29 09:37:43 -06005254
5255 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005256 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005257 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005258 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5259 if (ipr_cmd->ioa_cfg->sis64)
5260 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5261 sizeof(struct ipr_ioasa_gata));
5262 else
5263 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5264 sizeof(struct ipr_ioasa_gata));
5265 }
Brian Kingc6513092006-03-29 09:37:43 -06005266
5267 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005268 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06005269}
5270
5271/**
Brian King35a39692006-09-25 12:39:20 -05005272 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09005273 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05005274 * @classes: class of the attached device
 * @deadline: deadline jiffies for the operation
5275 *
Tejun Heocc0680a2007-08-06 18:36:23 +09005276 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05005277 *
5278 * Return value:
5279 * 0 on success / non-zero on failure
5280 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09005281static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07005282 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05005283{
Tejun Heocc0680a2007-08-06 18:36:23 +09005284 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05005285 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5286 struct ipr_resource_entry *res;
5287 unsigned long lock_flags = 0;
Brian Kingef97d8a2017-03-15 16:58:41 -05005288 int rc = -ENXIO, ret;
Brian King35a39692006-09-25 12:39:20 -05005289
5290 ENTER;
5291 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005292 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06005293 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5294 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5295 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5296 }
5297
Brian King35a39692006-09-25 12:39:20 -05005298 res = sata_port->res;
5299 if (res) {
5300 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005301 *classes = res->ata_class;
Brian Kingef97d8a2017-03-15 16:58:41 -05005302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King35a39692006-09-25 12:39:20 -05005303
Brian Kingef97d8a2017-03-15 16:58:41 -05005304 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5305 if (ret != SUCCESS) {
5306 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5307 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5308 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5309
5310 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5311 }
5312 } else
5313 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5314
Brian King35a39692006-09-25 12:39:20 -05005315 LEAVE;
5316 return rc;
5317}
5318
5319/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320 * ipr_eh_dev_reset - Reset the device
5321 * @scsi_cmd: scsi command struct
5322 *
5323 * This function issues a device reset to the affected device.
5324 * A LUN reset will be sent to the device first. If that does
5325 * not work, a target reset will be sent.
5326 *
5327 * Return value:
5328 * SUCCESS / FAILED
5329 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005330static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331{
5332 struct ipr_cmnd *ipr_cmd;
5333 struct ipr_ioa_cfg *ioa_cfg;
5334 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05005335 struct ata_port *ap;
Brian King439ae282017-03-15 16:58:39 -05005336 int rc = 0, i;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005337 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005338
5339 ENTER;
5340 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5341 res = scsi_cmd->device->hostdata;
5342
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343 /*
5344 * If we are currently going through reset/reload, return failed. This will force the
5345 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5346 * reset to complete
5347 */
5348 if (ioa_cfg->in_reset_reload)
5349 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005350 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005351 return FAILED;
5352
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005353 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005354 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005355 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5356 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5357
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005358 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Brian King960e9642017-03-15 16:58:37 -05005359 if (!ipr_cmd->qc)
5360 continue;
Brian King439ae282017-03-15 16:58:39 -05005361 if (ipr_cmnd_is_free(ipr_cmd))
5362 continue;
Brian King960e9642017-03-15 16:58:37 -05005363
5364 ipr_cmd->done = ipr_sata_eh_done;
5365 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005366 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5367 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5368 }
Brian King7402ece2006-11-21 10:28:23 -06005369 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005370 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005371 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005372 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005374 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005375
5376 if (ipr_is_gata(res) && res->sata_port) {
5377 ap = res->sata_port->ap;
5378 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005379 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005380 spin_lock_irq(scsi_cmd->device->host->host_lock);
5381 } else
5382 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005383 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06005384 res->reset_occurred = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005385
Linus Torvalds1da177e2005-04-16 15:20:36 -07005386 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005387 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005388}
5389
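/**
 * ipr_eh_dev_reset - Reset the device, then wait for outstanding ops
 * @cmd: scsi command struct
 *
 * Wrapper around __ipr_eh_dev_reset that takes the host lock and, on
 * success, waits for all commands outstanding to the device (or, for
 * SATA devices, to the resource) to complete before returning.
 *
 * Return value:
 * SUCCESS / FAILED
 **/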
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005390static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005391{
5392 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005393 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingef97d8a2017-03-15 16:58:41 -05005394 struct ipr_resource_entry *res;
Brian King6cdb0812014-10-30 17:27:10 -05005395
5396 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
Brian Kingef97d8a2017-03-15 16:58:41 -05005397 res = cmd->device->hostdata;
5398
5399 if (!res)
5400 return FAILED;
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005401
5402 spin_lock_irq(cmd->device->host->host_lock);
5403 rc = __ipr_eh_dev_reset(cmd);
5404 spin_unlock_irq(cmd->device->host->host_lock);
5405
Brian Kingef97d8a2017-03-15 16:58:41 -05005406 if (rc == SUCCESS) {
5407 if (ipr_is_gata(res) && res->sata_port)
5408 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5409 else
5410 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5411 }
Brian King6cdb0812014-10-30 17:27:10 -05005412
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005413 return rc;
5414}
5415
Linus Torvalds1da177e2005-04-16 15:20:36 -07005416/**
5417 * ipr_bus_reset_done - Op done function for bus reset.
5418 * @ipr_cmd: ipr command struct
5419 *
5420 * This function is the op done function for a bus reset
5421 *
5422 * Return value:
5423 * none
5424 **/
5425static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5426{
5427 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5428 struct ipr_resource_entry *res;
5429
5430 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005431 if (!ioa_cfg->sis64)
5432 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5433 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5434 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5435 break;
5436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005437 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438
5439 /*
5440 * If abort has not completed, indicate the reset has, else call the
5441 * abort's done function to wake the sleeping eh thread
5442 */
5443 if (ipr_cmd->sibling->sibling)
5444 ipr_cmd->sibling->sibling = NULL;
5445 else
5446 ipr_cmd->sibling->done(ipr_cmd->sibling);
5447
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005448 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005449 LEAVE;
5450}
5451
5452/**
5453 * ipr_abort_timeout - An abort task has timed out
5454 * @t: Timer context used to fetch ipr command struct
5455 *
5456 * This function handles the case where an abort task times out. If this
5457 * happens we issue a bus reset since we have resources tied
5458 * up that must be freed before returning to the midlayer.
5459 *
5460 * Return value:
5461 * none
5462 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07005463static void ipr_abort_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005464{
Kees Cook738c6ec2017-08-18 16:53:24 -07005465 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005466 struct ipr_cmnd *reset_cmd;
5467 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5468 struct ipr_cmd_pkt *cmd_pkt;
5469 unsigned long lock_flags = 0;
5470
5471 ENTER;
5472 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5473 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5475 return;
5476 }
5477
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005478 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
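	/* Cross-link the abort and the bus reset so ipr_bus_reset_done()
	 * can wake the sleeping EH thread once the reset completes.
	 */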
5480 ipr_cmd->sibling = reset_cmd;
5481 reset_cmd->sibling = ipr_cmd;
5482 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5483 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5484 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5485 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5486 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5487
5488 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5490 LEAVE;
5491}
5492
5493/**
5494 * ipr_cancel_op - Cancel specified op
5495 * @scsi_cmd: scsi command struct
5496 *
5497 * This function cancels the specified op.
5498 *
5499 * Return value:
5500 * SUCCESS / FAILED
5501 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005502static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005503{
5504 struct ipr_cmnd *ipr_cmd;
5505 struct ipr_ioa_cfg *ioa_cfg;
5506 struct ipr_resource_entry *res;
5507 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005508 u32 ioasc, int_reg;
Brian King439ae282017-03-15 16:58:39 -05005509 int i, op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005510 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511
5512 ENTER;
5513 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5514 res = scsi_cmd->device->hostdata;
5515
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005516 /* If we are currently going through reset/reload, return failed.
5517 * This will force the mid-layer to call ipr_eh_host_reset,
5518 * which will then go to sleep and wait for the reset to complete
5519 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005520 if (ioa_cfg->in_reset_reload ||
5521 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005522 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005523 if (!res)
5524 return FAILED;
5525
5526 /*
5527 * If we are aborting a timed out op, chances are that the timeout was caused
5528 * by an EEH error that has not yet been detected. In such cases, reading a register will
5529 * trigger the EEH recovery infrastructure.
5530 */
5531 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5532
5533 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005534 return FAILED;
5535
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005536 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005537 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005538 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5539 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5540 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5541 op_found = 1;
5542 break;
5543 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005544 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005545 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005546 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005547 }
5548
5549 if (!op_found)
5550 return SUCCESS;
5551
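	/* Issue an IOA Cancel All Requests for this device and block until
	 * the adapter responds or the abort itself times out.
	 */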
5552 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005553 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005554 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5555 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5556 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5557 ipr_cmd->u.sdev = scsi_cmd->device;
5558
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005559 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5560 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005561 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005562 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005563
5564 /*
5565 * If the abort task timed out and we sent a bus reset, we will get
5566 * one of the following responses to the abort
5567 */
5568 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5569 ioasc = 0;
5570 ipr_trace;
5571 }
5572
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005573 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005574 if (!ipr_is_naca_model(res))
5575 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576
5577 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005578 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005579}
5580
5581/**
5582 * ipr_scan_finished - Determine whether the device scan is complete
5583 * @shost: scsi host struct
 * @elapsed_time: elapsed time of the scan in jiffies
5584 *
5585 * Return value:
Brian Kingf688f962014-12-02 12:47:37 -06005586 * 0 if scan in progress / 1 if scan is complete
5587 **/
5588static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5589{
5590 unsigned long lock_flags;
5591 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5592 int rc = 0;
5593
5594 spin_lock_irqsave(shost->host_lock, lock_flags);
5595 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5596 rc = 1;
5597 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5598 rc = 1;
5599 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5600 return rc;
5601}
5602
5603/**
5604 * ipr_eh_abort - Abort a single op
5605 * @scsi_cmd: scsi command struct
5606 *
5607 * Return value:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005608 * SUCCESS / FAILED
5609 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005610static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005611{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005612 unsigned long flags;
5613 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005614 struct ipr_ioa_cfg *ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005615
5616 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005617
Brian King6cdb0812014-10-30 17:27:10 -05005618 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5619
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005620 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5621 rc = ipr_cancel_op(scsi_cmd);
5622 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005623
Brian King6cdb0812014-10-30 17:27:10 -05005624 if (rc == SUCCESS)
5625 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005626 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005627 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005628}
5629
5630/**
5631 * ipr_handle_other_interrupt - Handle "other" interrupts
5632 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005633 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005634 *
5635 * Return value:
5636 * IRQ_NONE / IRQ_HANDLED
5637 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005638static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005639 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640{
5641 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005642 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005643
Wayne Boyer7dacb642011-04-12 10:29:02 -07005644 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5645 int_reg &= ~int_mask_reg;
5646
5647 /* If an interrupt on the adapter did not occur, ignore it.
5648 * Or in the case of SIS 64, check for a stage change interrupt.
5649 */
5650 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5651 if (ioa_cfg->sis64) {
5652 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5653 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5654 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5655
5656 /* clear stage change */
5657 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5658 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5659 list_del(&ioa_cfg->reset_cmd->queue);
5660 del_timer(&ioa_cfg->reset_cmd->timer);
5661 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5662 return IRQ_HANDLED;
5663 }
5664 }
5665
5666 return IRQ_NONE;
5667 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668
5669 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5670 /* Mask the interrupt */
5671 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005672 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5673
5674 list_del(&ioa_cfg->reset_cmd->queue);
5675 del_timer(&ioa_cfg->reset_cmd->timer);
5676 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005677 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005678 if (ioa_cfg->clear_isr) {
5679 if (ipr_debug && printk_ratelimit())
5680 dev_err(&ioa_cfg->pdev->dev,
5681 "Spurious interrupt detected. 0x%08X\n", int_reg);
5682 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5683 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5684 return IRQ_NONE;
5685 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005686 } else {
5687 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5688 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005689 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5690 dev_err(&ioa_cfg->pdev->dev,
5691 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005692 else
5693 dev_err(&ioa_cfg->pdev->dev,
5694 "Permanent IOA failure. 0x%08X\n", int_reg);
5695
5696 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5697 ioa_cfg->sdt_state = GET_DUMP;
5698
5699 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5700 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5701 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005702
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703 return rc;
5704}
5705
5706/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005707 * ipr_isr_eh - Interrupt service routine error handler
5708 * @ioa_cfg: ioa config struct
5709 * @msg: message to log
 * @number: value appended to the logged message
5710 *
5711 * Return value:
5712 * none
5713 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005714static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005715{
5716 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005717 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005718
5719 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5720 ioa_cfg->sdt_state = GET_DUMP;
5721
5722 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5723}
5724
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005725static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005726 struct list_head *doneq)
5727{
5728 u32 ioasc;
5729 u16 cmd_index;
5730 struct ipr_cmnd *ipr_cmd;
5731 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5732 int num_hrrq = 0;
5733
5734 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005735 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005736 return 0;
5737
5738 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5739 hrr_queue->toggle_bit) {
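		/* An HRRQ entry belongs to the host only while its toggle bit
		 * matches ours; the bit flips each time the queue wraps.
		 */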
5740
5741 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5742 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5743 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5744
5745 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5746 cmd_index < hrr_queue->min_cmd_id)) {
5747 ipr_isr_eh(ioa_cfg,
5748 "Invalid response handle from IOA: ",
5749 cmd_index);
5750 break;
5751 }
5752
5753 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5754 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5755
5756 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5757
5758 list_move_tail(&ipr_cmd->queue, doneq);
5759
5760 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5761 hrr_queue->hrrq_curr++;
5762 } else {
5763 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5764 hrr_queue->toggle_bit ^= 1u;
5765 }
5766 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005767 if (budget > 0 && num_hrrq >= budget)
5768 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005769 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005770
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005771 return num_hrrq;
5772}
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005773
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005774static int ipr_iopoll(struct irq_poll *iop, int budget)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005775{
5776 struct ipr_ioa_cfg *ioa_cfg;
5777 struct ipr_hrr_queue *hrrq;
5778 struct ipr_cmnd *ipr_cmd, *temp;
5779 unsigned long hrrq_flags;
5780 int completed_ops;
5781 LIST_HEAD(doneq);
5782
5783 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5784 ioa_cfg = hrrq->ioa_cfg;
5785
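	/* Reap up to "budget" completions under the queue lock, then run
	 * each command's fast done routine with the lock dropped.
	 */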
5786 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5787 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5788
5789 if (completed_ops < budget)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005790 irq_poll_complete(iop);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005791 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5792
5793 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5794 list_del(&ipr_cmd->queue);
5795 del_timer(&ipr_cmd->timer);
5796 ipr_cmd->fast_done(ipr_cmd);
5797 }
5798
5799 return completed_ops;
5800}
5801
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005802/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005803 * ipr_isr - Interrupt service routine
5804 * @irq: irq number
5805 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005806 *
5807 * Return value:
5808 * IRQ_NONE / IRQ_HANDLED
5809 **/
David Howells7d12e782006-10-05 14:55:46 +01005810static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005812 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5813 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005814 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005815 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005816 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005817 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005818 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005819 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005820 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005821
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005822 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005823 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005824 if (!hrrq->allow_interrupts) {
5825 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005826 return IRQ_NONE;
5827 }
5828
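	/* Drain the HRRQ until it is empty.  When ioa_cfg->clear_isr is set,
	 * the PCI interrupt must also be explicitly cleared, retried up to
	 * IPR_MAX_HRRQ_RETRIES times.
	 */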
Linus Torvalds1da177e2005-04-16 15:20:36 -07005829 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005830 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5831 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005832
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005833 if (!ioa_cfg->clear_isr)
5834 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005835
Linus Torvalds1da177e2005-04-16 15:20:36 -07005836 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005837 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005838 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005839 writel(IPR_PCII_HRRQ_UPDATED,
5840 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005841 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005842 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005843 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005844
Wayne Boyer7dacb642011-04-12 10:29:02 -07005845 } else if (rc == IRQ_NONE && irq_none == 0) {
5846 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5847 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005848 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5849 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005850 ipr_isr_eh(ioa_cfg,
5851 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005852 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005853 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005854 } else
5855 break;
5856 }
5857
5858 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005859 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005860
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005861 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005862 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5863 list_del(&ipr_cmd->queue);
5864 del_timer(&ipr_cmd->timer);
5865 ipr_cmd->fast_done(ipr_cmd);
5866 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005867 return rc;
5868}
Brian King172cd6e2012-07-17 08:14:40 -05005869
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005870/**
5871 * ipr_isr_mhrrq - Interrupt service routine
5872 * @irq: irq number
5873 * @devp: pointer to ioa config struct
5874 *
5875 * Return value:
5876 * IRQ_NONE / IRQ_HANDLED
5877 **/
5878static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5879{
5880 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005881 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005882 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005883 struct ipr_cmnd *ipr_cmd, *temp;
5884 irqreturn_t rc = IRQ_NONE;
5885 LIST_HEAD(doneq);
5886
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005887 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005888
5889 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005890 if (!hrrq->allow_interrupts) {
5891 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005892 return IRQ_NONE;
5893 }
5894
Jens Axboe89f8b332014-03-13 09:38:42 -06005895 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005896 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5897 hrrq->toggle_bit) {
Christoph Hellwigea511902015-12-07 06:41:11 -08005898 irq_poll_sched(&hrrq->iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005899 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5900 return IRQ_HANDLED;
5901 }
5902 } else {
5903 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5904 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005905
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005906 if (ipr_process_hrrq(hrrq, -1, &doneq))
5907 rc = IRQ_HANDLED;
5908 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005909
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005910 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005911
5912 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5913 list_del(&ipr_cmd->queue);
5914 del_timer(&ipr_cmd->timer);
5915 ipr_cmd->fast_done(ipr_cmd);
5916 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005917 return rc;
5918}
5919
5920/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005921 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005922 * @ioa_cfg: ioa config struct
5923 * @ipr_cmd: ipr command struct
5924 *
5925 * Return value:
5926 * 0 on success / -1 on failure
5927 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005928static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5929 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005930{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005931 int i, nseg;
5932 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005933 u32 length;
5934 u32 ioadl_flags = 0;
5935 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5936 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005937 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005938
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005939 length = scsi_bufflen(scsi_cmd);
5940 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005941 return 0;
5942
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005943 nseg = scsi_dma_map(scsi_cmd);
5944 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005945 if (printk_ratelimit())
Anton Blanchardd73341b2014-10-30 17:27:08 -05005946 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005947 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005948 }
5949
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005950 ipr_cmd->dma_use_sg = nseg;
5951
Wayne Boyer438b0332010-05-10 09:13:00 -07005952 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005953 ioarcb->ioadl_len =
5954 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005955
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005956 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5957 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5958 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005959 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5960 ioadl_flags = IPR_IOADL_FLAGS_READ;
5961
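	/* Translate each scatter/gather element into a 64-bit IOADL
	 * descriptor; the final descriptor is flagged LAST below.
	 */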
5962 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5963 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5964 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5965 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5966 }
5967
5968 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5969 return 0;
5970}
5971
5972/**
5973 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5974 * @ioa_cfg: ioa config struct
5975 * @ipr_cmd: ipr command struct
5976 *
5977 * Return value:
5978 * 0 on success / -1 on failure
5979 **/
5980static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5981 struct ipr_cmnd *ipr_cmd)
5982{
5983 int i, nseg;
5984 struct scatterlist *sg;
5985 u32 length;
5986 u32 ioadl_flags = 0;
5987 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5988 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5989 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5990
5991 length = scsi_bufflen(scsi_cmd);
5992 if (!length)
5993 return 0;
5994
5995 nseg = scsi_dma_map(scsi_cmd);
5996 if (nseg < 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05005997 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
Wayne Boyera32c0552010-02-19 13:23:36 -08005998 return -1;
5999 }
6000
6001 ipr_cmd->dma_use_sg = nseg;
6002
6003 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
6004 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6005 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6006 ioarcb->data_transfer_length = cpu_to_be32(length);
6007 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006008 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6009 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
6010 ioadl_flags = IPR_IOADL_FLAGS_READ;
6011 ioarcb->read_data_transfer_length = cpu_to_be32(length);
6012 ioarcb->read_ioadl_len =
6013 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6014 }
6015
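	/* If the S/G list is short enough, keep the IOADL inline in the
	 * IOARCB's additional data area instead of referencing it by address.
	 */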
Wayne Boyera32c0552010-02-19 13:23:36 -08006016 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
6017 ioadl = ioarcb->u.add_data.u.ioadl;
6018 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
6019 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006020 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6021 }
6022
6023 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
6024 ioadl[i].flags_and_data_len =
6025 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6026 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
6027 }
6028
6029 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6030 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006031}
6032
6033/**
Brian Kingf646f322017-03-15 16:58:39 -05006034 * __ipr_erp_done - Process completion of ERP for a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006035 * @ipr_cmd: ipr command struct
6036 *
6037 * This function copies the sense buffer into the scsi_cmd
6038 * struct and pushes the scsi_done function.
6039 *
6040 * Return value:
6041 * nothing
6042 **/
Brian Kingf646f322017-03-15 16:58:39 -05006043static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006044{
6045 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6046 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006047 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006048
6049 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6050 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06006051 scmd_printk(KERN_ERR, scsi_cmd,
6052 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006053 } else {
6054 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6055 SCSI_SENSE_BUFFERSIZE);
6056 }
6057
6058 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006059 if (!ipr_is_naca_model(res))
6060 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006061 res->in_erp = 0;
6062 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006063 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006064 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006065 if (ipr_cmd->eh_comp)
6066 complete(ipr_cmd->eh_comp);
6067 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006068}
6069
6070/**
Brian Kingf646f322017-03-15 16:58:39 -05006071 * ipr_erp_done - Process completion of ERP for a device
6072 * @ipr_cmd: ipr command struct
6073 *
6074 * This function copies the sense buffer into the scsi_cmd
6075 * struct and pushes the scsi_done function.
6076 *
6077 * Return value:
6078 * nothing
6079 **/
6080static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6081{
6082 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6083 unsigned long hrrq_flags;
6084
6085 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6086 __ipr_erp_done(ipr_cmd);
6087 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006088}
6089
6090/**
6091 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6092 * @ipr_cmd: ipr command struct
6093 *
6094 * Return value:
6095 * none
6096 **/
6097static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6098{
Brian King51b1c7e2007-03-29 12:43:50 -05006099 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006100 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08006101 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006102
6103 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08006104 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006105 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006106 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006107 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006108 ioasa->hdr.ioasc = 0;
6109 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006110
6111 if (ipr_cmd->ioa_cfg->sis64)
6112 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6113 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6114 else {
6115 ioarcb->write_ioadl_addr =
6116 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6117 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6118 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006119}
6120
6121/**
Brian Kingf646f322017-03-15 16:58:39 -05006122 * __ipr_erp_request_sense - Send request sense to a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006123 * @ipr_cmd: ipr command struct
6124 *
6125 * This function sends a request sense to a device as a result
6126 * of a check condition.
6127 *
6128 * Return value:
6129 * nothing
6130 **/
Brian Kingf646f322017-03-15 16:58:39 -05006131static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006132{
6133 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006134 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135
6136 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
Brian Kingf646f322017-03-15 16:58:39 -05006137 __ipr_erp_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006138 return;
6139 }
6140
6141 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6142
6143 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6144 cmd_pkt->cdb[0] = REQUEST_SENSE;
6145 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6146 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6147 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6148 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6149
Wayne Boyera32c0552010-02-19 13:23:36 -08006150 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6151 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006152
6153 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6154 IPR_REQUEST_SENSE_TIMEOUT * 2);
6155}
6156
6157/**
Brian Kingf646f322017-03-15 16:58:39 -05006158 * ipr_erp_request_sense - Send request sense to a device
6159 * @ipr_cmd: ipr command struct
6160 *
6161 * This function sends a request sense to a device as a result
6162 * of a check condition.
6163 *
6164 * Return value:
6165 * nothing
6166 **/
6167static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6168{
6169 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6170 unsigned long hrrq_flags;
6171
6172 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6173 __ipr_erp_request_sense(ipr_cmd);
6174 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6175}
6176
6177/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178 * ipr_erp_cancel_all - Send cancel all to a device
6179 * @ipr_cmd: ipr command struct
6180 *
6181 * This function sends a cancel all to a device to clear the
6182 * queue. If we are running TCQ on the device, QERR is set to 1,
6183 * which means all outstanding ops have been dropped on the floor.
6184 * Cancel all will return them to us.
6185 *
6186 * Return value:
6187 * nothing
6188 **/
6189static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6190{
6191 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6192 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6193 struct ipr_cmd_pkt *cmd_pkt;
6194
6195 res->in_erp = 1;
6196
6197 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6198
Christoph Hellwig17ea0122014-11-24 15:36:20 +01006199 if (!scsi_cmd->device->simple_tags) {
Brian Kingf646f322017-03-15 16:58:39 -05006200 __ipr_erp_request_sense(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006201 return;
6202 }
6203
6204 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6205 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6206 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6207
6208 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6209 IPR_CANCEL_ALL_TIMEOUT);
6210}
6211
6212/**
6213 * ipr_dump_ioasa - Dump contents of IOASA
6214 * @ioa_cfg: ioa config struct
6215 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06006216 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006217 *
6218 * This function is invoked by the interrupt handler when ops
6219 * fail. It will log the IOASA if appropriate. Only called
6220 * for GPDD ops.
6221 *
6222 * Return value:
6223 * none
6224 **/
6225static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06006226 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006227{
6228 int i;
6229 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05006230 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006231 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006232 __be32 *ioasa_data = (__be32 *)ioasa;
6233 int error_index;
6234
Wayne Boyer96d21f02010-05-10 09:13:27 -07006235 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6236 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006237
6238 if (0 == ioasc)
6239 return;
6240
6241 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6242 return;
6243
Brian Kingb0692dd2007-03-29 12:43:09 -05006244 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6245 error_index = ipr_get_error(fd_ioasc);
6246 else
6247 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006248
6249 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6250 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07006251 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006252 return;
6253
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006254 if (!ipr_is_gscsi(res))
6255 return;
6256
Linus Torvalds1da177e2005-04-16 15:20:36 -07006257 if (ipr_error_table[error_index].log_ioasa == 0)
6258 return;
6259 }
6260
Brian Kingfe964d02006-03-29 09:37:29 -06006261 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006262
Wayne Boyer96d21f02010-05-10 09:13:27 -07006263 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6264 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6265 data_len = sizeof(struct ipr_ioasa64);
6266 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006267 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006268
6269 ipr_err("IOASA Dump:\n");
6270
6271 for (i = 0; i < data_len / 4; i += 4) {
6272 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6273 be32_to_cpu(ioasa_data[i]),
6274 be32_to_cpu(ioasa_data[i+1]),
6275 be32_to_cpu(ioasa_data[i+2]),
6276 be32_to_cpu(ioasa_data[i+3]));
6277 }
6278}
6279
6280/**
6281 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6282 * @ipr_cmd: ipr command struct
6284 *
6285 * Return value:
6286 * none
6287 **/
6288static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6289{
6290 u32 failing_lba;
6291 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6292 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006293 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6294 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006295
6296 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6297
6298 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6299 return;
6300
6301 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6302
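	/* Volume set media errors with a 64-bit failing LBA need
	 * descriptor-format sense data (0x72); all other cases use
	 * fixed-format sense data (0x70).
	 */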
6303 if (ipr_is_vset_device(res) &&
6304 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6305 ioasa->u.vset.failing_lba_hi != 0) {
6306 sense_buf[0] = 0x72;
6307 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6308 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6309 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6310
6311 sense_buf[7] = 12;
6312 sense_buf[8] = 0;
6313 sense_buf[9] = 0x0A;
6314 sense_buf[10] = 0x80;
6315
6316 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6317
6318 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6319 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6320 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6321 sense_buf[15] = failing_lba & 0x000000ff;
6322
6323 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6324
6325 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6326 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6327 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6328 sense_buf[19] = failing_lba & 0x000000ff;
6329 } else {
6330 sense_buf[0] = 0x70;
6331 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6332 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6333 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6334
6335 /* Illegal request */
6336 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07006337 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006338 sense_buf[7] = 10; /* additional length */
6339
6340 /* IOARCB was in error */
6341 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6342 sense_buf[15] = 0xC0;
6343 else /* Parameter data was invalid */
6344 sense_buf[15] = 0x80;
6345
6346 sense_buf[16] =
6347 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006348 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006349 sense_buf[17] =
6350 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006351 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006352 } else {
6353 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6354 if (ipr_is_vset_device(res))
6355 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6356 else
6357 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6358
6359 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6360 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6361 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6362 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6363 sense_buf[6] = failing_lba & 0x000000ff;
6364 }
6365
6366 sense_buf[7] = 6; /* additional length */
6367 }
6368 }
6369}
6370
6371/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006372 * ipr_get_autosense - Copy autosense data to sense buffer
6373 * @ipr_cmd: ipr command struct
6374 *
6375 * This function copies the autosense buffer to the buffer
6376 * in the scsi_cmd, if there is autosense available.
6377 *
6378 * Return value:
6379 * 1 if autosense was available / 0 if not
6380 **/
6381static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6382{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006383 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6384 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006385
Wayne Boyer96d21f02010-05-10 09:13:27 -07006386 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006387 return 0;
6388
Wayne Boyer96d21f02010-05-10 09:13:27 -07006389 if (ipr_cmd->ioa_cfg->sis64)
6390 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6391 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6392 SCSI_SENSE_BUFFERSIZE));
6393 else
6394 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6395 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6396 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006397 return 1;
6398}
6399
6400/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006401 * ipr_erp_start - Process an error response for a SCSI op
6402 * @ioa_cfg: ioa config struct
6403 * @ipr_cmd: ipr command struct
6404 *
6405 * This function determines whether or not to initiate ERP
6406 * on the affected device.
6407 *
6408 * Return value:
6409 * nothing
6410 **/
6411static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6412 struct ipr_cmnd *ipr_cmd)
6413{
6414 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6415 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006416 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05006417 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006418
6419 if (!res) {
Brian Kingf646f322017-03-15 16:58:39 -05006420 __ipr_scsi_eh_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006421 return;
6422 }
6423
Brian King8a048992007-04-26 16:00:10 -05006424 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006425 ipr_gen_sense(ipr_cmd);
6426
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006427 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6428
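	/* Map the masked IOASC onto a midlayer disposition: retry, hard
	 * error, or further ERP (cancel all / request sense).
	 */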
Brian King8a048992007-04-26 16:00:10 -05006429 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006430 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006431 if (ipr_is_naca_model(res))
6432 scsi_cmd->result |= (DID_ABORT << 16);
6433 else
6434 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006435 break;
6436 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006437 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006438 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6439 break;
6440 case IPR_IOASC_HW_SEL_TIMEOUT:
6441 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006442 if (!ipr_is_naca_model(res))
6443 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006444 break;
6445 case IPR_IOASC_SYNC_REQUIRED:
6446 if (!res->in_erp)
6447 res->needs_sync_complete = 1;
6448 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6449 break;
6450 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006451 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Mauricio Faria de Oliveira785a4702017-04-11 11:46:04 -03006452 /*
6453 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6454 * so SCSI mid-layer and upper layers handle it accordingly.
6455 */
6456 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6457 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006458 break;
6459 case IPR_IOASC_BUS_WAS_RESET:
6460 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6461 /*
6462 * Report the bus reset and ask for a retry. The device
6463 * will give CC/UA the next command.
6464 */
6465 if (!res->resetting_device)
6466 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6467 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006468 if (!ipr_is_naca_model(res))
6469 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006470 break;
6471 case IPR_IOASC_HW_DEV_BUS_STATUS:
6472 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6473 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006474 if (!ipr_get_autosense(ipr_cmd)) {
6475 if (!ipr_is_naca_model(res)) {
6476 ipr_erp_cancel_all(ipr_cmd);
6477 return;
6478 }
6479 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006480 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006481 if (!ipr_is_naca_model(res))
6482 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006483 break;
6484 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6485 break;
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006486 case IPR_IOASC_IR_NON_OPTIMIZED:
6487 if (res->raw_mode) {
6488 res->raw_mode = 0;
6489 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6490 } else
6491 scsi_cmd->result |= (DID_ERROR << 16);
6492 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006493 default:
Brian King5b7304f2006-08-02 14:57:51 -05006494 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6495 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006496 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006497 res->needs_sync_complete = 1;
6498 break;
6499 }
6500
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006501 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006503 if (ipr_cmd->eh_comp)
6504 complete(ipr_cmd->eh_comp);
6505 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506}
6507
6508/**
6509 * ipr_scsi_done - mid-layer done function
6510 * @ipr_cmd: ipr command struct
6511 *
6512 * This function is invoked by the interrupt handler for
6513 * ops generated by the SCSI mid-layer
6514 *
6515 * Return value:
6516 * none
6517 **/
6518static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6519{
6520 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6521 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006522 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King36b8e182015-07-14 11:41:29 -05006523 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006524
Wayne Boyer96d21f02010-05-10 09:13:27 -07006525 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006526
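	/* Fast path: a zero sense key means success, so complete the command
	 * under the hrrq lock; errors take the host_lock path into
	 * ipr_erp_start().
	 */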
6527 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006528 scsi_dma_unmap(scsi_cmd);
6529
Brian King36b8e182015-07-14 11:41:29 -05006530 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006531 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006532 if (ipr_cmd->eh_comp)
6533 complete(ipr_cmd->eh_comp);
6534 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King36b8e182015-07-14 11:41:29 -05006535 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006536 } else {
Brian King36b8e182015-07-14 11:41:29 -05006537 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6538 spin_lock(&ipr_cmd->hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006539 ipr_erp_start(ioa_cfg, ipr_cmd);
Brian King36b8e182015-07-14 11:41:29 -05006540 spin_unlock(&ipr_cmd->hrrq->_lock);
6541 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006543}
6544
6545/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006547 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006548 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006549 *
6550 * This function queues a request generated by the mid-layer.
6551 *
6552 * Return value:
6553 * 0 on success
6554 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6555 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6556 **/
Brian King00bfef22012-07-17 08:13:52 -05006557static int ipr_queuecommand(struct Scsi_Host *shost,
6558 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006559{
6560 struct ipr_ioa_cfg *ioa_cfg;
6561 struct ipr_resource_entry *res;
6562 struct ipr_ioarcb *ioarcb;
6563 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006564 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006565 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006566 struct ipr_hrr_queue *hrrq;
6567 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006568
Brian King00bfef22012-07-17 08:13:52 -05006569 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6570
Linus Torvalds1da177e2005-04-16 15:20:36 -07006571 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006572 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006573
6574 if (ipr_is_gata(res) && res->sata_port) {
6575 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6576 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6577 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6578 return rc;
6579 }
6580
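	/* Spread commands across the host response queues (HRRQs); each
	 * queue has its own lock and its own range of command ids.
	 */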
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006581 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6582 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006583
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006584 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006585 /*
6586 * We are currently blocking all devices due to a host reset
6587 * We have told the host to stop giving us new requests, but
6588 * ERP ops don't count. FIXME
6589 */
Brian Kingbfae7822013-01-30 23:45:08 -06006590 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006591 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006592 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006593 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006594
6595 /*
6596 * FIXME - Create scsi_set_host_offline interface
6597 * and the ioa_is_dead check can be removed
6598 */
Brian Kingbfae7822013-01-30 23:45:08 -06006599 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006600 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006601 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006602 }
6603
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006604 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6605 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006606 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006607 return SCSI_MLQUEUE_HOST_BUSY;
6608 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006609 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006610
Brian King172cd6e2012-07-17 08:14:40 -05006611 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006612 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006613
6614 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6615 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006616 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006617
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006618 if (ipr_is_gscsi(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006619 if (scsi_cmd->underflow == 0)
6620 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6621
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006622 if (res->reset_occurred) {
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006623 res->reset_occurred = 0;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006624 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006625 }
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006626 }
6627
6628 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6629 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6630
Linus Torvalds1da177e2005-04-16 15:20:36 -07006631 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
Christoph Hellwig50668632014-10-30 14:30:06 +01006632 if (scsi_cmd->flags & SCMD_TAGGED)
6633 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6634 else
6635 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006636 }
6637
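	/* Opcodes >= 0xC0 are routed as IOA commands here unless the target
	 * is a generic SCSI device issuing something other than
	 * IPR_QUERY_RSRC_STATE.
	 */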
6638 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006639 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006640 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006641 }
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006642 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006643 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006644
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006645 if (scsi_cmd->underflow == 0)
6646 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6647 }
6648
Dan Carpenterd12f1572012-07-30 11:18:22 +03006649 if (ioa_cfg->sis64)
6650 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6651 else
6652 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006653
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006654 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6655 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006656 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006657 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006658 if (!rc)
6659 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006660 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006661 }
6662
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006663 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006664 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006665 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006666 scsi_dma_unmap(scsi_cmd);
6667 goto err_nodev;
6668 }
6669
6670 ioarcb->res_handle = res->res_handle;
6671 if (res->needs_sync_complete) {
6672 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6673 res->needs_sync_complete = 0;
6674 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006675 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006676 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006677 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006678 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006679 return 0;
6680
6681err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006682 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006683 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6684 scsi_cmd->result = (DID_NO_CONNECT << 16);
6685 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006686 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006687 return 0;
6688}
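/*
 * Note on the tail of ipr_queuecommand() above: the request is fully built
 * before hrrq->lock is taken, and the three exits mirror the three states a
 * HRRQ can be in.  If the scatter/gather build failed or the queue is not
 * accepting commands (but the IOA is not dead), the ipr_cmnd goes back on
 * hrrq_free_q and the mid-layer retries via SCSI_MLQUEUE_HOST_BUSY.  If the
 * IOA is dead, the command is completed immediately with DID_NO_CONNECT.
 * Otherwise it is moved to hrrq_pending_q and handed to the adapter with
 * ipr_send_command() while the lock is still held.
 */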
6689
6690/**
Brian King35a39692006-09-25 12:39:20 -05006691 * ipr_ioctl - IOCTL handler
6692 * @sdev: scsi device struct
6693 * @cmd: IOCTL cmd
6694 * @arg: IOCTL arg
6695 *
6696 * Return value:
6697 * 0 on success / other on failure
6698 **/
Nathan Chancellor6f4e6262019-02-07 09:07:20 -07006699static int ipr_ioctl(struct scsi_device *sdev, unsigned int cmd,
6700 void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006701{
6702 struct ipr_resource_entry *res;
6703
6704 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006705 if (res && ipr_is_gata(res)) {
6706 if (cmd == HDIO_GET_IDENTITY)
6707 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006708 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006709 }
Brian King35a39692006-09-25 12:39:20 -05006710
6711 return -EINVAL;
6712}
6713
6714/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006715 * ipr_ioa_info - Get information about the card/driver
6716 * @host: scsi host struct
6717 *
6718 * Return value:
6719 * pointer to buffer with description string
6720 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006721static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006722{
6723 static char buffer[512];
6724 struct ipr_ioa_cfg *ioa_cfg;
6725 unsigned long lock_flags = 0;
6726
6727 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6728
6729 spin_lock_irqsave(host->host_lock, lock_flags);
6730 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6731 spin_unlock_irqrestore(host->host_lock, lock_flags);
6732
6733 return buffer;
6734}
6735
6736static struct scsi_host_template driver_template = {
6737 .module = THIS_MODULE,
6738 .name = "IPR",
6739 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006740 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006741 .queuecommand = ipr_queuecommand,
6742 .eh_abort_handler = ipr_eh_abort,
6743 .eh_device_reset_handler = ipr_eh_dev_reset,
6744 .eh_host_reset_handler = ipr_eh_host_reset,
6745 .slave_alloc = ipr_slave_alloc,
6746 .slave_configure = ipr_slave_configure,
6747 .slave_destroy = ipr_slave_destroy,
Brian Kingf688f962014-12-02 12:47:37 -06006748 .scan_finished = ipr_scan_finished,
Brian King35a39692006-09-25 12:39:20 -05006749 .target_alloc = ipr_target_alloc,
6750 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006751 .change_queue_depth = ipr_change_queue_depth,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006752 .bios_param = ipr_biosparam,
6753 .can_queue = IPR_MAX_COMMANDS,
6754 .this_id = -1,
6755 .sg_tablesize = IPR_MAX_SGLIST,
6756 .max_sectors = IPR_IOA_MAX_SECTORS,
6757 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006758 .shost_attrs = ipr_ioa_attrs,
6759 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen54b2b502013-10-23 06:25:40 -04006760 .proc_name = IPR_NAME,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761};
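/*
 * The template above is what the probe path hands to scsi_host_alloc()
 * (see ipr_probe_ioa() further down in this file), which is how these
 * entry points are registered with the SCSI mid-layer.
 */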
6762
Brian King35a39692006-09-25 12:39:20 -05006763/**
6764 * ipr_ata_phy_reset - libata phy_reset handler
6765 * @ap: ata port to reset
6766 *
6767 **/
6768static void ipr_ata_phy_reset(struct ata_port *ap)
6769{
6770 unsigned long flags;
6771 struct ipr_sata_port *sata_port = ap->private_data;
6772 struct ipr_resource_entry *res = sata_port->res;
6773 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6774 int rc;
6775
6776 ENTER;
6777 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006778 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006779 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6780 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6781 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6782 }
6783
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006784 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006785 goto out_unlock;
6786
6787 rc = ipr_device_reset(ioa_cfg, res);
6788
6789 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006790 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006791 goto out_unlock;
6792 }
6793
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006794 ap->link.device[0].class = res->ata_class;
6795 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006796 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006797
6798out_unlock:
6799 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6800 LEAVE;
6801}
6802
6803/**
6804 * ipr_ata_post_internal - Cleanup after an internal command
6805 * @qc: ATA queued command
6806 *
6807 * Return value:
6808 * none
6809 **/
6810static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6811{
6812 struct ipr_sata_port *sata_port = qc->ap->private_data;
6813 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6814 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006815 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006816 unsigned long flags;
6817
6818 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006819 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006820 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6821 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6822 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6823 }
6824
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006825 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006826 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006827 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6828 if (ipr_cmd->qc == qc) {
6829 ipr_device_reset(ioa_cfg, sata_port->res);
6830 break;
6831 }
Brian King35a39692006-09-25 12:39:20 -05006832 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006833 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006834 }
6835 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6836}
6837
6838/**
Brian King35a39692006-09-25 12:39:20 -05006839 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6840 * @regs: destination
6841 * @tf: source ATA taskfile
6842 *
6843 * Return value:
6844 * none
6845 **/
6846static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6847 struct ata_taskfile *tf)
6848{
6849 regs->feature = tf->feature;
6850 regs->nsect = tf->nsect;
6851 regs->lbal = tf->lbal;
6852 regs->lbam = tf->lbam;
6853 regs->lbah = tf->lbah;
6854 regs->device = tf->device;
6855 regs->command = tf->command;
6856 regs->hob_feature = tf->hob_feature;
6857 regs->hob_nsect = tf->hob_nsect;
6858 regs->hob_lbal = tf->hob_lbal;
6859 regs->hob_lbam = tf->hob_lbam;
6860 regs->hob_lbah = tf->hob_lbah;
6861 regs->ctl = tf->ctl;
6862}
6863
6864/**
6865 * ipr_sata_done - done function for SATA commands
6866 * @ipr_cmd: ipr command struct
6867 *
6868 * This function is invoked by the interrupt handler for
6869 * ops generated by the SCSI mid-layer to SATA devices
6870 *
6871 * Return value:
6872 * none
6873 **/
6874static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6875{
6876 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6877 struct ata_queued_cmd *qc = ipr_cmd->qc;
6878 struct ipr_sata_port *sata_port = qc->ap->private_data;
6879 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006880 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006881
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006882 spin_lock(&ipr_cmd->hrrq->_lock);
Wayne Boyer96d21f02010-05-10 09:13:27 -07006883 if (ipr_cmd->ioa_cfg->sis64)
6884 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6885 sizeof(struct ipr_ioasa_gata));
6886 else
6887 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6888 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006889 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6890
Wayne Boyer96d21f02010-05-10 09:13:27 -07006891 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006892 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006893
6894 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006895 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006896 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006897 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006898 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006899 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006900 ata_qc_complete(qc);
6901}
6902
6903/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006904 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6905 * @ipr_cmd: ipr command struct
6906 * @qc: ATA queued command
6907 *
6908 **/
6909static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6910 struct ata_queued_cmd *qc)
6911{
6912 u32 ioadl_flags = 0;
6913 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006914 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006915 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6916 int len = qc->nbytes;
6917 struct scatterlist *sg;
6918 unsigned int si;
6919 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6920
6921 if (len == 0)
6922 return;
6923
6924 if (qc->dma_dir == DMA_TO_DEVICE) {
6925 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6926 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6927 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6928 ioadl_flags = IPR_IOADL_FLAGS_READ;
6929
6930 ioarcb->data_transfer_length = cpu_to_be32(len);
6931 ioarcb->ioadl_len =
6932 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6933 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006934 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006935
6936 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6937 ioadl64->flags = cpu_to_be32(ioadl_flags);
6938 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6939 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6940
6941 last_ioadl64 = ioadl64;
6942 ioadl64++;
6943 }
6944
6945 if (likely(last_ioadl64))
6946 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6947}
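/*
 * Shape of the sis64 descriptor list built above, one entry per SG element
 * (all fields big-endian):
 *
 *	ioadl64[i].flags    = IPR_IOADL_FLAGS_READ or IPR_IOADL_FLAGS_WRITE
 *	ioadl64[i].data_len = sg_dma_len(sg)
 *	ioadl64[i].address  = sg_dma_address(sg)
 *
 * The final entry also carries IPR_IOADL_FLAGS_LAST so the adapter knows
 * where the list ends.  The list lives inside the ipr_cmnd itself, so only
 * its offset from the command's DMA address is written into the IOARCB.
 */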
6948
6949/**
Brian King35a39692006-09-25 12:39:20 -05006950 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6951 * @ipr_cmd: ipr command struct
6952 * @qc: ATA queued command
6953 *
6954 **/
6955static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6956 struct ata_queued_cmd *qc)
6957{
6958 u32 ioadl_flags = 0;
6959 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006960 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006961 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006962 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006963 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006964 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006965
6966 if (len == 0)
6967 return;
6968
6969 if (qc->dma_dir == DMA_TO_DEVICE) {
6970 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6971 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006972 ioarcb->data_transfer_length = cpu_to_be32(len);
6973 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006974 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6975 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6976 ioadl_flags = IPR_IOADL_FLAGS_READ;
6977 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6978 ioarcb->read_ioadl_len =
6979 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6980 }
6981
Tejun Heoff2aeb12007-12-05 16:43:11 +09006982 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006983 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6984 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006985
6986 last_ioadl = ioadl;
6987 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006988 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006989
6990 if (likely(last_ioadl))
6991 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006992}
6993
6994/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006995 * ipr_qc_defer - Get a free ipr_cmd
6996 * @qc: queued command
6997 *
6998 * Return value:
6999 * 0 if success / ATA_DEFER_LINK if no command can be reserved
7000 **/
7001static int ipr_qc_defer(struct ata_queued_cmd *qc)
7002{
7003 struct ata_port *ap = qc->ap;
7004 struct ipr_sata_port *sata_port = ap->private_data;
7005 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7006 struct ipr_cmnd *ipr_cmd;
7007 struct ipr_hrr_queue *hrrq;
7008 int hrrq_id;
7009
7010 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
7011 hrrq = &ioa_cfg->hrrq[hrrq_id];
7012
7013 qc->lldd_task = NULL;
7014 spin_lock(&hrrq->_lock);
7015 if (unlikely(hrrq->ioa_is_dead)) {
7016 spin_unlock(&hrrq->_lock);
7017 return 0;
7018 }
7019
7020 if (unlikely(!hrrq->allow_cmds)) {
7021 spin_unlock(&hrrq->_lock);
7022 return ATA_DEFER_LINK;
7023 }
7024
7025 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
7026 if (ipr_cmd == NULL) {
7027 spin_unlock(&hrrq->_lock);
7028 return ATA_DEFER_LINK;
7029 }
7030
7031 qc->lldd_task = ipr_cmd;
7032 spin_unlock(&hrrq->_lock);
7033 return 0;
7034}
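/*
 * ipr_qc_defer() and ipr_qc_issue() below form a two-step handshake: defer
 * reserves an ipr_cmnd from the HRRQ picked by ipr_get_hrrq_index() and
 * parks it in qc->lldd_task, and issue consumes that reservation (calling
 * ipr_qc_defer() itself if the qc arrived without one).  When the IOA is
 * dead, defer deliberately returns 0 with no reservation so that issue can
 * fail the command with AC_ERR_SYSTEM rather than deferring it forever.
 */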
7035
7036/**
Brian King35a39692006-09-25 12:39:20 -05007037 * ipr_qc_issue - Issue a SATA qc to a device
7038 * @qc: queued command
7039 *
7040 * Return value:
7041 * 0 if success / AC_ERR_* value on failure
7042 **/
7043static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7044{
7045 struct ata_port *ap = qc->ap;
7046 struct ipr_sata_port *sata_port = ap->private_data;
7047 struct ipr_resource_entry *res = sata_port->res;
7048 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7049 struct ipr_cmnd *ipr_cmd;
7050 struct ipr_ioarcb *ioarcb;
7051 struct ipr_ioarcb_ata_regs *regs;
7052
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007053 if (qc->lldd_task == NULL)
7054 ipr_qc_defer(qc);
7055
7056 ipr_cmd = qc->lldd_task;
7057 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05007058 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05007059
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007060 qc->lldd_task = NULL;
7061 spin_lock(&ipr_cmd->hrrq->_lock);
7062 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7063 ipr_cmd->hrrq->ioa_is_dead)) {
7064 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7065 spin_unlock(&ipr_cmd->hrrq->_lock);
7066 return AC_ERR_SYSTEM;
7067 }
7068
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007069 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05007070 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05007071
Wayne Boyera32c0552010-02-19 13:23:36 -08007072 if (ioa_cfg->sis64) {
7073 regs = &ipr_cmd->i.ata_ioadl.regs;
7074 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7075 } else
7076 regs = &ioarcb->u.add_data.u.regs;
7077
7078 memset(regs, 0, sizeof(*regs));
7079 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05007080
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007081 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05007082 ipr_cmd->qc = qc;
7083 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007084 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05007085 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7086 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7087 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01007088 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05007089
Wayne Boyera32c0552010-02-19 13:23:36 -08007090 if (ioa_cfg->sis64)
7091 ipr_build_ata_ioadl64(ipr_cmd, qc);
7092 else
7093 ipr_build_ata_ioadl(ipr_cmd, qc);
7094
Brian King35a39692006-09-25 12:39:20 -05007095 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7096 ipr_copy_sata_tf(regs, &qc->tf);
7097 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007098 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05007099
7100 switch (qc->tf.protocol) {
7101 case ATA_PROT_NODATA:
7102 case ATA_PROT_PIO:
7103 break;
7104
7105 case ATA_PROT_DMA:
7106 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7107 break;
7108
Tejun Heo0dc36882007-12-18 16:34:43 -05007109 case ATAPI_PROT_PIO:
7110 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05007111 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7112 break;
7113
Tejun Heo0dc36882007-12-18 16:34:43 -05007114 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05007115 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7116 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7117 break;
7118
7119 default:
7120 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007121 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05007122 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05007123 }
7124
Wayne Boyera32c0552010-02-19 13:23:36 -08007125 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007126 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08007127
Brian King35a39692006-09-25 12:39:20 -05007128 return 0;
7129}
7130
7131/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007132 * ipr_qc_fill_rtf - Read result TF
7133 * @qc: ATA queued command
7134 *
7135 * Return value:
7136 * true
7137 **/
7138static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7139{
7140 struct ipr_sata_port *sata_port = qc->ap->private_data;
7141 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7142 struct ata_taskfile *tf = &qc->result_tf;
7143
7144 tf->feature = g->error;
7145 tf->nsect = g->nsect;
7146 tf->lbal = g->lbal;
7147 tf->lbam = g->lbam;
7148 tf->lbah = g->lbah;
7149 tf->device = g->device;
7150 tf->command = g->status;
7151 tf->hob_nsect = g->hob_nsect;
7152 tf->hob_lbal = g->hob_lbal;
7153 tf->hob_lbam = g->hob_lbam;
7154 tf->hob_lbah = g->hob_lbah;
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007155
7156 return true;
7157}
7158
Brian King35a39692006-09-25 12:39:20 -05007159static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05007160 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09007161 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05007162 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05007163 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007164 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05007165 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007166 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05007167 .port_start = ata_sas_port_start,
7168 .port_stop = ata_sas_port_stop
7169};
7170
7171static struct ata_port_info sata_port_info = {
Shaohua Li5067c042015-03-12 10:32:18 -07007172 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7173 ATA_FLAG_SAS_HOST,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03007174 .pio_mask = ATA_PIO4_ONLY,
7175 .mwdma_mask = ATA_MWDMA2,
7176 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05007177 .port_ops = &ipr_sata_ops
7178};
7179
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180#ifdef CONFIG_PPC_PSERIES
7181static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007182 PVR_NORTHSTAR,
7183 PVR_PULSAR,
7184 PVR_POWER4,
7185 PVR_ICESTAR,
7186 PVR_SSTAR,
7187 PVR_POWER4p,
7188 PVR_630,
7189 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190};
7191
7192/**
7193 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7194 * @ioa_cfg: ioa cfg struct
7195 *
7196 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7197 * certain pSeries hardware. This function determines if the given
7198 * adapter is in one of these configurations or not.
7199 *
7200 * Return value:
7201 * 1 if adapter is not supported / 0 if adapter is supported
7202 **/
7203static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7204{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007205 int i;
7206
Auke Kok44c10132007-06-08 15:46:36 -07007207 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007208 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007209 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07007210 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211 }
7212 }
7213 return 0;
7214}
7215#else
7216#define ipr_invalid_adapter(ioa_cfg) 0
7217#endif
7218
7219/**
7220 * ipr_ioa_bringdown_done - IOA bring down completion.
7221 * @ipr_cmd: ipr command struct
7222 *
7223 * This function processes the completion of an adapter bring down.
7224 * It wakes any reset sleepers.
7225 *
7226 * Return value:
7227 * IPR_RC_JOB_RETURN
7228 **/
7229static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7230{
7231 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007232 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007233
7234 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06007235 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7236 ipr_trace;
Brian Kingb0e17a92017-08-01 10:21:30 -05007237 ioa_cfg->scsi_unblock = 1;
7238 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06007239 }
7240
Linus Torvalds1da177e2005-04-16 15:20:36 -07007241 ioa_cfg->in_reset_reload = 0;
7242 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007243 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7244 spin_lock(&ioa_cfg->hrrq[i]._lock);
7245 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7246 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7247 }
7248 wmb();
7249
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007250 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007251 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007252 LEAVE;
7253
7254 return IPR_RC_JOB_RETURN;
7255}
7256
7257/**
7258 * ipr_ioa_reset_done - IOA reset completion.
7259 * @ipr_cmd: ipr command struct
7260 *
7261 * This function processes the completion of an adapter reset.
7262 * It schedules any necessary mid-layer add/removes and
7263 * wakes any reset sleepers.
7264 *
7265 * Return value:
7266 * IPR_RC_JOB_RETURN
7267 **/
7268static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7269{
7270 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7271 struct ipr_resource_entry *res;
Brian Kingafc3f832016-08-24 12:56:51 -05007272 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007273
7274 ENTER;
7275 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007276 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7277 spin_lock(&ioa_cfg->hrrq[j]._lock);
7278 ioa_cfg->hrrq[j].allow_cmds = 1;
7279 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7280 }
7281 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007282 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06007283 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007284
7285 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Brian Kingf688f962014-12-02 12:47:37 -06007286 if (res->add_to_ml || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007287 ipr_trace;
7288 break;
7289 }
7290 }
7291 schedule_work(&ioa_cfg->work_q);
7292
Brian Kingafc3f832016-08-24 12:56:51 -05007293 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7294 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7295 if (j < IPR_NUM_LOG_HCAMS)
7296 ipr_send_hcam(ioa_cfg,
7297 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7298 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007299 else
Brian Kingafc3f832016-08-24 12:56:51 -05007300 ipr_send_hcam(ioa_cfg,
7301 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7302 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007303 }
7304
Brian King6bb04172007-04-26 16:00:08 -05007305 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007306 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7307
7308 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007309 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007310 wake_up_all(&ioa_cfg->reset_wait_q);
7311
Brian Kingb0e17a92017-08-01 10:21:30 -05007312 ioa_cfg->scsi_unblock = 1;
Brian Kingf688f962014-12-02 12:47:37 -06007313 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007314 LEAVE;
7315 return IPR_RC_JOB_RETURN;
7316}
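/*
 * Summary of the successful-reset path above: command processing is
 * re-enabled on every HRRQ, the runtime-reset doorbell bit is set, all
 * HCAMs are re-posted (log-data buffers first, then config-change), a bus
 * reset is reported for the VSET bus, reset waiters are woken, and the
 * host is unblocked via the adapter work queue.
 */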
7317
7318/**
7319 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7320 * @supported_dev: supported device struct
7321 * @vpids: vendor product id struct
7322 *
7323 * Return value:
7324 * none
7325 **/
7326static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7327 struct ipr_std_inq_vpids *vpids)
7328{
7329 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7330 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7331 supported_dev->num_records = 1;
7332 supported_dev->data_length =
7333 cpu_to_be16(sizeof(struct ipr_supported_device));
7334 supported_dev->reserved = 0;
7335}
7336
7337/**
7338 * ipr_set_supported_devs - Send Set Supported Devices for a device
7339 * @ipr_cmd: ipr command struct
7340 *
Wayne Boyera32c0552010-02-19 13:23:36 -08007341 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07007342 *
7343 * Return value:
7344 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7345 **/
7346static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7347{
7348 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7349 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007350 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7351 struct ipr_resource_entry *res = ipr_cmd->u.res;
7352
7353 ipr_cmd->job_step = ipr_ioa_reset_done;
7354
7355 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06007356 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007357 continue;
7358
7359 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007360 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007361
7362 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7363 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7364 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7365
7366 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007367 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007368 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7369 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7370
Wayne Boyera32c0552010-02-19 13:23:36 -08007371 ipr_init_ioadl(ipr_cmd,
7372 ioa_cfg->vpd_cbs_dma +
7373 offsetof(struct ipr_misc_cbs, supp_dev),
7374 sizeof(struct ipr_supported_device),
7375 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007376
7377 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7378 IPR_SET_SUP_DEVICE_TIMEOUT);
7379
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007380 if (!ioa_cfg->sis64)
7381 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007382 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007383 return IPR_RC_JOB_RETURN;
7384 }
7385
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007386 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007387 return IPR_RC_JOB_CONTINUE;
7388}
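/*
 * ipr_set_supported_devs() above is self-continuing: before each request is
 * sent (on non-sis64 adapters) job_step is pointed back at this function, so
 * every completion re-enters it and list_for_each_entry_continue() resumes
 * at the next SCSI disk.  Once the list is exhausted, the assignment of
 * ipr_ioa_reset_done made on entry takes effect and the bring-up job ends.
 */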
7389
7390/**
7391 * ipr_get_mode_page - Locate specified mode page
7392 * @mode_pages: mode page buffer
7393 * @page_code: page code to find
7394 * @len: minimum required length for mode page
7395 *
7396 * Return value:
7397 * pointer to mode page / NULL on failure
7398 **/
7399static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7400 u32 page_code, u32 len)
7401{
7402 struct ipr_mode_page_hdr *mode_hdr;
7403 u32 page_length;
7404 u32 length;
7405
7406 if (!mode_pages || (mode_pages->hdr.length == 0))
7407 return NULL;
7408
7409 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7410 mode_hdr = (struct ipr_mode_page_hdr *)
7411 (mode_pages->data + mode_pages->hdr.block_desc_len);
7412
7413 while (length) {
7414 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7415 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7416 return mode_hdr;
7417 break;
7418 } else {
7419 page_length = (sizeof(struct ipr_mode_page_hdr) +
7420 mode_hdr->page_length);
7421 length -= page_length;
7422 mode_hdr = (struct ipr_mode_page_hdr *)
7423 ((unsigned long)mode_hdr + page_length);
7424 }
7425 }
7426 return NULL;
7427}
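/*
 * Usage sketch for ipr_get_mode_page() (hypothetical caller; the real
 * callers are ipr_check_term_power(), ipr_modify_ioafp_mode_page_28() and
 * the page 0x24 handler below):
 *
 *	struct ipr_mode_page28 *page;
 *
 *	page = ipr_get_mode_page(mode_pages, 0x28,
 *				 sizeof(struct ipr_mode_page28));
 *	if (!page)
 *		return;	(page absent or shorter than requested)
 */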
7428
7429/**
7430 * ipr_check_term_power - Check for term power errors
7431 * @ioa_cfg: ioa config struct
7432 * @mode_pages: IOAFP mode pages buffer
7433 *
7434 * Check the IOAFP's mode page 28 for term power errors
7435 *
7436 * Return value:
7437 * nothing
7438 **/
7439static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7440 struct ipr_mode_pages *mode_pages)
7441{
7442 int i;
7443 int entry_length;
7444 struct ipr_dev_bus_entry *bus;
7445 struct ipr_mode_page28 *mode_page;
7446
7447 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7448 sizeof(struct ipr_mode_page28));
7449
7450 entry_length = mode_page->entry_length;
7451
7452 bus = mode_page->bus;
7453
7454 for (i = 0; i < mode_page->num_entries; i++) {
7455 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7456 dev_err(&ioa_cfg->pdev->dev,
7457 "Term power is absent on scsi bus %d\n",
7458 bus->res_addr.bus);
7459 }
7460
7461 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7462 }
7463}
7464
7465/**
7466 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7467 * @ioa_cfg: ioa config struct
7468 *
7469 * Looks through the config table checking for SES devices. If
7470 * the SES device is in the SES table indicating a maximum SCSI
7471 * bus speed, the speed is limited for the bus.
7472 *
7473 * Return value:
7474 * none
7475 **/
7476static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7477{
7478 u32 max_xfer_rate;
7479 int i;
7480
7481 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7482 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7483 ioa_cfg->bus_attr[i].bus_width);
7484
7485 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7486 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7487 }
7488}
7489
7490/**
7491 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7492 * @ioa_cfg: ioa config struct
7493 * @mode_pages: mode page 28 buffer
7494 *
7495 * Updates mode page 28 based on driver configuration
7496 *
7497 * Return value:
7498 * none
7499 **/
7500static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007501 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007502{
7503 int i, entry_length;
7504 struct ipr_dev_bus_entry *bus;
7505 struct ipr_bus_attributes *bus_attr;
7506 struct ipr_mode_page28 *mode_page;
7507
7508 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7509 sizeof(struct ipr_mode_page28));
7510
7511 entry_length = mode_page->entry_length;
7512
7513 /* Loop for each device bus entry */
7514 for (i = 0, bus = mode_page->bus;
7515 i < mode_page->num_entries;
7516 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7517 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7518 dev_err(&ioa_cfg->pdev->dev,
7519 "Invalid resource address reported: 0x%08X\n",
7520 IPR_GET_PHYS_LOC(bus->res_addr));
7521 continue;
7522 }
7523
7524 bus_attr = &ioa_cfg->bus_attr[i];
7525 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7526 bus->bus_width = bus_attr->bus_width;
7527 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7528 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7529 if (bus_attr->qas_enabled)
7530 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7531 else
7532 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7533 }
7534}
7535
7536/**
7537 * ipr_build_mode_select - Build a mode select command
7538 * @ipr_cmd: ipr command struct
7539 * @res_handle: resource handle to send command to
7540 * @parm: Byte 2 of the Mode Select command
7541 * @dma_addr: DMA buffer address
7542 * @xfer_len: data transfer length
7543 *
7544 * Return value:
7545 * none
7546 **/
7547static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007548 __be32 res_handle, u8 parm,
7549 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007550{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007551 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7552
7553 ioarcb->res_handle = res_handle;
7554 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7555 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7556 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7557 ioarcb->cmd_pkt.cdb[1] = parm;
7558 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7559
Wayne Boyera32c0552010-02-19 13:23:36 -08007560 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007561}
7562
7563/**
7564 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7565 * @ipr_cmd: ipr command struct
7566 *
7567 * This function sets up the SCSI bus attributes and sends
7568 * a Mode Select for Page 28 to activate them.
7569 *
7570 * Return value:
7571 * IPR_RC_JOB_RETURN
7572 **/
7573static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7574{
7575 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7576 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7577 int length;
7578
7579 ENTER;
Brian King47338042006-02-08 20:57:42 -06007580 ipr_scsi_bus_speed_limit(ioa_cfg);
7581 ipr_check_term_power(ioa_cfg, mode_pages);
7582 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7583 length = mode_pages->hdr.length + 1;
7584 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007585
7586 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7587 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7588 length);
7589
Wayne Boyerf72919e2010-02-19 13:24:21 -08007590 ipr_cmd->job_step = ipr_set_supported_devs;
7591 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7592 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7594
7595 LEAVE;
7596 return IPR_RC_JOB_RETURN;
7597}
7598
7599/**
7600 * ipr_build_mode_sense - Builds a mode sense command
7601 * @ipr_cmd: ipr command struct
7602 * @res: resource entry struct
7603 * @parm: Byte 2 of mode sense command
7604 * @dma_addr: DMA address of mode sense buffer
7605 * @xfer_len: Size of DMA buffer
7606 *
7607 * Return value:
7608 * none
7609 **/
7610static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7611 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007612 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007613{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007614 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7615
7616 ioarcb->res_handle = res_handle;
7617 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7618 ioarcb->cmd_pkt.cdb[2] = parm;
7619 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7620 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7621
Wayne Boyera32c0552010-02-19 13:23:36 -08007622 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007623}
7624
7625/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007626 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7627 * @ipr_cmd: ipr command struct
7628 *
7629 * This function handles the failure of an IOA bringup command.
7630 *
7631 * Return value:
7632 * IPR_RC_JOB_RETURN
7633 **/
7634static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7635{
7636 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007637 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007638
7639 dev_err(&ioa_cfg->pdev->dev,
7640 "0x%02X failed with IOASC: 0x%08X\n",
7641 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7642
7643 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007644 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007645 return IPR_RC_JOB_RETURN;
7646}
7647
7648/**
7649 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7650 * @ipr_cmd: ipr command struct
7651 *
7652 * This function handles the failure of a Mode Sense to the IOAFP.
7653 * Some adapters do not handle all mode pages.
7654 *
7655 * Return value:
7656 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7657 **/
7658static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7659{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007660 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007661 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007662
7663 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007664 ipr_cmd->job_step = ipr_set_supported_devs;
7665 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7666 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007667 return IPR_RC_JOB_CONTINUE;
7668 }
7669
7670 return ipr_reset_cmd_failed(ipr_cmd);
7671}
7672
7673/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007674 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7675 * @ipr_cmd: ipr command struct
7676 *
7677 * This function sends a Page 28 mode sense to the IOA to
7678 * retrieve SCSI bus attributes.
7679 *
7680 * Return value:
7681 * IPR_RC_JOB_RETURN
7682 **/
7683static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7684{
7685 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7686
7687 ENTER;
7688 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7689 0x28, ioa_cfg->vpd_cbs_dma +
7690 offsetof(struct ipr_misc_cbs, mode_pages),
7691 sizeof(struct ipr_mode_pages));
7692
7693 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007694 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007695
7696 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7697
7698 LEAVE;
7699 return IPR_RC_JOB_RETURN;
7700}
7701
7702/**
Brian Kingac09c342007-04-26 16:00:16 -05007703 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7704 * @ipr_cmd: ipr command struct
7705 *
7706 * This function enables dual IOA RAID support if possible.
7707 *
7708 * Return value:
7709 * IPR_RC_JOB_RETURN
7710 **/
7711static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7712{
7713 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7714 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7715 struct ipr_mode_page24 *mode_page;
7716 int length;
7717
7718 ENTER;
7719 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7720 sizeof(struct ipr_mode_page24));
7721
7722 if (mode_page)
7723 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7724
7725 length = mode_pages->hdr.length + 1;
7726 mode_pages->hdr.length = 0;
7727
7728 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7729 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7730 length);
7731
7732 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7733 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7734
7735 LEAVE;
7736 return IPR_RC_JOB_RETURN;
7737}
7738
7739/**
7740 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7741 * @ipr_cmd: ipr command struct
7742 *
7743 * This function handles the failure of a Mode Sense to the IOAFP.
7744 * Some adapters do not handle all mode pages.
7745 *
7746 * Return value:
7747 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7748 **/
7749static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7750{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007751 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007752
7753 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7754 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7755 return IPR_RC_JOB_CONTINUE;
7756 }
7757
7758 return ipr_reset_cmd_failed(ipr_cmd);
7759}
7760
7761/**
7762 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7763 * @ipr_cmd: ipr command struct
7764 *
7765 * This function sends a mode sense to the IOA to retrieve
7766 * the IOA Advanced Function Control mode page.
7767 *
7768 * Return value:
7769 * IPR_RC_JOB_RETURN
7770 **/
7771static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7772{
7773 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7774
7775 ENTER;
7776 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7777 0x24, ioa_cfg->vpd_cbs_dma +
7778 offsetof(struct ipr_misc_cbs, mode_pages),
7779 sizeof(struct ipr_mode_pages));
7780
7781 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7782 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7783
7784 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7785
7786 LEAVE;
7787 return IPR_RC_JOB_RETURN;
7788}
7789
7790/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007791 * ipr_init_res_table - Initialize the resource table
7792 * @ipr_cmd: ipr command struct
7793 *
7794 * This function looks through the existing resource table, comparing
7795 * it with the config table. This function will take care of old/new
7796 * devices and schedule adding/removing them from the mid-layer
7797 * as appropriate.
7798 *
7799 * Return value:
7800 * IPR_RC_JOB_CONTINUE
7801 **/
7802static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7803{
7804 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7805 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007806 struct ipr_config_table_entry_wrapper cfgtew;
7807 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007808 LIST_HEAD(old_res);
7809
7810 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007811 if (ioa_cfg->sis64)
7812 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7813 else
7814 flag = ioa_cfg->u.cfg_table->hdr.flags;
7815
7816 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007817 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7818
7819 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7820 list_move_tail(&res->queue, &old_res);
7821
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007822 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007823 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007824 else
7825 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7826
7827 for (i = 0; i < entries; i++) {
7828 if (ioa_cfg->sis64)
7829 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7830 else
7831 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007832 found = 0;
7833
7834 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007835 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007836 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7837 found = 1;
7838 break;
7839 }
7840 }
7841
7842 if (!found) {
7843 if (list_empty(&ioa_cfg->free_res_q)) {
7844 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7845 break;
7846 }
7847
7848 found = 1;
7849 res = list_entry(ioa_cfg->free_res_q.next,
7850 struct ipr_resource_entry, queue);
7851 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007852 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007853 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007854 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7855 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856
7857 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007858 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007859 }
7860
7861 list_for_each_entry_safe(res, temp, &old_res, queue) {
7862 if (res->sdev) {
7863 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007864 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007865 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007866 }
7867 }
7868
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007869 list_for_each_entry_safe(res, temp, &old_res, queue) {
7870 ipr_clear_res_target(res);
7871 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7872 }
7873
Brian Kingac09c342007-04-26 16:00:16 -05007874 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7875 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7876 else
7877 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007878
7879 LEAVE;
7880 return IPR_RC_JOB_CONTINUE;
7881}
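/*
 * The table walk above is a simple diff: every in-use resource is first
 * moved to the local old_res list, then each config table entry is matched
 * back with ipr_is_same_device().  Matches return to used_res_q; unmatched
 * entries get a fresh resource from free_res_q and are flagged add_to_ml.
 * Whatever is left on old_res with an attached sdev is flagged del_from_ml,
 * and the rest goes back to free_res_q.  The next job step is the page 0x24
 * mode sense when dual IOA RAID is enabled, otherwise page 0x28.
 */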
7882
7883/**
7884 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7885 * @ipr_cmd: ipr command struct
7886 *
7887 * This function sends a Query IOA Configuration command
7888 * to the adapter to retrieve the IOA configuration table.
7889 *
7890 * Return value:
7891 * IPR_RC_JOB_RETURN
7892 **/
7893static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7894{
7895 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7896 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007897 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007898 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007899
7900 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007901 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7902 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007903 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7904 ucode_vpd->major_release, ucode_vpd->card_type,
7905 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7906 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7907 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7908
7909 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007910 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007911 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7912 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007913
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007914 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007915 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007916
7917 ipr_cmd->job_step = ipr_init_res_table;
7918
7919 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7920
7921 LEAVE;
7922 return IPR_RC_JOB_RETURN;
7923}
7924
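/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA service action
 * @ipr_cmd: ipr command struct
 *
 * Treats IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT as "service action not
 * supported" and lets the reset job continue; any other IOASC is handled
 * by ipr_reset_cmd_failed().
 *
 * Return value:
 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/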
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02007925static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7926{
7927 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7928
7929 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7930 return IPR_RC_JOB_CONTINUE;
7931
7932 return ipr_reset_cmd_failed(ipr_cmd);
7933}
7934
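/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd: ipr command struct
 * @res_handle: resource handle to send the command to
 * @sa_code: service action code for byte 1 of the CDB
 *
 * Return value:
 * none
 **/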
7935static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7936 __be32 res_handle, u8 sa_code)
7937{
7938 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7939
7940 ioarcb->res_handle = res_handle;
7941 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7942 ioarcb->cmd_pkt.cdb[1] = sa_code;
7943 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7944}
7945
7946/**
7947 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7948 * action
7949 * @ipr_cmd: ipr command struct
 *
7950 * Return value:
7951 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7952 **/
7953static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7954{
7955 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7956 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7957 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7958
7959 ENTER;
7960
7961 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7962
7963 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7964 ipr_build_ioa_service_action(ipr_cmd,
7965 cpu_to_be32(IPR_IOA_RES_HANDLE),
7966 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7967
7968 ioarcb->cmd_pkt.cdb[2] = 0x40;
7969
7970 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7971 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7972 IPR_SET_SUP_DEVICE_TIMEOUT);
7973
7974 LEAVE;
7975 return IPR_RC_JOB_RETURN;
7976 }
7977
7978 LEAVE;
7979 return IPR_RC_JOB_CONTINUE;
7980}
7981
Linus Torvalds1da177e2005-04-16 15:20:36 -07007982/**
7983 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
7984 * @ipr_cmd: ipr command struct
7985 *
7986 * This utility function sends an inquiry to the adapter.
7987 *
7988 * Return value:
7989 * none
7990 **/
7991static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007992 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007993{
7994 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007995
7996 ENTER;
7997 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7998 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7999
8000 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
8001 ioarcb->cmd_pkt.cdb[1] = flags;
8002 ioarcb->cmd_pkt.cdb[2] = page;
8003 ioarcb->cmd_pkt.cdb[4] = xfer_len;
8004
Wayne Boyera32c0552010-02-19 13:23:36 -08008005 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008006
8007 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
8008 LEAVE;
8009}
8010
8011/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06008012 * ipr_inquiry_page_supported - Is the given inquiry page supported
8013 * @page0: inquiry page 0 buffer
8014 * @page: page code.
8015 *
8016 * This function determines if the specified inquiry page is supported.
8017 *
8018 * Return value:
8019 * 1 if page is supported / 0 if not
8020 **/
8021static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
8022{
8023 int i;
8024
8025 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
8026 if (page0->page[i] == page)
8027 return 1;
8028
8029 return 0;
8030}
8031
8032/**
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008033 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
8034 * @ipr_cmd: ipr command struct
8035 *
8036 * This function sends a Page 0xC4 inquiry to the adapter
8037 * to retrieve the adapter's cache capabilities.
8038 *
8039 * Return value:
8040 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8041 **/
8042static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8043{
8044 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8045 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8046 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8047
8048 ENTER;
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02008049 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008050 memset(pageC4, 0, sizeof(*pageC4));
8051
8052 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8053 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8054 (ioa_cfg->vpd_cbs_dma
8055 + offsetof(struct ipr_misc_cbs,
8056 pageC4_data)),
8057 sizeof(struct ipr_inquiry_pageC4));
8058 return IPR_RC_JOB_RETURN;
8059 }
8060
8061 LEAVE;
8062 return IPR_RC_JOB_CONTINUE;
8063}
8064
8065/**
Brian Kingac09c342007-04-26 16:00:16 -05008066 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8067 * @ipr_cmd: ipr command struct
8068 *
8069 * This function sends a Page 0xD0 inquiry to the adapter
8070 * to retrieve adapter capabilities.
8071 *
8072 * Return value:
8073 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8074 **/
8075static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8076{
8077 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8078 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8079 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8080
8081 ENTER;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008082 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
Brian Kingac09c342007-04-26 16:00:16 -05008083 memset(cap, 0, sizeof(*cap));
8084
8085 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8086 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8087 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8088 sizeof(struct ipr_inquiry_cap));
8089 return IPR_RC_JOB_RETURN;
8090 }
8091
8092 LEAVE;
8093 return IPR_RC_JOB_CONTINUE;
8094}
8095
8096/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008097 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8098 * @ipr_cmd: ipr command struct
8099 *
8100 * This function sends a Page 3 inquiry to the adapter
8101 * to retrieve software VPD information.
8102 *
8103 * Return value:
8104 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8105 **/
8106static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8107{
8108 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008109
8110 ENTER;
8111
Brian Kingac09c342007-04-26 16:00:16 -05008112 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008113
8114 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8115 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8116 sizeof(struct ipr_inquiry_page3));
8117
8118 LEAVE;
8119 return IPR_RC_JOB_RETURN;
8120}
8121
8122/**
8123 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8124 * @ipr_cmd: ipr command struct
8125 *
8126 * This function sends a Page 0 inquiry to the adapter
8127 * to retrieve supported inquiry pages.
8128 *
8129 * Return value:
8130 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8131 **/
8132static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8133{
8134 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008135 char type[5];
8136
8137 ENTER;
8138
8139 /* Grab the type out of the VPD and store it away */
8140 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8141 type[4] = '\0';
8142 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8143
Brian Kingf688f962014-12-02 12:47:37 -06008144 if (ipr_invalid_adapter(ioa_cfg)) {
8145 dev_err(&ioa_cfg->pdev->dev,
8146 "Adapter not supported in this hardware configuration.\n");
8147
8148 if (!ipr_testmode) {
8149 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8150 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8151 list_add_tail(&ipr_cmd->queue,
8152 &ioa_cfg->hrrq->hrrq_free_q);
8153 return IPR_RC_JOB_RETURN;
8154 }
8155 }
8156
brking@us.ibm.com62275042005-11-01 17:01:14 -06008157 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008158
brking@us.ibm.com62275042005-11-01 17:01:14 -06008159 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8160 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8161 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008162
8163 LEAVE;
8164 return IPR_RC_JOB_RETURN;
8165}
8166
8167/**
8168 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8169 * @ipr_cmd: ipr command struct
8170 *
8171 * This function sends a standard inquiry to the adapter.
8172 *
8173 * Return value:
8174 * IPR_RC_JOB_RETURN
8175 **/
8176static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8177{
8178 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8179
8180 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008181 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008182
8183 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8184 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8185 sizeof(struct ipr_ioa_vpd));
8186
8187 LEAVE;
8188 return IPR_RC_JOB_RETURN;
8189}
8190
8191/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008192 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008193 * @ipr_cmd: ipr command struct
8194 *
8195 * This function sends an Identify Host Request Response Queue
8196 * command to establish the HRRQ with the adapter.
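 * When multiple HRRQs are configured, this job step is re-entered once
 * per queue until identify_hrrq_index reaches hrrq_num.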
8197 *
8198 * Return value:
8199 * IPR_RC_JOB_RETURN
8200 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08008201static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008202{
8203 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8204 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008205 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008206
8207 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008208 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Brian King87adbe02016-09-16 16:51:37 -05008209 if (ioa_cfg->identify_hrrq_index == 0)
8210 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008211
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008212 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8213 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008214
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008215 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8216 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008217
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008218 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8219 if (ioa_cfg->sis64)
8220 ioarcb->cmd_pkt.cdb[1] = 0x1;
8221
8222 if (ioa_cfg->nvectors == 1)
8223 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8224 else
8225 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8226
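		/*
		 * The Identify Host RRQ CDB carries the queue's DMA address
		 * and length: bytes 2-5 hold the low 32 bits of host_rrq_dma,
		 * bytes 7-8 the queue size in bytes, and, on SIS-64 adapters,
		 * bytes 10-13 hold the upper 32 bits of the address.
		 */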
8227 ioarcb->cmd_pkt.cdb[2] =
8228 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8229 ioarcb->cmd_pkt.cdb[3] =
8230 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8231 ioarcb->cmd_pkt.cdb[4] =
8232 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8233 ioarcb->cmd_pkt.cdb[5] =
8234 ((u64) hrrq->host_rrq_dma) & 0xff;
8235 ioarcb->cmd_pkt.cdb[7] =
8236 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8237 ioarcb->cmd_pkt.cdb[8] =
8238 (sizeof(u32) * hrrq->size) & 0xff;
8239
8240 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008241 ioarcb->cmd_pkt.cdb[9] =
8242 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008243
8244 if (ioa_cfg->sis64) {
8245 ioarcb->cmd_pkt.cdb[10] =
8246 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8247 ioarcb->cmd_pkt.cdb[11] =
8248 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8249 ioarcb->cmd_pkt.cdb[12] =
8250 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8251 ioarcb->cmd_pkt.cdb[13] =
8252 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8253 }
8254
8255 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008256 ioarcb->cmd_pkt.cdb[14] =
8257 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008258
8259 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8260 IPR_INTERNAL_TIMEOUT);
8261
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008262 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8263 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008264
8265 LEAVE;
8266 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08008267 }
8268
Linus Torvalds1da177e2005-04-16 15:20:36 -07008269 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008270 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008271}
8272
8273/**
8274 * ipr_reset_timer_done - Adapter reset timer function
8275 * @ipr_cmd: ipr command struct
8276 *
8277 * Description: This function is used in adapter reset processing
8278 * for timing events. If the reset_cmd pointer in the IOA
8279 * config struct is not this adapter's, we are doing nested
8280 * resets and fail_all_ops will take care of freeing the
8281 * command block.
8282 *
8283 * Return value:
8284 * none
8285 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07008286static void ipr_reset_timer_done(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008287{
Kees Cook738c6ec2017-08-18 16:53:24 -07008288 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008289 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8290 unsigned long lock_flags = 0;
8291
8292 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8293
8294 if (ioa_cfg->reset_cmd == ipr_cmd) {
8295 list_del(&ipr_cmd->queue);
8296 ipr_cmd->done(ipr_cmd);
8297 }
8298
8299 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8300}
8301
8302/**
8303 * ipr_reset_start_timer - Start a timer for adapter reset job
8304 * @ipr_cmd: ipr command struct
8305 * @timeout: timeout value
8306 *
8307 * Description: This function is used in adapter reset processing
8308 * for timing events. If the reset_cmd pointer in the IOA
8309 * config struct is not this adapter's, we are doing nested
8310 * resets and fail_all_ops will take care of freeing the
8311 * command block.
8312 *
8313 * Return value:
8314 * none
8315 **/
8316static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8317 unsigned long timeout)
8318{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008319
8320 ENTER;
8321 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008322 ipr_cmd->done = ipr_reset_ioa_job;
8323
Linus Torvalds1da177e2005-04-16 15:20:36 -07008324 ipr_cmd->timer.expires = jiffies + timeout;
Kees Cook841b86f2017-10-23 09:40:42 +02008325 ipr_cmd->timer.function = ipr_reset_timer_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008326 add_timer(&ipr_cmd->timer);
8327}
8328
8329/**
8330 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8331 * @ioa_cfg: ioa cfg struct
8332 *
8333 * Return value:
8334 * nothing
8335 **/
8336static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8337{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008338 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008339
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008340 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008341 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008342 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8343
8344 /* Initialize Host RRQ pointers */
8345 hrrq->hrrq_start = hrrq->host_rrq;
8346 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8347 hrrq->hrrq_curr = hrrq->hrrq_start;
8348 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008349 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008350 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008351 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008352
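	/*
	 * With multiple queues, HRRQ 0 is dedicated to internal adapter
	 * commands, so the queue selector for new I/O starts at index 1.
	 */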
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008353 ioa_cfg->identify_hrrq_index = 0;
8354 if (ioa_cfg->hrrq_num == 1)
8355 atomic_set(&ioa_cfg->hrrq_index, 0);
8356 else
8357 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008358
8359 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008360 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008361}
8362
8363/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008364 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8365 * @ipr_cmd: ipr command struct
8366 *
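 * Description: Read the IPL initialization feedback register to determine
 * the current boot stage and arm a stage-appropriate timeout while waiting
 * for the adapter to advance to the next stage.
 *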
8367 * Return value:
8368 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8369 **/
8370static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8371{
8372 unsigned long stage, stage_time;
8373 u32 feedback;
8374 volatile u32 int_reg;
8375 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8376 u64 maskval = 0;
8377
8378 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8379 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8380 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8381
8382 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8383
8384 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07008385 if (stage_time == 0)
8386 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8387 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08008388 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8389 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8390 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8391
8392 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8393 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8394 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8395 stage_time = ioa_cfg->transop_timeout;
8396 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8397 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07008398 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8399 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8400 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8401 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8402 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8403 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8404 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8405 return IPR_RC_JOB_CONTINUE;
8406 }
Wayne Boyer214777b2010-02-19 13:24:26 -08008407 }
8408
Wayne Boyer214777b2010-02-19 13:24:26 -08008409 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
Kees Cook841b86f2017-10-23 09:40:42 +02008410 ipr_cmd->timer.function = ipr_oper_timeout;
Wayne Boyer214777b2010-02-19 13:24:26 -08008411 ipr_cmd->done = ipr_reset_ioa_job;
8412 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008413
8414 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08008415
8416 return IPR_RC_JOB_RETURN;
8417}
8418
8419/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008420 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8421 * @ipr_cmd: ipr command struct
8422 *
8423 * This function reinitializes some control blocks and
8424 * enables destructive diagnostics on the adapter.
8425 *
8426 * Return value:
8427 * IPR_RC_JOB_RETURN
8428 **/
8429static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8430{
8431 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8432 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07008433 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008434 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008435
8436 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08008437 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008438 ipr_init_ioa_mem(ioa_cfg);
8439
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008440 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8441 spin_lock(&ioa_cfg->hrrq[i]._lock);
8442 ioa_cfg->hrrq[i].allow_interrupts = 1;
8443 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8444 }
Wayne Boyer8701f182010-06-04 10:26:50 -07008445 if (ioa_cfg->sis64) {
8446 /* Set the adapter to the correct endian mode. */
8447 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8448 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8449 }
8450
Wayne Boyer7be96902010-05-10 09:14:07 -07008451 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008452
8453 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8454 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08008455 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008456 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8457 return IPR_RC_JOB_CONTINUE;
8458 }
8459
8460 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08008461 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008462
Wayne Boyer7be96902010-05-10 09:14:07 -07008463 if (ioa_cfg->sis64) {
8464 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8465 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8466 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8467 } else
8468 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08008469
Linus Torvalds1da177e2005-04-16 15:20:36 -07008470 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8471
8472 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8473
Wayne Boyer214777b2010-02-19 13:24:26 -08008474 if (ioa_cfg->sis64) {
8475 ipr_cmd->job_step = ipr_reset_next_stage;
8476 return IPR_RC_JOB_CONTINUE;
8477 }
8478
Brian King5469cb52007-03-29 12:42:40 -05008479 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Kees Cook841b86f2017-10-23 09:40:42 +02008480 ipr_cmd->timer.function = ipr_oper_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008481 ipr_cmd->done = ipr_reset_ioa_job;
8482 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008483 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008484
8485 LEAVE;
8486 return IPR_RC_JOB_RETURN;
8487}
8488
8489/**
8490 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8491 * @ipr_cmd: ipr command struct
8492 *
8493 * This function is invoked when an adapter dump has run out
8494 * of processing time.
8495 *
8496 * Return value:
8497 * IPR_RC_JOB_CONTINUE
8498 **/
8499static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8500{
8501 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8502
8503 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05008504 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8505 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008506 ioa_cfg->sdt_state = ABORT_DUMP;
8507
Brian King4c647e92011-10-15 09:08:56 -05008508 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008509 ipr_cmd->job_step = ipr_reset_alert;
8510
8511 return IPR_RC_JOB_CONTINUE;
8512}
8513
8514/**
8515 * ipr_unit_check_no_data - Log a unit check/no data error log
8516 * @ioa_cfg: ioa config struct
8517 *
8518 * Logs an error indicating the adapter unit checked, but for some
8519 * reason, we were unable to fetch the unit check buffer.
8520 *
8521 * Return value:
8522 * nothing
8523 **/
8524static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8525{
8526 ioa_cfg->errors_logged++;
8527 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8528}
8529
8530/**
8531 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8532 * @ioa_cfg: ioa config struct
8533 *
8534 * Fetches the unit check buffer from the adapter by clocking the data
8535 * through the mailbox register.
8536 *
8537 * Return value:
8538 * nothing
8539 **/
8540static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8541{
8542 unsigned long mailbox;
8543 struct ipr_hostrcb *hostrcb;
8544 struct ipr_uc_sdt sdt;
8545 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008546 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008547
8548 mailbox = readl(ioa_cfg->ioa_mailbox);
8549
Wayne Boyerdcbad002010-02-19 13:24:14 -08008550 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008551 ipr_unit_check_no_data(ioa_cfg);
8552 return;
8553 }
8554
8555 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8556 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8557 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8558
Wayne Boyerdcbad002010-02-19 13:24:14 -08008559 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8560 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8561 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008562 ipr_unit_check_no_data(ioa_cfg);
8563 return;
8564 }
8565
8566 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008567 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8568 length = be32_to_cpu(sdt.entry[0].end_token);
8569 else
8570 length = (be32_to_cpu(sdt.entry[0].end_token) -
8571 be32_to_cpu(sdt.entry[0].start_token)) &
8572 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008573
8574 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8575 struct ipr_hostrcb, queue);
Brian Kingafc3f832016-08-24 12:56:51 -05008576 list_del_init(&hostrcb->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008577 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8578
8579 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008580 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008581 (__be32 *)&hostrcb->hcam,
8582 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8583
Brian King65f56472007-04-26 16:00:12 -05008584 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008585 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008586 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008587 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8588 ioa_cfg->sdt_state == GET_DUMP)
8589 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8590 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008591 ipr_unit_check_no_data(ioa_cfg);
8592
8593 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8594}
8595
8596/**
Wayne Boyer110def82010-11-04 09:36:16 -07008597 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8598 * @ipr_cmd: ipr command struct
8599 *
8600 * Description: This function retrieves the unit check buffer from the adapter.
8601 *
8602 * Return value:
8603 * IPR_RC_JOB_RETURN
8604 **/
8605static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8606{
8607 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8608
8609 ENTER;
8610 ioa_cfg->ioa_unit_checked = 0;
8611 ipr_get_unit_check_buffer(ioa_cfg);
8612 ipr_cmd->job_step = ipr_reset_alert;
8613 ipr_reset_start_timer(ipr_cmd, 0);
8614
8615 LEAVE;
8616 return IPR_RC_JOB_RETURN;
8617}
8618
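/**
 * ipr_dump_mailbox_wait - Wait for the mailbox to stabilize before a dump
 * @ipr_cmd: ipr command struct
 *
 * Description: On SIS-64 adapters, wait (retrying until the allotted time
 * is exhausted) for the mailbox register to report stable. Once it is
 * stable, or immediately on older adapters, move sdt_state to READ_DUMP,
 * start the dump timer, and schedule the dump worker.
 *
 * Return value:
 * 	IPR_RC_JOB_RETURN
 **/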
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008619static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8620{
8621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8622
8623 ENTER;
8624
8625 if (ioa_cfg->sdt_state != GET_DUMP)
8626 return IPR_RC_JOB_RETURN;
8627
8628 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8629 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8630 IPR_PCII_MAILBOX_STABLE)) {
8631
8632 if (!ipr_cmd->u.time_left)
8633 dev_err(&ioa_cfg->pdev->dev,
8634 "Timed out waiting for Mailbox register.\n");
8635
8636 ioa_cfg->sdt_state = READ_DUMP;
8637 ioa_cfg->dump_timeout = 0;
8638 if (ioa_cfg->sis64)
8639 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8640 else
8641 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8642 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8643 schedule_work(&ioa_cfg->work_q);
8644
8645 } else {
8646 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8647 ipr_reset_start_timer(ipr_cmd,
8648 IPR_CHECK_FOR_RESET_TIMEOUT);
8649 }
8650
8651 LEAVE;
8652 return IPR_RC_JOB_RETURN;
8653}
8654
Wayne Boyer110def82010-11-04 09:36:16 -07008655/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008656 * ipr_reset_restore_cfg_space - Restore PCI config space.
8657 * @ipr_cmd: ipr command struct
8658 *
8659 * Description: This function restores the saved PCI config space of
8660 * the adapter, fails all outstanding ops back to the callers, and
8661 * fetches the dump/unit check if applicable to this reset.
8662 *
8663 * Return value:
8664 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8665 **/
8666static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8667{
8668 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008669 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008670
8671 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008672 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008673 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008674
8675 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008676 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008677 return IPR_RC_JOB_CONTINUE;
8678 }
8679
8680 ipr_fail_all_ops(ioa_cfg);
8681
Wayne Boyer8701f182010-06-04 10:26:50 -07008682 if (ioa_cfg->sis64) {
8683 /* Set the adapter to the correct endian mode. */
8684 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8685 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8686 }
8687
Linus Torvalds1da177e2005-04-16 15:20:36 -07008688 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008689 if (ioa_cfg->sis64) {
8690 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8691 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8692 return IPR_RC_JOB_RETURN;
8693 } else {
8694 ioa_cfg->ioa_unit_checked = 0;
8695 ipr_get_unit_check_buffer(ioa_cfg);
8696 ipr_cmd->job_step = ipr_reset_alert;
8697 ipr_reset_start_timer(ipr_cmd, 0);
8698 return IPR_RC_JOB_RETURN;
8699 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008700 }
8701
8702 if (ioa_cfg->in_ioa_bringdown) {
8703 ipr_cmd->job_step = ipr_ioa_bringdown_done;
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008704 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8705 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8706 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008707 } else {
8708 ipr_cmd->job_step = ipr_reset_enable_ioa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008709 }
8710
Wayne Boyer438b0332010-05-10 09:13:00 -07008711 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008712 return IPR_RC_JOB_CONTINUE;
8713}
8714
8715/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008716 * ipr_reset_bist_done - BIST has completed on the adapter.
8717 * @ipr_cmd: ipr command struct
8718 *
8719 * Description: Unblock config space and resume the reset process.
8720 *
8721 * Return value:
8722 * IPR_RC_JOB_CONTINUE
8723 **/
8724static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8725{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008726 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8727
Brian Kinge619e1a2007-01-23 11:25:37 -06008728 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008729 if (ioa_cfg->cfg_locked)
8730 pci_cfg_access_unlock(ioa_cfg->pdev);
8731 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008732 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8733 LEAVE;
8734 return IPR_RC_JOB_CONTINUE;
8735}
8736
8737/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008738 * ipr_reset_start_bist - Run BIST on the adapter.
8739 * @ipr_cmd: ipr command struct
8740 *
8741 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8742 *
8743 * Return value:
8744 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8745 **/
8746static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8747{
8748 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008749 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008750
8751 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008752 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8753 writel(IPR_UPROCI_SIS64_START_BIST,
8754 ioa_cfg->regs.set_uproc_interrupt_reg32);
8755 else
8756 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8757
8758 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008759 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008760 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8761 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008762 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008763 if (ioa_cfg->cfg_locked)
8764 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8765 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008766 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8767 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008768 }
8769
8770 LEAVE;
8771 return rc;
8772}
8773
8774/**
Brian King463fc692007-05-07 17:09:05 -05008775 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8776 * @ipr_cmd: ipr command struct
8777 *
8778 * Description: This clears PCI reset to the adapter and delays two seconds.
8779 *
8780 * Return value:
8781 * IPR_RC_JOB_RETURN
8782 **/
8783static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8784{
8785 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008786 ipr_cmd->job_step = ipr_reset_bist_done;
8787 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8788 LEAVE;
8789 return IPR_RC_JOB_RETURN;
8790}
8791
8792/**
Brian King2796ca52015-03-26 11:23:52 -05008793 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8794 * @work: work struct
8795 *
8796 * Description: This pulses a warm reset to the slot.
8797 *
8798 **/
8799static void ipr_reset_reset_work(struct work_struct *work)
8800{
8801 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8802 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8803 struct pci_dev *pdev = ioa_cfg->pdev;
8804 unsigned long lock_flags = 0;
8805
8806 ENTER;
8807 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8808 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8809 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8810
8811 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8812 if (ioa_cfg->reset_cmd == ipr_cmd)
8813 ipr_reset_ioa_job(ipr_cmd);
8814 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8815 LEAVE;
8816}
8817
8818/**
Brian King463fc692007-05-07 17:09:05 -05008819 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8820 * @ipr_cmd: ipr command struct
8821 *
8822 * Description: This asserts PCI reset to the adapter.
8823 *
8824 * Return value:
8825 * IPR_RC_JOB_RETURN
8826 **/
8827static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8828{
8829 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Brian King463fc692007-05-07 17:09:05 -05008830
8831 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05008832 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8833 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
Brian King463fc692007-05-07 17:09:05 -05008834 ipr_cmd->job_step = ipr_reset_slot_reset_done;
Brian King463fc692007-05-07 17:09:05 -05008835 LEAVE;
8836 return IPR_RC_JOB_RETURN;
8837}
8838
8839/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008840 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8841 * @ipr_cmd: ipr command struct
8842 *
8843 * Description: This attempts to block config access to the IOA.
8844 *
8845 * Return value:
8846 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8847 **/
8848static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8849{
8850 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8851 int rc = IPR_RC_JOB_CONTINUE;
8852
8853 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8854 ioa_cfg->cfg_locked = 1;
8855 ipr_cmd->job_step = ioa_cfg->reset;
8856 } else {
8857 if (ipr_cmd->u.time_left) {
8858 rc = IPR_RC_JOB_RETURN;
8859 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8860 ipr_reset_start_timer(ipr_cmd,
8861 IPR_CHECK_FOR_RESET_TIMEOUT);
8862 } else {
8863 ipr_cmd->job_step = ioa_cfg->reset;
8864 dev_err(&ioa_cfg->pdev->dev,
8865 "Timed out waiting to lock config access. Resetting anyway.\n");
8866 }
8867 }
8868
8869 return rc;
8870}
8871
8872/**
8873 * ipr_reset_block_config_access - Block config access to the IOA
8874 * @ipr_cmd: ipr command struct
8875 *
8876 * Description: This attempts to block config access to the IOA
8877 *
8878 * Return value:
8879 * IPR_RC_JOB_CONTINUE
8880 **/
8881static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8882{
8883 ipr_cmd->ioa_cfg->cfg_locked = 0;
8884 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8885 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8886 return IPR_RC_JOB_CONTINUE;
8887}
8888
8889/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008890 * ipr_reset_allowed - Query whether or not IOA can be reset
8891 * @ioa_cfg: ioa config struct
8892 *
8893 * Return value:
8894 * 0 if reset not allowed / non-zero if reset is allowed
8895 **/
8896static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8897{
8898 volatile u32 temp_reg;
8899
8900 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8901 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8902}
8903
8904/**
8905 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8906 * @ipr_cmd: ipr command struct
8907 *
8908 * Description: This function waits for adapter permission to run BIST,
8909 * then runs BIST. If the adapter does not give permission after a
8910 * reasonable time, we will reset the adapter anyway. The impact of
8911 * resetting the adapter without warning it is the risk of
8912 * losing the persistent error log on the adapter. If the adapter is
8913 * reset while it is writing to the flash on the adapter, the flash
8914 * segment will have bad ECC and be zeroed.
8915 *
8916 * Return value:
8917 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8918 **/
8919static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8920{
8921 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8922 int rc = IPR_RC_JOB_RETURN;
8923
8924 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8925 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8926 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8927 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008928 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008929 rc = IPR_RC_JOB_CONTINUE;
8930 }
8931
8932 return rc;
8933}
8934
8935/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008936 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008937 * @ipr_cmd: ipr command struct
8938 *
8939 * Description: This function alerts the adapter that it will be reset.
8940 * If memory space is not currently enabled, proceed directly
8941 * to running BIST on the adapter. The timer must always be started
8942 * so we guarantee we do not run BIST from ipr_isr.
8943 *
8944 * Return value:
8945 * IPR_RC_JOB_RETURN
8946 **/
8947static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8948{
8949 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8950 u16 cmd_reg;
8951 int rc;
8952
8953 ENTER;
8954 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8955
8956 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8957 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008958 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008959 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8960 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008961 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008962 }
8963
8964 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8965 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8966
8967 LEAVE;
8968 return IPR_RC_JOB_RETURN;
8969}
8970
8971/**
Brian King4fdd7c72015-03-26 11:23:50 -05008972 * ipr_reset_quiesce_done - Complete IOA disconnect
8973 * @ipr_cmd: ipr command struct
8974 *
8975 * Description: Freeze the adapter to complete quiesce processing
8976 *
8977 * Return value:
8978 * IPR_RC_JOB_CONTINUE
8979 **/
8980static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8981{
8982 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8983
8984 ENTER;
8985 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8986 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8987 LEAVE;
8988 return IPR_RC_JOB_CONTINUE;
8989}
8990
8991/**
8992 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8993 * @ipr_cmd: ipr command struct
8994 *
8995 * Description: Ensure nothing is outstanding to the IOA and
8996 * proceed with IOA disconnect. Otherwise reset the IOA.
8997 *
8998 * Return value:
8999 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
9000 **/
9001static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
9002{
9003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9004 struct ipr_cmnd *loop_cmd;
9005 struct ipr_hrr_queue *hrrq;
9006 int rc = IPR_RC_JOB_CONTINUE;
9007 int count = 0;
9008
9009 ENTER;
9010 ipr_cmd->job_step = ipr_reset_quiesce_done;
9011
9012 for_each_hrrq(hrrq, ioa_cfg) {
9013 spin_lock(&hrrq->_lock);
9014 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
9015 count++;
9016 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9017 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9018 rc = IPR_RC_JOB_RETURN;
9019 break;
9020 }
9021 spin_unlock(&hrrq->_lock);
9022
9023 if (count)
9024 break;
9025 }
9026
9027 LEAVE;
9028 return rc;
9029}
9030
9031/**
9032 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
9033 * @ipr_cmd: ipr command struct
9034 *
9035 * Description: Cancel any outstanding HCAMs to the IOA.
9036 *
9037 * Return value:
9038 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9039 **/
9040static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9041{
9042 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9043 int rc = IPR_RC_JOB_CONTINUE;
9044 struct ipr_cmd_pkt *cmd_pkt;
9045 struct ipr_cmnd *hcam_cmd;
9046 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9047
9048 ENTER;
9049 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9050
9051 if (!hrrq->ioa_is_dead) {
9052 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9053 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9054 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9055 continue;
9056
9057 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9058 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9059 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9060 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9061 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9062 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
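				/*
				 * The 64-bit IOARCB address of the HCAM being
				 * cancelled is split across the CDB: bytes 10-13
				 * carry the upper 32 bits and bytes 2-5 the
				 * lower 32 bits.
				 */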
9063 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9064 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9065 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9066 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9067 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9068 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9069 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9070 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9071
9072 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9073 IPR_CANCEL_TIMEOUT);
9074
9075 rc = IPR_RC_JOB_RETURN;
9076 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9077 break;
9078 }
9079 }
9080 } else
9081 ipr_cmd->job_step = ipr_reset_alert;
9082
9083 LEAVE;
9084 return rc;
9085}
9086
9087/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009088 * ipr_reset_ucode_download_done - Microcode download completion
9089 * @ipr_cmd: ipr command struct
9090 *
9091 * Description: This function unmaps the microcode download buffer.
9092 *
9093 * Return value:
9094 * IPR_RC_JOB_CONTINUE
9095 **/
9096static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9097{
9098 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9099 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9100
Anton Blanchardd73341b2014-10-30 17:27:08 -05009101 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009102 sglist->num_sg, DMA_TO_DEVICE);
9103
9104 ipr_cmd->job_step = ipr_reset_alert;
9105 return IPR_RC_JOB_CONTINUE;
9106}
9107
9108/**
9109 * ipr_reset_ucode_download - Download microcode to the adapter
9110 * @ipr_cmd: ipr command struct
9111 *
9112 * Description: This function checks to see if there is microcode
9113 * to download to the adapter. If there is, a download is performed.
9114 *
9115 * Return value:
9116 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9117 **/
9118static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9119{
9120 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9121 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9122
9123 ENTER;
9124 ipr_cmd->job_step = ipr_reset_alert;
9125
9126 if (!sglist)
9127 return IPR_RC_JOB_CONTINUE;
9128
9129 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9130 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9131 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9132 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
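	/* CDB bytes 6-8 carry the 24-bit length of the microcode image */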
9133 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9134 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9135 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9136
Wayne Boyera32c0552010-02-19 13:23:36 -08009137 if (ioa_cfg->sis64)
9138 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9139 else
9140 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009141 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9142
9143 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9144 IPR_WRITE_BUFFER_TIMEOUT);
9145
9146 LEAVE;
9147 return IPR_RC_JOB_RETURN;
9148}
9149
9150/**
9151 * ipr_reset_shutdown_ioa - Shutdown the adapter
9152 * @ipr_cmd: ipr command struct
9153 *
9154 * Description: This function issues an adapter shutdown of the
9155 * specified type to the specified adapter as part of the
9156 * adapter reset job.
9157 *
9158 * Return value:
9159 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9160 **/
9161static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9162{
9163 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9164 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9165 unsigned long timeout;
9166 int rc = IPR_RC_JOB_CONTINUE;
9167
9168 ENTER;
Brian King4fdd7c72015-03-26 11:23:50 -05009169 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9170 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9171 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009172 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009173 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9174 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9175 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9176 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9177
Brian Kingac09c342007-04-26 16:00:16 -05009178 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9179 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009180 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9181 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05009182 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9183 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009184 else
Brian Kingac09c342007-04-26 16:00:16 -05009185 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009186
9187 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9188
9189 rc = IPR_RC_JOB_RETURN;
9190 ipr_cmd->job_step = ipr_reset_ucode_download;
9191 } else
9192 ipr_cmd->job_step = ipr_reset_alert;
9193
9194 LEAVE;
9195 return rc;
9196}
9197
9198/**
9199 * ipr_reset_ioa_job - Adapter reset job
9200 * @ipr_cmd: ipr command struct
9201 *
9202 * Description: This function is the job router for the adapter reset job.
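 * Each job_step callback returns IPR_RC_JOB_CONTINUE to have the next step
 * run immediately in this loop, or IPR_RC_JOB_RETURN once it has deferred
 * the next step to a timer, an issued adapter command, or queued work.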
9203 *
9204 * Return value:
9205 * none
9206 **/
9207static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9208{
9209 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009210 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9211
9212 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07009213 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009214
9215 if (ioa_cfg->reset_cmd != ipr_cmd) {
9216 /*
9217 * We are doing nested adapter resets and this is
9218 * not the current reset job.
9219 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009220 list_add_tail(&ipr_cmd->queue,
9221 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009222 return;
9223 }
9224
9225 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009226 rc = ipr_cmd->job_step_failed(ipr_cmd);
9227 if (rc == IPR_RC_JOB_RETURN)
9228 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009229 }
9230
9231 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009232 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009233 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009234 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009235}
9236
9237/**
9238 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9239 * @ioa_cfg: ioa config struct
9240 * @job_step: first job step of reset job
9241 * @shutdown_type: shutdown type
9242 *
9243 * Description: This function will initiate the reset of the given adapter
9244 * starting at the selected job step.
9245 * If the caller needs to wait on the completion of the reset,
9246 * the caller must sleep on the reset_wait_q.
9247 *
9248 * Return value:
9249 * none
9250 **/
9251static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9252 int (*job_step) (struct ipr_cmnd *),
9253 enum ipr_shutdown_type shutdown_type)
9254{
9255 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009256 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009257
9258 ioa_cfg->in_reset_reload = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009259 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9260 spin_lock(&ioa_cfg->hrrq[i]._lock);
9261 ioa_cfg->hrrq[i].allow_cmds = 0;
9262 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9263 }
9264 wmb();
Brian Kingb0e17a92017-08-01 10:21:30 -05009265 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9266 ioa_cfg->scsi_unblock = 0;
9267 ioa_cfg->scsi_blocked = 1;
Brian Kingbfae7822013-01-30 23:45:08 -06009268 scsi_block_requests(ioa_cfg->host);
Brian Kingb0e17a92017-08-01 10:21:30 -05009269 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009270
9271 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9272 ioa_cfg->reset_cmd = ipr_cmd;
9273 ipr_cmd->job_step = job_step;
9274 ipr_cmd->u.shutdown_type = shutdown_type;
9275
9276 ipr_reset_ioa_job(ipr_cmd);
9277}
9278
9279/**
9280 * ipr_initiate_ioa_reset - Initiate an adapter reset
9281 * @ioa_cfg: ioa config struct
9282 * @shutdown_type: shutdown type
9283 *
9284 * Description: This function will initiate the reset of the given adapter.
9285 * If the caller needs to wait on the completion of the reset,
9286 * the caller must sleep on the reset_wait_q.
9287 *
9288 * Return value:
9289 * none
9290 **/
9291static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9292 enum ipr_shutdown_type shutdown_type)
9293{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009294 int i;
9295
9296 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009297 return;
9298
Brian King41e9a692011-09-21 08:51:11 -05009299 if (ioa_cfg->in_reset_reload) {
9300 if (ioa_cfg->sdt_state == GET_DUMP)
9301 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9302 else if (ioa_cfg->sdt_state == READ_DUMP)
9303 ioa_cfg->sdt_state = ABORT_DUMP;
9304 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009305
9306 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9307 dev_err(&ioa_cfg->pdev->dev,
9308 "IOA taken offline - error recovery failed\n");
9309
9310 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009311 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9312 spin_lock(&ioa_cfg->hrrq[i]._lock);
9313 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9314 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9315 }
9316 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009317
9318 if (ioa_cfg->in_ioa_bringdown) {
9319 ioa_cfg->reset_cmd = NULL;
9320 ioa_cfg->in_reset_reload = 0;
9321 ipr_fail_all_ops(ioa_cfg);
9322 wake_up_all(&ioa_cfg->reset_wait_q);
9323
Brian Kingbfae7822013-01-30 23:45:08 -06009324 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
Brian Kingb0e17a92017-08-01 10:21:30 -05009325 ioa_cfg->scsi_unblock = 1;
9326 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06009327 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009328 return;
9329 } else {
9330 ioa_cfg->in_ioa_bringdown = 1;
9331 shutdown_type = IPR_SHUTDOWN_NONE;
9332 }
9333 }
9334
9335 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9336 shutdown_type);
9337}
9338
9339/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009340 * ipr_reset_freeze - Hold off all I/O activity
9341 * @ipr_cmd: ipr command struct
9342 *
9343 * Description: If the PCI slot is frozen, hold off all I/O
9344 * activity; then, as soon as the slot is available again,
9345 * initiate an adapter reset.
9346 */
9347static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9348{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009349 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9350 int i;
9351
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009352 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009353 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9354 spin_lock(&ioa_cfg->hrrq[i]._lock);
9355 ioa_cfg->hrrq[i].allow_interrupts = 0;
9356 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9357 }
9358 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009359 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009360 ipr_cmd->done = ipr_reset_ioa_job;
9361 return IPR_RC_JOB_RETURN;
9362}
9363
9364/**
Brian King6270e592014-01-21 12:16:41 -06009365 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9366 * @pdev: PCI device struct
9367 *
9368 * Description: This routine is called to tell us that the MMIO
9369 * access to the IOA has been restored
9370 */
9371static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9372{
9373 unsigned long flags = 0;
9374 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9375
9376 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9377 if (!ioa_cfg->probe_done)
9378 pci_save_state(pdev);
9379 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9380 return PCI_ERS_RESULT_NEED_RESET;
9381}
9382
9383/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009384 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9385 * @pdev: PCI device struct
9386 *
9387 * Description: This routine is called to tell us that the PCI bus
9388 * is down. Can't do anything here, except put the device driver
9389 * into a holding pattern, waiting for the PCI bus to come back.
9390 */
9391static void ipr_pci_frozen(struct pci_dev *pdev)
9392{
9393 unsigned long flags = 0;
9394 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9395
9396 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009397 if (ioa_cfg->probe_done)
9398 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009399 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9400}
9401
9402/**
9403 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9404 * @pdev: PCI device struct
9405 *
9406 * Description: This routine is called by the pci error recovery
9407 * code after the PCI slot has been reset, just before we
9408 * should resume normal operations.
9409 */
9410static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9411{
9412 unsigned long flags = 0;
9413 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9414
9415 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009416 if (ioa_cfg->probe_done) {
9417 if (ioa_cfg->needs_warm_reset)
9418 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9419 else
9420 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9421 IPR_SHUTDOWN_NONE);
9422 } else
9423 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009424 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9425 return PCI_ERS_RESULT_RECOVERED;
9426}
9427
9428/**
9429 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9430 * @pdev: PCI device struct
9431 *
9432 * Description: This routine is called when the PCI bus has
9433 * permanently failed.
9434 */
9435static void ipr_pci_perm_failure(struct pci_dev *pdev)
9436{
9437 unsigned long flags = 0;
9438 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009439 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009440
9441 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009442 if (ioa_cfg->probe_done) {
9443 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9444 ioa_cfg->sdt_state = ABORT_DUMP;
9445 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9446 ioa_cfg->in_ioa_bringdown = 1;
9447 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9448 spin_lock(&ioa_cfg->hrrq[i]._lock);
9449 ioa_cfg->hrrq[i].allow_cmds = 0;
9450 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9451 }
9452 wmb();
9453 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9454 } else
9455 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009456 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9457}
9458
9459/**
9460 * ipr_pci_error_detected - Called when a PCI error is detected.
9461 * @pdev: PCI device struct
9462 * @state: PCI channel state
9463 *
9464 * Description: Called when a PCI error is detected.
9465 *
9466 * Return value:
9467 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9468 */
9469static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9470 pci_channel_state_t state)
9471{
9472 switch (state) {
9473 case pci_channel_io_frozen:
9474 ipr_pci_frozen(pdev);
Brian King6270e592014-01-21 12:16:41 -06009475 return PCI_ERS_RESULT_CAN_RECOVER;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009476 case pci_channel_io_perm_failure:
9477 ipr_pci_perm_failure(pdev);
9478 return PCI_ERS_RESULT_DISCONNECT;
9480 default:
9481 break;
9482 }
9483 return PCI_ERS_RESULT_NEED_RESET;
9484}
9485
9486/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009487 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9488 * @ioa_cfg: ioa cfg struct
9489 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08009490 * Description: This is the second phase of adapter initialization.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009491 * This function takes care of initializing the adapter to the point
9492 * where it can accept new commands.
9493 *
9494 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02009495 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009496 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009497static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009498{
9499 int rc = 0;
9500 unsigned long host_lock_flags = 0;
9501
9502 ENTER;
9503 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9504 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
Brian King6270e592014-01-21 12:16:41 -06009505 ioa_cfg->probe_done = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009506 if (ioa_cfg->needs_hard_reset) {
9507 ioa_cfg->needs_hard_reset = 0;
9508 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9509 } else
9510 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9511 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009512 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009513
9514 LEAVE;
9515 return rc;
9516}
9517
9518/**
9519 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9520 * @ioa_cfg: ioa config struct
9521 *
9522 * Return value:
9523 * none
9524 **/
9525static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9526{
9527 int i;
9528
Brian Kinga65e8f12015-03-26 11:23:55 -05009529 if (ioa_cfg->ipr_cmnd_list) {
9530 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9531 if (ioa_cfg->ipr_cmnd_list[i])
9532 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9533 ioa_cfg->ipr_cmnd_list[i],
9534 ioa_cfg->ipr_cmnd_list_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009535
Brian Kinga65e8f12015-03-26 11:23:55 -05009536 ioa_cfg->ipr_cmnd_list[i] = NULL;
9537 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009538 }
9539
9540 if (ioa_cfg->ipr_cmd_pool)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009541 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009542
Brian King89aad422012-03-14 21:20:10 -05009543 kfree(ioa_cfg->ipr_cmnd_list);
9544 kfree(ioa_cfg->ipr_cmnd_list_dma);
9545 ioa_cfg->ipr_cmnd_list = NULL;
9546 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009547 ioa_cfg->ipr_cmd_pool = NULL;
9548}
9549
9550/**
9551 * ipr_free_mem - Frees memory allocated for an adapter
9552 * @ioa_cfg: ioa cfg struct
9553 *
9554 * Return value:
9555 * nothing
9556 **/
9557static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9558{
9559 int i;
9560
9561 kfree(ioa_cfg->res_entries);
Anton Blanchardd73341b2014-10-30 17:27:08 -05009562 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9563 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009564 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009565
9566 for (i = 0; i < ioa_cfg->hrrq_num; i++)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009567 dma_free_coherent(&ioa_cfg->pdev->dev,
9568 sizeof(u32) * ioa_cfg->hrrq[i].size,
9569 ioa_cfg->hrrq[i].host_rrq,
9570 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009571
Anton Blanchardd73341b2014-10-30 17:27:08 -05009572 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9573 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009574
Brian Kingafc3f832016-08-24 12:56:51 -05009575 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009576 dma_free_coherent(&ioa_cfg->pdev->dev,
9577 sizeof(struct ipr_hostrcb),
9578 ioa_cfg->hostrcb[i],
9579 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009580 }
9581
9582 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009583 kfree(ioa_cfg->trace);
9584}
9585
9586/**
Brian King2796ca52015-03-26 11:23:52 -05009587 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9588 * @ioa_cfg: ipr cfg struct
9589 *
9590 * This function frees all allocated IRQs for the
9591 * specified adapter.
9592 *
9593 * Return value:
9594 * none
9595 **/
9596static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9597{
9598 struct pci_dev *pdev = ioa_cfg->pdev;
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009599 int i;
Brian King2796ca52015-03-26 11:23:52 -05009600
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009601 for (i = 0; i < ioa_cfg->nvectors; i++)
9602 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9603 pci_free_irq_vectors(pdev);
Brian King2796ca52015-03-26 11:23:52 -05009604}
9605
9606/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009607 * ipr_free_all_resources - Free all allocated resources for an adapter.
 * @ioa_cfg: ioa config struct
9609 *
9610 * This function frees all allocated resources for the
9611 * specified adapter.
9612 *
9613 * Return value:
9614 * none
9615 **/
9616static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9617{
9618 struct pci_dev *pdev = ioa_cfg->pdev;
9619
9620 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05009621 ipr_free_irqs(ioa_cfg);
9622 if (ioa_cfg->reset_work_q)
9623 destroy_workqueue(ioa_cfg->reset_work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009624 iounmap(ioa_cfg->hdw_dma_regs);
9625 pci_release_regions(pdev);
9626 ipr_free_mem(ioa_cfg);
9627 scsi_host_put(ioa_cfg->host);
9628 pci_disable_device(pdev);
9629 LEAVE;
9630}
9631
9632/**
9633 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9634 * @ioa_cfg: ioa config struct
9635 *
9636 * Return value:
9637 * 0 on success / -ENOMEM on allocation failure
9638 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009639static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009640{
9641 struct ipr_cmnd *ipr_cmd;
9642 struct ipr_ioarcb *ioarcb;
9643 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009644 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009645
Anton Blanchardd73341b2014-10-30 17:27:08 -05009646 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009647 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009648
9649 if (!ioa_cfg->ipr_cmd_pool)
9650 return -ENOMEM;
9651
Brian King89aad422012-03-14 21:20:10 -05009652 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9653 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9654
9655 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9656 ipr_free_cmd_blks(ioa_cfg);
9657 return -ENOMEM;
9658 }
9659
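	/*
	 * Partition the command blocks across the HRRQs: with more than one
	 * HRRQ, queue 0 reserves IPR_NUM_INTERNAL_CMD_BLKS blocks (the
	 * internal command blocks) and the remaining base command blocks are
	 * divided evenly across the other queues. Any remainder from the
	 * division is folded into the last HRRQ further below.
	 * (Illustrative example, numbers hypothetical: with hrrq_num == 4 and
	 * 100 base blocks, queues 1-3 get 33 each and the last queue absorbs
	 * the one leftover block.)
	 */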
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009660 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9661 if (ioa_cfg->hrrq_num > 1) {
9662 if (i == 0) {
9663 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9664 ioa_cfg->hrrq[i].min_cmd_id = 0;
Colin Ian Kingb82378e2017-12-01 13:33:27 +00009665 ioa_cfg->hrrq[i].max_cmd_id =
9666 (entries_each_hrrq - 1);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009667 } else {
9668 entries_each_hrrq =
9669 IPR_NUM_BASE_CMD_BLKS/
9670 (ioa_cfg->hrrq_num - 1);
9671 ioa_cfg->hrrq[i].min_cmd_id =
9672 IPR_NUM_INTERNAL_CMD_BLKS +
9673 (i - 1) * entries_each_hrrq;
9674 ioa_cfg->hrrq[i].max_cmd_id =
9675 (IPR_NUM_INTERNAL_CMD_BLKS +
9676 i * entries_each_hrrq - 1);
9677 }
9678 } else {
9679 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9680 ioa_cfg->hrrq[i].min_cmd_id = 0;
9681 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9682 }
9683 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9684 }
9685
9686 BUG_ON(ioa_cfg->hrrq_num == 0);
9687
9688 i = IPR_NUM_CMD_BLKS -
9689 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9690 if (i > 0) {
9691 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9692 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9693 }
9694
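	/*
	 * Carve the individual command blocks out of the DMA pool and
	 * pre-compute, per block, the bus addresses the adapter needs: the
	 * IOARCB's own PCI address plus the IOADL, IOASA and sense buffer
	 * areas, which sit at fixed offsets inside struct ipr_cmnd.
	 */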
Linus Torvalds1da177e2005-04-16 15:20:36 -07009695 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Souptick Joarder8b1bb6d2018-03-08 18:41:57 +05309696 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9697 GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009698
9699 if (!ipr_cmd) {
9700 ipr_free_cmd_blks(ioa_cfg);
9701 return -ENOMEM;
9702 }
9703
Linus Torvalds1da177e2005-04-16 15:20:36 -07009704 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9705 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9706
9707 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08009708 ipr_cmd->dma_addr = dma_addr;
9709 if (ioa_cfg->sis64)
9710 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9711 else
9712 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9713
Linus Torvalds1da177e2005-04-16 15:20:36 -07009714 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08009715 if (ioa_cfg->sis64) {
9716 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9717 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9718 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009719 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009720 } else {
9721 ioarcb->write_ioadl_addr =
9722 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9723 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9724 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009725 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009726 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009727 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9728 ipr_cmd->cmd_index = i;
9729 ipr_cmd->ioa_cfg = ioa_cfg;
9730 ipr_cmd->sense_buffer_dma = dma_addr +
9731 offsetof(struct ipr_cmnd, sense_buffer);
9732
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009733 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9734 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9735 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9736 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9737 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009738 }
9739
9740 return 0;
9741}
9742
9743/**
9744 * ipr_alloc_mem - Allocate memory for an adapter
9745 * @ioa_cfg: ioa config struct
9746 *
9747 * Return value:
9748 * 0 on success / non-zero for error
9749 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009750static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009751{
9752 struct pci_dev *pdev = ioa_cfg->pdev;
9753 int i, rc = -ENOMEM;
9754
9755 ENTER;
Kees Cook6396bb22018-06-12 14:03:40 -07009756 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9757 sizeof(struct ipr_resource_entry),
9758 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009759
9760 if (!ioa_cfg->res_entries)
9761 goto out;
9762
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009763 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009764 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009765 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9766 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009767
Anton Blanchardd73341b2014-10-30 17:27:08 -05009768 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9769 sizeof(struct ipr_misc_cbs),
9770 &ioa_cfg->vpd_cbs_dma,
9771 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009772
9773 if (!ioa_cfg->vpd_cbs)
9774 goto out_free_res_entries;
9775
9776 if (ipr_alloc_cmd_blks(ioa_cfg))
9777 goto out_free_vpd_cbs;
9778
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009779 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009780 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009781 sizeof(u32) * ioa_cfg->hrrq[i].size,
Anton Blanchardd73341b2014-10-30 17:27:08 -05009782 &ioa_cfg->hrrq[i].host_rrq_dma,
9783 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009784
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009785 if (!ioa_cfg->hrrq[i].host_rrq) {
			/* unwind every host_rrq allocated so far, including index 0 */
			while (--i >= 0)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009787 dma_free_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009788 sizeof(u32) * ioa_cfg->hrrq[i].size,
9789 ioa_cfg->hrrq[i].host_rrq,
9790 ioa_cfg->hrrq[i].host_rrq_dma);
9791 goto out_ipr_free_cmd_blocks;
9792 }
9793 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9794 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009795
Anton Blanchardd73341b2014-10-30 17:27:08 -05009796 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9797 ioa_cfg->cfg_table_size,
9798 &ioa_cfg->cfg_table_dma,
9799 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009800
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009801 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009802 goto out_free_host_rrq;
9803
Brian Kingafc3f832016-08-24 12:56:51 -05009804 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009805 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9806 sizeof(struct ipr_hostrcb),
9807 &ioa_cfg->hostrcb_dma[i],
9808 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009809
9810 if (!ioa_cfg->hostrcb[i])
9811 goto out_free_hostrcb_dma;
9812
9813 ioa_cfg->hostrcb[i]->hostrcb_dma =
9814 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009815 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009816 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9817 }
9818
Kees Cook6396bb22018-06-12 14:03:40 -07009819 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9820 sizeof(struct ipr_trace_entry),
9821 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009822
9823 if (!ioa_cfg->trace)
9824 goto out_free_hostrcb_dma;
9825
Linus Torvalds1da177e2005-04-16 15:20:36 -07009826 rc = 0;
9827out:
9828 LEAVE;
9829 return rc;
9830
9831out_free_hostrcb_dma:
9832 while (i-- > 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009833 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9834 ioa_cfg->hostrcb[i],
9835 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009836 }
Anton Blanchardd73341b2014-10-30 17:27:08 -05009837 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9838 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009839out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009840 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009841 dma_free_coherent(&pdev->dev,
9842 sizeof(u32) * ioa_cfg->hrrq[i].size,
9843 ioa_cfg->hrrq[i].host_rrq,
9844 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009845 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009846out_ipr_free_cmd_blocks:
9847 ipr_free_cmd_blks(ioa_cfg);
9848out_free_vpd_cbs:
Anton Blanchardd73341b2014-10-30 17:27:08 -05009849 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9850 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009851out_free_res_entries:
9852 kfree(ioa_cfg->res_entries);
9853 goto out;
9854}
9855
9856/**
9857 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9858 * @ioa_cfg: ioa config struct
9859 *
9860 * Return value:
9861 * none
9862 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009863static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009864{
9865 int i;
9866
9867 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9868 ioa_cfg->bus_attr[i].bus = i;
9869 ioa_cfg->bus_attr[i].qas_enabled = 0;
9870 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9871 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9872 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9873 else
9874 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9875 }
9876}
9877
9878/**
Brian King6270e592014-01-21 12:16:41 -06009879 * ipr_init_regs - Initialize IOA registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07009880 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009881 *
9882 * Return value:
Brian King6270e592014-01-21 12:16:41 -06009883 * none
Linus Torvalds1da177e2005-04-16 15:20:36 -07009884 **/
Brian King6270e592014-01-21 12:16:41 -06009885static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009886{
9887 const struct ipr_interrupt_offsets *p;
9888 struct ipr_interrupts *t;
9889 void __iomem *base;
9890
Linus Torvalds1da177e2005-04-16 15:20:36 -07009891 p = &ioa_cfg->chip_cfg->regs;
9892 t = &ioa_cfg->regs;
9893 base = ioa_cfg->hdw_dma_regs;
9894
9895 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9896 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009897 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009898 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009899 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009900 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009901 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009902 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009903 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009904 t->ioarrin_reg = base + p->ioarrin_reg;
9905 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009906 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009907 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009908 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009909 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009910 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009911
9912 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009913 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009914 t->dump_addr_reg = base + p->dump_addr_reg;
9915 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009916 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009918}
9919
9920/**
Brian King6270e592014-01-21 12:16:41 -06009921 * ipr_init_ioa_cfg - Initialize IOA config struct
9922 * @ioa_cfg: ioa config struct
9923 * @host: scsi host struct
9924 * @pdev: PCI dev struct
9925 *
9926 * Return value:
9927 * none
9928 **/
9929static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9930 struct Scsi_Host *host, struct pci_dev *pdev)
9931{
9932 int i;
9933
9934 ioa_cfg->host = host;
9935 ioa_cfg->pdev = pdev;
9936 ioa_cfg->log_level = ipr_log_level;
9937 ioa_cfg->doorbell = IPR_DOORBELL;
9938 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9939 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9940 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9941 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9942 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9943 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9944
9945 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9946 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
Brian Kingafc3f832016-08-24 12:56:51 -05009947 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
Brian King6270e592014-01-21 12:16:41 -06009948 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9949 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9950 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
Wen Xiong318ddb32018-09-20 19:32:12 -05009951 INIT_WORK(&ioa_cfg->scsi_add_work_q, ipr_add_remove_thread);
Brian King6270e592014-01-21 12:16:41 -06009952 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9953 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9954 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9955 ioa_cfg->sdt_state = INACTIVE;
9956
9957 ipr_initialize_bus_attr(ioa_cfg);
9958 ioa_cfg->max_devs_supported = ipr_max_devs;
9959
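	/*
	 * The config table is sized as one header plus one entry per
	 * supported device; SIS-64 adapters use the larger 64-bit header and
	 * entry layouts and their own limits for targets, LUNs and devices.
	 */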
9960 if (ioa_cfg->sis64) {
9961 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9962 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9963 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9964 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9965 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9966 + ((sizeof(struct ipr_config_table_entry64)
9967 * ioa_cfg->max_devs_supported)));
9968 } else {
9969 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9970 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9971 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9972 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9973 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9974 + ((sizeof(struct ipr_config_table_entry)
9975 * ioa_cfg->max_devs_supported)));
9976 }
9977
Brian Kingf688f962014-12-02 12:47:37 -06009978 host->max_channel = IPR_VSET_BUS;
Brian King6270e592014-01-21 12:16:41 -06009979 host->unique_id = host->host_no;
9980 host->max_cmd_len = IPR_MAX_CDB_LEN;
9981 host->can_queue = ioa_cfg->max_cmds;
9982 pci_set_drvdata(pdev, ioa_cfg);
9983
9984 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9985 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9986 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9987 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9988 if (i == 0)
9989 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9990 else
9991 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9992 }
9993}
9994
9995/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009996 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009997 * @dev_id: PCI device id struct
9998 *
9999 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010000 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -070010001 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010002static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010003ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010004{
10005 int i;
10006
Linus Torvalds1da177e2005-04-16 15:20:36 -070010007 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
10008 if (ipr_chip[i].vendor == dev_id->vendor &&
10009 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010010 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -070010011 return NULL;
10012}
10013
Brian King6270e592014-01-21 12:16:41 -060010014/**
10015 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
10016 * during probe time
10017 * @ioa_cfg: ioa config struct
10018 *
10019 * Return value:
10020 * None
10021 **/
10022static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
10023{
10024 struct pci_dev *pdev = ioa_cfg->pdev;
10025
10026 if (pci_channel_offline(pdev)) {
10027 wait_event_timeout(ioa_cfg->eeh_wait_q,
10028 !pci_channel_offline(pdev),
10029 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
10030 pci_restore_state(pdev);
10031 }
10032}
10033
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010034static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
10035{
10036 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
10037
10038 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10039 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10040 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10041 ioa_cfg->vectors_info[vec_idx].
10042 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10043 }
10044}
10045
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010046static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10047 struct pci_dev *pdev)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010048{
10049 int i, rc;
10050
10051 for (i = 1; i < ioa_cfg->nvectors; i++) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010052 rc = request_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010053 ipr_isr_mhrrq,
10054 0,
10055 ioa_cfg->vectors_info[i].desc,
10056 &ioa_cfg->hrrq[i]);
10057 if (rc) {
10058 while (--i >= 0)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010059 free_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010060 &ioa_cfg->hrrq[i]);
10061 return rc;
10062 }
10063 }
10064 return 0;
10065}
10066
Linus Torvalds1da177e2005-04-16 15:20:36 -070010067/**
 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
 * Description: Simply sets the msi_received flag to 1, indicating that
 * Message Signaled Interrupts are supported.
 *
 * Return value:
 * 	IRQ_HANDLED
 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010077static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010078{
10079 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10080 unsigned long lock_flags = 0;
10081 irqreturn_t rc = IRQ_HANDLED;
10082
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010083 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010084 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10085
10086 ioa_cfg->msi_received = 1;
10087 wake_up(&ioa_cfg->msi_wait_q);
10088
10089 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10090 return rc;
10091}
10092
10093/**
 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
 * Description: This routine sets up and initiates a test interrupt to determine
 * if the interrupt is received via the ipr_test_intr() service routine.
 * If the test fails, the driver will fall back to LSI.
10100 *
10101 * Return value:
10102 * 0 on success / non-zero on failure
10103 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010104static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010105{
10106 int rc;
10107 volatile u32 int_reg;
10108 unsigned long lock_flags = 0;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010109 int irq = pci_irq_vector(pdev, 0);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010110
10111 ENTER;
10112
10113 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10114 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10115 ioa_cfg->msi_received = 0;
10116 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -080010117 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010118 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10119 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10120
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010121 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010122 if (rc) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010123 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010124 return rc;
10125 } else if (ipr_debug)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010126 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010127
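	/* Fire the test interrupt; ipr_test_intr() sets msi_received when it is delivered. */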
Wayne Boyer214777b2010-02-19 13:24:26 -080010128 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010129 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10130 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010131 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010132 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10133
Wayne Boyer95fecd92009-06-16 15:13:28 -070010134 if (!ioa_cfg->msi_received) {
10135 /* MSI test failed */
10136 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10137 rc = -EOPNOTSUPP;
10138 } else if (ipr_debug)
10139 dev_info(&pdev->dev, "MSI test succeeded.\n");
10140
10141 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10142
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010143 free_irq(irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010144
10145 LEAVE;
10146
10147 return rc;
10148}
10149
/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -070010151 * @pdev: PCI device struct
10152 * @dev_id: PCI device id struct
10153 *
10154 * Return value:
10155 * 0 on success / non-zero on failure
10156 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010157static int ipr_probe_ioa(struct pci_dev *pdev,
10158 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010159{
10160 struct ipr_ioa_cfg *ioa_cfg;
10161 struct Scsi_Host *host;
10162 unsigned long ipr_regs_pci;
10163 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -070010164 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -050010165 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010166 unsigned long lock_flags, driver_lock_flags;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010167 unsigned int irq_flag;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010168
10169 ENTER;
10170
Linus Torvalds1da177e2005-04-16 15:20:36 -070010171 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010172 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10173
10174 if (!host) {
10175 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10176 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010177 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010178 }
10179
10180 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10181 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d12012-07-09 21:06:08 -070010182 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010183
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010184 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010185
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010186 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010187 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10188 dev_id->vendor, dev_id->device);
10189 goto out_scsi_host_put;
10190 }
10191
Wayne Boyera32c0552010-02-19 13:23:36 -080010192 /* set SIS 32 or SIS 64 */
10193 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010194 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -050010195 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -050010196 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010197
Brian King5469cb52007-03-29 12:42:40 -050010198 if (ipr_transop_timeout)
10199 ioa_cfg->transop_timeout = ipr_transop_timeout;
10200 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10201 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10202 else
10203 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10204
Auke Kok44c10132007-06-08 15:46:36 -070010205 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -050010206
Brian King6270e592014-01-21 12:16:41 -060010207 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10208
Linus Torvalds1da177e2005-04-16 15:20:36 -070010209 ipr_regs_pci = pci_resource_start(pdev, 0);
10210
10211 rc = pci_request_regions(pdev, IPR_NAME);
10212 if (rc < 0) {
10213 dev_err(&pdev->dev,
10214 "Couldn't register memory range of registers\n");
10215 goto out_scsi_host_put;
10216 }
10217
Brian King6270e592014-01-21 12:16:41 -060010218 rc = pci_enable_device(pdev);
10219
10220 if (rc || pci_channel_offline(pdev)) {
10221 if (pci_channel_offline(pdev)) {
10222 ipr_wait_for_pci_err_recovery(ioa_cfg);
10223 rc = pci_enable_device(pdev);
10224 }
10225
10226 if (rc) {
10227 dev_err(&pdev->dev, "Cannot enable adapter\n");
10228 ipr_wait_for_pci_err_recovery(ioa_cfg);
10229 goto out_release_regions;
10230 }
10231 }
10232
Arjan van de Ven25729a72008-09-28 16:18:02 -070010233 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010234
10235 if (!ipr_regs) {
10236 dev_err(&pdev->dev,
10237 "Couldn't map memory range of registers\n");
10238 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010239 goto out_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010240 }
10241
10242 ioa_cfg->hdw_dma_regs = ipr_regs;
10243 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10244 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10245
Brian King6270e592014-01-21 12:16:41 -060010246 ipr_init_regs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010247
Wayne Boyera32c0552010-02-19 13:23:36 -080010248 if (ioa_cfg->sis64) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010249 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Wayne Boyera32c0552010-02-19 13:23:36 -080010250 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010251 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10252 rc = dma_set_mask_and_coherent(&pdev->dev,
10253 DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010254 }
Wayne Boyera32c0552010-02-19 13:23:36 -080010255 } else
Anton Blanchard869404c2014-10-30 17:27:09 -050010256 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010257
Linus Torvalds1da177e2005-04-16 15:20:36 -070010258 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010259 dev_err(&pdev->dev, "Failed to set DMA mask\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070010260 goto cleanup_nomem;
10261 }
10262
10263 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10264 ioa_cfg->chip_cfg->cache_line_size);
10265
10266 if (rc != PCIBIOS_SUCCESSFUL) {
10267 dev_err(&pdev->dev, "Write of cache line size failed\n");
Brian King6270e592014-01-21 12:16:41 -060010268 ipr_wait_for_pci_err_recovery(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010269 rc = -EIO;
10270 goto cleanup_nomem;
10271 }
10272
Brian King6270e592014-01-21 12:16:41 -060010273 /* Issue MMIO read to ensure card is not in EEH */
10274 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10275 ipr_wait_for_pci_err_recovery(ioa_cfg);
10276
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010277 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10278 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10279 IPR_MAX_MSIX_VECTORS);
10280 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10281 }
10282
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010283 irq_flag = PCI_IRQ_LEGACY;
10284 if (ioa_cfg->ipr_chip->has_msi)
10285 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10286 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10287 if (rc < 0) {
10288 ipr_wait_for_pci_err_recovery(ioa_cfg);
10289 goto cleanup_nomem;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010290 }
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010291 ioa_cfg->nvectors = rc;
10292
10293 if (!pdev->msi_enabled && !pdev->msix_enabled)
10294 ioa_cfg->clear_isr = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010295
Brian King6270e592014-01-21 12:16:41 -060010296 pci_set_master(pdev);
10297
10298 if (pci_channel_offline(pdev)) {
10299 ipr_wait_for_pci_err_recovery(ioa_cfg);
10300 pci_set_master(pdev);
10301 if (pci_channel_offline(pdev)) {
10302 rc = -EIO;
10303 goto out_msi_disable;
10304 }
10305 }
10306
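	/*
	 * Verify that MSI/MSI-X delivery actually works before relying on it;
	 * if the test interrupt never arrives, drop back to a single legacy
	 * INTx vector.
	 */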
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010307 if (pdev->msi_enabled || pdev->msix_enabled) {
Wayne Boyer95fecd92009-06-16 15:13:28 -070010308 rc = ipr_test_msi(ioa_cfg, pdev);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010309 switch (rc) {
10310 case 0:
10311 dev_info(&pdev->dev,
10312 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10313 pdev->msix_enabled ? "-X" : "");
10314 break;
10315 case -EOPNOTSUPP:
Brian King6270e592014-01-21 12:16:41 -060010316 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010317 pci_free_irq_vectors(pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010318
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010319 ioa_cfg->nvectors = 1;
Benjamin Herrenschmidt9dadfb92016-11-30 15:28:55 -060010320 ioa_cfg->clear_isr = 1;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010321 break;
10322 default:
Wayne Boyer95fecd92009-06-16 15:13:28 -070010323 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010324 }
10325 }
10326
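	/* Use one HRRQ per vector, capped by the online CPU count and IPR_MAX_HRRQ_NUM. */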
10327 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10328 (unsigned int)num_online_cpus(),
10329 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010330
Linus Torvalds1da177e2005-04-16 15:20:36 -070010331 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010332 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010333
10334 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010335 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010336
10337 rc = ipr_alloc_mem(ioa_cfg);
10338 if (rc < 0) {
10339 dev_err(&pdev->dev,
10340 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -070010341 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010342 }
10343
Brian King6270e592014-01-21 12:16:41 -060010344 /* Save away PCI config space for use following IOA reset */
10345 rc = pci_save_state(pdev);
10346
10347 if (rc != PCIBIOS_SUCCESSFUL) {
10348 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10349 rc = -EIO;
10350 goto cleanup_nolog;
10351 }
10352
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010353 /*
10354 * If HRRQ updated interrupt is not masked, or reset alert is set,
10355 * the card is in an unknown state and needs a hard reset
10356 */
Wayne Boyer214777b2010-02-19 13:24:26 -080010357 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10358 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10359 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010360 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10361 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +100010362 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -050010363 ioa_cfg->needs_hard_reset = 1;
10364 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10365 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010366
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010367 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010368 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010369 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010370
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010371 if (pdev->msi_enabled || pdev->msix_enabled) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010372 name_msi_vectors(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010373 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010374 ioa_cfg->vectors_info[0].desc,
10375 &ioa_cfg->hrrq[0]);
10376 if (!rc)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010377 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010378 } else {
10379 rc = request_irq(pdev->irq, ipr_isr,
10380 IRQF_SHARED,
10381 IPR_NAME, &ioa_cfg->hrrq[0]);
10382 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010383 if (rc) {
10384 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10385 pdev->irq, rc);
10386 goto cleanup_nolog;
10387 }
10388
Brian King463fc692007-05-07 17:09:05 -050010389 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10390 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10391 ioa_cfg->needs_warm_reset = 1;
10392 ioa_cfg->reset = ipr_reset_slot_reset;
Brian King2796ca52015-03-26 11:23:52 -050010393
10394 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10395 WQ_MEM_RECLAIM, host->host_no);
10396
10397 if (!ioa_cfg->reset_work_q) {
10398 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
Wei Yongjunc8e18ac2016-07-29 16:00:45 +000010399 rc = -ENOMEM;
Brian King2796ca52015-03-26 11:23:52 -050010400 goto out_free_irq;
10401 }
Brian King463fc692007-05-07 17:09:05 -050010402 } else
10403 ioa_cfg->reset = ipr_reset_start_bist;
10404
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010405 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010406 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010407 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010408
10409 LEAVE;
10410out:
10411 return rc;
10412
Brian King2796ca52015-03-26 11:23:52 -050010413out_free_irq:
10414 ipr_free_irqs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010415cleanup_nolog:
10416 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010417out_msi_disable:
Brian King6270e592014-01-21 12:16:41 -060010418 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010419 pci_free_irq_vectors(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -070010420cleanup_nomem:
10421 iounmap(ipr_regs);
Brian King6270e592014-01-21 12:16:41 -060010422out_disable:
10423 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010424out_release_regions:
10425 pci_release_regions(pdev);
10426out_scsi_host_put:
10427 scsi_host_put(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010428 goto out;
10429}
10430
10431/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010432 * ipr_initiate_ioa_bringdown - Bring down an adapter
10433 * @ioa_cfg: ioa config struct
10434 * @shutdown_type: shutdown type
10435 *
10436 * Description: This function will initiate bringing down the adapter.
10437 * This consists of issuing an IOA shutdown to the adapter
10438 * to flush the cache, and running BIST.
10439 * If the caller needs to wait on the completion of the reset,
10440 * the caller must sleep on the reset_wait_q.
10441 *
10442 * Return value:
10443 * none
10444 **/
10445static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10446 enum ipr_shutdown_type shutdown_type)
10447{
10448 ENTER;
10449 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10450 ioa_cfg->sdt_state = ABORT_DUMP;
10451 ioa_cfg->reset_retries = 0;
10452 ioa_cfg->in_ioa_bringdown = 1;
10453 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10454 LEAVE;
10455}
10456
10457/**
10458 * __ipr_remove - Remove a single adapter
10459 * @pdev: pci device struct
10460 *
10461 * Adapter hot plug remove entry point.
10462 *
10463 * Return value:
10464 * none
10465 **/
10466static void __ipr_remove(struct pci_dev *pdev)
10467{
10468 unsigned long host_lock_flags = 0;
10469 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Brian Kingbfae7822013-01-30 23:45:08 -060010470 int i;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010471 unsigned long driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010472 ENTER;
10473
10474 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010475 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010476 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10477 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10478 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10479 }
10480
Brian Kingbfae7822013-01-30 23:45:08 -060010481 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10482 spin_lock(&ioa_cfg->hrrq[i]._lock);
10483 ioa_cfg->hrrq[i].removing_ioa = 1;
10484 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10485 }
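	/* Make the removing_ioa flags visible to the interrupt handlers before starting the bringdown. */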
10486 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -070010487 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10488
10489 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10490 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -070010491 flush_work(&ioa_cfg->work_q);
Brian King2796ca52015-03-26 11:23:52 -050010492 if (ioa_cfg->reset_work_q)
10493 flush_workqueue(ioa_cfg->reset_work_q);
wenxiong@linux.vnet.ibm.com9077a942013-03-14 13:52:24 -050010494 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010495 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10496
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010497 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010498 list_del(&ioa_cfg->queue);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010499 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010500
10501 if (ioa_cfg->sdt_state == ABORT_DUMP)
10502 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10503 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10504
10505 ipr_free_all_resources(ioa_cfg);
10506
10507 LEAVE;
10508}
10509
10510/**
10511 * ipr_remove - IOA hot plug remove entry point
10512 * @pdev: pci device struct
10513 *
10514 * Adapter hot plug remove entry point.
10515 *
10516 * Return value:
10517 * none
10518 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010519static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010520{
10521 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10522
10523 ENTER;
10524
Tony Jonesee959b02008-02-22 00:13:36 +010010525 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010526 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +010010527 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010528 &ipr_dump_attr);
Brian Kingafc3f832016-08-24 12:56:51 -050010529 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10530 &ipr_ioa_async_err_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010531 scsi_remove_host(ioa_cfg->host);
10532
10533 __ipr_remove(pdev);
10534
10535 LEAVE;
10536}
10537
10538/**
10539 * ipr_probe - Adapter hot plug add entry point
10540 *
 * @pdev:	PCI device struct
 * @dev_id:	PCI device ID struct
 *
10542 * 0 on success / non-zero on failure
10543 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010544static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010545{
10546 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingb195d5e2016-07-15 14:48:03 -050010547 unsigned long flags;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010548 int rc, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010549
10550 rc = ipr_probe_ioa(pdev, dev_id);
10551
10552 if (rc)
10553 return rc;
10554
10555 ioa_cfg = pci_get_drvdata(pdev);
10556 rc = ipr_probe_ioa_part2(ioa_cfg);
10557
10558 if (rc) {
10559 __ipr_remove(pdev);
10560 return rc;
10561 }
10562
10563 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10564
10565 if (rc) {
10566 __ipr_remove(pdev);
10567 return rc;
10568 }
10569
Tony Jonesee959b02008-02-22 00:13:36 +010010570 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010571 &ipr_trace_attr);
10572
10573 if (rc) {
10574 scsi_remove_host(ioa_cfg->host);
10575 __ipr_remove(pdev);
10576 return rc;
10577 }
10578
Brian Kingafc3f832016-08-24 12:56:51 -050010579 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10580 &ipr_ioa_async_err_log);
10581
10582 if (rc) {
10583 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10584 &ipr_dump_attr);
10585 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10586 &ipr_trace_attr);
10587 scsi_remove_host(ioa_cfg->host);
10588 __ipr_remove(pdev);
10589 return rc;
10590 }
10591
Tony Jonesee959b02008-02-22 00:13:36 +010010592 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010593 &ipr_dump_attr);
10594
10595 if (rc) {
Brian Kingafc3f832016-08-24 12:56:51 -050010596 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10597 &ipr_ioa_async_err_log);
Tony Jonesee959b02008-02-22 00:13:36 +010010598 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010599 &ipr_trace_attr);
10600 scsi_remove_host(ioa_cfg->host);
10601 __ipr_remove(pdev);
10602 return rc;
10603 }
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010604 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10605 ioa_cfg->scan_enabled = 1;
10606 schedule_work(&ioa_cfg->work_q);
10607 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010608
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010609 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10610
Jens Axboe89f8b332014-03-13 09:38:42 -060010611 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010612 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010613 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010614 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010615 }
10616 }
10617
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010618 scsi_scan_host(ioa_cfg->host);
10619
Linus Torvalds1da177e2005-04-16 15:20:36 -070010620 return 0;
10621}
10622
10623/**
10624 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010625 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -070010626 *
10627 * This function is invoked upon system shutdown/reboot. It will issue
10628 * an adapter shutdown to the adapter to flush the write cache.
10629 *
10630 * Return value:
10631 * none
10632 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010633static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010634{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010635 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010636 unsigned long lock_flags = 0;
Brian King4fdd7c72015-03-26 11:23:50 -050010637 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010638 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010639
10640 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Jens Axboe89f8b332014-03-13 09:38:42 -060010641 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010642 ioa_cfg->iopoll_weight = 0;
10643 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010644 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010645 }
10646
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010647 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010648 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10649 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10650 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10651 }
10652
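	/*
	 * With fast reboot enabled on SIS-64 adapters, quiesce the IOA
	 * instead of performing the normal, cache-flushing shutdown.
	 */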
Brian King4fdd7c72015-03-26 11:23:50 -050010653 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10654 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10655
10656 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010657 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10658 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Brian King4fdd7c72015-03-26 11:23:50 -050010659 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
Brian King2796ca52015-03-26 11:23:52 -050010660 ipr_free_irqs(ioa_cfg);
Brian King4fdd7c72015-03-26 11:23:50 -050010661 pci_disable_device(ioa_cfg->pdev);
10662 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010663}
10664
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010665static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010666 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010667 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010668 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010669 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010670 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010671 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010672 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010674 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010676 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010677 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010678 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -050010681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10682 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010683 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010684 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010685 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010686 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10687 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010688 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10690 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010691 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010692 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010693 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010694 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10695 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -060010696 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010697 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10698 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010699 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -050010700 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10701 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -050010702 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -050010703 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10704 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -070010705 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10706 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
		IPR_USE_LONG_TRANSOP_TIMEOUT },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
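/*
 * Each ipr_pci_table entry follows the struct pci_device_id layout:
 * { vendor, device, subvendor, subdevice, class, class_mask, driver_data }.
 * The driver_data word carries per-adapter flags such as
 * IPR_USE_LONG_TRANSOP_TIMEOUT and IPR_USE_PCI_WARM_RESET, which the probe
 * path can consult for the matched entry.  MODULE_DEVICE_TABLE(pci, ...)
 * exports the table as module aliases so udev/modprobe can autoload the
 * driver when a matching adapter is discovered.
 */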

static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
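/*
 * PCI error recovery hooks (EEH on Power systems, AER more generally): the
 * PCI core first reports a detected error, may re-enable MMIO so the driver
 * can collect diagnostics, and finally asks the driver to bring the adapter
 * back up after a slot reset.
 */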

static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};
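/*
 * Binding this pci_driver to ipr_pci_table gives hot-plug aware probe/remove
 * handling for every supported adapter, while the .shutdown hook lets the
 * driver quiesce each adapter when the system goes down.
 */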

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		Notifier block
 * @event:	Notifier event
 * @buf:	Notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
		/* Skip adapters not accepting commands, or SIS-64 adapters on a fast reboot */
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}
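/*
 * ipr_halt() is wired up as a reboot notifier below: on restart, halt, or
 * power-off it sends a shutdown-prepare to every adapter that is still
 * accepting commands, so each IOA can perform an orderly shutdown-prepare
 * before the platform goes down.  SIS-64 adapters are skipped on restart
 * when ipr_fast_reboot is set.
 */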

static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};
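/*
 * Positional initialization of struct notifier_block; the equivalent
 * designated-initializer form would be:
 *
 *	static struct notifier_block ipr_notifier = {
 *		.notifier_call	= ipr_halt,
 *		.next		= NULL,
 *		.priority	= 0,
 *	};
 */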

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	int rc;

	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	rc = pci_register_driver(&ipr_driver);

	/* Don't leave a stale reboot notifier behind if driver registration fails */
	if (rc)
		unregister_reboot_notifier(&ipr_notifier);

	return rc;
}
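/*
 * The reboot notifier is registered before the PCI driver, so any adapter
 * probed during pci_register_driver() is already covered by the
 * shutdown-prepare path; the error leg above drops the notifier again if
 * driver registration fails.
 */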

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);