/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

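/*
 * Maps each supported PCI vendor/device ID to its SIS interface type
 * (IPR_SIS32/IPR_SIS64) and to its register layout in ipr_chip_cfg above.
 */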
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

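	/* Atomically claim the next slot in the circular trace buffer;
	 * the mask wraps the index so concurrent callers stay in range.
	 */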
	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd: ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

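	/* Preserve the command's HRRQ assignment across the packet reset */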
	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd: ipr command struct
 * @fast_done: fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq: hrr queue
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg: ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg: ioa config struct
 * @clr_ints: interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
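	/* Flush the posted MMIO writes above by reading back an interrupt register */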
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
776}
777
778/**
779 * ipr_save_pcix_cmd_reg - Save PCI-X command register
780 * @ioa_cfg: ioa config struct
781 *
782 * Return value:
783 * 0 on success / -EIO on failure
784 **/
785static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
786{
787 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
788
Brian King7dce0e12007-01-23 11:25:30 -0600789 if (pcix_cmd_reg == 0)
790 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700791
792 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
793 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
794 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
795 return -EIO;
796 }
797
798 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
799 return 0;
800}
801
802/**
803 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
804 * @ioa_cfg: ioa config struct
805 *
806 * Return value:
807 * 0 on success / -EIO on failure
808 **/
809static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
810{
811 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
812
813 if (pcix_cmd_reg) {
814 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
815 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
816 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
817 return -EIO;
818 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 }
820
821 return 0;
822}
823
824/**
Brian Kingf646f322017-03-15 16:58:39 -0500825 * __ipr_sata_eh_done - done function for aborted SATA commands
826 * @ipr_cmd: ipr command struct
827 *
828 * This function is invoked for ops generated to SATA
829 * devices which are being aborted.
830 *
831 * Return value:
832 * none
833 **/
834static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
835{
836 struct ata_queued_cmd *qc = ipr_cmd->qc;
837 struct ipr_sata_port *sata_port = qc->ap->private_data;
838
839 qc->err_mask |= AC_ERR_OTHER;
840 sata_port->ioasa.status |= ATA_BUSY;
841 ata_qc_complete(qc);
842 if (ipr_cmd->eh_comp)
843 complete(ipr_cmd->eh_comp);
844 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
845}
846
847/**
Brian King35a39692006-09-25 12:39:20 -0500848 * ipr_sata_eh_done - done function for aborted SATA commands
849 * @ipr_cmd: ipr command struct
850 *
851 * This function is invoked for ops generated to SATA
852 * devices which are being aborted.
853 *
854 * Return value:
855 * none
856 **/
857static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
858{
Brian Kingf646f322017-03-15 16:58:39 -0500859 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
860 unsigned long hrrq_flags;
Brian King35a39692006-09-25 12:39:20 -0500861
Brian Kingf646f322017-03-15 16:58:39 -0500862 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
863 __ipr_sata_eh_done(ipr_cmd);
864 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
865}
866
867/**
868 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
869 * @ipr_cmd: ipr command struct
870 *
871 * This function is invoked by the interrupt handler for
872 * ops generated by the SCSI mid-layer which are being aborted.
873 *
874 * Return value:
875 * none
876 **/
877static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
878{
879 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
880
881 scsi_cmd->result |= (DID_ERROR << 16);
882
883 scsi_dma_unmap(ipr_cmd->scsi_cmd);
884 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -0500885 if (ipr_cmd->eh_comp)
886 complete(ipr_cmd->eh_comp);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600887 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King35a39692006-09-25 12:39:20 -0500888}
889
890/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 * ipr_scsi_eh_done - mid-layer done function for aborted ops
892 * @ipr_cmd: ipr command struct
893 *
894 * This function is invoked by the interrupt handler for
895 * ops generated by the SCSI mid-layer which are being aborted.
896 *
897 * Return value:
898 * none
899 **/
900static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
901{
Brian Kingf646f322017-03-15 16:58:39 -0500902 unsigned long hrrq_flags;
903 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904
Brian Kingf646f322017-03-15 16:58:39 -0500905 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
906 __ipr_scsi_eh_done(ipr_cmd);
907 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908}
909
910/**
911 * ipr_fail_all_ops - Fails all outstanding ops.
912 * @ioa_cfg: ioa config struct
913 *
914 * This function fails all outstanding ops.
915 *
916 * Return value:
917 * none
918 **/
919static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
920{
921 struct ipr_cmnd *ipr_cmd, *temp;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600922 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923
924 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600925 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -0600926 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600927 list_for_each_entry_safe(ipr_cmd,
928 temp, &hrrq->hrrq_pending_q, queue) {
929 list_del(&ipr_cmd->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600931 ipr_cmd->s.ioasa.hdr.ioasc =
932 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
933 ipr_cmd->s.ioasa.hdr.ilid =
934 cpu_to_be32(IPR_DRIVER_ILID);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600936 if (ipr_cmd->scsi_cmd)
Brian Kingf646f322017-03-15 16:58:39 -0500937 ipr_cmd->done = __ipr_scsi_eh_done;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600938 else if (ipr_cmd->qc)
Brian Kingf646f322017-03-15 16:58:39 -0500939 ipr_cmd->done = __ipr_sata_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600941 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
942 IPR_IOASC_IOA_WAS_RESET);
943 del_timer(&ipr_cmd->timer);
944 ipr_cmd->done(ipr_cmd);
945 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -0600946 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700947 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948 LEAVE;
949}
950
951/**
Wayne Boyera32c0552010-02-19 13:23:36 -0800952 * ipr_send_command - Send driver initiated requests.
953 * @ipr_cmd: ipr command struct
954 *
955 * This function sends a command to the adapter using the correct write call.
956 * In the case of sis64, calculate the ioarcb size required. Then or in the
957 * appropriate bits.
958 *
959 * Return value:
960 * none
961 **/
962static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
963{
964 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
965 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
966
967 if (ioa_cfg->sis64) {
968 /* The default size is 256 bytes */
969 send_dma_addr |= 0x1;
970
971 /* If the number of ioadls * size of ioadl > 128 bytes,
972 then use a 512 byte ioarcb */
973 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
974 send_dma_addr |= 0x4;
975 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
976 } else
977 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
978}
979
980/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 * ipr_do_req - Send driver initiated requests.
982 * @ipr_cmd: ipr command struct
983 * @done: done function
984 * @timeout_func: timeout function
985 * @timeout: timeout value
986 *
987 * This function sends the specified command to the adapter with the
988 * timeout given. The done function is invoked on command completion.
989 *
990 * Return value:
991 * none
992 **/
993static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
994 void (*done) (struct ipr_cmnd *),
Kees Cook738c6ec2017-08-18 16:53:24 -0700995 void (*timeout_func) (struct timer_list *), u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600997 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998
999 ipr_cmd->done = done;
1000
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 ipr_cmd->timer.expires = jiffies + timeout;
Kees Cook841b86f2017-10-23 09:40:42 +02001002 ipr_cmd->timer.function = timeout_func;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003
1004 add_timer(&ipr_cmd->timer);
1005
1006 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
1007
Wayne Boyera32c0552010-02-19 13:23:36 -08001008 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001009}
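/*
 * A minimal usage sketch for ipr_do_req().  The timeout handler and
 * timeout value named below are illustrative assumptions, not taken
 * from this excerpt; the completion routine is the real
 * ipr_internal_cmd_done() defined just below:
 *
 *	ipr_do_req(ipr_cmd, ipr_internal_cmd_done,
 *		   ipr_timeout, IPR_INTERNAL_TIMEOUT);
 *
 * ipr_send_blocking_cmd() further down wraps exactly this pattern
 * around a completion and a sleep.
 */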
1010
1011/**
1012 * ipr_internal_cmd_done - Op done function for an internally generated op.
1013 * @ipr_cmd: ipr command struct
1014 *
1015 * This function is the op done function for an internally generated,
1016 * blocking op. It simply wakes the sleeping thread.
1017 *
1018 * Return value:
1019 * none
1020 **/
1021static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1022{
1023 if (ipr_cmd->sibling)
1024 ipr_cmd->sibling = NULL;
1025 else
1026 complete(&ipr_cmd->completion);
1027}
1028
1029/**
Wayne Boyera32c0552010-02-19 13:23:36 -08001030 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1031 * @ipr_cmd: ipr command struct
1032 * @dma_addr: dma address
1033 * @len: transfer length
1034 * @flags: ioadl flag value
1035 *
1036 * This function initializes an ioadl in the case where there is only a single
1037 * descriptor.
1038 *
1039 * Return value:
1040 * nothing
1041 **/
1042static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1043 u32 len, int flags)
1044{
1045 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1046 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1047
1048 ipr_cmd->dma_use_sg = 1;
1049
1050 if (ipr_cmd->ioa_cfg->sis64) {
1051 ioadl64->flags = cpu_to_be32(flags);
1052 ioadl64->data_len = cpu_to_be32(len);
1053 ioadl64->address = cpu_to_be64(dma_addr);
1054
1055 ipr_cmd->ioarcb.ioadl_len =
1056 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1057 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1058 } else {
1059 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1060 ioadl->address = cpu_to_be32(dma_addr);
1061
1062 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1063 ipr_cmd->ioarcb.read_ioadl_len =
1064 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1065 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1066 } else {
1067 ipr_cmd->ioarcb.ioadl_len =
1068 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1069 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1070 }
1071 }
1072}
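/*
 * Concrete single-descriptor example for ipr_init_ioadl(), taken from
 * ipr_send_hcam() below, which maps the whole hostrcb buffer with one
 * read descriptor:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 *
 * On sis64 this fills a single ipr_ioadl64_desc; on legacy adapters it
 * fills a single ipr_ioadl_desc with the length folded into
 * flags_and_data_len, using the read-specific IOARCB fields because the
 * flags indicate a read.
 */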
1073
1074/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1076 * @ipr_cmd: ipr command struct
1077 * @timeout_func: function to invoke if command times out
1078 * @timeout: timeout
1079 *
1080 * Return value:
1081 * none
1082 **/
1083static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
Kees Cook738c6ec2017-08-18 16:53:24 -07001084 void (*timeout_func) (struct timer_list *),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 u32 timeout)
1086{
1087 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1088
1089 init_completion(&ipr_cmd->completion);
1090 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1091
1092 spin_unlock_irq(ioa_cfg->host->host_lock);
1093 wait_for_completion(&ipr_cmd->completion);
1094 spin_lock_irq(ioa_cfg->host->host_lock);
1095}
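/*
 * Locking note for ipr_send_blocking_cmd(): it is entered with the host
 * lock held, queues the command while still holding it, then drops the
 * lock around wait_for_completion() so interrupt-driven completion can
 * run ipr_internal_cmd_done(), and reacquires the lock before returning.
 */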
1096
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001097static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1098{
Brian King3f1c0582015-07-14 11:41:33 -05001099 unsigned int hrrq;
1100
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001101 if (ioa_cfg->hrrq_num == 1)
Brian King3f1c0582015-07-14 11:41:33 -05001102 hrrq = 0;
1103 else {
1104 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1105 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1106 }
1107 return hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001108}
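/*
 * Worked example for ipr_get_hrrq_index(), assuming a hypothetical
 * hrrq_num of 4: HRRQ 0 (IPR_INIT_HRRQ, used for driver internal
 * commands elsewhere in this file) is never handed out here, and the
 * atomic counter maps successive callers to 1, 2, 3, 1, 2, 3, ...
 * because (hrrq % (4 - 1)) + 1 always lands in the range 1..3.  With a
 * single HRRQ configured, everything goes to index 0.
 */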
1109
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110/**
1111 * ipr_send_hcam - Send an HCAM to the adapter.
1112 * @ioa_cfg: ioa config struct
1113 * @type: HCAM type
1114 * @hostrcb: hostrcb struct
1115 *
1116 * This function will send a Host Controlled Async command to the adapter.
1117 * If HCAMs are currently not allowed to be issued to the adapter, it will
1118 * place the hostrcb on the free queue.
1119 *
1120 * Return value:
1121 * none
1122 **/
1123static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1124 struct ipr_hostrcb *hostrcb)
1125{
1126 struct ipr_cmnd *ipr_cmd;
1127 struct ipr_ioarcb *ioarcb;
1128
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001129 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001131 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1133
1134 ipr_cmd->u.hostrcb = hostrcb;
1135 ioarcb = &ipr_cmd->ioarcb;
1136
1137 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1138 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1139 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1140 ioarcb->cmd_pkt.cdb[1] = type;
1141 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1142 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1143
Wayne Boyera32c0552010-02-19 13:23:36 -08001144 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1145 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1148 ipr_cmd->done = ipr_process_ccn;
1149 else
1150 ipr_cmd->done = ipr_process_error;
1151
1152 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1153
Wayne Boyera32c0552010-02-19 13:23:36 -08001154 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 } else {
1156 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1157 }
1158}
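/*
 * CDB layout built by ipr_send_hcam() above: byte 0 carries the
 * IPR_HOST_CONTROLLED_ASYNC opcode, byte 1 the HCAM type (configuration
 * change vs. error log), and bytes 7 and 8 the big-endian allocation
 * length of the hostrcb buffer, matching the single-descriptor
 * ipr_init_ioadl() mapping of sizeof(hostrcb->hcam).
 */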
1159
1160/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001161 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001163 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 *
1165 * Return value:
1166 * none
1167 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001168static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001170 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001171 case IPR_PROTO_SATA:
1172 case IPR_PROTO_SAS_STP:
1173 res->ata_class = ATA_DEV_ATA;
1174 break;
1175 case IPR_PROTO_SATA_ATAPI:
1176 case IPR_PROTO_SAS_STP_ATAPI:
1177 res->ata_class = ATA_DEV_ATAPI;
1178 break;
1179 default:
1180 res->ata_class = ATA_DEV_UNKNOWN;
1181 break;
1182 }
1183}
1184
1185/**
1186 * ipr_init_res_entry - Initialize a resource entry struct.
1187 * @res: resource entry struct
1188 * @cfgtew: config table entry wrapper struct
1189 *
1190 * Return value:
1191 * none
1192 **/
1193static void ipr_init_res_entry(struct ipr_resource_entry *res,
1194 struct ipr_config_table_entry_wrapper *cfgtew)
1195{
1196 int found = 0;
1197 unsigned int proto;
1198 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1199 struct ipr_resource_entry *gscsi_res = NULL;
1200
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001201 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 res->in_erp = 0;
1203 res->add_to_ml = 0;
1204 res->del_from_ml = 0;
1205 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06001206 res->reset_occurred = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001208 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001209
1210 if (ioa_cfg->sis64) {
1211 proto = cfgtew->u.cfgte64->proto;
Brian King359d96e2015-06-11 20:45:20 -05001212 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1213 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001214 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001215 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001216
1217 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1218 sizeof(res->res_path));
1219
1220 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001221 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1222 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001223 res->lun = scsilun_to_int(&res->dev_lun);
1224
1225 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1226 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1227 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1228 found = 1;
1229 res->target = gscsi_res->target;
1230 break;
1231 }
1232 }
1233 if (!found) {
1234 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1235 ioa_cfg->max_devs_supported);
1236 set_bit(res->target, ioa_cfg->target_ids);
1237 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001238 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1239 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1240 res->target = 0;
1241 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1242 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1243 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1244 ioa_cfg->max_devs_supported);
1245 set_bit(res->target, ioa_cfg->array_ids);
1246 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1247 res->bus = IPR_VSET_VIRTUAL_BUS;
1248 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1249 ioa_cfg->max_devs_supported);
1250 set_bit(res->target, ioa_cfg->vset_ids);
1251 } else {
1252 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1253 ioa_cfg->max_devs_supported);
1254 set_bit(res->target, ioa_cfg->target_ids);
1255 }
1256 } else {
1257 proto = cfgtew->u.cfgte->proto;
1258 res->qmodel = IPR_QUEUEING_MODEL(res);
1259 res->flags = cfgtew->u.cfgte->flags;
1260 if (res->flags & IPR_IS_IOA_RESOURCE)
1261 res->type = IPR_RES_TYPE_IOAFP;
1262 else
1263 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1264
1265 res->bus = cfgtew->u.cfgte->res_addr.bus;
1266 res->target = cfgtew->u.cfgte->res_addr.target;
1267 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001268 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001269 }
1270
1271 ipr_update_ata_class(res, proto);
1272}
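/*
 * Target numbering note for ipr_init_res_entry() on sis64: generic SCSI
 * devices that share a dev_id reuse the already assigned target number,
 * otherwise a free bit is claimed from target_ids; array and volume set
 * resources live on their own virtual buses and draw from array_ids and
 * vset_ids respectively, which is what ipr_clear_res_target() later
 * releases.
 */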
1273
1274/**
1275 * ipr_is_same_device - Determine if two devices are the same.
1276 * @res: resource entry struct
1277 * @cfgtew: config table entry wrapper struct
1278 *
1279 * Return value:
1280 * 1 if the devices are the same / 0 otherwise
1281 **/
1282static int ipr_is_same_device(struct ipr_resource_entry *res,
1283 struct ipr_config_table_entry_wrapper *cfgtew)
1284{
1285 if (res->ioa_cfg->sis64) {
1286 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1287 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001288 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001289 sizeof(cfgtew->u.cfgte64->lun))) {
1290 return 1;
1291 }
1292 } else {
1293 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1294 res->target == cfgtew->u.cfgte->res_addr.target &&
1295 res->lun == cfgtew->u.cfgte->res_addr.lun)
1296 return 1;
1297 }
1298
1299 return 0;
1300}
1301
1302/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001303 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001304 * @res_path: resource path
1305 * @buffer: buffer
Brian Kingb3b3b402013-01-11 17:43:49 -06001306 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001307 *
1308 * Return value:
1309 * pointer to buffer
1310 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001311static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001312{
1313 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001314 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001315
Wayne Boyer46d74562010-08-11 07:15:17 -07001316 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001317 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1318 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1319 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001320
1321 return buffer;
1322}
1323
1324/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001325 * ipr_format_res_path - Format the resource path for printing.
1326 * @ioa_cfg: ioa config struct
1327 * @res_path: resource path
1328 * @buffer: buffer
1329 * @len: length of buffer provided
1330 *
1331 * Return value:
1332 * pointer to buffer
1333 **/
1334static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1335 u8 *res_path, char *buffer, int len)
1336{
1337 char *p = buffer;
1338
1339 *p = '\0';
1340 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1341 __ipr_format_res_path(res_path, p, len - (p - buffer));
1342 return buffer;
1343}
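/*
 * Format example (hypothetical values): for host_no 2 and a res_path of
 * 0x00, 0x0E, 0x01 terminated by 0xff, __ipr_format_res_path() produces
 * "00-0E-01" and ipr_format_res_path() prefixes the host number to give
 * "2/00-0E-01".
 */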
1344
1345/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001346 * ipr_update_res_entry - Update the resource entry.
1347 * @res: resource entry struct
1348 * @cfgtew: config table entry wrapper struct
1349 *
1350 * Return value:
1351 * none
1352 **/
1353static void ipr_update_res_entry(struct ipr_resource_entry *res,
1354 struct ipr_config_table_entry_wrapper *cfgtew)
1355{
1356 char buffer[IPR_MAX_RES_PATH_LENGTH];
1357 unsigned int proto;
1358 int new_path = 0;
1359
1360 if (res->ioa_cfg->sis64) {
Brian King359d96e2015-06-11 20:45:20 -05001361 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1362 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer75576bb2010-07-14 10:50:14 -07001363 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001364
1365 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1366 sizeof(struct ipr_std_inq_data));
1367
1368 res->qmodel = IPR_QUEUEING_MODEL64(res);
1369 proto = cfgtew->u.cfgte64->proto;
1370 res->res_handle = cfgtew->u.cfgte64->res_handle;
1371 res->dev_id = cfgtew->u.cfgte64->dev_id;
1372
1373 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1374 sizeof(res->dev_lun.scsi_lun));
1375
1376 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1377 sizeof(res->res_path))) {
1378 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1379 sizeof(res->res_path));
1380 new_path = 1;
1381 }
1382
1383 if (res->sdev && new_path)
1384 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001385 ipr_format_res_path(res->ioa_cfg,
1386 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001387 } else {
1388 res->flags = cfgtew->u.cfgte->flags;
1389 if (res->flags & IPR_IS_IOA_RESOURCE)
1390 res->type = IPR_RES_TYPE_IOAFP;
1391 else
1392 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1393
1394 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1395 sizeof(struct ipr_std_inq_data));
1396
1397 res->qmodel = IPR_QUEUEING_MODEL(res);
1398 proto = cfgtew->u.cfgte->proto;
1399 res->res_handle = cfgtew->u.cfgte->res_handle;
1400 }
1401
1402 ipr_update_ata_class(res, proto);
1403}
1404
1405/**
1406 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407 * for the resource.
1408 * @res: resource entry struct
1410 *
1411 * Return value:
1412 * none
1413 **/
1414static void ipr_clear_res_target(struct ipr_resource_entry *res)
1415{
1416 struct ipr_resource_entry *gscsi_res = NULL;
1417 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1418
1419 if (!ioa_cfg->sis64)
1420 return;
1421
1422 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1423 clear_bit(res->target, ioa_cfg->array_ids);
1424 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1425 clear_bit(res->target, ioa_cfg->vset_ids);
1426 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1427 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1428 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1429 return;
1430 clear_bit(res->target, ioa_cfg->target_ids);
1431
1432 } else if (res->bus == 0)
1433 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434}
1435
1436/**
1437 * ipr_handle_config_change - Handle a config change from the adapter
1438 * @ioa_cfg: ioa config struct
1439 * @hostrcb: hostrcb
1440 *
1441 * Return value:
1442 * none
1443 **/
1444static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001445 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446{
1447 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001448 struct ipr_config_table_entry_wrapper cfgtew;
1449 __be32 cc_res_handle;
1450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 u32 is_ndn = 1;
1452
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001453 if (ioa_cfg->sis64) {
1454 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1455 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1456 } else {
1457 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1458 cc_res_handle = cfgtew.u.cfgte->res_handle;
1459 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460
1461 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001462 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 is_ndn = 0;
1464 break;
1465 }
1466 }
1467
1468 if (is_ndn) {
1469 if (list_empty(&ioa_cfg->free_res_q)) {
1470 ipr_send_hcam(ioa_cfg,
1471 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1472 hostrcb);
1473 return;
1474 }
1475
1476 res = list_entry(ioa_cfg->free_res_q.next,
1477 struct ipr_resource_entry, queue);
1478
1479 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001480 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1482 }
1483
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001484 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
1486 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1487 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001489 res->res_handle = IPR_INVALID_RES_HANDLE;
Brian Kingf688f962014-12-02 12:47:37 -06001490 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001491 } else {
1492 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001494 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001495 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 res->add_to_ml = 1;
Brian Kingf688f962014-12-02 12:47:37 -06001497 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 }
1499
1500 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1501}
1502
1503/**
1504 * ipr_process_ccn - Op done function for a CCN.
1505 * @ipr_cmd: ipr command struct
1506 *
1507 * This function is the op done function for a configuration
1508 * change notification host controlled async from the adapter.
1509 *
1510 * Return value:
1511 * none
1512 **/
1513static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1514{
1515 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1516 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001517 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
Brian Kingafc3f832016-08-24 12:56:51 -05001519 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001520 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
1522 if (ioasc) {
Brian King4fdd7c72015-03-26 11:23:50 -05001523 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1524 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 dev_err(&ioa_cfg->pdev->dev,
1526 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1527
1528 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1529 } else {
1530 ipr_handle_config_change(ioa_cfg, hostrcb);
1531 }
1532}
1533
1534/**
Brian King8cf093e2007-04-26 16:00:14 -05001535 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1536 * @i: index into buffer
1537 * @buf: string to modify
1538 *
1539 * This function will strip all trailing whitespace, pad the end
1540 * of the string with a single space, and NULL terminate the string.
1541 *
1542 * Return value:
1543 * new length of string
1544 **/
1545static int strip_and_pad_whitespace(int i, char *buf)
1546{
1547 while (i && buf[i] == ' ')
1548 i--;
1549 buf[i+1] = ' ';
1550 buf[i+2] = '\0';
1551 return i + 2;
1552}
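/*
 * Worked example for strip_and_pad_whitespace(): given a buffer holding
 * "IBM" followed by five blanks and i == 7 (the last index of an 8 byte
 * vendor id field), the loop backs up to the 'M', a single pad blank and
 * a terminating '\0' are written after it, and 4 is returned -- the
 * offset at which ipr_log_vpd_compact() copies in the next field.
 */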
1553
1554/**
1555 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1556 * @prefix: string to print at start of printk
1557 * @hostrcb: hostrcb pointer
1558 * @vpd: vendor/product id/sn struct
1559 *
1560 * Return value:
1561 * none
1562 **/
1563static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1564 struct ipr_vpd *vpd)
1565{
1566 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1567 int i = 0;
1568
1569 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1570 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1571
1572 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1573 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1574
1575 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1576 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1577
1578 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1579}
1580
1581/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001583 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 *
1585 * Return value:
1586 * none
1587 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001588static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589{
1590 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591 + IPR_SERIAL_NUM_LEN];
1592
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001593 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 IPR_PROD_ID_LEN);
1596 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597 ipr_err("Vendor/Product ID: %s\n", buffer);
1598
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001599 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601 ipr_err(" Serial Number: %s\n", buffer);
1602}
1603
1604/**
Brian King8cf093e2007-04-26 16:00:14 -05001605 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606 * @prefix: string to print at start of printk
1607 * @hostrcb: hostrcb pointer
1608 * @vpd: vendor/product id/sn/wwn struct
1609 *
1610 * Return value:
1611 * none
1612 **/
1613static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614 struct ipr_ext_vpd *vpd)
1615{
1616 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1619}
1620
1621/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001622 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623 * @vpd: vendor/product id/sn/wwn struct
1624 *
1625 * Return value:
1626 * none
1627 **/
1628static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1629{
1630 ipr_log_vpd(&vpd->vpd);
1631 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632 be32_to_cpu(vpd->wwid[1]));
1633}
1634
1635/**
1636 * ipr_log_enhanced_cache_error - Log a cache error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1639 *
1640 * Return value:
1641 * none
1642 **/
1643static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644 struct ipr_hostrcb *hostrcb)
1645{
Wayne Boyer4565e372010-02-19 13:24:07 -08001646 struct ipr_hostrcb_type_12_error *error;
1647
1648 if (ioa_cfg->sis64)
1649 error = &hostrcb->hcam.u.error64.u.type_12_error;
1650 else
1651 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001652
1653 ipr_err("-----Current Configuration-----\n");
1654 ipr_err("Cache Directory Card Information:\n");
1655 ipr_log_ext_vpd(&error->ioa_vpd);
1656 ipr_err("Adapter Card Information:\n");
1657 ipr_log_ext_vpd(&error->cfc_vpd);
1658
1659 ipr_err("-----Expected Configuration-----\n");
1660 ipr_err("Cache Directory Card Information:\n");
1661 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662 ipr_err("Adapter Card Information:\n");
1663 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1664
1665 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666 be32_to_cpu(error->ioa_data[0]),
1667 be32_to_cpu(error->ioa_data[1]),
1668 be32_to_cpu(error->ioa_data[2]));
1669}
1670
1671/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 * ipr_log_cache_error - Log a cache error.
1673 * @ioa_cfg: ioa config struct
1674 * @hostrcb: hostrcb struct
1675 *
1676 * Return value:
1677 * none
1678 **/
1679static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680 struct ipr_hostrcb *hostrcb)
1681{
1682 struct ipr_hostrcb_type_02_error *error =
1683 &hostrcb->hcam.u.error.u.type_02_error;
1684
1685 ipr_err("-----Current Configuration-----\n");
1686 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001687 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001689 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
1691 ipr_err("-----Expected Configuration-----\n");
1692 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001693 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001695 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
1697 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698 be32_to_cpu(error->ioa_data[0]),
1699 be32_to_cpu(error->ioa_data[1]),
1700 be32_to_cpu(error->ioa_data[2]));
1701}
1702
1703/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001704 * ipr_log_enhanced_config_error - Log a configuration error.
1705 * @ioa_cfg: ioa config struct
1706 * @hostrcb: hostrcb struct
1707 *
1708 * Return value:
1709 * none
1710 **/
1711static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712 struct ipr_hostrcb *hostrcb)
1713{
1714 int errors_logged, i;
1715 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716 struct ipr_hostrcb_type_13_error *error;
1717
1718 error = &hostrcb->hcam.u.error.u.type_13_error;
1719 errors_logged = be32_to_cpu(error->errors_logged);
1720
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724 dev_entry = error->dev;
1725
1726 for (i = 0; i < errors_logged; i++, dev_entry++) {
1727 ipr_err_separator;
1728
1729 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730 ipr_log_ext_vpd(&dev_entry->vpd);
1731
1732 ipr_err("-----New Device Information-----\n");
1733 ipr_log_ext_vpd(&dev_entry->new_vpd);
1734
1735 ipr_err("Cache Directory Card Information:\n");
1736 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1737
1738 ipr_err("Adapter Card Information:\n");
1739 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1740 }
1741}
1742
1743/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001744 * ipr_log_sis64_config_error - Log a device error.
1745 * @ioa_cfg: ioa config struct
1746 * @hostrcb: hostrcb struct
1747 *
1748 * Return value:
1749 * none
1750 **/
1751static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752 struct ipr_hostrcb *hostrcb)
1753{
1754 int errors_logged, i;
1755 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756 struct ipr_hostrcb_type_23_error *error;
1757 char buffer[IPR_MAX_RES_PATH_LENGTH];
1758
1759 error = &hostrcb->hcam.u.error64.u.type_23_error;
1760 errors_logged = be32_to_cpu(error->errors_logged);
1761
1762 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765 dev_entry = error->dev;
1766
1767 for (i = 0; i < errors_logged; i++, dev_entry++) {
1768 ipr_err_separator;
1769
1770 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001771 __ipr_format_res_path(dev_entry->res_path,
1772 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001773 ipr_log_ext_vpd(&dev_entry->vpd);
1774
1775 ipr_err("-----New Device Information-----\n");
1776 ipr_log_ext_vpd(&dev_entry->new_vpd);
1777
1778 ipr_err("Cache Directory Card Information:\n");
1779 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780
1781 ipr_err("Adapter Card Information:\n");
1782 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783 }
1784}
1785
1786/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 * ipr_log_config_error - Log a configuration error.
1788 * @ioa_cfg: ioa config struct
1789 * @hostrcb: hostrcb struct
1790 *
1791 * Return value:
1792 * none
1793 **/
1794static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795 struct ipr_hostrcb *hostrcb)
1796{
1797 int errors_logged, i;
1798 struct ipr_hostrcb_device_data_entry *dev_entry;
1799 struct ipr_hostrcb_type_03_error *error;
1800
1801 error = &hostrcb->hcam.u.error.u.type_03_error;
1802 errors_logged = be32_to_cpu(error->errors_logged);
1803
1804 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805 be32_to_cpu(error->errors_detected), errors_logged);
1806
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001807 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 for (i = 0; i < errors_logged; i++, dev_entry++) {
1810 ipr_err_separator;
1811
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001812 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001813 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
1815 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001816 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817
1818 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001819 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
1821 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001822 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823
1824 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825 be32_to_cpu(dev_entry->ioa_data[0]),
1826 be32_to_cpu(dev_entry->ioa_data[1]),
1827 be32_to_cpu(dev_entry->ioa_data[2]),
1828 be32_to_cpu(dev_entry->ioa_data[3]),
1829 be32_to_cpu(dev_entry->ioa_data[4]));
1830 }
1831}
1832
1833/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001834 * ipr_log_enhanced_array_error - Log an array configuration error.
1835 * @ioa_cfg: ioa config struct
1836 * @hostrcb: hostrcb struct
1837 *
1838 * Return value:
1839 * none
1840 **/
1841static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842 struct ipr_hostrcb *hostrcb)
1843{
1844 int i, num_entries;
1845 struct ipr_hostrcb_type_14_error *error;
1846 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849 error = &hostrcb->hcam.u.error.u.type_14_error;
1850
1851 ipr_err_separator;
1852
1853 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854 error->protection_level,
1855 ioa_cfg->host->host_no,
1856 error->last_func_vset_res_addr.bus,
1857 error->last_func_vset_res_addr.target,
1858 error->last_func_vset_res_addr.lun);
1859
1860 ipr_err_separator;
1861
1862 array_entry = error->array_member;
1863 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001864 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001865
1866 for (i = 0; i < num_entries; i++, array_entry++) {
1867 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1868 continue;
1869
1870 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871 ipr_err("Exposed Array Member %d:\n", i);
1872 else
1873 ipr_err("Array Member %d:\n", i);
1874
1875 ipr_log_ext_vpd(&array_entry->vpd);
1876 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878 "Expected Location");
1879
1880 ipr_err_separator;
1881 }
1882}
1883
1884/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 * ipr_log_array_error - Log an array configuration error.
1886 * @ioa_cfg: ioa config struct
1887 * @hostrcb: hostrcb struct
1888 *
1889 * Return value:
1890 * none
1891 **/
1892static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893 struct ipr_hostrcb *hostrcb)
1894{
1895 int i;
1896 struct ipr_hostrcb_type_04_error *error;
1897 struct ipr_hostrcb_array_data_entry *array_entry;
1898 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1899
1900 error = &hostrcb->hcam.u.error.u.type_04_error;
1901
1902 ipr_err_separator;
1903
1904 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905 error->protection_level,
1906 ioa_cfg->host->host_no,
1907 error->last_func_vset_res_addr.bus,
1908 error->last_func_vset_res_addr.target,
1909 error->last_func_vset_res_addr.lun);
1910
1911 ipr_err_separator;
1912
1913 array_entry = error->array_member;
1914
1915 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001916 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 continue;
1918
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001919 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001921 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001924 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001926 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
1930 ipr_err_separator;
1931
1932 if (i == 9)
1933 array_entry = error->array_member2;
1934 else
1935 array_entry++;
1936 }
1937}
1938
1939/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001940 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001941 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001942 * @data: IOA error data
1943 * @len: data length
1944 *
1945 * Return value:
1946 * none
1947 **/
Brian King359d96e2015-06-11 20:45:20 -05001948static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001949{
1950 int i;
1951
1952 if (len == 0)
1953 return;
1954
Brian Kingac719ab2006-11-21 10:28:42 -06001955 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1957
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001958 for (i = 0; i < len / 4; i += 4) {
1959 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960 be32_to_cpu(data[i]),
1961 be32_to_cpu(data[i+1]),
1962 be32_to_cpu(data[i+2]),
1963 be32_to_cpu(data[i+3]));
1964 }
1965}
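/*
 * Output format of ipr_log_hex_data(), for illustration with made-up
 * data: each line is a byte offset followed by four big-endian 32 bit
 * words, e.g.
 *
 *	00000000: DEADBEEF 00000001 00000002 00000003
 *	00000010: 00000004 00000005 00000006 00000007
 *
 * and the dump is capped at IPR_DEFAULT_MAX_ERROR_DUMP bytes unless the
 * adapter's log_level has been raised above IPR_DEFAULT_LOG_LEVEL.
 */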
1966
1967/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001968 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969 * @ioa_cfg: ioa config struct
1970 * @hostrcb: hostrcb struct
1971 *
1972 * Return value:
1973 * none
1974 **/
1975static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976 struct ipr_hostrcb *hostrcb)
1977{
1978 struct ipr_hostrcb_type_17_error *error;
1979
Wayne Boyer4565e372010-02-19 13:24:07 -08001980 if (ioa_cfg->sis64)
1981 error = &hostrcb->hcam.u.error64.u.type_17_error;
1982 else
1983 error = &hostrcb->hcam.u.error.u.type_17_error;
1984
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001985 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001986 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001987
Brian King8cf093e2007-04-26 16:00:14 -05001988 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989 be32_to_cpu(hostrcb->hcam.u.error.prc));
1990 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001991 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001992 be32_to_cpu(hostrcb->hcam.length) -
1993 (offsetof(struct ipr_hostrcb_error, u) +
1994 offsetof(struct ipr_hostrcb_type_17_error, data)));
1995}
1996
1997/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001998 * ipr_log_dual_ioa_error - Log a dual adapter error.
1999 * @ioa_cfg: ioa config struct
2000 * @hostrcb: hostrcb struct
2001 *
2002 * Return value:
2003 * none
2004 **/
2005static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006 struct ipr_hostrcb *hostrcb)
2007{
2008 struct ipr_hostrcb_type_07_error *error;
2009
2010 error = &hostrcb->hcam.u.error.u.type_07_error;
2011 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08002012 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002013
Brian King8cf093e2007-04-26 16:00:14 -05002014 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015 be32_to_cpu(hostrcb->hcam.u.error.prc));
2016 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06002017 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002018 be32_to_cpu(hostrcb->hcam.length) -
2019 (offsetof(struct ipr_hostrcb_error, u) +
2020 offsetof(struct ipr_hostrcb_type_07_error, data)));
2021}
2022
Brian King49dc6a12006-11-21 10:28:35 -06002023static const struct {
2024 u8 active;
2025 char *desc;
2026} path_active_desc[] = {
2027 { IPR_PATH_NO_INFO, "Path" },
2028 { IPR_PATH_ACTIVE, "Active path" },
2029 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2030};
2031
2032static const struct {
2033 u8 state;
2034 char *desc;
2035} path_state_desc[] = {
2036 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037 { IPR_PATH_HEALTHY, "is healthy" },
2038 { IPR_PATH_DEGRADED, "is degraded" },
2039 { IPR_PATH_FAILED, "is failed" }
2040};
2041
2042/**
2043 * ipr_log_fabric_path - Log a fabric path error
2044 * @hostrcb: hostrcb struct
2045 * @fabric: fabric descriptor
2046 *
2047 * Return value:
2048 * none
2049 **/
2050static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051 struct ipr_hostrcb_fabric_desc *fabric)
2052{
2053 int i, j;
2054 u8 path_state = fabric->path_state;
2055 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056 u8 state = path_state & IPR_PATH_STATE_MASK;
2057
2058 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059 if (path_active_desc[i].active != active)
2060 continue;
2061
2062 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063 if (path_state_desc[j].state != state)
2064 continue;
2065
2066 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068 path_active_desc[i].desc, path_state_desc[j].desc,
2069 fabric->ioa_port);
2070 } else if (fabric->cascaded_expander == 0xff) {
2071 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072 path_active_desc[i].desc, path_state_desc[j].desc,
2073 fabric->ioa_port, fabric->phy);
2074 } else if (fabric->phy == 0xff) {
2075 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076 path_active_desc[i].desc, path_state_desc[j].desc,
2077 fabric->ioa_port, fabric->cascaded_expander);
2078 } else {
2079 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080 path_active_desc[i].desc, path_state_desc[j].desc,
2081 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2082 }
2083 return;
2084 }
2085 }
2086
2087 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2089}
2090
Wayne Boyer4565e372010-02-19 13:24:07 -08002091/**
2092 * ipr_log64_fabric_path - Log a fabric path error
2093 * @hostrcb: hostrcb struct
2094 * @fabric: fabric descriptor
2095 *
2096 * Return value:
2097 * none
2098 **/
2099static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100 struct ipr_hostrcb64_fabric_desc *fabric)
2101{
2102 int i, j;
2103 u8 path_state = fabric->path_state;
2104 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105 u8 state = path_state & IPR_PATH_STATE_MASK;
2106 char buffer[IPR_MAX_RES_PATH_LENGTH];
2107
2108 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109 if (path_active_desc[i].active != active)
2110 continue;
2111
2112 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113 if (path_state_desc[j].state != state)
2114 continue;
2115
2116 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002118 ipr_format_res_path(hostrcb->ioa_cfg,
2119 fabric->res_path,
2120 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002121 return;
2122 }
2123 }
2124
2125 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002126 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002128}
2129
Brian King49dc6a12006-11-21 10:28:35 -06002130static const struct {
2131 u8 type;
2132 char *desc;
2133} path_type_desc[] = {
2134 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2138};
2139
2140static const struct {
2141 u8 status;
2142 char *desc;
2143} path_status_desc[] = {
2144 { IPR_PATH_CFG_NO_PROB, "Functional" },
2145 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146 { IPR_PATH_CFG_FAILED, "Failed" },
2147 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148 { IPR_PATH_NOT_DETECTED, "Missing" },
2149 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2150};
2151
2152static const char *link_rate[] = {
2153 "unknown",
2154 "disabled",
2155 "phy reset problem",
2156 "spinup hold",
2157 "port selector",
2158 "unknown",
2159 "unknown",
2160 "unknown",
2161 "1.5Gbps",
2162 "3.0Gbps",
2163 "unknown",
2164 "unknown",
2165 "unknown",
2166 "unknown",
2167 "unknown",
2168 "unknown"
2169};
2170
2171/**
2172 * ipr_log_path_elem - Log a fabric path element.
2173 * @hostrcb: hostrcb struct
2174 * @cfg: fabric path element struct
2175 *
2176 * Return value:
2177 * none
2178 **/
2179static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180 struct ipr_hostrcb_config_element *cfg)
2181{
2182 int i, j;
2183 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2185
2186 if (type == IPR_PATH_CFG_NOT_EXIST)
2187 return;
2188
2189 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190 if (path_type_desc[i].type != type)
2191 continue;
2192
2193 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194 if (path_status_desc[j].status != status)
2195 continue;
2196
2197 if (type == IPR_PATH_CFG_IOA_PORT) {
2198 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199 path_status_desc[j].desc, path_type_desc[i].desc,
2200 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2202 } else {
2203 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
2206 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208 } else if (cfg->cascaded_expander == 0xff) {
2209 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210 "WWN=%08X%08X\n", path_status_desc[j].desc,
2211 path_type_desc[i].desc, cfg->phy,
2212 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214 } else if (cfg->phy == 0xff) {
2215 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216 "WWN=%08X%08X\n", path_status_desc[j].desc,
2217 path_type_desc[i].desc, cfg->cascaded_expander,
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220 } else {
2221 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222 "WWN=%08X%08X\n", path_status_desc[j].desc,
2223 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2226 }
2227 }
2228 return;
2229 }
2230 }
2231
2232 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2236}
2237
2238/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002239 * ipr_log64_path_elem - Log a fabric path element.
2240 * @hostrcb: hostrcb struct
2241 * @cfg: fabric path element struct
2242 *
2243 * Return value:
2244 * none
2245 **/
2246static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247 struct ipr_hostrcb64_config_element *cfg)
2248{
2249 int i, j;
2250 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253 char buffer[IPR_MAX_RES_PATH_LENGTH];
2254
2255 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2256 return;
2257
2258 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259 if (path_type_desc[i].type != type)
2260 continue;
2261
2262 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263 if (path_status_desc[j].status != status)
2264 continue;
2265
2266 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002268 ipr_format_res_path(hostrcb->ioa_cfg,
2269 cfg->res_path, buffer, sizeof(buffer)),
2270 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271 be32_to_cpu(cfg->wwid[0]),
2272 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002273 return;
2274 }
2275 }
2276 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002278 ipr_format_res_path(hostrcb->ioa_cfg,
2279 cfg->res_path, buffer, sizeof(buffer)),
2280 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002282}
2283
2284/**
Brian King49dc6a12006-11-21 10:28:35 -06002285 * ipr_log_fabric_error - Log a fabric error.
2286 * @ioa_cfg: ioa config struct
2287 * @hostrcb: hostrcb struct
2288 *
2289 * Return value:
2290 * none
2291 **/
2292static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293 struct ipr_hostrcb *hostrcb)
2294{
2295 struct ipr_hostrcb_type_20_error *error;
2296 struct ipr_hostrcb_fabric_desc *fabric;
2297 struct ipr_hostrcb_config_element *cfg;
2298 int i, add_len;
2299
2300 error = &hostrcb->hcam.u.error.u.type_20_error;
2301 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2303
2304 add_len = be32_to_cpu(hostrcb->hcam.length) -
2305 (offsetof(struct ipr_hostrcb_error, u) +
2306 offsetof(struct ipr_hostrcb_type_20_error, desc));
2307
2308 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309 ipr_log_fabric_path(hostrcb, fabric);
2310 for_each_fabric_cfg(fabric, cfg)
2311 ipr_log_path_elem(hostrcb, cfg);
2312
2313 add_len -= be16_to_cpu(fabric->length);
2314 fabric = (struct ipr_hostrcb_fabric_desc *)
2315 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2316 }
2317
Brian King359d96e2015-06-11 20:45:20 -05002318 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002319}
2320
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002321/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002322 * ipr_log_sis64_array_error - Log a sis64 array error.
2323 * @ioa_cfg: ioa config struct
2324 * @hostrcb: hostrcb struct
2325 *
2326 * Return value:
2327 * none
2328 **/
2329static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330 struct ipr_hostrcb *hostrcb)
2331{
2332 int i, num_entries;
2333 struct ipr_hostrcb_type_24_error *error;
2334 struct ipr_hostrcb64_array_data_entry *array_entry;
2335 char buffer[IPR_MAX_RES_PATH_LENGTH];
2336 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2337
2338 error = &hostrcb->hcam.u.error64.u.type_24_error;
2339
2340 ipr_err_separator;
2341
2342 ipr_err("RAID %s Array Configuration: %s\n",
2343 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002344 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002346
2347 ipr_err_separator;
2348
2349 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002350 num_entries = min_t(u32, error->num_entries,
2351 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002352
2353 for (i = 0; i < num_entries; i++, array_entry++) {
2354
2355 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2356 continue;
2357
2358 if (error->exposed_mode_adn == i)
2359 ipr_err("Exposed Array Member %d:\n", i);
2360 else
2361 ipr_err("Array Member %d:\n", i);
2362
2363 ipr_err("Array Member %d:\n", i);
2364 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002365 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002366 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002368 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002369 ipr_format_res_path(ioa_cfg,
2370 array_entry->expected_res_path,
2371 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002372
2373 ipr_err_separator;
2374 }
2375}
2376
2377/**
2378 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379 * @ioa_cfg: ioa config struct
2380 * @hostrcb: hostrcb struct
2381 *
2382 * Return value:
2383 * none
2384 **/
2385static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386 struct ipr_hostrcb *hostrcb)
2387{
2388 struct ipr_hostrcb_type_30_error *error;
2389 struct ipr_hostrcb64_fabric_desc *fabric;
2390 struct ipr_hostrcb64_config_element *cfg;
2391 int i, add_len;
2392
2393 error = &hostrcb->hcam.u.error64.u.type_30_error;
2394
2395 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2397
2398 add_len = be32_to_cpu(hostrcb->hcam.length) -
2399 (offsetof(struct ipr_hostrcb64_error, u) +
2400 offsetof(struct ipr_hostrcb_type_30_error, desc));
2401
2402 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403 ipr_log64_fabric_path(hostrcb, fabric);
2404 for_each_fabric_cfg(fabric, cfg)
2405 ipr_log64_path_elem(hostrcb, cfg);
2406
2407 add_len -= be16_to_cpu(fabric->length);
2408 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2410 }
2411
Brian King359d96e2015-06-11 20:45:20 -05002412 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Wayne Boyer4565e372010-02-19 13:24:07 -08002413}
2414
2415/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 * ipr_log_generic_error - Log an adapter error.
2417 * @ioa_cfg: ioa config struct
2418 * @hostrcb: hostrcb struct
2419 *
2420 * Return value:
2421 * none
2422 **/
2423static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2424 struct ipr_hostrcb *hostrcb)
2425{
Brian Kingac719ab2006-11-21 10:28:42 -06002426 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002427 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428}
2429
2430/**
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002431 * ipr_log_sis64_device_error - Log a sis64 device error.
2432 * @ioa_cfg: ioa config struct
2433 * @hostrcb: hostrcb struct
2434 *
2435 * Return value:
2436 * none
2437 **/
2438static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2439 struct ipr_hostrcb *hostrcb)
2440{
2441 struct ipr_hostrcb_type_21_error *error;
2442 char buffer[IPR_MAX_RES_PATH_LENGTH];
2443
2444 error = &hostrcb->hcam.u.error64.u.type_21_error;
2445
2446 ipr_err("-----Failing Device Information-----\n");
2447 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2449 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2450 ipr_err("Device Resource Path: %s\n",
2451 __ipr_format_res_path(error->res_path,
2452 buffer, sizeof(buffer)));
2453 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2454 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2455 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2456 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2457 ipr_err("SCSI Sense Data:\n");
2458 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2459 ipr_err("SCSI Command Descriptor Block: \n");
2460 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2461
2462 ipr_err("Additional IOA Data:\n");
2463 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2464}
2465
2466/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2468 * @ioasc: IOASC
2469 *
2470 * This function will return the index into the ipr_error_table
2471 * for the specified IOASC. If the IOASC is not in the table,
2472 * 0 will be returned, which points to the entry used for unknown errors.
2473 *
2474 * Return value:
2475 * index into the ipr_error_table
2476 **/
2477static u32 ipr_get_error(u32 ioasc)
2478{
2479 int i;
2480
2481 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002482 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 return i;
2484
2485 return 0;
2486}
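/*
 * Note on ipr_get_error(): the IOASC is masked with IPR_IOASC_IOASC_MASK
 * before the table compare, so codes differing only in the masked-off
 * bits share a single ipr_error_table entry, and anything with no match
 * falls through to index 0 -- the catch-all "unknown error" slot that
 * ipr_handle_log_data() relies on.
 */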
2487
2488/**
2489 * ipr_handle_log_data - Log an adapter error.
2490 * @ioa_cfg: ioa config struct
2491 * @hostrcb: hostrcb struct
2492 *
2493 * This function logs an adapter error to the system.
2494 *
2495 * Return value:
2496 * none
2497 **/
2498static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2499 struct ipr_hostrcb *hostrcb)
2500{
2501 u32 ioasc;
2502 int error_index;
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002503 struct ipr_hostrcb_type_21_error *error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504
2505 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2506 return;
2507
2508 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2509 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2510
Wayne Boyer4565e372010-02-19 13:24:07 -08002511 if (ioa_cfg->sis64)
2512 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2513 else
2514 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
Wayne Boyer4565e372010-02-19 13:24:07 -08002516 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2517 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002520 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 }
2522
2523 error_index = ipr_get_error(ioasc);
2524
2525 if (!ipr_error_table[error_index].log_hcam)
2526 return;
2527
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002528 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2529 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2530 error = &hostrcb->hcam.u.error64.u.type_21_error;
2531
2532 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2533 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2534 return;
2535 }
2536
Brian King49dc6a12006-11-21 10:28:35 -06002537 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
2539 /* Set indication we have logged an error */
2540 ioa_cfg->errors_logged++;
2541
Brian King933916f2007-03-29 12:43:30 -05002542 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002544 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2545 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
2547 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 case IPR_HOST_RCB_OVERLAY_ID_2:
2549 ipr_log_cache_error(ioa_cfg, hostrcb);
2550 break;
2551 case IPR_HOST_RCB_OVERLAY_ID_3:
2552 ipr_log_config_error(ioa_cfg, hostrcb);
2553 break;
2554 case IPR_HOST_RCB_OVERLAY_ID_4:
2555 case IPR_HOST_RCB_OVERLAY_ID_6:
2556 ipr_log_array_error(ioa_cfg, hostrcb);
2557 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002558 case IPR_HOST_RCB_OVERLAY_ID_7:
2559 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2560 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002561 case IPR_HOST_RCB_OVERLAY_ID_12:
2562 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2563 break;
2564 case IPR_HOST_RCB_OVERLAY_ID_13:
2565 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2566 break;
2567 case IPR_HOST_RCB_OVERLAY_ID_14:
2568 case IPR_HOST_RCB_OVERLAY_ID_16:
2569 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2570 break;
2571 case IPR_HOST_RCB_OVERLAY_ID_17:
2572 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2573 break;
Brian King49dc6a12006-11-21 10:28:35 -06002574 case IPR_HOST_RCB_OVERLAY_ID_20:
2575 ipr_log_fabric_error(ioa_cfg, hostrcb);
2576 break;
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002577 case IPR_HOST_RCB_OVERLAY_ID_21:
2578 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2579 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002580 case IPR_HOST_RCB_OVERLAY_ID_23:
2581 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2582 break;
2583 case IPR_HOST_RCB_OVERLAY_ID_24:
2584 case IPR_HOST_RCB_OVERLAY_ID_26:
2585 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2586 break;
2587 case IPR_HOST_RCB_OVERLAY_ID_30:
2588 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2589 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002590 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002593 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 break;
2595 }
2596}
2597
Brian Kingafc3f832016-08-24 12:56:51 -05002598static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2599{
2600 struct ipr_hostrcb *hostrcb;
2601
2602 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2603 struct ipr_hostrcb, queue);
2604
2605 if (unlikely(!hostrcb)) {
2606 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2607 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2608 struct ipr_hostrcb, queue);
2609 }
2610
2611 list_del_init(&hostrcb->queue);
2612 return hostrcb;
2613}
2614
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615/**
2616 * ipr_process_error - Op done function for an adapter error log.
2617 * @ipr_cmd: ipr command struct
2618 *
2619 * This function is the op done function for an error log host
2620 * controlled async from the adapter. It will log the error and
2621 * send the HCAM back to the adapter.
2622 *
2623 * Return value:
2624 * none
2625 **/
2626static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2627{
2628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002630 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002631 u32 fd_ioasc;
2632
2633 if (ioa_cfg->sis64)
2634 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2635 else
2636 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637
Brian Kingafc3f832016-08-24 12:56:51 -05002638 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002639 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640
2641 if (!ioasc) {
2642 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002643 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Brian King4fdd7c72015-03-26 11:23:50 -05002645 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2646 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 dev_err(&ioa_cfg->pdev->dev,
2648 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2649 }
2650
Brian Kingafc3f832016-08-24 12:56:51 -05002651 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
Brian King8a4236a2016-10-13 14:45:24 -05002652 schedule_work(&ioa_cfg->work_q);
Brian Kingafc3f832016-08-24 12:56:51 -05002653 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
Brian Kingafc3f832016-08-24 12:56:51 -05002654
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2656}
2657
2658/**
2659 * ipr_timeout - An internally generated op has timed out.
2660 * @t: timer context used to fetch the ipr command struct
2661 *
2662 * This function blocks host requests and initiates an
2663 * adapter reset.
2664 *
2665 * Return value:
2666 * none
2667 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002668static void ipr_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669{
Kees Cook738c6ec2017-08-18 16:53:24 -07002670 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 unsigned long lock_flags = 0;
2672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2673
2674 ENTER;
2675 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2676
2677 ioa_cfg->errors_logged++;
2678 dev_err(&ioa_cfg->pdev->dev,
2679 "Adapter being reset due to command timeout.\n");
2680
2681 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2682 ioa_cfg->sdt_state = GET_DUMP;
2683
2684 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2685 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2686
2687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688 LEAVE;
2689}
2690
2691/**
2692 * ipr_oper_timeout - Adapter timed out transitioning to operational
2693 * @t: timer context used to fetch the ipr command struct
2694 *
2695 * This function blocks host requests and initiates an
2696 * adapter reset.
2697 *
2698 * Return value:
2699 * none
2700 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002701static void ipr_oper_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702{
Kees Cook738c6ec2017-08-18 16:53:24 -07002703 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 unsigned long lock_flags = 0;
2705 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2706
2707 ENTER;
2708 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709
2710 ioa_cfg->errors_logged++;
2711 dev_err(&ioa_cfg->pdev->dev,
2712 "Adapter timed out transitioning to operational.\n");
2713
2714 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2715 ioa_cfg->sdt_state = GET_DUMP;
2716
2717 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2718 if (ipr_fastfail)
2719 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2720 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2721 }
2722
2723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2724 LEAVE;
2725}
2726
2727/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 * ipr_find_ses_entry - Find matching SES in SES table
2729 * @res: resource entry struct of SES
2730 *
2731 * Return value:
2732 * pointer to SES table entry / NULL on failure
2733 **/
2734static const struct ipr_ses_table_entry *
2735ipr_find_ses_entry(struct ipr_resource_entry *res)
2736{
2737 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002738 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2740
2741 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2742 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2743 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002744 vpids = &res->std_inq_data.vpids;
2745 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 matches++;
2747 else
2748 break;
2749 } else
2750 matches++;
2751 }
2752
2753 if (matches == IPR_PROD_ID_LEN)
2754 return ste;
2755 }
2756
2757 return NULL;
2758}
2759
2760/**
2761 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2762 * @ioa_cfg: ioa config struct
2763 * @bus: SCSI bus
2764 * @bus_width: bus width
2765 *
2766 * Return value:
2767 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2768 * For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
2769 * twice the bus speed in MHz (e.g. for a wide enabled bus,
2770 * max 160 MHz = max 320 MB/sec).
2771 **/
2772static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2773{
2774 struct ipr_resource_entry *res;
2775 const struct ipr_ses_table_entry *ste;
2776 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2777
2778 /* Loop through each config table entry in the config table buffer */
2779 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002780 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 continue;
2782
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002783 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 continue;
2785
2786 if (!(ste = ipr_find_ses_entry(res)))
2787 continue;
2788
2789 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2790 }
2791
2792 return max_xfer_rate;
2793}
2794
2795/**
2796 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2797 * @ioa_cfg: ioa config struct
2798 * @max_delay: max delay in micro-seconds to wait
2799 *
2800 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2801 *
2802 * Return value:
2803 * 0 on success / other on failure
2804 **/
2805static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2806{
2807 volatile u32 pcii_reg;
2808 int delay = 1;
2809
2810 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2811 while (delay < max_delay) {
2812 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2813
2814 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2815 return 0;
2816
2817 /* udelay cannot be used if delay is more than a few milliseconds */
2818 if ((delay / 1000) > MAX_UDELAY_MS)
2819 mdelay(delay / 1000);
2820 else
2821 udelay(delay);
2822
2823 delay += delay;
2824 }
2825 return -EIO;
2826}
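/*
 * Note: the polling delay above doubles on every pass (1, 2, 4, ...
 * microseconds), so the total time spent busy waiting before -EIO is
 * returned is bounded by roughly twice max_delay.
 */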
2827
2828/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002829 * ipr_get_sis64_dump_data_section - Dump IOA memory
2830 * @ioa_cfg: ioa config struct
2831 * @start_addr: adapter address to dump
2832 * @dest: destination kernel buffer
2833 * @length_in_words: length to dump in 4 byte words
2834 *
2835 * Return value:
2836 * 0 on success
2837 **/
2838static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2839 u32 start_addr,
2840 __be32 *dest, u32 length_in_words)
2841{
2842 int i;
2843
2844 for (i = 0; i < length_in_words; i++) {
2845 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2846 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2847 dest++;
2848 }
2849
2850 return 0;
2851}
2852
2853/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 * ipr_get_ldump_data_section - Dump IOA memory
2855 * @ioa_cfg: ioa config struct
2856 * @start_addr: adapter address to dump
2857 * @dest: destination kernel buffer
2858 * @length_in_words: length to dump in 4 byte words
2859 *
2860 * Return value:
2861 * 0 on success / -EIO on failure
2862 **/
2863static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2864 u32 start_addr,
2865 __be32 *dest, u32 length_in_words)
2866{
2867 volatile u32 temp_pcii_reg;
2868 int i, delay = 0;
2869
Wayne Boyerdcbad002010-02-19 13:24:14 -08002870 if (ioa_cfg->sis64)
2871 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2872 dest, length_in_words);
2873
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 /* Write IOA interrupt reg starting LDUMP state */
2875 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002876 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
2878 /* Wait for IO debug acknowledge */
2879 if (ipr_wait_iodbg_ack(ioa_cfg,
2880 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2881 dev_err(&ioa_cfg->pdev->dev,
2882 "IOA dump long data transfer timeout\n");
2883 return -EIO;
2884 }
2885
2886 /* Signal LDUMP interlocked - clear IO debug ack */
2887 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2888 ioa_cfg->regs.clr_interrupt_reg);
2889
2890 /* Write Mailbox with starting address */
2891 writel(start_addr, ioa_cfg->ioa_mailbox);
2892
2893 /* Signal address valid - clear IOA Reset alert */
2894 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002895 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896
2897 for (i = 0; i < length_in_words; i++) {
2898 /* Wait for IO debug acknowledge */
2899 if (ipr_wait_iodbg_ack(ioa_cfg,
2900 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2901 dev_err(&ioa_cfg->pdev->dev,
2902 "IOA dump short data transfer timeout\n");
2903 return -EIO;
2904 }
2905
2906 /* Read data from mailbox and increment destination pointer */
2907 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2908 dest++;
2909
2910 /* For all but the last word of data, signal data received */
2911 if (i < (length_in_words - 1)) {
2912 /* Signal dump data received - Clear IO debug Ack */
2913 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2914 ioa_cfg->regs.clr_interrupt_reg);
2915 }
2916 }
2917
2918 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2919 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002920 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
2922 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002923 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
2925 /* Signal dump data received - Clear IO debug Ack */
2926 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2927 ioa_cfg->regs.clr_interrupt_reg);
2928
2929 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2930 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2931 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002932 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933
2934 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2935 return 0;
2936
2937 udelay(10);
2938 delay += 10;
2939 }
2940
2941 return 0;
2942}
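/*
 * Summary of the long-dump handshake implemented above (non-sis64 path):
 * the driver raises RESET_ALERT and IO_DEBUG_ALERT, waits for the IOA's
 * IO debug acknowledge, writes the dump start address to the mailbox and
 * clears RESET_ALERT to mark it valid, then reads one word from the
 * mailbox per acknowledge. It finally re-raises RESET_ALERT to signal
 * end of transfer and waits for the IOA to clear it.
 */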
2943
2944#ifdef CONFIG_SCSI_IPR_DUMP
2945/**
2946 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2947 * @ioa_cfg: ioa config struct
2948 * @pci_address: adapter address
2949 * @length: length of data to copy
2950 *
2951 * Copy data from PCI adapter to kernel buffer.
2952 * Note: length MUST be a 4 byte multiple
2953 * Return value:
2954 * 0 on success / other on failure
2955 **/
2956static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2957 unsigned long pci_address, u32 length)
2958{
2959 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002960 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 __be32 *page;
2962 unsigned long lock_flags = 0;
2963 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2964
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002965 if (ioa_cfg->sis64)
2966 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2967 else
2968 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2969
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002971 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 if (ioa_dump->page_offset >= PAGE_SIZE ||
2973 ioa_dump->page_offset == 0) {
2974 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2975
2976 if (!page) {
2977 ipr_trace;
2978 return bytes_copied;
2979 }
2980
2981 ioa_dump->page_offset = 0;
2982 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2983 ioa_dump->next_page_index++;
2984 } else
2985 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2986
2987 rem_len = length - bytes_copied;
2988 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2989 cur_len = min(rem_len, rem_page_len);
2990
2991 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2992 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2993 rc = -EIO;
2994 } else {
2995 rc = ipr_get_ldump_data_section(ioa_cfg,
2996 pci_address + bytes_copied,
2997 &page[ioa_dump->page_offset / 4],
2998 (cur_len / sizeof(u32)));
2999 }
3000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3001
3002 if (!rc) {
3003 ioa_dump->page_offset += cur_len;
3004 bytes_copied += cur_len;
3005 } else {
3006 ipr_trace;
3007 break;
3008 }
3009 schedule();
3010 }
3011
3012 return bytes_copied;
3013}
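/*
 * Note: ipr_sdt_copy() gathers the dump in chunks no larger than one
 * page, taking the host lock only around each
 * ipr_get_ldump_data_section() call (where an ABORT_DUMP request is
 * also checked) and calling schedule() between chunks. The loop ends
 * early if a chunk fails or the maximum dump size is reached.
 */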
3014
3015/**
3016 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3017 * @hdr: dump entry header struct
3018 *
3019 * Return value:
3020 * nothing
3021 **/
3022static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3023{
3024 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3025 hdr->num_elems = 1;
3026 hdr->offset = sizeof(*hdr);
3027 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3028}
3029
3030/**
3031 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3032 * @ioa_cfg: ioa config struct
3033 * @driver_dump: driver dump struct
3034 *
3035 * Return value:
3036 * nothing
3037 **/
3038static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3039 struct ipr_driver_dump *driver_dump)
3040{
3041 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3042
3043 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3044 driver_dump->ioa_type_entry.hdr.len =
3045 sizeof(struct ipr_dump_ioa_type_entry) -
3046 sizeof(struct ipr_dump_entry_header);
3047 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3048 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3049 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3050 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3051 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3052 ucode_vpd->minor_release[1];
3053 driver_dump->hdr.num_entries++;
3054}
3055
3056/**
3057 * ipr_dump_version_data - Fill in the driver version in the dump.
3058 * @ioa_cfg: ioa config struct
3059 * @driver_dump: driver dump struct
3060 *
3061 * Return value:
3062 * nothing
3063 **/
3064static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3065 struct ipr_driver_dump *driver_dump)
3066{
3067 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3068 driver_dump->version_entry.hdr.len =
3069 sizeof(struct ipr_dump_version_entry) -
3070 sizeof(struct ipr_dump_entry_header);
3071 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3072 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3073 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3074 driver_dump->hdr.num_entries++;
3075}
3076
3077/**
3078 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3079 * @ioa_cfg: ioa config struct
3080 * @driver_dump: driver dump struct
3081 *
3082 * Return value:
3083 * nothing
3084 **/
3085static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3086 struct ipr_driver_dump *driver_dump)
3087{
3088 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3089 driver_dump->trace_entry.hdr.len =
3090 sizeof(struct ipr_dump_trace_entry) -
3091 sizeof(struct ipr_dump_entry_header);
3092 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3093 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3094 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3095 driver_dump->hdr.num_entries++;
3096}
3097
3098/**
3099 * ipr_dump_location_data - Fill in the IOA location in the dump.
3100 * @ioa_cfg: ioa config struct
3101 * @driver_dump: driver dump struct
3102 *
3103 * Return value:
3104 * nothing
3105 **/
3106static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3107 struct ipr_driver_dump *driver_dump)
3108{
3109 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3110 driver_dump->location_entry.hdr.len =
3111 sizeof(struct ipr_dump_location_entry) -
3112 sizeof(struct ipr_dump_entry_header);
3113 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3114 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01003115 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 driver_dump->hdr.num_entries++;
3117}
3118
3119/**
3120 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3121 * @ioa_cfg: ioa config struct
3122 * @dump: dump struct
3123 *
3124 * Return value:
3125 * nothing
3126 **/
3127static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3128{
3129 unsigned long start_addr, sdt_word;
3130 unsigned long lock_flags = 0;
3131 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3132 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003133 u32 num_entries, max_num_entries, start_off, end_off;
3134 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08003136 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 int i;
3138
3139 ENTER;
3140
3141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3142
Brian King41e9a692011-09-21 08:51:11 -05003143 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3145 return;
3146 }
3147
Wayne Boyer110def82010-11-04 09:36:16 -07003148 if (ioa_cfg->sis64) {
3149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150 ssleep(IPR_DUMP_DELAY_SECONDS);
3151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152 }
3153
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 start_addr = readl(ioa_cfg->ioa_mailbox);
3155
Wayne Boyerdcbad002010-02-19 13:24:14 -08003156 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 dev_err(&ioa_cfg->pdev->dev,
3158 "Invalid dump table format: %lx\n", start_addr);
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160 return;
3161 }
3162
3163 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3164
3165 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3166
3167 /* Initialize the overall dump header */
3168 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3169 driver_dump->hdr.num_entries = 1;
3170 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3171 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3172 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3173 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3174
3175 ipr_dump_version_data(ioa_cfg, driver_dump);
3176 ipr_dump_location_data(ioa_cfg, driver_dump);
3177 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3178 ipr_dump_trace_data(ioa_cfg, driver_dump);
3179
3180 /* Update dump_header */
3181 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3182
3183 /* IOA Dump entry */
3184 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185 ioa_dump->hdr.len = 0;
3186 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3187 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3188
3189 /* First entries in sdt are actually a list of dump addresses and
3190 lengths to gather the real dump data. sdt represents the pointer
3191 to the ioa generated dump table. Dump data will be extracted based
3192 on entries in this table */
3193 sdt = &ioa_dump->sdt;
3194
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003195 if (ioa_cfg->sis64) {
3196 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3197 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3198 } else {
3199 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3200 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3201 }
3202
3203 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3204 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003206 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
3208 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003209 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3210 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 dev_err(&ioa_cfg->pdev->dev,
3212 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3213 rc, be32_to_cpu(sdt->hdr.state));
3214 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3215 ioa_cfg->sdt_state = DUMP_OBTAINED;
3216 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3217 return;
3218 }
3219
3220 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3221
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003222 if (num_entries > max_num_entries)
3223 num_entries = max_num_entries;
3224
3225 /* Update dump length to the actual data to be copied */
3226 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3227 if (ioa_cfg->sis64)
3228 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3229 else
3230 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231
3232 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233
3234 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003235 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3237 break;
3238 }
3239
3240 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003241 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3242 if (ioa_cfg->sis64)
3243 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3244 else {
3245 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3246 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247
Wayne Boyerdcbad002010-02-19 13:24:14 -08003248 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3249 bytes_to_copy = end_off - start_off;
3250 else
3251 valid = 0;
3252 }
3253 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003254 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3256 continue;
3257 }
3258
3259 /* Copy data from adapter to driver buffers */
3260 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3261 bytes_to_copy);
3262
3263 ioa_dump->hdr.len += bytes_copied;
3264
3265 if (bytes_copied != bytes_to_copy) {
3266 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3267 break;
3268 }
3269 }
3270 }
3271 }
3272
3273 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3274
3275 /* Update dump_header */
3276 driver_dump->hdr.len += ioa_dump->hdr.len;
3277 wmb();
3278 ioa_cfg->sdt_state = DUMP_OBTAINED;
3279 LEAVE;
3280}
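/*
 * Note: ipr_get_ioa_dump() first pulls the smart dump table (SDT)
 * header and entries from the address in the IOA mailbox, validates the
 * SDT state, and only then walks the valid entries, copying each
 * address/length pair into the driver dump buffers via ipr_sdt_copy().
 */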
3281
3282#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003283#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284#endif
3285
3286/**
3287 * ipr_release_dump - Free adapter dump memory
3288 * @kref: kref struct
3289 *
3290 * Return value:
3291 * nothing
3292 **/
3293static void ipr_release_dump(struct kref *kref)
3294{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003295 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3297 unsigned long lock_flags = 0;
3298 int i;
3299
3300 ENTER;
3301 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3302 ioa_cfg->dump = NULL;
3303 ioa_cfg->sdt_state = INACTIVE;
3304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305
3306 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3307 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3308
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003309 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 kfree(dump);
3311 LEAVE;
3312}
3313
3314/**
3315 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003316 * @work: work struct (embedded in the ioa config struct)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317 *
3318 * Called at task level from a work thread. This function takes care
3319 * of adding and removing device from the mid-layer as configuration
3320 * changes are detected by the adapter.
3321 *
3322 * Return value:
3323 * nothing
3324 **/
David Howellsc4028952006-11-22 14:57:56 +00003325static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326{
3327 unsigned long lock_flags;
3328 struct ipr_resource_entry *res;
3329 struct scsi_device *sdev;
3330 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003331 struct ipr_ioa_cfg *ioa_cfg =
3332 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333 u8 bus, target, lun;
3334 int did_work;
3335
3336 ENTER;
3337 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3338
Brian King41e9a692011-09-21 08:51:11 -05003339 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 dump = ioa_cfg->dump;
3341 if (!dump) {
3342 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343 return;
3344 }
3345 kref_get(&dump->kref);
3346 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347 ipr_get_ioa_dump(ioa_cfg, dump);
3348 kref_put(&dump->kref, ipr_release_dump);
3349
3350 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003351 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3354 return;
3355 }
3356
Brian Kingb0e17a92017-08-01 10:21:30 -05003357 if (ioa_cfg->scsi_unblock) {
3358 ioa_cfg->scsi_unblock = 0;
3359 ioa_cfg->scsi_blocked = 0;
3360 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3361 scsi_unblock_requests(ioa_cfg->host);
3362 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3363 if (ioa_cfg->scsi_blocked)
3364 scsi_block_requests(ioa_cfg->host);
3365 }
3366
Brian Kingb195d5e2016-07-15 14:48:03 -05003367 if (!ioa_cfg->scan_enabled) {
3368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369 return;
3370 }
3371
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372restart:
3373 do {
3374 did_work = 0;
Brian Kingf688f962014-12-02 12:47:37 -06003375 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377 return;
3378 }
3379
3380 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3381 if (res->del_from_ml && res->sdev) {
3382 did_work = 1;
3383 sdev = res->sdev;
3384 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003385 if (!res->add_to_ml)
3386 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3387 else
3388 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3390 scsi_remove_device(sdev);
3391 scsi_device_put(sdev);
3392 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3393 }
3394 break;
3395 }
3396 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003397 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
3399 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3400 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003401 bus = res->bus;
3402 target = res->target;
3403 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003404 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3406 scsi_add_device(ioa_cfg->host, bus, target, lun);
3407 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3408 goto restart;
3409 }
3410 }
3411
Brian Kingf688f962014-12-02 12:47:37 -06003412 ioa_cfg->scan_done = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003414 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 LEAVE;
3416}
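/*
 * Note: ipr_worker_thread() drops the host lock around
 * scsi_remove_device() and scsi_add_device(), so the used resource
 * queue may change underneath it; that is why the did_work loop and the
 * restart label re-scan the list from the beginning after each device
 * that is added or removed.
 */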
3417
3418#ifdef CONFIG_SCSI_IPR_TRACE
3419/**
3420 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003421 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003423 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 * @buf: buffer
3425 * @off: offset
3426 * @count: buffer size
3427 *
3428 * Return value:
3429 * number of bytes printed to buffer
3430 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003431static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003432 struct bin_attribute *bin_attr,
3433 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434{
Tony Jonesee959b02008-02-22 00:13:36 +01003435 struct device *dev = container_of(kobj, struct device, kobj);
3436 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3438 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003439 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440
3441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003442 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3443 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003445
3446 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447}
3448
3449static struct bin_attribute ipr_trace_attr = {
3450 .attr = {
3451 .name = "trace",
3452 .mode = S_IRUGO,
3453 },
3454 .size = 0,
3455 .read = ipr_read_trace,
3456};
3457#endif
3458
3459/**
3460 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003461 * @dev: class device struct
3462 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 *
3464 * Return value:
3465 * number of bytes printed to buffer
3466 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003467static ssize_t ipr_show_fw_version(struct device *dev,
3468 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469{
Tony Jonesee959b02008-02-22 00:13:36 +01003470 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3472 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3473 unsigned long lock_flags = 0;
3474 int len;
3475
3476 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3477 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3478 ucode_vpd->major_release, ucode_vpd->card_type,
3479 ucode_vpd->minor_release[0],
3480 ucode_vpd->minor_release[1]);
3481 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482 return len;
3483}
3484
Tony Jonesee959b02008-02-22 00:13:36 +01003485static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 .attr = {
3487 .name = "fw_version",
3488 .mode = S_IRUGO,
3489 },
3490 .show = ipr_show_fw_version,
3491};
3492
3493/**
3494 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003495 * @dev: class device struct
3496 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 *
3498 * Return value:
3499 * number of bytes printed to buffer
3500 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003501static ssize_t ipr_show_log_level(struct device *dev,
3502 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503{
Tony Jonesee959b02008-02-22 00:13:36 +01003504 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3506 unsigned long lock_flags = 0;
3507 int len;
3508
3509 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3510 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3512 return len;
3513}
3514
3515/**
3516 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003517 * @dev: class device struct
3518 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 *
3520 * Return value:
3521 * 	number of bytes consumed from buffer
3522 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003523static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003524 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 const char *buf, size_t count)
3526{
Tony Jonesee959b02008-02-22 00:13:36 +01003527 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3529 unsigned long lock_flags = 0;
3530
3531 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3532 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3533 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3534 return strlen(buf);
3535}
3536
Tony Jonesee959b02008-02-22 00:13:36 +01003537static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 .attr = {
3539 .name = "log_level",
3540 .mode = S_IRUGO | S_IWUSR,
3541 },
3542 .show = ipr_show_log_level,
3543 .store = ipr_store_log_level
3544};
3545
3546/**
3547 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003548 * @dev: device struct
3549 * @buf: buffer
3550 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 *
3552 * This function will reset the adapter and wait a reasonable
3553 * amount of time for any errors that the adapter might log.
3554 *
3555 * Return value:
3556 * count on success / other on failure
3557 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003558static ssize_t ipr_store_diagnostics(struct device *dev,
3559 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560 const char *buf, size_t count)
3561{
Tony Jonesee959b02008-02-22 00:13:36 +01003562 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3564 unsigned long lock_flags = 0;
3565 int rc = count;
3566
3567 if (!capable(CAP_SYS_ADMIN))
3568 return -EACCES;
3569
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003571 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3573 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3574 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3575 }
3576
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 ioa_cfg->errors_logged = 0;
3578 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3579
3580 if (ioa_cfg->in_reset_reload) {
3581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3582 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3583
3584 /* Wait for a second for any errors to be logged */
3585 msleep(1000);
3586 } else {
3587 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3588 return -EIO;
3589 }
3590
3591 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3592 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3593 rc = -EIO;
3594 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595
3596 return rc;
3597}
3598
Tony Jonesee959b02008-02-22 00:13:36 +01003599static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 .attr = {
3601 .name = "run_diagnostics",
3602 .mode = S_IWUSR,
3603 },
3604 .store = ipr_store_diagnostics
3605};
3606
3607/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003608 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003609 * @dev: device struct
3610 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003611 *
3612 * Return value:
3613 * number of bytes printed to buffer
3614 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003615static ssize_t ipr_show_adapter_state(struct device *dev,
3616 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003617{
Tony Jonesee959b02008-02-22 00:13:36 +01003618 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003619 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3620 unsigned long lock_flags = 0;
3621 int len;
3622
3623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003624 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003625 len = snprintf(buf, PAGE_SIZE, "offline\n");
3626 else
3627 len = snprintf(buf, PAGE_SIZE, "online\n");
3628 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3629 return len;
3630}
3631
3632/**
3633 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003634 * @dev: device struct
3635 * @buf: buffer
3636 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003637 *
3638 * This function will change the adapter's state.
3639 *
3640 * Return value:
3641 * count on success / other on failure
3642 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003643static ssize_t ipr_store_adapter_state(struct device *dev,
3644 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003645 const char *buf, size_t count)
3646{
Tony Jonesee959b02008-02-22 00:13:36 +01003647 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003648 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3649 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003650 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003651
3652 if (!capable(CAP_SYS_ADMIN))
3653 return -EACCES;
3654
3655 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003656 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3657 !strncmp(buf, "online", 6)) {
3658 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3659 spin_lock(&ioa_cfg->hrrq[i]._lock);
3660 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3661 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3662 }
3663 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003664 ioa_cfg->reset_retries = 0;
3665 ioa_cfg->in_ioa_bringdown = 0;
3666 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3667 }
3668 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3669 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3670
3671 return result;
3672}
3673
Tony Jonesee959b02008-02-22 00:13:36 +01003674static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003675 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003676 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003677 .mode = S_IRUGO | S_IWUSR,
3678 },
3679 .show = ipr_show_adapter_state,
3680 .store = ipr_store_adapter_state
3681};
3682
3683/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003685 * @dev: device struct
3686 * @buf: buffer
3687 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 *
3689 * This function will reset the adapter.
3690 *
3691 * Return value:
3692 * count on success / other on failure
3693 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003694static ssize_t ipr_store_reset_adapter(struct device *dev,
3695 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696 const char *buf, size_t count)
3697{
Tony Jonesee959b02008-02-22 00:13:36 +01003698 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3700 unsigned long lock_flags;
3701 int result = count;
3702
3703 if (!capable(CAP_SYS_ADMIN))
3704 return -EACCES;
3705
3706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3707 if (!ioa_cfg->in_reset_reload)
3708 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3709 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3710 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3711
3712 return result;
3713}
3714
Tony Jonesee959b02008-02-22 00:13:36 +01003715static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716 .attr = {
3717 .name = "reset_host",
3718 .mode = S_IWUSR,
3719 },
3720 .store = ipr_store_reset_adapter
3721};
3722
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003723static int ipr_iopoll(struct irq_poll *iop, int budget);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003724 /**
3725 * ipr_show_iopoll_weight - Show ipr polling mode
3726 * @dev: class device struct
3727 * @buf: buffer
3728 *
3729 * Return value:
3730 * number of bytes printed to buffer
3731 **/
3732static ssize_t ipr_show_iopoll_weight(struct device *dev,
3733 struct device_attribute *attr, char *buf)
3734{
3735 struct Scsi_Host *shost = class_to_shost(dev);
3736 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3737 unsigned long lock_flags = 0;
3738 int len;
3739
3740 spin_lock_irqsave(shost->host_lock, lock_flags);
3741 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3742 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3743
3744 return len;
3745}
3746
3747/**
3748 * ipr_store_iopoll_weight - Change the adapter's polling mode
3749 * @dev: class device struct
3750 * @buf: buffer
3751 *
3752 * Return value:
3753 * 	number of bytes consumed from buffer / -EINVAL on failure
3754 **/
3755static ssize_t ipr_store_iopoll_weight(struct device *dev,
3756 struct device_attribute *attr,
3757 const char *buf, size_t count)
3758{
3759 struct Scsi_Host *shost = class_to_shost(dev);
3760 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3761 unsigned long user_iopoll_weight;
3762 unsigned long lock_flags = 0;
3763 int i;
3764
3765 if (!ioa_cfg->sis64) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003766 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003767 return -EINVAL;
3768 }
3769 if (kstrtoul(buf, 10, &user_iopoll_weight))
3770 return -EINVAL;
3771
3772 if (user_iopoll_weight > 256) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003773 dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must be less than 256\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003774 return -EINVAL;
3775 }
3776
3777 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003778 dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight has the same weight\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003779 return strlen(buf);
3780 }
3781
Jens Axboe89f8b332014-03-13 09:38:42 -06003782 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003783 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003784 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003785 }
3786
3787 spin_lock_irqsave(shost->host_lock, lock_flags);
3788 ioa_cfg->iopoll_weight = user_iopoll_weight;
Jens Axboe89f8b332014-03-13 09:38:42 -06003789 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003790 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003791 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003792 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003793 }
3794 }
3795 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3796
3797 return strlen(buf);
3798}
3799
3800static struct device_attribute ipr_iopoll_weight_attr = {
3801 .attr = {
3802 .name = "iopoll_weight",
3803 .mode = S_IRUGO | S_IWUSR,
3804 },
3805 .show = ipr_show_iopoll_weight,
3806 .store = ipr_store_iopoll_weight
3807};
3808
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809/**
3810 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3811 * @buf_len: buffer length
3812 *
3813 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3814 * list to use for microcode download
3815 *
3816 * Return value:
3817 * pointer to sglist / NULL on failure
3818 **/
3819static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3820{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003821 int sg_size, order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823
3824 /* Get the minimum size per scatter/gather element */
3825 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3826
3827 /* Get the actual size per element */
3828 order = get_order(sg_size);
3829
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 /* Allocate a scatter/gather list for the DMA */
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003831 sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 if (sglist == NULL) {
3833 ipr_trace;
3834 return NULL;
3835 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 sglist->order = order;
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003837 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3838 &sglist->num_sg);
3839 if (!sglist->scatterlist) {
3840 kfree(sglist);
3841 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842 }
3843
3844 return sglist;
3845}
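/*
 * Worked example (illustrative, assuming IPR_MAX_SGLIST is 64 and 4 KB
 * pages): a 4 MB microcode image gives sg_size = 4194304 / 63 = 66576
 * bytes, get_order(66576) = 5, so sgl_alloc_order() builds the list out
 * of 128 KB (order-5) scatter/gather elements.
 */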
3846
3847/**
3848 * ipr_free_ucode_buffer - Frees a microcode download buffer
3849 * @p_dnld: scatter/gather list pointer
3850 *
3851 * Free a DMA'able ucode download buffer previously allocated with
3852 * ipr_alloc_ucode_buffer
3853 *
3854 * Return value:
3855 * nothing
3856 **/
3857static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3858{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003859 sgl_free_order(sglist->scatterlist, sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860 kfree(sglist);
3861}
3862
3863/**
3864 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3865 * @sglist: scatter/gather list pointer
3866 * @buffer: buffer pointer
3867 * @len: buffer length
3868 *
3869 * Copy a microcode image from a user buffer into a buffer allocated by
3870 * ipr_alloc_ucode_buffer
3871 *
3872 * Return value:
3873 * 0 on success / other on failure
3874 **/
3875static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3876 u8 *buffer, u32 len)
3877{
3878 int bsize_elem, i, result = 0;
3879 struct scatterlist *scatterlist;
3880 void *kaddr;
3881
3882 /* Determine the actual number of bytes per element */
3883 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3884
3885 scatterlist = sglist->scatterlist;
3886
3887 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003888 struct page *page = sg_page(&scatterlist[i]);
3889
3890 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003892 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893
3894 scatterlist[i].length = bsize_elem;
3895
3896 if (result != 0) {
3897 ipr_trace;
3898 return result;
3899 }
3900 }
3901
3902 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003903 struct page *page = sg_page(&scatterlist[i]);
3904
3905 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003907 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908
3909 scatterlist[i].length = len % bsize_elem;
3910 }
3911
3912 sglist->buffer_len = len;
3913 return result;
3914}
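/*
 * Note: bsize_elem above is PAGE_SIZE << sglist->order, the size of one
 * scatter/gather element; full elements are copied first and any
 * remainder (len % bsize_elem) lands in the final element, whose length
 * is trimmed accordingly before buffer_len is recorded.
 */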
3915
3916/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003917 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3918 * @ipr_cmd: ipr command struct
3919 * @sglist: scatter/gather list
3920 *
3921 * Builds a microcode download IOA data list (IOADL).
3922 *
3923 **/
3924static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3925 struct ipr_sglist *sglist)
3926{
3927 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3928 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3929 struct scatterlist *scatterlist = sglist->scatterlist;
3930 int i;
3931
3932 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3933 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3934 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3935
3936 ioarcb->ioadl_len =
3937 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3938 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3939 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3940 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3941 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3942 }
3943
3944 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3945}
3946
3947/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003948 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 * @ipr_cmd: ipr command struct
3950 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003952 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003955static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3956 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003959 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960 struct scatterlist *scatterlist = sglist->scatterlist;
3961 int i;
3962
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003963 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003965 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3966
3967 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3969
3970 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3971 ioadl[i].flags_and_data_len =
3972 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3973 ioadl[i].address =
3974 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3975 }
3976
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003977 ioadl[i-1].flags_and_data_len |=
3978 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3979}
3980
3981/**
3982 * ipr_update_ioa_ucode - Update IOA's microcode
3983 * @ioa_cfg: ioa config struct
3984 * @sglist: scatter/gather list
3985 *
3986 * Initiate an adapter reset to update the IOA's microcode
3987 *
3988 * Return value:
3989 * 0 on success / -EIO on failure
3990 **/
3991static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3992 struct ipr_sglist *sglist)
3993{
3994 unsigned long lock_flags;
3995
3996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003997 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003998 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3999 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4000 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4001 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004002
4003 if (ioa_cfg->ucode_sglist) {
4004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4005 dev_err(&ioa_cfg->pdev->dev,
4006 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 return -EIO;
4008 }
4009
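	/* Map the microcode buffer for DMA before handing it to the adapter */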
Anton Blanchardd73341b2014-10-30 17:27:08 -05004010 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4011 sglist->scatterlist, sglist->num_sg,
4012 DMA_TO_DEVICE);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004013
4014 if (!sglist->num_dma_sg) {
4015 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4016 dev_err(&ioa_cfg->pdev->dev,
4017 "Failed to map microcode download buffer!\n");
4018 return -EIO;
4019 }
4020
4021 ioa_cfg->ucode_sglist = sglist;
4022 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4024 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4025
4026 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027 ioa_cfg->ucode_sglist = NULL;
4028 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029 return 0;
4030}
4031
4032/**
4033 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev: device struct
 * @attr: device attribute structure
4035 * @buf: buffer
4036 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037 *
4038 * This function will update the firmware on the adapter.
4039 *
4040 * Return value:
4041 * count on success / other on failure
4042 **/
Tony Jonesee959b02008-02-22 00:13:36 +01004043static ssize_t ipr_store_update_fw(struct device *dev,
4044 struct device_attribute *attr,
4045 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046{
Tony Jonesee959b02008-02-22 00:13:36 +01004047 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4049 struct ipr_ucode_image_header *image_hdr;
4050 const struct firmware *fw_entry;
4051 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 char fname[100];
4053 char *src;
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004054 char *endline;
Insu Yund63c7dd2016-01-06 12:44:01 -05004055 int result, dnld_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056
4057 if (!capable(CAP_SYS_ADMIN))
4058 return -EACCES;
4059
Insu Yund63c7dd2016-01-06 12:44:01 -05004060 snprintf(fname, sizeof(fname), "%s", buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004062 endline = strchr(fname, '\n');
4063 if (endline)
4064 *endline = '\0';
4065
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004066 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4068 return -EIO;
4069 }
4070
4071 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4072
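	/* Skip past the image header; only the microcode payload is downloaded */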
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4074 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4075 sglist = ipr_alloc_ucode_buffer(dnld_size);
4076
4077 if (!sglist) {
4078 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4079 release_firmware(fw_entry);
4080 return -ENOMEM;
4081 }
4082
4083 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4084
4085 if (result) {
4086 dev_err(&ioa_cfg->pdev->dev,
4087 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004088 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 }
4090
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07004091 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4092
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004093 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004095 if (!result)
4096 result = count;
4097out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098 ipr_free_ucode_buffer(sglist);
4099 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004100 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101}
4102
Tony Jonesee959b02008-02-22 00:13:36 +01004103static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 .attr = {
4105 .name = "update_fw",
4106 .mode = S_IWUSR,
4107 },
4108 .store = ipr_store_update_fw
4109};
4110
Wayne Boyer75576bb2010-07-14 10:50:14 -07004111/**
4112 * ipr_show_fw_type - Show the adapter's firmware type.
4113 * @dev: class device struct
4114 * @buf: buffer
4115 *
4116 * Return value:
4117 * number of bytes printed to buffer
4118 **/
4119static ssize_t ipr_show_fw_type(struct device *dev,
4120 struct device_attribute *attr, char *buf)
4121{
4122 struct Scsi_Host *shost = class_to_shost(dev);
4123 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4124 unsigned long lock_flags = 0;
4125 int len;
4126
4127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4128 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4129 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4130 return len;
4131}
4132
4133static struct device_attribute ipr_ioa_fw_type_attr = {
4134 .attr = {
4135 .name = "fw_type",
4136 .mode = S_IRUGO,
4137 },
4138 .show = ipr_show_fw_type
4139};
4140
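/**
 * ipr_read_async_err_log - Read the next asynchronous error log entry
 * @filep: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Copies the oldest host RCB on the report queue into the buffer.
 *
 * Return value:
 *	number of bytes read / 0 if no entry is available
 **/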
Brian Kingafc3f832016-08-24 12:56:51 -05004141static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4142 struct bin_attribute *bin_attr, char *buf,
4143 loff_t off, size_t count)
4144{
4145 struct device *cdev = container_of(kobj, struct device, kobj);
4146 struct Scsi_Host *shost = class_to_shost(cdev);
4147 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4148 struct ipr_hostrcb *hostrcb;
4149 unsigned long lock_flags = 0;
4150 int ret;
4151
4152 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4153 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4154 struct ipr_hostrcb, queue);
4155 if (!hostrcb) {
4156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4157 return 0;
4158 }
4159 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4160 sizeof(hostrcb->hcam));
4161 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4162 return ret;
4163}
4164
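/**
 * ipr_next_async_err_log - Advance to the next asynchronous error log entry
 * @filep: open sysfs file
 * @kobj: kobject struct
 * @bin_attr: bin_attribute struct
 * @buf: buffer
 * @off: offset
 * @count: buffer size
 *
 * Writing to this attribute retires the oldest host RCB on the report
 * queue and returns it to the free queue.
 *
 * Return value:
 *	count
 **/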
4165static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4166 struct bin_attribute *bin_attr, char *buf,
4167 loff_t off, size_t count)
4168{
4169 struct device *cdev = container_of(kobj, struct device, kobj);
4170 struct Scsi_Host *shost = class_to_shost(cdev);
4171 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4172 struct ipr_hostrcb *hostrcb;
4173 unsigned long lock_flags = 0;
4174
4175 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4176 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4177 struct ipr_hostrcb, queue);
4178 if (!hostrcb) {
4179 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4180 return count;
4181 }
4182
4183 /* Reclaim hostrcb before exit */
4184 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4185 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4186 return count;
4187}
4188
4189static struct bin_attribute ipr_ioa_async_err_log = {
4190 .attr = {
4191 .name = "async_err_log",
4192 .mode = S_IRUGO | S_IWUSR,
4193 },
4194 .size = 0,
4195 .read = ipr_read_async_err_log,
4196 .write = ipr_next_async_err_log
4197};
4198
Tony Jonesee959b02008-02-22 00:13:36 +01004199static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200 &ipr_fw_version_attr,
4201 &ipr_log_level_attr,
4202 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004203 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 &ipr_ioa_reset_attr,
4205 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004206 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004207 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 NULL,
4209};
4210
4211#ifdef CONFIG_SCSI_IPR_DUMP
4212/**
4213 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004214 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004216 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 * @buf: buffer
4218 * @off: offset
4219 * @count: buffer size
4220 *
4221 * Return value:
 *	number of bytes read from the adapter dump
4223 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004224static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004225 struct bin_attribute *bin_attr,
4226 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227{
Tony Jonesee959b02008-02-22 00:13:36 +01004228 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229 struct Scsi_Host *shost = class_to_shost(cdev);
4230 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4231 struct ipr_dump *dump;
4232 unsigned long lock_flags = 0;
4233 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004234 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 size_t rc = count;
4236
4237 if (!capable(CAP_SYS_ADMIN))
4238 return -EACCES;
4239
4240 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4241 dump = ioa_cfg->dump;
4242
4243 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4244 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4245 return 0;
4246 }
4247 kref_get(&dump->kref);
4248 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249
4250 if (off > dump->driver_dump.hdr.len) {
4251 kref_put(&dump->kref, ipr_release_dump);
4252 return 0;
4253 }
4254
4255 if (off + count > dump->driver_dump.hdr.len) {
4256 count = dump->driver_dump.hdr.len - off;
4257 rc = count;
4258 }
4259
4260 if (count && off < sizeof(dump->driver_dump)) {
4261 if (off + count > sizeof(dump->driver_dump))
4262 len = sizeof(dump->driver_dump) - off;
4263 else
4264 len = count;
4265 src = (u8 *)&dump->driver_dump + off;
4266 memcpy(buf, src, len);
4267 buf += len;
4268 off += len;
4269 count -= len;
4270 }
4271
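	/* Make the remaining offset relative to the start of the IOA dump */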
4272 off -= sizeof(dump->driver_dump);
4273
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004274 if (ioa_cfg->sis64)
4275 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4276 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4277 sizeof(struct ipr_sdt_entry));
4278 else
4279 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4280 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4281
4282 if (count && off < sdt_end) {
4283 if (off + count > sdt_end)
4284 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 else
4286 len = count;
4287 src = (u8 *)&dump->ioa_dump + off;
4288 memcpy(buf, src, len);
4289 buf += len;
4290 off += len;
4291 count -= len;
4292 }
4293
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004294 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295
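	/* Copy the rest of the request from the IOA dump data pages */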
4296 while (count) {
4297 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4298 len = PAGE_ALIGN(off) - off;
4299 else
4300 len = count;
4301 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4302 src += off & ~PAGE_MASK;
4303 memcpy(buf, src, len);
4304 buf += len;
4305 off += len;
4306 count -= len;
4307 }
4308
4309 kref_put(&dump->kref, ipr_release_dump);
4310 return rc;
4311}
4312
4313/**
4314 * ipr_alloc_dump - Prepare for adapter dump
4315 * @ioa_cfg: ioa config struct
4316 *
4317 * Return value:
4318 * 0 on success / other on failure
4319 **/
4320static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4321{
4322 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004323 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 unsigned long lock_flags = 0;
4325
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004326 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327
4328 if (!dump) {
4329 ipr_err("Dump memory allocation failed\n");
4330 return -ENOMEM;
4331 }
4332
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004333 if (ioa_cfg->sis64)
4334 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4335 else
4336 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4337
4338 if (!ioa_data) {
4339 ipr_err("Dump memory allocation failed\n");
4340 kfree(dump);
4341 return -ENOMEM;
4342 }
4343
4344 dump->ioa_dump.ioa_data = ioa_data;
4345
Linus Torvalds1da177e2005-04-16 15:20:36 -07004346 kref_init(&dump->kref);
4347 dump->ioa_cfg = ioa_cfg;
4348
4349 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4350
4351 if (INACTIVE != ioa_cfg->sdt_state) {
4352 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004353 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354 kfree(dump);
4355 return 0;
4356 }
4357
4358 ioa_cfg->dump = dump;
4359 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004360 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004361 ioa_cfg->dump_taken = 1;
4362 schedule_work(&ioa_cfg->work_q);
4363 }
4364 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4365
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366 return 0;
4367}
4368
4369/**
4370 * ipr_free_dump - Free adapter dump memory
4371 * @ioa_cfg: ioa config struct
4372 *
4373 * Return value:
4374 * 0 on success / other on failure
4375 **/
4376static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4377{
4378 struct ipr_dump *dump;
4379 unsigned long lock_flags = 0;
4380
4381 ENTER;
4382
4383 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4384 dump = ioa_cfg->dump;
4385 if (!dump) {
4386 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4387 return 0;
4388 }
4389
4390 ioa_cfg->dump = NULL;
4391 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4392
4393 kref_put(&dump->kref, ipr_release_dump);
4394
4395 LEAVE;
4396 return 0;
4397}
4398
4399/**
4400 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004401 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004403 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 * @buf: buffer
4405 * @off: offset
4406 * @count: buffer size
4407 *
4408 * Return value:
 *	count on success / other on failure
4410 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004411static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004412 struct bin_attribute *bin_attr,
4413 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414{
Tony Jonesee959b02008-02-22 00:13:36 +01004415 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416 struct Scsi_Host *shost = class_to_shost(cdev);
4417 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4418 int rc;
4419
4420 if (!capable(CAP_SYS_ADMIN))
4421 return -EACCES;
4422
4423 if (buf[0] == '1')
4424 rc = ipr_alloc_dump(ioa_cfg);
4425 else if (buf[0] == '0')
4426 rc = ipr_free_dump(ioa_cfg);
4427 else
4428 return -EINVAL;
4429
4430 if (rc)
4431 return rc;
4432 else
4433 return count;
4434}
4435
4436static struct bin_attribute ipr_dump_attr = {
4437 .attr = {
4438 .name = "dump",
4439 .mode = S_IRUSR | S_IWUSR,
4440 },
4441 .size = 0,
4442 .read = ipr_read_dump,
4443 .write = ipr_write_dump
4444};
4445#else
4446static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4447#endif
4448
4449/**
4450 * ipr_change_queue_depth - Change the device's queue depth
4451 * @sdev: scsi device struct
4452 * @qdepth: depth to set
Linus Torvalds1da177e2005-04-16 15:20:36 -07004454 *
4455 * Return value:
4456 * actual depth set
4457 **/
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004458static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004459{
Brian King35a39692006-09-25 12:39:20 -05004460 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4461 struct ipr_resource_entry *res;
4462 unsigned long lock_flags = 0;
4463
4464 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4465 res = (struct ipr_resource_entry *)sdev->hostdata;
4466
4467 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4468 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4469 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4470
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004471 scsi_change_queue_depth(sdev, qdepth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004472 return sdev->queue_depth;
4473}
4474
4475/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004476 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4477 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004478 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004479 * @buf: buffer
4480 *
4481 * Return value:
4482 * number of bytes printed to buffer
4483 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004484static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004485{
4486 struct scsi_device *sdev = to_scsi_device(dev);
4487 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4488 struct ipr_resource_entry *res;
4489 unsigned long lock_flags = 0;
4490 ssize_t len = -ENXIO;
4491
4492 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4493 res = (struct ipr_resource_entry *)sdev->hostdata;
4494 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004495 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004496 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4497 return len;
4498}
4499
4500static struct device_attribute ipr_adapter_handle_attr = {
4501 .attr = {
4502 .name = "adapter_handle",
4503 .mode = S_IRUSR,
4504 },
4505 .show = ipr_show_adapter_handle
4506};
4507
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004508/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004509 * ipr_show_resource_path - Show the resource path or the resource address for
4510 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004511 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004512 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004513 * @buf: buffer
4514 *
4515 * Return value:
4516 * number of bytes printed to buffer
4517 **/
4518static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4519{
4520 struct scsi_device *sdev = to_scsi_device(dev);
4521 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4522 struct ipr_resource_entry *res;
4523 unsigned long lock_flags = 0;
4524 ssize_t len = -ENXIO;
4525 char buffer[IPR_MAX_RES_PATH_LENGTH];
4526
4527 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4528 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004529 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004530 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004531 __ipr_format_res_path(res->res_path, buffer,
4532 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004533 else if (res)
4534 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4535 res->bus, res->target, res->lun);
4536
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004537 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4538 return len;
4539}
4540
4541static struct device_attribute ipr_resource_path_attr = {
4542 .attr = {
4543 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004544 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004545 },
4546 .show = ipr_show_resource_path
4547};
4548
Wayne Boyer75576bb2010-07-14 10:50:14 -07004549/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004550 * ipr_show_device_id - Show the device_id for this device.
4551 * @dev: device struct
4552 * @attr: device attribute structure
4553 * @buf: buffer
4554 *
4555 * Return value:
4556 * number of bytes printed to buffer
4557 **/
4558static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4559{
4560 struct scsi_device *sdev = to_scsi_device(dev);
4561 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4562 struct ipr_resource_entry *res;
4563 unsigned long lock_flags = 0;
4564 ssize_t len = -ENXIO;
4565
4566 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4567 res = (struct ipr_resource_entry *)sdev->hostdata;
4568 if (res && ioa_cfg->sis64)
Wen Xiongbb8647e2015-06-11 20:45:18 -05004569 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
Wayne Boyer46d74562010-08-11 07:15:17 -07004570 else if (res)
4571 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4572
4573 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4574 return len;
4575}
4576
4577static struct device_attribute ipr_device_id_attr = {
4578 .attr = {
4579 .name = "device_id",
4580 .mode = S_IRUGO,
4581 },
4582 .show = ipr_show_device_id
4583};
4584
4585/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004586 * ipr_show_resource_type - Show the resource type for this device.
4587 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004588 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004589 * @buf: buffer
4590 *
4591 * Return value:
4592 * number of bytes printed to buffer
4593 **/
4594static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4595{
4596 struct scsi_device *sdev = to_scsi_device(dev);
4597 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4598 struct ipr_resource_entry *res;
4599 unsigned long lock_flags = 0;
4600 ssize_t len = -ENXIO;
4601
4602 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4603 res = (struct ipr_resource_entry *)sdev->hostdata;
4604
4605 if (res)
4606 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4607
4608 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4609 return len;
4610}
4611
4612static struct device_attribute ipr_resource_type_attr = {
4613 .attr = {
4614 .name = "resource_type",
4615 .mode = S_IRUGO,
4616 },
4617 .show = ipr_show_resource_type
4618};
4619
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004620/**
 * ipr_show_raw_mode - Show the device's raw mode
4622 * @dev: class device struct
4623 * @buf: buffer
4624 *
4625 * Return value:
4626 * number of bytes printed to buffer
4627 **/
4628static ssize_t ipr_show_raw_mode(struct device *dev,
4629 struct device_attribute *attr, char *buf)
4630{
4631 struct scsi_device *sdev = to_scsi_device(dev);
4632 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4633 struct ipr_resource_entry *res;
4634 unsigned long lock_flags = 0;
4635 ssize_t len;
4636
4637 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4638 res = (struct ipr_resource_entry *)sdev->hostdata;
4639 if (res)
4640 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4641 else
4642 len = -ENXIO;
4643 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4644 return len;
4645}
4646
4647/**
 * ipr_store_raw_mode - Change the device's raw mode
4649 * @dev: class device struct
4650 * @buf: buffer
4651 *
4652 * Return value:
 *	number of bytes consumed on success / other on failure
4654 **/
4655static ssize_t ipr_store_raw_mode(struct device *dev,
4656 struct device_attribute *attr,
4657 const char *buf, size_t count)
4658{
4659 struct scsi_device *sdev = to_scsi_device(dev);
4660 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4661 struct ipr_resource_entry *res;
4662 unsigned long lock_flags = 0;
4663 ssize_t len;
4664
4665 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4666 res = (struct ipr_resource_entry *)sdev->hostdata;
4667 if (res) {
Gabriel Krisman Bertazie35d7f272015-08-19 11:47:06 -03004668 if (ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004669 res->raw_mode = simple_strtoul(buf, NULL, 10);
4670 len = strlen(buf);
4671 if (res->sdev)
4672 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4673 res->raw_mode ? "enabled" : "disabled");
4674 } else
4675 len = -EINVAL;
4676 } else
4677 len = -ENXIO;
4678 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4679 return len;
4680}
4681
4682static struct device_attribute ipr_raw_mode_attr = {
4683 .attr = {
4684 .name = "raw_mode",
4685 .mode = S_IRUGO | S_IWUSR,
4686 },
4687 .show = ipr_show_raw_mode,
4688 .store = ipr_store_raw_mode
4689};
4690
Linus Torvalds1da177e2005-04-16 15:20:36 -07004691static struct device_attribute *ipr_dev_attrs[] = {
4692 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004693 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004694 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004695 &ipr_resource_type_attr,
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004696 &ipr_raw_mode_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004697 NULL,
4698};
4699
4700/**
4701 * ipr_biosparam - Return the HSC mapping
4702 * @sdev: scsi device struct
4703 * @block_device: block device pointer
4704 * @capacity: capacity of the device
4705 * @parm: Array containing returned HSC values.
4706 *
4707 * This function generates the HSC parms that fdisk uses.
4708 * We want to make sure we return something that places partitions
4709 * on 4k boundaries for best performance with the IOA.
4710 *
4711 * Return value:
4712 * 0 on success
4713 **/
4714static int ipr_biosparam(struct scsi_device *sdev,
4715 struct block_device *block_device,
4716 sector_t capacity, int *parm)
4717{
4718 int heads, sectors;
4719 sector_t cylinders;
4720
4721 heads = 128;
4722 sectors = 32;
4723
4724 cylinders = capacity;
4725 sector_div(cylinders, (128 * 32));
4726
4727 /* return result */
4728 parm[0] = heads;
4729 parm[1] = sectors;
4730 parm[2] = cylinders;
4731
4732 return 0;
4733}
4734
4735/**
Brian King35a39692006-09-25 12:39:20 -05004736 * ipr_find_starget - Find target based on bus/target.
4737 * @starget: scsi target struct
4738 *
4739 * Return value:
4740 * resource entry pointer if found / NULL if not found
4741 **/
4742static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4743{
4744 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4745 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4746 struct ipr_resource_entry *res;
4747
4748 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004749 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004750 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004751 return res;
4752 }
4753 }
4754
4755 return NULL;
4756}
4757
4758static struct ata_port_info sata_port_info;
4759
4760/**
4761 * ipr_target_alloc - Prepare for commands to a SCSI target
4762 * @starget: scsi target struct
4763 *
4764 * If the device is a SATA device, this function allocates an
4765 * ATA port with libata, else it does nothing.
4766 *
4767 * Return value:
4768 * 0 on success / non-0 on failure
4769 **/
4770static int ipr_target_alloc(struct scsi_target *starget)
4771{
4772 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4773 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4774 struct ipr_sata_port *sata_port;
4775 struct ata_port *ap;
4776 struct ipr_resource_entry *res;
4777 unsigned long lock_flags;
4778
4779 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4780 res = ipr_find_starget(starget);
4781 starget->hostdata = NULL;
4782
4783 if (res && ipr_is_gata(res)) {
4784 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4785 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4786 if (!sata_port)
4787 return -ENOMEM;
4788
4789 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4790 if (ap) {
4791 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4792 sata_port->ioa_cfg = ioa_cfg;
4793 sata_port->ap = ap;
4794 sata_port->res = res;
4795
4796 res->sata_port = sata_port;
4797 ap->private_data = sata_port;
4798 starget->hostdata = sata_port;
4799 } else {
4800 kfree(sata_port);
4801 return -ENOMEM;
4802 }
4803 }
4804 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4805
4806 return 0;
4807}
4808
4809/**
4810 * ipr_target_destroy - Destroy a SCSI target
4811 * @starget: scsi target struct
4812 *
4813 * If the device was a SATA device, this function frees the libata
4814 * ATA port, else it does nothing.
4815 *
4816 **/
4817static void ipr_target_destroy(struct scsi_target *starget)
4818{
4819 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004820 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4821 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4822
4823 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004824 if (!ipr_find_starget(starget)) {
4825 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4826 clear_bit(starget->id, ioa_cfg->array_ids);
4827 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4828 clear_bit(starget->id, ioa_cfg->vset_ids);
4829 else if (starget->channel == 0)
4830 clear_bit(starget->id, ioa_cfg->target_ids);
4831 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004832 }
Brian King35a39692006-09-25 12:39:20 -05004833
4834 if (sata_port) {
4835 starget->hostdata = NULL;
4836 ata_sas_port_destroy(sata_port->ap);
4837 kfree(sata_port);
4838 }
4839}
4840
4841/**
4842 * ipr_find_sdev - Find device based on bus/target/lun.
4843 * @sdev: scsi device struct
4844 *
4845 * Return value:
4846 * resource entry pointer if found / NULL if not found
4847 **/
4848static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4849{
4850 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4851 struct ipr_resource_entry *res;
4852
4853 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004854 if ((res->bus == sdev->channel) &&
4855 (res->target == sdev->id) &&
4856 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004857 return res;
4858 }
4859
4860 return NULL;
4861}
4862
4863/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004864 * ipr_slave_destroy - Unconfigure a SCSI device
4865 * @sdev: scsi device struct
4866 *
4867 * Return value:
4868 * nothing
4869 **/
4870static void ipr_slave_destroy(struct scsi_device *sdev)
4871{
4872 struct ipr_resource_entry *res;
4873 struct ipr_ioa_cfg *ioa_cfg;
4874 unsigned long lock_flags = 0;
4875
4876 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4877
4878 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4879 res = (struct ipr_resource_entry *) sdev->hostdata;
4880 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004881 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004882 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004883 sdev->hostdata = NULL;
4884 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004885 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004886 }
4887 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4888}
4889
4890/**
4891 * ipr_slave_configure - Configure a SCSI device
4892 * @sdev: scsi device struct
4893 *
4894 * This function configures the specified scsi device.
4895 *
4896 * Return value:
4897 * 0 on success
4898 **/
4899static int ipr_slave_configure(struct scsi_device *sdev)
4900{
4901 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4902 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004903 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004905 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906
4907 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4908 res = sdev->hostdata;
4909 if (res) {
4910 if (ipr_is_af_dasd_device(res))
4911 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004912 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004913 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004914 sdev->no_uld_attach = 1;
4915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004916 if (ipr_is_vset_device(res)) {
Brian King60654e22014-12-02 12:47:46 -06004917 sdev->scsi_level = SCSI_SPC_3;
Brian King723cd772017-08-18 16:17:32 -05004918 sdev->no_report_opcodes = 1;
Jens Axboe242f9dc2008-09-14 05:55:09 -07004919 blk_queue_rq_timeout(sdev->request_queue,
4920 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004921 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004922 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004923 if (ipr_is_gata(res) && res->sata_port)
4924 ap = res->sata_port->ap;
4925 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4926
4927 if (ap) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004928 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004929 ata_sas_slave_configure(sdev, ap);
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004930 }
4931
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004932 if (ioa_cfg->sis64)
4933 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004934 ipr_format_res_path(ioa_cfg,
4935 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004936 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004937 }
4938 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4939 return 0;
4940}
4941
4942/**
Brian King35a39692006-09-25 12:39:20 -05004943 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4944 * @sdev: scsi device struct
4945 *
4946 * This function initializes an ATA port so that future commands
4947 * sent through queuecommand will work.
4948 *
4949 * Return value:
4950 * 0 on success
4951 **/
4952static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4953{
4954 struct ipr_sata_port *sata_port = NULL;
4955 int rc = -ENXIO;
4956
4957 ENTER;
4958 if (sdev->sdev_target)
4959 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004960 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004961 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004962 if (rc == 0)
4963 rc = ata_sas_sync_probe(sata_port->ap);
4964 }
4965
Brian King35a39692006-09-25 12:39:20 -05004966 if (rc)
4967 ipr_slave_destroy(sdev);
4968
4969 LEAVE;
4970 return rc;
4971}
4972
4973/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004974 * ipr_slave_alloc - Prepare for commands to a device.
4975 * @sdev: scsi device struct
4976 *
4977 * This function saves a pointer to the resource entry
4978 * in the scsi device struct if the device exists. We
4979 * can then use this pointer in ipr_queuecommand when
4980 * handling new commands.
4981 *
4982 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004983 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004984 **/
4985static int ipr_slave_alloc(struct scsi_device *sdev)
4986{
4987 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4988 struct ipr_resource_entry *res;
4989 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004990 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004991
4992 sdev->hostdata = NULL;
4993
4994 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4995
Brian King35a39692006-09-25 12:39:20 -05004996 res = ipr_find_sdev(sdev);
4997 if (res) {
4998 res->sdev = sdev;
4999 res->add_to_ml = 0;
5000 res->in_erp = 0;
5001 sdev->hostdata = res;
5002 if (!ipr_is_naca_model(res))
5003 res->needs_sync_complete = 1;
5004 rc = 0;
5005 if (ipr_is_gata(res)) {
5006 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5007 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005008 }
5009 }
5010
5011 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5012
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005013 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005014}
5015
Brian King6cdb0812014-10-30 17:27:10 -05005016/**
5017 * ipr_match_lun - Match function for specified LUN
5018 * @ipr_cmd: ipr command struct
5019 * @device: device to match (sdev)
5020 *
5021 * Returns:
5022 * 1 if command matches sdev / 0 if command does not match sdev
5023 **/
5024static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5025{
5026 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5027 return 1;
5028 return 0;
5029}
5030
5031/**
Brian King439ae282017-03-15 16:58:39 -05005032 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd: ipr command struct
5034 *
5035 * Returns:
5036 * true / false
5037 **/
5038static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5039{
5040 struct ipr_cmnd *loop_cmd;
5041
5042 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5043 if (loop_cmd == ipr_cmd)
5044 return true;
5045 }
5046
5047 return false;
5048}
5049
5050/**
Brian Kingef97d8a2017-03-15 16:58:41 -05005051 * ipr_match_res - Match function for specified resource entry
5052 * @ipr_cmd: ipr command struct
5053 * @resource: resource entry to match
5054 *
5055 * Returns:
 *	1 if command matches the resource entry / 0 if it does not
5057 **/
5058static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5059{
5060 struct ipr_resource_entry *res = resource;
5061
5062 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5063 return 1;
5064 return 0;
5065}
5066
5067/**
Brian King6cdb0812014-10-30 17:27:10 -05005068 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg: ioa config struct
5070 * @device: device to match (sdev)
5071 * @match: match function to use
5072 *
5073 * Returns:
5074 * SUCCESS / FAILED
5075 **/
5076static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5077 int (*match)(struct ipr_cmnd *, void *))
5078{
5079 struct ipr_cmnd *ipr_cmd;
Brian King439ae282017-03-15 16:58:39 -05005080 int wait, i;
Brian King6cdb0812014-10-30 17:27:10 -05005081 unsigned long flags;
5082 struct ipr_hrr_queue *hrrq;
5083 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5084 DECLARE_COMPLETION_ONSTACK(comp);
5085
5086 ENTER;
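	/*
	 * Scan every HRRQ for active commands that match, attach a completion
	 * to each, and wait for them to finish.  Repeat until a scan finds no
	 * matching commands or the wait times out.
	 */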
5087 do {
5088 wait = 0;
5089
5090 for_each_hrrq(hrrq, ioa_cfg) {
5091 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005092 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5093 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5094 if (!ipr_cmnd_is_free(ipr_cmd)) {
5095 if (match(ipr_cmd, device)) {
5096 ipr_cmd->eh_comp = &comp;
5097 wait++;
5098 }
Brian King6cdb0812014-10-30 17:27:10 -05005099 }
5100 }
5101 spin_unlock_irqrestore(hrrq->lock, flags);
5102 }
5103
5104 if (wait) {
5105 timeout = wait_for_completion_timeout(&comp, timeout);
5106
5107 if (!timeout) {
5108 wait = 0;
5109
5110 for_each_hrrq(hrrq, ioa_cfg) {
5111 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005112 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5113 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5114 if (!ipr_cmnd_is_free(ipr_cmd)) {
5115 if (match(ipr_cmd, device)) {
5116 ipr_cmd->eh_comp = NULL;
5117 wait++;
5118 }
Brian King6cdb0812014-10-30 17:27:10 -05005119 }
5120 }
5121 spin_unlock_irqrestore(hrrq->lock, flags);
5122 }
5123
5124 if (wait)
5125 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5126 LEAVE;
5127 return wait ? FAILED : SUCCESS;
5128 }
5129 }
5130 } while (wait);
5131
5132 LEAVE;
5133 return SUCCESS;
5134}
5135
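/**
 * ipr_eh_host_reset - Reset the host adapter
 * @cmd: scsi command struct
 *
 * This function initiates an adapter reset on behalf of the SCSI
 * error handling thread and waits for the reset to complete.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/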
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005136static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005137{
5138 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005139 unsigned long lock_flags = 0;
5140 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005141
5142 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005143 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5144 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005145
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05005146 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005147 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005148 dev_err(&ioa_cfg->pdev->dev,
5149 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005150
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005151 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5152 ioa_cfg->sdt_state = GET_DUMP;
5153 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005154
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005155 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5156 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5157 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005158
	/* If we got hit with a host reset while we were already resetting
	   the adapter for some reason and that reset failed, fail this
	   host reset as well. */
5161 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5162 ipr_trace;
5163 rc = FAILED;
5164 }
5165
5166 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167 LEAVE;
5168 return rc;
5169}
5170
5171/**
Brian Kingc6513092006-03-29 09:37:43 -06005172 * ipr_device_reset - Reset the device
5173 * @ioa_cfg: ioa config struct
5174 * @res: resource entry struct
5175 *
5176 * This function issues a device reset to the affected device.
5177 * If the device is a SCSI device, a LUN reset will be sent
5178 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05005179 * will be sent. If the device is a SATA device, a PHY reset will
5180 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06005181 *
5182 * Return value:
5183 * 0 on success / non-zero on failure
5184 **/
5185static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5186 struct ipr_resource_entry *res)
5187{
5188 struct ipr_cmnd *ipr_cmd;
5189 struct ipr_ioarcb *ioarcb;
5190 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05005191 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06005192 u32 ioasc;
5193
5194 ENTER;
5195 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5196 ioarcb = &ipr_cmd->ioarcb;
5197 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08005198
5199 if (ipr_cmd->ioa_cfg->sis64) {
5200 regs = &ipr_cmd->i.ata_ioadl.regs;
5201 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5202 } else
5203 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06005204
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005205 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06005206 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5207 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
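	/*
	 * For SATA devices, request an ATA PHY reset and have the adapter
	 * return the ATA status on successful completion.
	 */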
Brian King35a39692006-09-25 12:39:20 -05005208 if (ipr_is_gata(res)) {
5209 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08005210 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05005211 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5212 }
Brian Kingc6513092006-03-29 09:37:43 -06005213
5214 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005215 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005216 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005217 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5218 if (ipr_cmd->ioa_cfg->sis64)
5219 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5220 sizeof(struct ipr_ioasa_gata));
5221 else
5222 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5223 sizeof(struct ipr_ioasa_gata));
5224 }
Brian Kingc6513092006-03-29 09:37:43 -06005225
5226 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005227 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06005228}
5229
5230/**
Brian King35a39692006-09-25 12:39:20 -05005231 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09005232 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05005233 * @classes: class of the attached device
5234 *
Tejun Heocc0680a2007-08-06 18:36:23 +09005235 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05005236 *
5237 * Return value:
5238 * 0 on success / non-zero on failure
5239 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09005240static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07005241 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05005242{
Tejun Heocc0680a2007-08-06 18:36:23 +09005243 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05005244 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5245 struct ipr_resource_entry *res;
5246 unsigned long lock_flags = 0;
Brian Kingef97d8a2017-03-15 16:58:41 -05005247 int rc = -ENXIO, ret;
Brian King35a39692006-09-25 12:39:20 -05005248
5249 ENTER;
5250 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005251 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06005252 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5253 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5254 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5255 }
5256
Brian King35a39692006-09-25 12:39:20 -05005257 res = sata_port->res;
5258 if (res) {
5259 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005260 *classes = res->ata_class;
Brian Kingef97d8a2017-03-15 16:58:41 -05005261 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King35a39692006-09-25 12:39:20 -05005262
Brian Kingef97d8a2017-03-15 16:58:41 -05005263 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5264 if (ret != SUCCESS) {
5265 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5266 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5267 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5268
5269 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5270 }
5271 } else
5272 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5273
Brian King35a39692006-09-25 12:39:20 -05005274 LEAVE;
5275 return rc;
5276}
5277
5278/**
 * __ipr_eh_dev_reset - Reset the device
5280 * @scsi_cmd: scsi command struct
5281 *
5282 * This function issues a device reset to the affected device.
5283 * A LUN reset will be sent to the device first. If that does
5284 * not work, a target reset will be sent.
5285 *
5286 * Return value:
5287 * SUCCESS / FAILED
5288 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005289static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005290{
5291 struct ipr_cmnd *ipr_cmd;
5292 struct ipr_ioa_cfg *ioa_cfg;
5293 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05005294 struct ata_port *ap;
Brian King439ae282017-03-15 16:58:39 -05005295 int rc = 0, i;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005296 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005297
5298 ENTER;
5299 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5300 res = scsi_cmd->device->hostdata;
5301
Linus Torvalds1da177e2005-04-16 15:20:36 -07005302 /*
5303 * If we are currently going through reset/reload, return failed. This will force the
5304 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5305 * reset to complete
5306 */
5307 if (ioa_cfg->in_reset_reload)
5308 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005309 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005310 return FAILED;
5311
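	/*
	 * Flag any outstanding ATA commands to this device as failed so the
	 * libata error handler will clean them up during the reset.
	 */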
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005312 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005313 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005314 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5315 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5316
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005317 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Brian King960e9642017-03-15 16:58:37 -05005318 if (!ipr_cmd->qc)
5319 continue;
Brian King439ae282017-03-15 16:58:39 -05005320 if (ipr_cmnd_is_free(ipr_cmd))
5321 continue;
Brian King960e9642017-03-15 16:58:37 -05005322
5323 ipr_cmd->done = ipr_sata_eh_done;
5324 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005325 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5326 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5327 }
Brian King7402ece2006-11-21 10:28:23 -06005328 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005330 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005332 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005333 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005334
5335 if (ipr_is_gata(res) && res->sata_port) {
5336 ap = res->sata_port->ap;
5337 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005338 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005339 spin_lock_irq(scsi_cmd->device->host->host_lock);
5340 } else
5341 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06005343 res->reset_occurred = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344
Linus Torvalds1da177e2005-04-16 15:20:36 -07005345 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005346 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347}
5348
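/**
 * ipr_eh_dev_reset - Reset the device
 * @cmd: scsi command struct
 *
 * This function acquires the host lock, issues the device reset, and
 * then waits for any outstanding commands to the device to complete.
 *
 * Return value:
 *	SUCCESS / FAILED
 **/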
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005349static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005350{
5351 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005352 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingef97d8a2017-03-15 16:58:41 -05005353 struct ipr_resource_entry *res;
Brian King6cdb0812014-10-30 17:27:10 -05005354
5355 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
Brian Kingef97d8a2017-03-15 16:58:41 -05005356 res = cmd->device->hostdata;
5357
5358 if (!res)
5359 return FAILED;
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005360
5361 spin_lock_irq(cmd->device->host->host_lock);
5362 rc = __ipr_eh_dev_reset(cmd);
5363 spin_unlock_irq(cmd->device->host->host_lock);
5364
Brian Kingef97d8a2017-03-15 16:58:41 -05005365 if (rc == SUCCESS) {
5366 if (ipr_is_gata(res) && res->sata_port)
5367 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5368 else
5369 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5370 }
Brian King6cdb0812014-10-30 17:27:10 -05005371
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005372 return rc;
5373}
5374
Linus Torvalds1da177e2005-04-16 15:20:36 -07005375/**
5376 * ipr_bus_reset_done - Op done function for bus reset.
5377 * @ipr_cmd: ipr command struct
5378 *
5379 * This function is the op done function for a bus reset
5380 *
5381 * Return value:
5382 * none
5383 **/
5384static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5385{
5386 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5387 struct ipr_resource_entry *res;
5388
5389 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005390 if (!ioa_cfg->sis64)
5391 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5392 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5393 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5394 break;
5395 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005397
5398 /*
5399 * If abort has not completed, indicate the reset has, else call the
5400 * abort's done function to wake the sleeping eh thread
5401 */
5402 if (ipr_cmd->sibling->sibling)
5403 ipr_cmd->sibling->sibling = NULL;
5404 else
5405 ipr_cmd->sibling->done(ipr_cmd->sibling);
5406
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005407 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005408 LEAVE;
5409}
5410
5411/**
5412 * ipr_abort_timeout - An abort task has timed out
 * @t: Timer context used to fetch ipr command struct
5414 *
5415 * This function handles when an abort task times out. If this
5416 * happens we issue a bus reset since we have resources tied
5417 * up that must be freed before returning to the midlayer.
5418 *
5419 * Return value:
5420 * none
5421 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07005422static void ipr_abort_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423{
Kees Cook738c6ec2017-08-18 16:53:24 -07005424 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425 struct ipr_cmnd *reset_cmd;
5426 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5427 struct ipr_cmd_pkt *cmd_pkt;
5428 unsigned long lock_flags = 0;
5429
5430 ENTER;
5431 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5432 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5433 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5434 return;
5435 }
5436
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005437 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005438 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5439 ipr_cmd->sibling = reset_cmd;
5440 reset_cmd->sibling = ipr_cmd;
5441 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5442 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5443 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5444 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5445 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5446
5447 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5448 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5449 LEAVE;
5450}
5451
5452/**
5453 * ipr_cancel_op - Cancel specified op
5454 * @scsi_cmd: scsi command struct
5455 *
5456 * This function cancels the specified op.
5457 *
5458 * Return value:
5459 * SUCCESS / FAILED
5460 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005461static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005462{
5463 struct ipr_cmnd *ipr_cmd;
5464 struct ipr_ioa_cfg *ioa_cfg;
5465 struct ipr_resource_entry *res;
5466 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005467 u32 ioasc, int_reg;
Brian King439ae282017-03-15 16:58:39 -05005468 int i, op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005469 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470
5471 ENTER;
5472 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5473 res = scsi_cmd->device->hostdata;
5474
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005475 /* If we are currently going through reset/reload, return failed.
5476 * This will force the mid-layer to call ipr_eh_host_reset,
5477 * which will then go to sleep and wait for the reset to complete
5478 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005479 if (ioa_cfg->in_reset_reload ||
5480 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005481 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005482 if (!res)
5483 return FAILED;
5484
5485 /*
5486 * If we are aborting a timed out op, chances are that the timeout was caused
5487 * by a still not detected EEH error. In such cases, reading a register will
5488 * trigger the EEH recovery infrastructure.
5489 */
5490 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5491
5492 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005493 return FAILED;
5494
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005495 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005496 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005497 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5498 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5499 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5500 op_found = 1;
5501 break;
5502 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005503 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005504 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005505 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506 }
5507
5508 if (!op_found)
5509 return SUCCESS;
5510
5511 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005512 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005513 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5514 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5515 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5516 ipr_cmd->u.sdev = scsi_cmd->device;
5517
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005518 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5519 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005521 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005522
5523 /*
5524 * If the abort task timed out and we sent a bus reset, we will get
5525 * one of the following responses to the abort
5526 */
5527 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5528 ioasc = 0;
5529 ipr_trace;
5530 }
5531
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005532 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005533 if (!ipr_is_naca_model(res))
5534 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005535
5536 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005537 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005538}
5539
5540/**
5541 * ipr_scan_finished - Check if a device scan is complete
5542 * @shost: scsi host struct
 * @elapsed_time: elapsed time of the scan, in jiffies
5543 *
5544 * Return value:
Brian Kingf688f962014-12-02 12:47:37 -06005545 * 0 if scan in progress / 1 if scan is complete
5546 **/
5547static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5548{
5549 unsigned long lock_flags;
5550 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5551 int rc = 0;
5552
5553 spin_lock_irqsave(shost->host_lock, lock_flags);
5554 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5555 rc = 1;
5556 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5557 rc = 1;
5558 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5559 return rc;
5560}
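
/*
 * Illustrative sketch (not built into the driver): scan_finished() is the
 * hook the SCSI midlayer polls while an asynchronous host scan is in
 * progress. The loop below is only a simplified approximation of that
 * midlayer behaviour, shown to clarify why ipr_scan_finished() returns
 * 0 (keep polling) or 1 (done); the ipr_sketch_* name is invented and this
 * is not code from scsi_scan.c.
 */
#if 0
static void ipr_sketch_poll_scan(struct Scsi_Host *shost)
{
	unsigned long start = jiffies;

	if (shost->hostt->scan_start)
		shost->hostt->scan_start(shost);

	/* Poll the host's scan_finished() hook until it reports completion. */
	while (!shost->hostt->scan_finished(shost, jiffies - start))
		msleep(10);
}
#endif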
5561
5562/**
5563 * ipr_eh_abort - Abort a single op
5564 * @scsi_cmd: scsi command struct
5565 *
5566 * Return value:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005567 * SUCCESS / FAILED
5568 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005569static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005570{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005571 unsigned long flags;
5572 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005573 struct ipr_ioa_cfg *ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005574
5575 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576
Brian King6cdb0812014-10-30 17:27:10 -05005577 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5578
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005579 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5580 rc = ipr_cancel_op(scsi_cmd);
5581 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582
Brian King6cdb0812014-10-30 17:27:10 -05005583 if (rc == SUCCESS)
5584 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005585 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005586 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587}
5588
5589/**
5590 * ipr_handle_other_interrupt - Handle "other" interrupts
5591 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005592 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005593 *
5594 * Return value:
5595 * IRQ_NONE / IRQ_HANDLED
5596 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005597static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005598 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005599{
5600 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005601 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005602
Wayne Boyer7dacb642011-04-12 10:29:02 -07005603 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5604 int_reg &= ~int_mask_reg;
5605
5606 /* If an interrupt on the adapter did not occur, ignore it.
5607 * Or in the case of SIS 64, check for a stage change interrupt.
5608 */
5609 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5610 if (ioa_cfg->sis64) {
5611 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5612 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5613 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5614
5615 /* clear stage change */
5616 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5617 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5618 list_del(&ioa_cfg->reset_cmd->queue);
5619 del_timer(&ioa_cfg->reset_cmd->timer);
5620 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5621 return IRQ_HANDLED;
5622 }
5623 }
5624
5625 return IRQ_NONE;
5626 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005627
5628 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5629 /* Mask the interrupt */
5630 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005631 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5632
5633 list_del(&ioa_cfg->reset_cmd->queue);
5634 del_timer(&ioa_cfg->reset_cmd->timer);
5635 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005636 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005637 if (ioa_cfg->clear_isr) {
5638 if (ipr_debug && printk_ratelimit())
5639 dev_err(&ioa_cfg->pdev->dev,
5640 "Spurious interrupt detected. 0x%08X\n", int_reg);
5641 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5642 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5643 return IRQ_NONE;
5644 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005645 } else {
5646 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5647 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005648 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5649 dev_err(&ioa_cfg->pdev->dev,
5650 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005651 else
5652 dev_err(&ioa_cfg->pdev->dev,
5653 "Permanent IOA failure. 0x%08X\n", int_reg);
5654
5655 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5656 ioa_cfg->sdt_state = GET_DUMP;
5657
5658 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5659 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5660 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005661
Linus Torvalds1da177e2005-04-16 15:20:36 -07005662 return rc;
5663}
5664
5665/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005666 * ipr_isr_eh - Interrupt service routine error handler
5667 * @ioa_cfg: ioa config struct
5668 * @msg: message to log
 * @number: numeric value to log with the message
5669 *
5670 * Return value:
5671 * none
5672 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005673static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005674{
5675 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005676 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005677
5678 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5679 ioa_cfg->sdt_state = GET_DUMP;
5680
5681 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5682}
5683
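/**
 * ipr_process_hrrq - Process responses from a host request response queue
 * @hrr_queue: HRR queue to drain
 * @budget: maximum number of responses to process (non-positive = no limit)
 * @doneq: list to which completed commands are moved for later processing
 *
 * Callers hold the HRR queue lock.
 *
 * Return value:
 * 	number of response queue entries consumed
 **/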
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005684static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005685 struct list_head *doneq)
5686{
5687 u32 ioasc;
5688 u16 cmd_index;
5689 struct ipr_cmnd *ipr_cmd;
5690 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5691 int num_hrrq = 0;
5692
5693 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005694 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005695 return 0;
5696
5697 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5698 hrr_queue->toggle_bit) {
5699
5700 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5701 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5702 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5703
5704 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5705 cmd_index < hrr_queue->min_cmd_id)) {
5706 ipr_isr_eh(ioa_cfg,
5707 "Invalid response handle from IOA: ",
5708 cmd_index);
5709 break;
5710 }
5711
5712 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5713 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5714
5715 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5716
5717 list_move_tail(&ipr_cmd->queue, doneq);
5718
5719 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5720 hrr_queue->hrrq_curr++;
5721 } else {
5722 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5723 hrr_queue->toggle_bit ^= 1u;
5724 }
5725 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005726 if (budget > 0 && num_hrrq >= budget)
5727 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005728 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005729
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005730 return num_hrrq;
5731}
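
/*
 * Illustrative sketch (not built into the driver): how a single 32-bit host
 * RRQ entry is interpreted, restating the decode performed in
 * ipr_process_hrrq() above. The ipr_sketch_* helper names are invented for
 * illustration only.
 */
#if 0
static bool ipr_sketch_hrrq_entry_valid(const __be32 *entry, u32 toggle_bit)
{
	/* The toggle bit alternates each pass around the ring, so an entry
	 * belongs to the current pass only when its toggle bit matches the
	 * value the driver expects (hrr_queue->toggle_bit). */
	return (be32_to_cpu(*entry) & IPR_HRRQ_TOGGLE_BIT) == toggle_bit;
}

static u16 ipr_sketch_hrrq_entry_to_cmd_index(const __be32 *entry)
{
	/* The handle bits index the completed command in ipr_cmnd_list[]. */
	return (be32_to_cpu(*entry) & IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
	       IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
}
#endif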
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005732
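/**
 * ipr_iopoll - irq_poll callback for processing HRR queue completions
 * @iop: irq_poll structure embedded in the HRR queue
 * @budget: maximum number of completions to process in this poll
 *
 * Return value:
 * 	number of completed ops processed
 **/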
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005733static int ipr_iopoll(struct irq_poll *iop, int budget)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005734{
5735 struct ipr_ioa_cfg *ioa_cfg;
5736 struct ipr_hrr_queue *hrrq;
5737 struct ipr_cmnd *ipr_cmd, *temp;
5738 unsigned long hrrq_flags;
5739 int completed_ops;
5740 LIST_HEAD(doneq);
5741
5742 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5743 ioa_cfg = hrrq->ioa_cfg;
5744
5745 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5746 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5747
5748 if (completed_ops < budget)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005749 irq_poll_complete(iop);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005750 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5751
5752 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5753 list_del(&ipr_cmd->queue);
5754 del_timer(&ipr_cmd->timer);
5755 ipr_cmd->fast_done(ipr_cmd);
5756 }
5757
5758 return completed_ops;
5759}
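
/*
 * Illustrative sketch (not built into the driver): ipr_iopoll() above only
 * runs once the queue's irq_poll instance has been registered with the
 * irq_poll API, which this excerpt does not show. Registration presumably
 * looks roughly like the helper below; the ipr_sketch_* name and the weight
 * parameter are placeholders.
 */
#if 0
static void ipr_sketch_enable_iopoll(struct ipr_hrr_queue *hrrq, int weight)
{
	/* After this, ipr_isr_mhrrq() can defer completion processing to
	 * softirq context via irq_poll_sched(&hrrq->iopoll). */
	irq_poll_init(&hrrq->iopoll, weight, ipr_iopoll);
}
#endif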
5760
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005761/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005762 * ipr_isr - Interrupt service routine
5763 * @irq: irq number
5764 * @devp: pointer to ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07005765 *
5766 * Return value:
5767 * IRQ_NONE / IRQ_HANDLED
5768 **/
David Howells7d12e782006-10-05 14:55:46 +01005769static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005770{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005771 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5772 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005773 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005774 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005775 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005776 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005777 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005778 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005779 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005781 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005782 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005783 if (!hrrq->allow_interrupts) {
5784 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005785 return IRQ_NONE;
5786 }
5787
Linus Torvalds1da177e2005-04-16 15:20:36 -07005788 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005789 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5790 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005791
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005792 if (!ioa_cfg->clear_isr)
5793 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005794
Linus Torvalds1da177e2005-04-16 15:20:36 -07005795 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005796 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005797 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005798 writel(IPR_PCII_HRRQ_UPDATED,
5799 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005800 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005801 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005802 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005803
Wayne Boyer7dacb642011-04-12 10:29:02 -07005804 } else if (rc == IRQ_NONE && irq_none == 0) {
5805 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5806 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005807 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5808 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005809 ipr_isr_eh(ioa_cfg,
5810 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005811 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005812 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005813 } else
5814 break;
5815 }
5816
5817 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005818 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005819
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005820 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005821 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5822 list_del(&ipr_cmd->queue);
5823 del_timer(&ipr_cmd->timer);
5824 ipr_cmd->fast_done(ipr_cmd);
5825 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005826 return rc;
5827}
Brian King172cd6e2012-07-17 08:14:40 -05005828
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005829/**
5830 * ipr_isr_mhrrq - Interrupt service routine
5831 * @irq: irq number
5832 * @devp: pointer to ioa config struct
5833 *
5834 * Return value:
5835 * IRQ_NONE / IRQ_HANDLED
5836 **/
5837static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5838{
5839 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005840 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005841 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005842 struct ipr_cmnd *ipr_cmd, *temp;
5843 irqreturn_t rc = IRQ_NONE;
5844 LIST_HEAD(doneq);
5845
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005846 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005847
5848 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005849 if (!hrrq->allow_interrupts) {
5850 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005851 return IRQ_NONE;
5852 }
5853
Jens Axboe89f8b332014-03-13 09:38:42 -06005854 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005855 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5856 hrrq->toggle_bit) {
Christoph Hellwigea511902015-12-07 06:41:11 -08005857 irq_poll_sched(&hrrq->iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005858 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5859 return IRQ_HANDLED;
5860 }
5861 } else {
5862 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5863 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005864
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005865 if (ipr_process_hrrq(hrrq, -1, &doneq))
5866 rc = IRQ_HANDLED;
5867 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005868
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005869 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005870
5871 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5872 list_del(&ipr_cmd->queue);
5873 del_timer(&ipr_cmd->timer);
5874 ipr_cmd->fast_done(ipr_cmd);
5875 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005876 return rc;
5877}
5878
5879/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005880 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005881 * @ioa_cfg: ioa config struct
5882 * @ipr_cmd: ipr command struct
5883 *
5884 * Return value:
5885 * 0 on success / -1 on failure
5886 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005887static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5888 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005889{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005890 int i, nseg;
5891 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005892 u32 length;
5893 u32 ioadl_flags = 0;
5894 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5895 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005896 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005897
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005898 length = scsi_bufflen(scsi_cmd);
5899 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005900 return 0;
5901
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005902 nseg = scsi_dma_map(scsi_cmd);
5903 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005904 if (printk_ratelimit())
Anton Blanchardd73341b2014-10-30 17:27:08 -05005905 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005906 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005907 }
5908
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005909 ipr_cmd->dma_use_sg = nseg;
5910
Wayne Boyer438b0332010-05-10 09:13:00 -07005911 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005912 ioarcb->ioadl_len =
5913 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005914
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005915 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5916 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5917 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005918 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5919 ioadl_flags = IPR_IOADL_FLAGS_READ;
5920
5921 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5922 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5923 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5924 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5925 }
5926
5927 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5928 return 0;
5929}
5930
5931/**
5932 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5933 * @ioa_cfg: ioa config struct
5934 * @ipr_cmd: ipr command struct
5935 *
5936 * Return value:
5937 * 0 on success / -1 on failure
5938 **/
5939static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5940 struct ipr_cmnd *ipr_cmd)
5941{
5942 int i, nseg;
5943 struct scatterlist *sg;
5944 u32 length;
5945 u32 ioadl_flags = 0;
5946 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5947 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5948 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5949
5950 length = scsi_bufflen(scsi_cmd);
5951 if (!length)
5952 return 0;
5953
5954 nseg = scsi_dma_map(scsi_cmd);
5955 if (nseg < 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05005956 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
Wayne Boyera32c0552010-02-19 13:23:36 -08005957 return -1;
5958 }
5959
5960 ipr_cmd->dma_use_sg = nseg;
5961
5962 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5963 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5964 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5965 ioarcb->data_transfer_length = cpu_to_be32(length);
5966 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005967 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5968 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5969 ioadl_flags = IPR_IOADL_FLAGS_READ;
5970 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5971 ioarcb->read_ioadl_len =
5972 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5973 }
5974
Wayne Boyera32c0552010-02-19 13:23:36 -08005975 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5976 ioadl = ioarcb->u.add_data.u.ioadl;
5977 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5978 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005979 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5980 }
5981
5982 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5983 ioadl[i].flags_and_data_len =
5984 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5985 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5986 }
5987
5988 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5989 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005990}
5991
5992/**
Brian Kingf646f322017-03-15 16:58:39 -05005993 * __ipr_erp_done - Process completion of ERP for a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005994 * @ipr_cmd: ipr command struct
5995 *
5996 * This function copies the sense buffer into the scsi_cmd
5997 * struct and pushes the scsi_done function.
5998 *
5999 * Return value:
6000 * nothing
6001 **/
Brian Kingf646f322017-03-15 16:58:39 -05006002static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006003{
6004 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6005 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006006 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006007
6008 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6009 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06006010 scmd_printk(KERN_ERR, scsi_cmd,
6011 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006012 } else {
6013 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6014 SCSI_SENSE_BUFFERSIZE);
6015 }
6016
6017 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006018 if (!ipr_is_naca_model(res))
6019 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006020 res->in_erp = 0;
6021 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006022 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006023 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006024 if (ipr_cmd->eh_comp)
6025 complete(ipr_cmd->eh_comp);
6026 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006027}
6028
6029/**
Brian Kingf646f322017-03-15 16:58:39 -05006030 * ipr_erp_done - Process completion of ERP for a device
6031 * @ipr_cmd: ipr command struct
6032 *
6033 * This function copies the sense buffer into the scsi_cmd
6034 * struct and pushes the scsi_done function.
6035 *
6036 * Return value:
6037 * nothing
6038 **/
6039static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6040{
6041 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6042 unsigned long hrrq_flags;
6043
6044 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6045 __ipr_erp_done(ipr_cmd);
6046 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006047}
6048
6049/**
6050 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6051 * @ipr_cmd: ipr command struct
6052 *
6053 * Return value:
6054 * none
6055 **/
6056static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6057{
Brian King51b1c7e2007-03-29 12:43:50 -05006058 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006059 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08006060 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006061
6062 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08006063 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006064 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006065 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006066 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006067 ioasa->hdr.ioasc = 0;
6068 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006069
6070 if (ipr_cmd->ioa_cfg->sis64)
6071 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6072 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6073 else {
6074 ioarcb->write_ioadl_addr =
6075 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6076 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6077 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006078}
6079
6080/**
Brian Kingf646f322017-03-15 16:58:39 -05006081 * __ipr_erp_request_sense - Send request sense to a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006082 * @ipr_cmd: ipr command struct
6083 *
6084 * This function sends a request sense to a device as a result
6085 * of a check condition.
6086 *
6087 * Return value:
6088 * nothing
6089 **/
Brian Kingf646f322017-03-15 16:58:39 -05006090static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006091{
6092 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006093 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006094
6095 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
Brian Kingf646f322017-03-15 16:58:39 -05006096 __ipr_erp_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006097 return;
6098 }
6099
6100 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6101
6102 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6103 cmd_pkt->cdb[0] = REQUEST_SENSE;
6104 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6105 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6106 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6107 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6108
Wayne Boyera32c0552010-02-19 13:23:36 -08006109 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6110 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006111
6112 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6113 IPR_REQUEST_SENSE_TIMEOUT * 2);
6114}
6115
6116/**
Brian Kingf646f322017-03-15 16:58:39 -05006117 * ipr_erp_request_sense - Send request sense to a device
6118 * @ipr_cmd: ipr command struct
6119 *
6120 * This function sends a request sense to a device as a result
6121 * of a check condition.
6122 *
6123 * Return value:
6124 * nothing
6125 **/
6126static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6127{
6128 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6129 unsigned long hrrq_flags;
6130
6131 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6132 __ipr_erp_request_sense(ipr_cmd);
6133 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6134}
6135
6136/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006137 * ipr_erp_cancel_all - Send cancel all to a device
6138 * @ipr_cmd: ipr command struct
6139 *
6140 * This function sends a cancel all to a device to clear the
6141 * queue. If we are running TCQ on the device, QERR is set to 1,
6142 * which means all outstanding ops have been dropped on the floor.
6143 * Cancel all will return them to us.
6144 *
6145 * Return value:
6146 * nothing
6147 **/
6148static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6149{
6150 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6151 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6152 struct ipr_cmd_pkt *cmd_pkt;
6153
6154 res->in_erp = 1;
6155
6156 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6157
Christoph Hellwig17ea0122014-11-24 15:36:20 +01006158 if (!scsi_cmd->device->simple_tags) {
Brian Kingf646f322017-03-15 16:58:39 -05006159 __ipr_erp_request_sense(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160 return;
6161 }
6162
6163 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6164 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6165 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6166
6167 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6168 IPR_CANCEL_ALL_TIMEOUT);
6169}
6170
6171/**
6172 * ipr_dump_ioasa - Dump contents of IOASA
6173 * @ioa_cfg: ioa config struct
6174 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06006175 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006176 *
6177 * This function is invoked by the interrupt handler when ops
6178 * fail. It will log the IOASA if appropriate. Only called
6179 * for GPDD ops.
6180 *
6181 * Return value:
6182 * none
6183 **/
6184static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06006185 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006186{
6187 int i;
6188 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05006189 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006190 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006191 __be32 *ioasa_data = (__be32 *)ioasa;
6192 int error_index;
6193
Wayne Boyer96d21f02010-05-10 09:13:27 -07006194 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6195 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006196
6197 if (0 == ioasc)
6198 return;
6199
6200 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6201 return;
6202
Brian Kingb0692dd2007-03-29 12:43:09 -05006203 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6204 error_index = ipr_get_error(fd_ioasc);
6205 else
6206 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006207
6208 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6209 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07006210 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006211 return;
6212
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006213 if (!ipr_is_gscsi(res))
6214 return;
6215
Linus Torvalds1da177e2005-04-16 15:20:36 -07006216 if (ipr_error_table[error_index].log_ioasa == 0)
6217 return;
6218 }
6219
Brian Kingfe964d02006-03-29 09:37:29 -06006220 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006221
Wayne Boyer96d21f02010-05-10 09:13:27 -07006222 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6223 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6224 data_len = sizeof(struct ipr_ioasa64);
6225 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006226 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006227
6228 ipr_err("IOASA Dump:\n");
6229
6230 for (i = 0; i < data_len / 4; i += 4) {
6231 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6232 be32_to_cpu(ioasa_data[i]),
6233 be32_to_cpu(ioasa_data[i+1]),
6234 be32_to_cpu(ioasa_data[i+2]),
6235 be32_to_cpu(ioasa_data[i+3]));
6236 }
6237}
6238
6239/**
6240 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6241 * @ipr_cmd: ipr command struct
6243 *
6244 * Return value:
6245 * none
6246 **/
6247static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6248{
6249 u32 failing_lba;
6250 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6251 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006252 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6253 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006254
6255 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6256
6257 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6258 return;
6259
6260 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6261
6262 if (ipr_is_vset_device(res) &&
6263 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6264 ioasa->u.vset.failing_lba_hi != 0) {
6265 sense_buf[0] = 0x72;
6266 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6267 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6268 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6269
6270 sense_buf[7] = 12;
6271 sense_buf[8] = 0;
6272 sense_buf[9] = 0x0A;
6273 sense_buf[10] = 0x80;
6274
6275 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6276
6277 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6278 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6279 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6280 sense_buf[15] = failing_lba & 0x000000ff;
6281
6282 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6283
6284 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6285 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6286 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6287 sense_buf[19] = failing_lba & 0x000000ff;
6288 } else {
6289 sense_buf[0] = 0x70;
6290 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6291 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6292 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6293
6294 /* Illegal request */
6295 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07006296 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006297 sense_buf[7] = 10; /* additional length */
6298
6299 /* IOARCB was in error */
6300 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6301 sense_buf[15] = 0xC0;
6302 else /* Parameter data was invalid */
6303 sense_buf[15] = 0x80;
6304
6305 sense_buf[16] =
6306 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006307 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006308 sense_buf[17] =
6309 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006310 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006311 } else {
6312 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6313 if (ipr_is_vset_device(res))
6314 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6315 else
6316 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6317
6318 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6319 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6320 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6321 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6322 sense_buf[6] = failing_lba & 0x000000ff;
6323 }
6324
6325 sense_buf[7] = 6; /* additional length */
6326 }
6327 }
6328}
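
/*
 * Illustrative sketch (not built into the driver): the buffer filled in by
 * ipr_gen_sense() is standard fixed (0x70) or descriptor (0x72) format sense
 * data, so generic SCSI helpers can parse it. The ipr_sketch_* helper below
 * is invented for illustration and assumes scsi_normalize_sense() is visible
 * through the SCSI headers already included by this file.
 */
#if 0
static bool ipr_sketch_sense_is_medium_error(const u8 *sense_buf)
{
	struct scsi_sense_hdr sshdr;

	if (!scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE, &sshdr))
		return false;

	/* Sense key/ASC/ASCQ come from bytes 1-3 (0x72) or 2/12/13 (0x70). */
	return sshdr.sense_key == MEDIUM_ERROR;
}
#endif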
6329
6330/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006331 * ipr_get_autosense - Copy autosense data to sense buffer
6332 * @ipr_cmd: ipr command struct
6333 *
6334 * This function copies the autosense buffer to the buffer
6335 * in the scsi_cmd, if there is autosense available.
6336 *
6337 * Return value:
6338 * 1 if autosense was available / 0 if not
6339 **/
6340static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6341{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006342 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6343 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006344
Wayne Boyer96d21f02010-05-10 09:13:27 -07006345 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006346 return 0;
6347
Wayne Boyer96d21f02010-05-10 09:13:27 -07006348 if (ipr_cmd->ioa_cfg->sis64)
6349 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6350 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6351 SCSI_SENSE_BUFFERSIZE));
6352 else
6353 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6354 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6355 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006356 return 1;
6357}
6358
6359/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006360 * ipr_erp_start - Process an error response for a SCSI op
6361 * @ioa_cfg: ioa config struct
6362 * @ipr_cmd: ipr command struct
6363 *
6364 * This function determines whether or not to initiate ERP
6365 * on the affected device.
6366 *
6367 * Return value:
6368 * nothing
6369 **/
6370static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6371 struct ipr_cmnd *ipr_cmd)
6372{
6373 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6374 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006375 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05006376 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006377
6378 if (!res) {
Brian Kingf646f322017-03-15 16:58:39 -05006379 __ipr_scsi_eh_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006380 return;
6381 }
6382
Brian King8a048992007-04-26 16:00:10 -05006383 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006384 ipr_gen_sense(ipr_cmd);
6385
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006386 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6387
Brian King8a048992007-04-26 16:00:10 -05006388 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006389 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006390 if (ipr_is_naca_model(res))
6391 scsi_cmd->result |= (DID_ABORT << 16);
6392 else
6393 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006394 break;
6395 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006396 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006397 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6398 break;
6399 case IPR_IOASC_HW_SEL_TIMEOUT:
6400 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006401 if (!ipr_is_naca_model(res))
6402 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006403 break;
6404 case IPR_IOASC_SYNC_REQUIRED:
6405 if (!res->in_erp)
6406 res->needs_sync_complete = 1;
6407 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6408 break;
6409 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006410 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Mauricio Faria de Oliveira785a4702017-04-11 11:46:04 -03006411 /*
6412 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6413 * so SCSI mid-layer and upper layers handle it accordingly.
6414 */
6415 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6416 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006417 break;
6418 case IPR_IOASC_BUS_WAS_RESET:
6419 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6420 /*
6421 * Report the bus reset and ask for a retry. The device
6422 * will give CC/UA the next command.
6423 */
6424 if (!res->resetting_device)
6425 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6426 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006427 if (!ipr_is_naca_model(res))
6428 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006429 break;
6430 case IPR_IOASC_HW_DEV_BUS_STATUS:
6431 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6432 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006433 if (!ipr_get_autosense(ipr_cmd)) {
6434 if (!ipr_is_naca_model(res)) {
6435 ipr_erp_cancel_all(ipr_cmd);
6436 return;
6437 }
6438 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006439 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006440 if (!ipr_is_naca_model(res))
6441 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006442 break;
6443 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6444 break;
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006445 case IPR_IOASC_IR_NON_OPTIMIZED:
6446 if (res->raw_mode) {
6447 res->raw_mode = 0;
6448 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6449 } else
6450 scsi_cmd->result |= (DID_ERROR << 16);
6451 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006452 default:
Brian King5b7304f2006-08-02 14:57:51 -05006453 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6454 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006455 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006456 res->needs_sync_complete = 1;
6457 break;
6458 }
6459
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006460 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006461 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006462 if (ipr_cmd->eh_comp)
6463 complete(ipr_cmd->eh_comp);
6464 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006465}
6466
6467/**
6468 * ipr_scsi_done - mid-layer done function
6469 * @ipr_cmd: ipr command struct
6470 *
6471 * This function is invoked by the interrupt handler for
6472 * ops generated by the SCSI mid-layer
6473 *
6474 * Return value:
6475 * none
6476 **/
6477static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6478{
6479 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6480 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006481 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King36b8e182015-07-14 11:41:29 -05006482 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006483
Wayne Boyer96d21f02010-05-10 09:13:27 -07006484 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006485
6486 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006487 scsi_dma_unmap(scsi_cmd);
6488
Brian King36b8e182015-07-14 11:41:29 -05006489 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006490 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006491 if (ipr_cmd->eh_comp)
6492 complete(ipr_cmd->eh_comp);
6493 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King36b8e182015-07-14 11:41:29 -05006494 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006495 } else {
Brian King36b8e182015-07-14 11:41:29 -05006496 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6497 spin_lock(&ipr_cmd->hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006498 ipr_erp_start(ioa_cfg, ipr_cmd);
Brian King36b8e182015-07-14 11:41:29 -05006499 spin_unlock(&ipr_cmd->hrrq->_lock);
6500 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006501 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006502}
6503
6504/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006505 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006506 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006507 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006508 *
6509 * This function queues a request generated by the mid-layer.
6510 *
6511 * Return value:
6512 * 0 on success
6513 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6514 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6515 **/
Brian King00bfef22012-07-17 08:13:52 -05006516static int ipr_queuecommand(struct Scsi_Host *shost,
6517 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006518{
6519 struct ipr_ioa_cfg *ioa_cfg;
6520 struct ipr_resource_entry *res;
6521 struct ipr_ioarcb *ioarcb;
6522 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006523 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006524 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006525 struct ipr_hrr_queue *hrrq;
6526 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006527
Brian King00bfef22012-07-17 08:13:52 -05006528 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6529
Linus Torvalds1da177e2005-04-16 15:20:36 -07006530 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006531 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006532
6533 if (ipr_is_gata(res) && res->sata_port) {
6534 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6535 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6536 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6537 return rc;
6538 }
6539
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006540 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6541 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006542
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006543 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006544 /*
6545 * We are currently blocking all devices due to a host reset.
6546 * We have told the host to stop giving us new requests, but
6547 * ERP ops don't count. FIXME
6548 */
Brian Kingbfae7822013-01-30 23:45:08 -06006549 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006550 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006551 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006553
6554 /*
6555 * FIXME - Create scsi_set_host_offline interface
6556 * and the ioa_is_dead check can be removed
6557 */
Brian Kingbfae7822013-01-30 23:45:08 -06006558 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006559 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006560 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006561 }
6562
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006563 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6564 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006565 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006566 return SCSI_MLQUEUE_HOST_BUSY;
6567 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006568 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006569
Brian King172cd6e2012-07-17 08:14:40 -05006570 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006571 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006572
6573 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6574 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006575 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006576
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006577 if (ipr_is_gscsi(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006578 if (scsi_cmd->underflow == 0)
6579 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6580
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006581 if (res->reset_occurred) {
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006582 res->reset_occurred = 0;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006583 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006584 }
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006585 }
6586
6587 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6588 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6589
Linus Torvalds1da177e2005-04-16 15:20:36 -07006590 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
Christoph Hellwig50668632014-10-30 14:30:06 +01006591 if (scsi_cmd->flags & SCMD_TAGGED)
6592 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6593 else
6594 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006595 }
6596
6597 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006598 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006599 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006600 }
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006601 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006602 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006603
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006604 if (scsi_cmd->underflow == 0)
6605 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6606 }
6607
Dan Carpenterd12f1572012-07-30 11:18:22 +03006608 if (ioa_cfg->sis64)
6609 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6610 else
6611 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006612
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006613 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6614 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006615 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006616 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006617 if (!rc)
6618 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006619 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006620 }
6621
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006622 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006623 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006624 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006625 scsi_dma_unmap(scsi_cmd);
6626 goto err_nodev;
6627 }
6628
6629 ioarcb->res_handle = res->res_handle;
6630 if (res->needs_sync_complete) {
6631 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6632 res->needs_sync_complete = 0;
6633 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006634 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006635 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006636 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006637 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006638 return 0;
6639
6640err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006641 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006642 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6643 scsi_cmd->result = (DID_NO_CONNECT << 16);
6644 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006645 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006646 return 0;
6647}
6648
6649/**
Brian King35a39692006-09-25 12:39:20 -05006650 * ipr_ioctl - IOCTL handler
6651 * @sdev: scsi device struct
6652 * @cmd: IOCTL cmd
6653 * @arg: IOCTL arg
6654 *
6655 * Return value:
6656 * 0 on success / other on failure
6657 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006658static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006659{
6660 struct ipr_resource_entry *res;
6661
6662 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006663 if (res && ipr_is_gata(res)) {
6664 if (cmd == HDIO_GET_IDENTITY)
6665 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006666 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006667 }
Brian King35a39692006-09-25 12:39:20 -05006668
6669 return -EINVAL;
6670}
6671
6672/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006673 * ipr_ioa_info - Get information about the card/driver
 6674 * @host: scsi host struct
6675 *
6676 * Return value:
6677 * pointer to buffer with description string
6678 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006679static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006680{
6681 static char buffer[512];
6682 struct ipr_ioa_cfg *ioa_cfg;
6683 unsigned long lock_flags = 0;
6684
6685 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6686
6687 spin_lock_irqsave(host->host_lock, lock_flags);
6688 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6689 spin_unlock_irqrestore(host->host_lock, lock_flags);
6690
6691 return buffer;
6692}
6693
6694static struct scsi_host_template driver_template = {
6695 .module = THIS_MODULE,
6696 .name = "IPR",
6697 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006698 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006699 .queuecommand = ipr_queuecommand,
6700 .eh_abort_handler = ipr_eh_abort,
6701 .eh_device_reset_handler = ipr_eh_dev_reset,
6702 .eh_host_reset_handler = ipr_eh_host_reset,
6703 .slave_alloc = ipr_slave_alloc,
6704 .slave_configure = ipr_slave_configure,
6705 .slave_destroy = ipr_slave_destroy,
Brian Kingf688f962014-12-02 12:47:37 -06006706 .scan_finished = ipr_scan_finished,
Brian King35a39692006-09-25 12:39:20 -05006707 .target_alloc = ipr_target_alloc,
6708 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006709 .change_queue_depth = ipr_change_queue_depth,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006710 .bios_param = ipr_biosparam,
6711 .can_queue = IPR_MAX_COMMANDS,
6712 .this_id = -1,
6713 .sg_tablesize = IPR_MAX_SGLIST,
6714 .max_sectors = IPR_IOA_MAX_SECTORS,
6715 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6716 .use_clustering = ENABLE_CLUSTERING,
6717 .shost_attrs = ipr_ioa_attrs,
6718 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen54b2b502013-10-23 06:25:40 -04006719 .proc_name = IPR_NAME,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006720};
6721
Brian King35a39692006-09-25 12:39:20 -05006722/**
6723 * ipr_ata_phy_reset - libata phy_reset handler
6724 * @ap: ata port to reset
6725 *
6726 **/
6727static void ipr_ata_phy_reset(struct ata_port *ap)
6728{
6729 unsigned long flags;
6730 struct ipr_sata_port *sata_port = ap->private_data;
6731 struct ipr_resource_entry *res = sata_port->res;
6732 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6733 int rc;
6734
6735 ENTER;
6736 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006737 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006738 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6739 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6740 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6741 }
6742
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006743 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006744 goto out_unlock;
6745
6746 rc = ipr_device_reset(ioa_cfg, res);
6747
6748 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006749 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006750 goto out_unlock;
6751 }
6752
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006753 ap->link.device[0].class = res->ata_class;
6754 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006755 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006756
6757out_unlock:
6758 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6759 LEAVE;
6760}
6761
6762/**
6763 * ipr_ata_post_internal - Cleanup after an internal command
6764 * @qc: ATA queued command
6765 *
6766 * Return value:
6767 * none
6768 **/
6769static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6770{
6771 struct ipr_sata_port *sata_port = qc->ap->private_data;
6772 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6773 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006774 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006775 unsigned long flags;
6776
6777 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006778 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006779 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6780 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6781 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6782 }
6783
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006784 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006785 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006786 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6787 if (ipr_cmd->qc == qc) {
6788 ipr_device_reset(ioa_cfg, sata_port->res);
6789 break;
6790 }
Brian King35a39692006-09-25 12:39:20 -05006791 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006792 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006793 }
6794 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6795}
6796
6797/**
Brian King35a39692006-09-25 12:39:20 -05006798 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6799 * @regs: destination
6800 * @tf: source ATA taskfile
6801 *
6802 * Return value:
6803 * none
6804 **/
6805static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6806 struct ata_taskfile *tf)
6807{
6808 regs->feature = tf->feature;
6809 regs->nsect = tf->nsect;
6810 regs->lbal = tf->lbal;
6811 regs->lbam = tf->lbam;
6812 regs->lbah = tf->lbah;
6813 regs->device = tf->device;
6814 regs->command = tf->command;
6815 regs->hob_feature = tf->hob_feature;
6816 regs->hob_nsect = tf->hob_nsect;
6817 regs->hob_lbal = tf->hob_lbal;
6818 regs->hob_lbam = tf->hob_lbam;
6819 regs->hob_lbah = tf->hob_lbah;
6820 regs->ctl = tf->ctl;
6821}
6822
6823/**
6824 * ipr_sata_done - done function for SATA commands
6825 * @ipr_cmd: ipr command struct
6826 *
6827 * This function is invoked by the interrupt handler for
6828 * ops generated by the SCSI mid-layer to SATA devices
6829 *
6830 * Return value:
6831 * none
6832 **/
6833static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6834{
6835 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6836 struct ata_queued_cmd *qc = ipr_cmd->qc;
6837 struct ipr_sata_port *sata_port = qc->ap->private_data;
6838 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006839 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006840
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006841 spin_lock(&ipr_cmd->hrrq->_lock);
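	/* The SATA status area sits at different offsets in the 32-bit and
	 * 64-bit IOASA layouts, so copy from whichever layout this adapter uses. */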
Wayne Boyer96d21f02010-05-10 09:13:27 -07006842 if (ipr_cmd->ioa_cfg->sis64)
6843 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6844 sizeof(struct ipr_ioasa_gata));
6845 else
6846 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6847 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006848 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6849
Wayne Boyer96d21f02010-05-10 09:13:27 -07006850 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006851 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006852
6853 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006854 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006855 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006856 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006857 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006858 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006859 ata_qc_complete(qc);
6860}
6861
6862/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006863 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6864 * @ipr_cmd: ipr command struct
6865 * @qc: ATA queued command
6866 *
6867 **/
6868static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6869 struct ata_queued_cmd *qc)
6870{
6871 u32 ioadl_flags = 0;
6872 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006873 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006874 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6875 int len = qc->nbytes;
6876 struct scatterlist *sg;
6877 unsigned int si;
6878 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6879
6880 if (len == 0)
6881 return;
6882
6883 if (qc->dma_dir == DMA_TO_DEVICE) {
6884 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6885 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6886 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6887 ioadl_flags = IPR_IOADL_FLAGS_READ;
6888
6889 ioarcb->data_transfer_length = cpu_to_be32(len);
6890 ioarcb->ioadl_len =
6891 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6892 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006893 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006894
6895 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6896 ioadl64->flags = cpu_to_be32(ioadl_flags);
6897 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6898 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6899
6900 last_ioadl64 = ioadl64;
6901 ioadl64++;
6902 }
6903
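	/* Flag the final descriptor so the adapter knows where the
	 * scatter/gather list ends. */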
6904 if (likely(last_ioadl64))
6905 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6906}
6907
6908/**
Brian King35a39692006-09-25 12:39:20 -05006909 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6910 * @ipr_cmd: ipr command struct
6911 * @qc: ATA queued command
6912 *
6913 **/
6914static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6915 struct ata_queued_cmd *qc)
6916{
6917 u32 ioadl_flags = 0;
6918 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006919 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006920 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006921 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006922 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006923 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006924
6925 if (len == 0)
6926 return;
6927
6928 if (qc->dma_dir == DMA_TO_DEVICE) {
6929 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6930 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006931 ioarcb->data_transfer_length = cpu_to_be32(len);
6932 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006933 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6934 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6935 ioadl_flags = IPR_IOADL_FLAGS_READ;
6936 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6937 ioarcb->read_ioadl_len =
6938 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6939 }
6940
Tejun Heoff2aeb12007-12-05 16:43:11 +09006941 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006942 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6943 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006944
6945 last_ioadl = ioadl;
6946 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006947 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006948
6949 if (likely(last_ioadl))
6950 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006951}
6952
6953/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006954 * ipr_qc_defer - Get a free ipr_cmd
6955 * @qc: queued command
6956 *
6957 * Return value:
 6958 * 0 on success / ATA_DEFER_LINK if no free command blocks are available
6959 **/
6960static int ipr_qc_defer(struct ata_queued_cmd *qc)
6961{
6962 struct ata_port *ap = qc->ap;
6963 struct ipr_sata_port *sata_port = ap->private_data;
6964 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6965 struct ipr_cmnd *ipr_cmd;
6966 struct ipr_hrr_queue *hrrq;
6967 int hrrq_id;
6968
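	/*
	 * Pick a host RRQ for this command. With more than one queue the
	 * index rotates, which is assumed here to spread commands across
	 * the available queues.
	 */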
6969 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6970 hrrq = &ioa_cfg->hrrq[hrrq_id];
6971
6972 qc->lldd_task = NULL;
6973 spin_lock(&hrrq->_lock);
6974 if (unlikely(hrrq->ioa_is_dead)) {
6975 spin_unlock(&hrrq->_lock);
6976 return 0;
6977 }
6978
6979 if (unlikely(!hrrq->allow_cmds)) {
6980 spin_unlock(&hrrq->_lock);
6981 return ATA_DEFER_LINK;
6982 }
6983
6984 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6985 if (ipr_cmd == NULL) {
6986 spin_unlock(&hrrq->_lock);
6987 return ATA_DEFER_LINK;
6988 }
6989
6990 qc->lldd_task = ipr_cmd;
6991 spin_unlock(&hrrq->_lock);
6992 return 0;
6993}
6994
6995/**
Brian King35a39692006-09-25 12:39:20 -05006996 * ipr_qc_issue - Issue a SATA qc to a device
6997 * @qc: queued command
6998 *
6999 * Return value:
 7000 * 0 if success / AC_ERR_* on failure
7001 **/
7002static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7003{
7004 struct ata_port *ap = qc->ap;
7005 struct ipr_sata_port *sata_port = ap->private_data;
7006 struct ipr_resource_entry *res = sata_port->res;
7007 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7008 struct ipr_cmnd *ipr_cmd;
7009 struct ipr_ioarcb *ioarcb;
7010 struct ipr_ioarcb_ata_regs *regs;
7011
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007012 if (qc->lldd_task == NULL)
7013 ipr_qc_defer(qc);
7014
7015 ipr_cmd = qc->lldd_task;
7016 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05007017 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05007018
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007019 qc->lldd_task = NULL;
7020 spin_lock(&ipr_cmd->hrrq->_lock);
7021 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7022 ipr_cmd->hrrq->ioa_is_dead)) {
7023 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7024 spin_unlock(&ipr_cmd->hrrq->_lock);
7025 return AC_ERR_SYSTEM;
7026 }
7027
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007028 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05007029 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05007030
Wayne Boyera32c0552010-02-19 13:23:36 -08007031 if (ioa_cfg->sis64) {
7032 regs = &ipr_cmd->i.ata_ioadl.regs;
7033 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7034 } else
7035 regs = &ioarcb->u.add_data.u.regs;
7036
7037 memset(regs, 0, sizeof(*regs));
7038 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05007039
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007040 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05007041 ipr_cmd->qc = qc;
7042 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007043 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05007044 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7045 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7046 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01007047 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05007048
Wayne Boyera32c0552010-02-19 13:23:36 -08007049 if (ioa_cfg->sis64)
7050 ipr_build_ata_ioadl64(ipr_cmd, qc);
7051 else
7052 ipr_build_ata_ioadl(ipr_cmd, qc);
7053
Brian King35a39692006-09-25 12:39:20 -05007054 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7055 ipr_copy_sata_tf(regs, &qc->tf);
7056 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007057 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05007058
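	/* Map the ATA/ATAPI protocol onto the adapter's flags: DMA protocols
	 * set the DMA transfer type and ATAPI protocols mark the op as a
	 * packet command. */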
7059 switch (qc->tf.protocol) {
7060 case ATA_PROT_NODATA:
7061 case ATA_PROT_PIO:
7062 break;
7063
7064 case ATA_PROT_DMA:
7065 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7066 break;
7067
Tejun Heo0dc36882007-12-18 16:34:43 -05007068 case ATAPI_PROT_PIO:
7069 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05007070 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7071 break;
7072
Tejun Heo0dc36882007-12-18 16:34:43 -05007073 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05007074 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7075 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7076 break;
7077
7078 default:
7079 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007080 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05007081 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05007082 }
7083
Wayne Boyera32c0552010-02-19 13:23:36 -08007084 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007085 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08007086
Brian King35a39692006-09-25 12:39:20 -05007087 return 0;
7088}
7089
7090/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007091 * ipr_qc_fill_rtf - Read result TF
7092 * @qc: ATA queued command
7093 *
7094 * Return value:
7095 * true
7096 **/
7097static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7098{
7099 struct ipr_sata_port *sata_port = qc->ap->private_data;
7100 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7101 struct ata_taskfile *tf = &qc->result_tf;
7102
7103 tf->feature = g->error;
7104 tf->nsect = g->nsect;
7105 tf->lbal = g->lbal;
7106 tf->lbam = g->lbam;
7107 tf->lbah = g->lbah;
7108 tf->device = g->device;
7109 tf->command = g->status;
7110 tf->hob_nsect = g->hob_nsect;
7111 tf->hob_lbal = g->hob_lbal;
7112 tf->hob_lbam = g->hob_lbam;
7113 tf->hob_lbah = g->hob_lbah;
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007114
7115 return true;
7116}
7117
Brian King35a39692006-09-25 12:39:20 -05007118static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05007119 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09007120 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05007121 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05007122 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007123 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05007124 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007125 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05007126 .port_start = ata_sas_port_start,
7127 .port_stop = ata_sas_port_stop
7128};
7129
7130static struct ata_port_info sata_port_info = {
Shaohua Li5067c042015-03-12 10:32:18 -07007131 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7132 ATA_FLAG_SAS_HOST,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03007133 .pio_mask = ATA_PIO4_ONLY,
7134 .mwdma_mask = ATA_MWDMA2,
7135 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05007136 .port_ops = &ipr_sata_ops
7137};
7138
Linus Torvalds1da177e2005-04-16 15:20:36 -07007139#ifdef CONFIG_PPC_PSERIES
7140static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007141 PVR_NORTHSTAR,
7142 PVR_PULSAR,
7143 PVR_POWER4,
7144 PVR_ICESTAR,
7145 PVR_SSTAR,
7146 PVR_POWER4p,
7147 PVR_630,
7148 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149};
7150
7151/**
7152 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7153 * @ioa_cfg: ioa cfg struct
7154 *
7155 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7156 * certain pSeries hardware. This function determines if the given
 7157 * adapter is in one of these configurations or not.
7158 *
7159 * Return value:
7160 * 1 if adapter is not supported / 0 if adapter is supported
7161 **/
7162static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7163{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007164 int i;
7165
Auke Kok44c10132007-06-08 15:46:36 -07007166 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007167 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007168 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07007169 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007170 }
7171 }
7172 return 0;
7173}
7174#else
7175#define ipr_invalid_adapter(ioa_cfg) 0
7176#endif
7177
7178/**
7179 * ipr_ioa_bringdown_done - IOA bring down completion.
7180 * @ipr_cmd: ipr command struct
7181 *
7182 * This function processes the completion of an adapter bring down.
7183 * It wakes any reset sleepers.
7184 *
7185 * Return value:
7186 * IPR_RC_JOB_RETURN
7187 **/
7188static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7189{
7190 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007191 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007192
7193 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06007194 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7195 ipr_trace;
Brian Kingb0e17a92017-08-01 10:21:30 -05007196 ioa_cfg->scsi_unblock = 1;
7197 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06007198 }
7199
Linus Torvalds1da177e2005-04-16 15:20:36 -07007200 ioa_cfg->in_reset_reload = 0;
7201 ioa_cfg->reset_retries = 0;
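	/* Mark every HRRQ dead under its lock so no new commands are accepted
	 * before the reset waiters are woken. */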
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007202 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7203 spin_lock(&ioa_cfg->hrrq[i]._lock);
7204 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7205 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7206 }
7207 wmb();
7208
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007209 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007210 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007211 LEAVE;
7212
7213 return IPR_RC_JOB_RETURN;
7214}
7215
7216/**
7217 * ipr_ioa_reset_done - IOA reset completion.
7218 * @ipr_cmd: ipr command struct
7219 *
7220 * This function processes the completion of an adapter reset.
7221 * It schedules any necessary mid-layer add/removes and
7222 * wakes any reset sleepers.
7223 *
7224 * Return value:
7225 * IPR_RC_JOB_RETURN
7226 **/
7227static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7228{
7229 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7230 struct ipr_resource_entry *res;
Brian Kingafc3f832016-08-24 12:56:51 -05007231 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007232
7233 ENTER;
7234 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007235 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7236 spin_lock(&ioa_cfg->hrrq[j]._lock);
7237 ioa_cfg->hrrq[j].allow_cmds = 1;
7238 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7239 }
7240 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007241 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06007242 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007243
7244 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Brian Kingf688f962014-12-02 12:47:37 -06007245 if (res->add_to_ml || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007246 ipr_trace;
7247 break;
7248 }
7249 }
7250 schedule_work(&ioa_cfg->work_q);
7251
Brian Kingafc3f832016-08-24 12:56:51 -05007252 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7253 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7254 if (j < IPR_NUM_LOG_HCAMS)
7255 ipr_send_hcam(ioa_cfg,
7256 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7257 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007258 else
Brian Kingafc3f832016-08-24 12:56:51 -05007259 ipr_send_hcam(ioa_cfg,
7260 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7261 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007262 }
7263
Brian King6bb04172007-04-26 16:00:08 -05007264 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007265 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7266
7267 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007268 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007269 wake_up_all(&ioa_cfg->reset_wait_q);
7270
Brian Kingb0e17a92017-08-01 10:21:30 -05007271 ioa_cfg->scsi_unblock = 1;
Brian Kingf688f962014-12-02 12:47:37 -06007272 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007273 LEAVE;
7274 return IPR_RC_JOB_RETURN;
7275}
7276
7277/**
7278 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7279 * @supported_dev: supported device struct
7280 * @vpids: vendor product id struct
7281 *
7282 * Return value:
7283 * none
7284 **/
7285static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7286 struct ipr_std_inq_vpids *vpids)
7287{
7288 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7289 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7290 supported_dev->num_records = 1;
7291 supported_dev->data_length =
7292 cpu_to_be16(sizeof(struct ipr_supported_device));
7293 supported_dev->reserved = 0;
7294}
7295
7296/**
7297 * ipr_set_supported_devs - Send Set Supported Devices for a device
7298 * @ipr_cmd: ipr command struct
7299 *
Wayne Boyera32c0552010-02-19 13:23:36 -08007300 * This function sends a Set Supported Devices to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07007301 *
7302 * Return value:
7303 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7304 **/
7305static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7306{
7307 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7308 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007309 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7310 struct ipr_resource_entry *res = ipr_cmd->u.res;
7311
7312 ipr_cmd->job_step = ipr_ioa_reset_done;
7313
7314 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06007315 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007316 continue;
7317
7318 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007319 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007320
7321 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7322 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7323 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7324
7325 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007326 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007327 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7328 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7329
Wayne Boyera32c0552010-02-19 13:23:36 -08007330 ipr_init_ioadl(ipr_cmd,
7331 ioa_cfg->vpd_cbs_dma +
7332 offsetof(struct ipr_misc_cbs, supp_dev),
7333 sizeof(struct ipr_supported_device),
7334 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007335
7336 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7337 IPR_SET_SUP_DEVICE_TIMEOUT);
7338
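		/* On non-SIS64 adapters re-arm this job step so the next SCSI disk
		 * in used_res_q is processed when this command completes; SIS64
		 * adapters are assumed to cover all devices with the single
		 * IPR_SET_ALL_SUPPORTED_DEVICES request. */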
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007339 if (!ioa_cfg->sis64)
7340 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007341 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007342 return IPR_RC_JOB_RETURN;
7343 }
7344
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007345 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007346 return IPR_RC_JOB_CONTINUE;
7347}
7348
7349/**
7350 * ipr_get_mode_page - Locate specified mode page
7351 * @mode_pages: mode page buffer
7352 * @page_code: page code to find
7353 * @len: minimum required length for mode page
7354 *
7355 * Return value:
7356 * pointer to mode page / NULL on failure
7357 **/
7358static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7359 u32 page_code, u32 len)
7360{
7361 struct ipr_mode_page_hdr *mode_hdr;
7362 u32 page_length;
7363 u32 length;
7364
7365 if (!mode_pages || (mode_pages->hdr.length == 0))
7366 return NULL;
7367
7368 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7369 mode_hdr = (struct ipr_mode_page_hdr *)
7370 (mode_pages->data + mode_pages->hdr.block_desc_len);
7371
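	/* Walk the variable-length mode page headers that follow the block
	 * descriptors until the requested page code is found or the reported
	 * length is exhausted. */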
7372 while (length) {
7373 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7374 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7375 return mode_hdr;
7376 break;
7377 } else {
7378 page_length = (sizeof(struct ipr_mode_page_hdr) +
7379 mode_hdr->page_length);
7380 length -= page_length;
7381 mode_hdr = (struct ipr_mode_page_hdr *)
7382 ((unsigned long)mode_hdr + page_length);
7383 }
7384 }
7385 return NULL;
7386}
7387
7388/**
7389 * ipr_check_term_power - Check for term power errors
7390 * @ioa_cfg: ioa config struct
7391 * @mode_pages: IOAFP mode pages buffer
7392 *
7393 * Check the IOAFP's mode page 28 for term power errors
7394 *
7395 * Return value:
7396 * nothing
7397 **/
7398static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7399 struct ipr_mode_pages *mode_pages)
7400{
7401 int i;
7402 int entry_length;
7403 struct ipr_dev_bus_entry *bus;
7404 struct ipr_mode_page28 *mode_page;
7405
7406 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7407 sizeof(struct ipr_mode_page28));
7408
7409 entry_length = mode_page->entry_length;
7410
7411 bus = mode_page->bus;
7412
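	/* Bus entries are variable length, so advance by the reported
	 * entry_length rather than sizeof(*bus). */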
7413 for (i = 0; i < mode_page->num_entries; i++) {
7414 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7415 dev_err(&ioa_cfg->pdev->dev,
7416 "Term power is absent on scsi bus %d\n",
7417 bus->res_addr.bus);
7418 }
7419
7420 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7421 }
7422}
7423
7424/**
7425 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7426 * @ioa_cfg: ioa config struct
7427 *
7428 * Looks through the config table checking for SES devices. If
 7429 * an SES device is listed in the SES table with a maximum SCSI
 7430 * bus speed, the bus speed is limited accordingly.
7431 *
7432 * Return value:
7433 * none
7434 **/
7435static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7436{
7437 u32 max_xfer_rate;
7438 int i;
7439
7440 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7441 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7442 ioa_cfg->bus_attr[i].bus_width);
7443
7444 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7445 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7446 }
7447}
7448
7449/**
7450 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7451 * @ioa_cfg: ioa config struct
7452 * @mode_pages: mode page 28 buffer
7453 *
7454 * Updates mode page 28 based on driver configuration
7455 *
7456 * Return value:
7457 * none
7458 **/
7459static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007460 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007461{
7462 int i, entry_length;
7463 struct ipr_dev_bus_entry *bus;
7464 struct ipr_bus_attributes *bus_attr;
7465 struct ipr_mode_page28 *mode_page;
7466
7467 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7468 sizeof(struct ipr_mode_page28));
7469
7470 entry_length = mode_page->entry_length;
7471
7472 /* Loop for each device bus entry */
7473 for (i = 0, bus = mode_page->bus;
7474 i < mode_page->num_entries;
7475 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7476 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7477 dev_err(&ioa_cfg->pdev->dev,
7478 "Invalid resource address reported: 0x%08X\n",
7479 IPR_GET_PHYS_LOC(bus->res_addr));
7480 continue;
7481 }
7482
7483 bus_attr = &ioa_cfg->bus_attr[i];
7484 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7485 bus->bus_width = bus_attr->bus_width;
7486 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7487 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7488 if (bus_attr->qas_enabled)
7489 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7490 else
7491 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7492 }
7493}
7494
7495/**
7496 * ipr_build_mode_select - Build a mode select command
7497 * @ipr_cmd: ipr command struct
7498 * @res_handle: resource handle to send command to
 7499 * @parm: Byte 1 of Mode Select command
7500 * @dma_addr: DMA buffer address
7501 * @xfer_len: data transfer length
7502 *
7503 * Return value:
7504 * none
7505 **/
7506static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007507 __be32 res_handle, u8 parm,
7508 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007509{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007510 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7511
7512 ioarcb->res_handle = res_handle;
7513 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7514 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7515 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7516 ioarcb->cmd_pkt.cdb[1] = parm;
7517 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7518
Wayne Boyera32c0552010-02-19 13:23:36 -08007519 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007520}
7521
7522/**
7523 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7524 * @ipr_cmd: ipr command struct
7525 *
7526 * This function sets up the SCSI bus attributes and sends
7527 * a Mode Select for Page 28 to activate them.
7528 *
7529 * Return value:
7530 * IPR_RC_JOB_RETURN
7531 **/
7532static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7533{
7534 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7535 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7536 int length;
7537
7538 ENTER;
Brian King47338042006-02-08 20:57:42 -06007539 ipr_scsi_bus_speed_limit(ioa_cfg);
7540 ipr_check_term_power(ioa_cfg, mode_pages);
7541 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7542 length = mode_pages->hdr.length + 1;
7543 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544
7545 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7546 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7547 length);
7548
Wayne Boyerf72919e2010-02-19 13:24:21 -08007549 ipr_cmd->job_step = ipr_set_supported_devs;
7550 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7551 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007552 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7553
7554 LEAVE;
7555 return IPR_RC_JOB_RETURN;
7556}
7557
7558/**
7559 * ipr_build_mode_sense - Builds a mode sense command
7560 * @ipr_cmd: ipr command struct
 7561 * @res_handle: resource handle to send command to
7562 * @parm: Byte 2 of mode sense command
7563 * @dma_addr: DMA address of mode sense buffer
7564 * @xfer_len: Size of DMA buffer
7565 *
7566 * Return value:
7567 * none
7568 **/
7569static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7570 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007571 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007572{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7574
7575 ioarcb->res_handle = res_handle;
7576 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7577 ioarcb->cmd_pkt.cdb[2] = parm;
7578 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7579 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7580
Wayne Boyera32c0552010-02-19 13:23:36 -08007581 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007582}
7583
7584/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007585 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7586 * @ipr_cmd: ipr command struct
7587 *
7588 * This function handles the failure of an IOA bringup command.
7589 *
7590 * Return value:
7591 * IPR_RC_JOB_RETURN
7592 **/
7593static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7594{
7595 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007596 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007597
7598 dev_err(&ioa_cfg->pdev->dev,
7599 "0x%02X failed with IOASC: 0x%08X\n",
7600 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7601
7602 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007603 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007604 return IPR_RC_JOB_RETURN;
7605}
7606
7607/**
7608 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7609 * @ipr_cmd: ipr command struct
7610 *
7611 * This function handles the failure of a Mode Sense to the IOAFP.
7612 * Some adapters do not handle all mode pages.
7613 *
7614 * Return value:
7615 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7616 **/
7617static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7618{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007619 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007620 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007621
7622 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007623 ipr_cmd->job_step = ipr_set_supported_devs;
7624 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7625 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007626 return IPR_RC_JOB_CONTINUE;
7627 }
7628
7629 return ipr_reset_cmd_failed(ipr_cmd);
7630}
7631
7632/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007633 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7634 * @ipr_cmd: ipr command struct
7635 *
 7636 * This function sends a Page 28 mode sense to the IOA to
7637 * retrieve SCSI bus attributes.
7638 *
7639 * Return value:
7640 * IPR_RC_JOB_RETURN
7641 **/
7642static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7643{
7644 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7645
7646 ENTER;
7647 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7648 0x28, ioa_cfg->vpd_cbs_dma +
7649 offsetof(struct ipr_misc_cbs, mode_pages),
7650 sizeof(struct ipr_mode_pages));
7651
7652 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007653 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007654
7655 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7656
7657 LEAVE;
7658 return IPR_RC_JOB_RETURN;
7659}
7660
7661/**
Brian Kingac09c342007-04-26 16:00:16 -05007662 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7663 * @ipr_cmd: ipr command struct
7664 *
7665 * This function enables dual IOA RAID support if possible.
7666 *
7667 * Return value:
7668 * IPR_RC_JOB_RETURN
7669 **/
7670static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7671{
7672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7673 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7674 struct ipr_mode_page24 *mode_page;
7675 int length;
7676
7677 ENTER;
7678 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7679 sizeof(struct ipr_mode_page24));
7680
7681 if (mode_page)
7682 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7683
7684 length = mode_pages->hdr.length + 1;
7685 mode_pages->hdr.length = 0;
7686
7687 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7688 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7689 length);
7690
7691 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7692 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7693
7694 LEAVE;
7695 return IPR_RC_JOB_RETURN;
7696}
7697
7698/**
7699 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7700 * @ipr_cmd: ipr command struct
7701 *
7702 * This function handles the failure of a Mode Sense to the IOAFP.
7703 * Some adapters do not handle all mode pages.
7704 *
7705 * Return value:
7706 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7707 **/
7708static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7709{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007710 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007711
7712 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7713 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7714 return IPR_RC_JOB_CONTINUE;
7715 }
7716
7717 return ipr_reset_cmd_failed(ipr_cmd);
7718}
7719
7720/**
7721 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7722 * @ipr_cmd: ipr command struct
7723 *
 7724 * This function sends a mode sense to the IOA to retrieve
7725 * the IOA Advanced Function Control mode page.
7726 *
7727 * Return value:
7728 * IPR_RC_JOB_RETURN
7729 **/
7730static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7731{
7732 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7733
7734 ENTER;
7735 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7736 0x24, ioa_cfg->vpd_cbs_dma +
7737 offsetof(struct ipr_misc_cbs, mode_pages),
7738 sizeof(struct ipr_mode_pages));
7739
7740 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7741 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7742
7743 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7744
7745 LEAVE;
7746 return IPR_RC_JOB_RETURN;
7747}
7748
7749/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007750 * ipr_init_res_table - Initialize the resource table
7751 * @ipr_cmd: ipr command struct
7752 *
7753 * This function looks through the existing resource table, comparing
7754 * it with the config table. This function will take care of old/new
7755 * devices and schedule adding/removing them from the mid-layer
7756 * as appropriate.
7757 *
7758 * Return value:
7759 * IPR_RC_JOB_CONTINUE
7760 **/
7761static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7762{
7763 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7764 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007765 struct ipr_config_table_entry_wrapper cfgtew;
7766 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007767 LIST_HEAD(old_res);
7768
7769 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007770 if (ioa_cfg->sis64)
7771 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7772 else
7773 flag = ioa_cfg->u.cfg_table->hdr.flags;
7774
7775 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007776 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7777
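	/* Park every known resource on a temporary list, then walk the freshly
	 * fetched config table: matching entries move back to used_res_q, new
	 * entries are flagged add_to_ml, and whatever is left over afterwards
	 * is stale and gets flagged del_from_ml or freed. */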
7778 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7779 list_move_tail(&res->queue, &old_res);
7780
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007781 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007782 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007783 else
7784 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7785
7786 for (i = 0; i < entries; i++) {
7787 if (ioa_cfg->sis64)
7788 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7789 else
7790 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007791 found = 0;
7792
7793 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007794 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007795 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7796 found = 1;
7797 break;
7798 }
7799 }
7800
7801 if (!found) {
7802 if (list_empty(&ioa_cfg->free_res_q)) {
7803 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7804 break;
7805 }
7806
7807 found = 1;
7808 res = list_entry(ioa_cfg->free_res_q.next,
7809 struct ipr_resource_entry, queue);
7810 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007811 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007812 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007813 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7814 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007815
7816 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007817 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007818 }
7819
7820 list_for_each_entry_safe(res, temp, &old_res, queue) {
7821 if (res->sdev) {
7822 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007823 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007824 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007825 }
7826 }
7827
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007828 list_for_each_entry_safe(res, temp, &old_res, queue) {
7829 ipr_clear_res_target(res);
7830 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7831 }
7832
Brian Kingac09c342007-04-26 16:00:16 -05007833 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7834 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7835 else
7836 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007837
7838 LEAVE;
7839 return IPR_RC_JOB_CONTINUE;
7840}
7841
7842/**
7843 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7844 * @ipr_cmd: ipr command struct
7845 *
7846 * This function sends a Query IOA Configuration command
7847 * to the adapter to retrieve the IOA configuration table.
7848 *
7849 * Return value:
7850 * IPR_RC_JOB_RETURN
7851 **/
7852static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7853{
7854 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7855 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007857 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007858
7859 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007860 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7861 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007862 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7863 ucode_vpd->major_release, ucode_vpd->card_type,
7864 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7865 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7866 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7867
7868 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007869 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007870 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7871 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007872
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007873 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007874 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007875
7876 ipr_cmd->job_step = ipr_init_res_table;
7877
7878 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7879
7880 LEAVE;
7881 return IPR_RC_JOB_RETURN;
7882}
7883
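/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA Service Action
 * @ipr_cmd: ipr command struct
 *
 * An invalid request type/packet response is treated as non-fatal and the
 * reset job continues; any other failure is handled as a failed reset
 * command.
 *
 * Return value:
 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/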
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02007884static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7885{
7886 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7887
7888 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7889 return IPR_RC_JOB_CONTINUE;
7890
7891 return ipr_reset_cmd_failed(ipr_cmd);
7892}
7893
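/**
 * ipr_build_ioa_service_action - Set up an IOA Service Action command packet
 * @ipr_cmd: ipr command struct
 * @res_handle: resource handle to send command to
 * @sa_code: service action sub-code
 *
 * Return value:
 * 	none
 **/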
7894static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7895 __be32 res_handle, u8 sa_code)
7896{
7897 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7898
7899 ioarcb->res_handle = res_handle;
7900 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7901 ioarcb->cmd_pkt.cdb[1] = sa_code;
7902 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7903}
7904
7905/**
 7906 * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service action
 7907 * @ipr_cmd: ipr command struct
 7908 *
 7909 * Return value:
 7910 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7911 **/
7912static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7913{
7914 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7915 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7916 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7917
7918 ENTER;
7919
7920 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7921
7922 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7923 ipr_build_ioa_service_action(ipr_cmd,
7924 cpu_to_be32(IPR_IOA_RES_HANDLE),
7925 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7926
7927 ioarcb->cmd_pkt.cdb[2] = 0x40;
7928
7929 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7930 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7931 IPR_SET_SUP_DEVICE_TIMEOUT);
7932
7933 LEAVE;
7934 return IPR_RC_JOB_RETURN;
7935 }
7936
7937 LEAVE;
7938 return IPR_RC_JOB_CONTINUE;
7939}
7940
Linus Torvalds1da177e2005-04-16 15:20:36 -07007941/**
7942 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 7943 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1, e.g. EVPD)
 * @page: page code to request
 * @dma_addr: DMA address of the inquiry response buffer
 * @xfer_len: size of the response buffer
7944 *
7945 * This utility function sends an inquiry to the adapter.
7946 *
7947 * Return value:
7948 * none
7949 **/
7950static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007951 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007952{
7953 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007954
7955 ENTER;
7956 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7957 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7958
7959 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7960 ioarcb->cmd_pkt.cdb[1] = flags;
7961 ioarcb->cmd_pkt.cdb[2] = page;
7962 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7963
Wayne Boyera32c0552010-02-19 13:23:36 -08007964 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007965
7966 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7967 LEAVE;
7968}
7969
7970/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007971 * ipr_inquiry_page_supported - Is the given inquiry page supported
7972 * @page0: inquiry page 0 buffer
7973 * @page: page code.
7974 *
7975 * This function determines if the specified inquiry page is supported.
7976 *
7977 * Return value:
7978 * 1 if page is supported / 0 if not
7979 **/
7980static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7981{
7982 int i;
7983
7984 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7985 if (page0->page[i] == page)
7986 return 1;
7987
7988 return 0;
7989}
7990
7991/**
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02007992 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7993 * @ipr_cmd: ipr command struct
7994 *
7995 * This function sends a Page 0xC4 inquiry to the adapter
 7996 * to retrieve the adapter's cache capabilities.
7997 *
7998 * Return value:
7999 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8000 **/
8001static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8002{
8003 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8004 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8005 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8006
8007 ENTER;
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02008008 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008009 memset(pageC4, 0, sizeof(*pageC4));
8010
8011 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8012 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8013 (ioa_cfg->vpd_cbs_dma
8014 + offsetof(struct ipr_misc_cbs,
8015 pageC4_data)),
8016 sizeof(struct ipr_inquiry_pageC4));
8017 return IPR_RC_JOB_RETURN;
8018 }
8019
8020 LEAVE;
8021 return IPR_RC_JOB_CONTINUE;
8022}
8023
8024/**
Brian Kingac09c342007-04-26 16:00:16 -05008025 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8026 * @ipr_cmd: ipr command struct
8027 *
8028 * This function sends a Page 0xD0 inquiry to the adapter
8029 * to retrieve adapter capabilities.
8030 *
8031 * Return value:
8032 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8033 **/
8034static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8035{
8036 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8037 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8038 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8039
8040 ENTER;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008041 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
Brian Kingac09c342007-04-26 16:00:16 -05008042 memset(cap, 0, sizeof(*cap));
8043
8044 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8045 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8046 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8047 sizeof(struct ipr_inquiry_cap));
8048 return IPR_RC_JOB_RETURN;
8049 }
8050
8051 LEAVE;
8052 return IPR_RC_JOB_CONTINUE;
8053}
8054
8055/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008056 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8057 * @ipr_cmd: ipr command struct
8058 *
8059 * This function sends a Page 3 inquiry to the adapter
8060 * to retrieve software VPD information.
8061 *
8062 * Return value:
8063 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8064 **/
8065static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8066{
8067 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008068
8069 ENTER;
8070
Brian Kingac09c342007-04-26 16:00:16 -05008071 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008072
8073 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8074 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8075 sizeof(struct ipr_inquiry_page3));
8076
8077 LEAVE;
8078 return IPR_RC_JOB_RETURN;
8079}
8080
8081/**
8082 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8083 * @ipr_cmd: ipr command struct
8084 *
8085 * This function sends a Page 0 inquiry to the adapter
8086 * to retrieve supported inquiry pages.
8087 *
8088 * Return value:
8089 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8090 **/
8091static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8092{
8093 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008094 char type[5];
8095
8096 ENTER;
8097
8098 /* Grab the type out of the VPD and store it away */
8099 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8100 type[4] = '\0';
8101 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8102
Brian Kingf688f962014-12-02 12:47:37 -06008103 if (ipr_invalid_adapter(ioa_cfg)) {
8104 dev_err(&ioa_cfg->pdev->dev,
8105 "Adapter not supported in this hardware configuration.\n");
8106
8107 if (!ipr_testmode) {
8108 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8109 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8110 list_add_tail(&ipr_cmd->queue,
8111 &ioa_cfg->hrrq->hrrq_free_q);
8112 return IPR_RC_JOB_RETURN;
8113 }
8114 }
8115
brking@us.ibm.com62275042005-11-01 17:01:14 -06008116 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008117
brking@us.ibm.com62275042005-11-01 17:01:14 -06008118 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8119 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8120 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008121
8122 LEAVE;
8123 return IPR_RC_JOB_RETURN;
8124}
8125
8126/**
8127 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8128 * @ipr_cmd: ipr command struct
8129 *
8130 * This function sends a standard inquiry to the adapter.
8131 *
8132 * Return value:
8133 * IPR_RC_JOB_RETURN
8134 **/
8135static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8136{
8137 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8138
8139 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008140 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008141
8142 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8143 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8144 sizeof(struct ipr_ioa_vpd));
8145
8146 LEAVE;
8147 return IPR_RC_JOB_RETURN;
8148}
8149
8150/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008151 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008152 * @ipr_cmd: ipr command struct
8153 *
8154 * This function sends an Identify Host Request Response Queue
8155 * command to establish the HRRQ with the adapter.
8156 *
8157 * Return value:
8158 * IPR_RC_JOB_RETURN
8159 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08008160static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008161{
8162 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8163 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008164 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008165
8166 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008167 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Brian King87adbe02016-09-16 16:51:37 -05008168 if (ioa_cfg->identify_hrrq_index == 0)
8169 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008170
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008171 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8172 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008173
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008174 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8175 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008176
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008177 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8178 if (ioa_cfg->sis64)
8179 ioarcb->cmd_pkt.cdb[1] = 0x1;
8180
8181 if (ioa_cfg->nvectors == 1)
8182 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8183 else
8184 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8185
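		/*
		 * Pack the HRRQ DMA address into CDB bytes 2-5 (MSB first) and
		 * the queue length in bytes into CDB bytes 7-8; SIS64 adapters
		 * carry the upper 32 address bits in CDB bytes 10-13 below.
		 */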
8186 ioarcb->cmd_pkt.cdb[2] =
8187 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8188 ioarcb->cmd_pkt.cdb[3] =
8189 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8190 ioarcb->cmd_pkt.cdb[4] =
8191 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8192 ioarcb->cmd_pkt.cdb[5] =
8193 ((u64) hrrq->host_rrq_dma) & 0xff;
8194 ioarcb->cmd_pkt.cdb[7] =
8195 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8196 ioarcb->cmd_pkt.cdb[8] =
8197 (sizeof(u32) * hrrq->size) & 0xff;
8198
8199 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008200 ioarcb->cmd_pkt.cdb[9] =
8201 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008202
8203 if (ioa_cfg->sis64) {
8204 ioarcb->cmd_pkt.cdb[10] =
8205 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8206 ioarcb->cmd_pkt.cdb[11] =
8207 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8208 ioarcb->cmd_pkt.cdb[12] =
8209 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8210 ioarcb->cmd_pkt.cdb[13] =
8211 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8212 }
8213
8214 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008215 ioarcb->cmd_pkt.cdb[14] =
8216 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008217
8218 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8219 IPR_INTERNAL_TIMEOUT);
8220
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008221 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8222 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008223
8224 LEAVE;
8225 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08008226 }
8227
Linus Torvalds1da177e2005-04-16 15:20:36 -07008228 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008229 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008230}
8231
8232/**
8233 * ipr_reset_timer_done - Adapter reset timer function
8234 * @t: Timer context used to fetch ipr command struct
8235 *
8236 * Description: This function is used in adapter reset processing
8237 * for timing events. If the reset_cmd pointer in the IOA
8238 * config struct is not this adapter's, we are doing nested
8239 * resets and fail_all_ops will take care of freeing the
8240 * command block.
8241 *
8242 * Return value:
8243 * none
8244 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07008245static void ipr_reset_timer_done(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008246{
Kees Cook738c6ec2017-08-18 16:53:24 -07008247 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008248 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8249 unsigned long lock_flags = 0;
8250
8251 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8252
8253 if (ioa_cfg->reset_cmd == ipr_cmd) {
8254 list_del(&ipr_cmd->queue);
8255 ipr_cmd->done(ipr_cmd);
8256 }
8257
8258 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8259}
8260
8261/**
8262 * ipr_reset_start_timer - Start a timer for adapter reset job
8263 * @ipr_cmd: ipr command struct
8264 * @timeout: timeout value
8265 *
8266 * Description: This function is used in adapter reset processing
8267 * for timing events. If the reset_cmd pointer in the IOA
8268 * config struct is not this adapter's, we are doing nested
8269 * resets and fail_all_ops will take care of freeing the
8270 * command block.
8271 *
8272 * Return value:
8273 * none
8274 **/
8275static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8276 unsigned long timeout)
8277{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008278
8279 ENTER;
8280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008281 ipr_cmd->done = ipr_reset_ioa_job;
8282
Linus Torvalds1da177e2005-04-16 15:20:36 -07008283 ipr_cmd->timer.expires = jiffies + timeout;
Kees Cook841b86f2017-10-23 09:40:42 +02008284 ipr_cmd->timer.function = ipr_reset_timer_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008285 add_timer(&ipr_cmd->timer);
8286}
8287
8288/**
8289 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8290 * @ioa_cfg: ioa cfg struct
8291 *
8292 * Return value:
8293 * nothing
8294 **/
8295static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8296{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008297 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008298
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008299 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008300 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008301 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8302
8303 /* Initialize Host RRQ pointers */
8304 hrrq->hrrq_start = hrrq->host_rrq;
8305 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8306 hrrq->hrrq_curr = hrrq->hrrq_start;
8307 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008308 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008309 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008310 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008311
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008312 ioa_cfg->identify_hrrq_index = 0;
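	/*
	 * When more than one HRRQ is configured, start round-robin selection
	 * at queue 1; queue 0 (IPR_INIT_HRRQ) is reserved for internal
	 * commands.
	 */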
8313 if (ioa_cfg->hrrq_num == 1)
8314 atomic_set(&ioa_cfg->hrrq_index, 0);
8315 else
8316 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008317
8318 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008319 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008320}
8321
8322/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008323 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8324 * @ipr_cmd: ipr command struct
8325 *
8326 * Return value:
8327 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8328 **/
8329static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8330{
8331 unsigned long stage, stage_time;
8332 u32 feedback;
8333 volatile u32 int_reg;
8334 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8335 u64 maskval = 0;
8336
8337 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8338 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8339 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8340
8341 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8342
8343 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07008344 if (stage_time == 0)
8345 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8346 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08008347 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8348 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8349 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8350
8351 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8352 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8353 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8354 stage_time = ioa_cfg->transop_timeout;
8355 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8356 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07008357 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8358 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8359 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8360 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8361 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8362 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8363 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8364 return IPR_RC_JOB_CONTINUE;
8365 }
Wayne Boyer214777b2010-02-19 13:24:26 -08008366 }
8367
Wayne Boyer214777b2010-02-19 13:24:26 -08008368 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
Kees Cook841b86f2017-10-23 09:40:42 +02008369 ipr_cmd->timer.function = ipr_oper_timeout;
Wayne Boyer214777b2010-02-19 13:24:26 -08008370 ipr_cmd->done = ipr_reset_ioa_job;
8371 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008372
8373 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08008374
8375 return IPR_RC_JOB_RETURN;
8376}
8377
8378/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008379 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8380 * @ipr_cmd: ipr command struct
8381 *
8382 * This function reinitializes some control blocks and
8383 * enables destructive diagnostics on the adapter.
8384 *
8385 * Return value:
8386 * IPR_RC_JOB_RETURN
8387 **/
8388static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8389{
8390 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8391 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07008392 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008393 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008394
8395 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08008396 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008397 ipr_init_ioa_mem(ioa_cfg);
8398
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008399 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8400 spin_lock(&ioa_cfg->hrrq[i]._lock);
8401 ioa_cfg->hrrq[i].allow_interrupts = 1;
8402 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8403 }
8404 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07008405 if (ioa_cfg->sis64) {
8406 /* Set the adapter to the correct endian mode. */
8407 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8408 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8409 }
8410
Wayne Boyer7be96902010-05-10 09:14:07 -07008411 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008412
8413 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8414 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08008415 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008416 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8417 return IPR_RC_JOB_CONTINUE;
8418 }
8419
8420 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08008421 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008422
Wayne Boyer7be96902010-05-10 09:14:07 -07008423 if (ioa_cfg->sis64) {
8424 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8425 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8426 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8427 } else
8428 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08008429
Linus Torvalds1da177e2005-04-16 15:20:36 -07008430 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8431
8432 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8433
Wayne Boyer214777b2010-02-19 13:24:26 -08008434 if (ioa_cfg->sis64) {
8435 ipr_cmd->job_step = ipr_reset_next_stage;
8436 return IPR_RC_JOB_CONTINUE;
8437 }
8438
Brian King5469cb52007-03-29 12:42:40 -05008439 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Kees Cook841b86f2017-10-23 09:40:42 +02008440 ipr_cmd->timer.function = ipr_oper_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008441 ipr_cmd->done = ipr_reset_ioa_job;
8442 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008443 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008444
8445 LEAVE;
8446 return IPR_RC_JOB_RETURN;
8447}
8448
8449/**
8450 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8451 * @ipr_cmd: ipr command struct
8452 *
8453 * This function is invoked when an adapter dump has run out
8454 * of processing time.
8455 *
8456 * Return value:
8457 * IPR_RC_JOB_CONTINUE
8458 **/
8459static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8460{
8461 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8462
8463 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05008464 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8465 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008466 ioa_cfg->sdt_state = ABORT_DUMP;
8467
Brian King4c647e92011-10-15 09:08:56 -05008468 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008469 ipr_cmd->job_step = ipr_reset_alert;
8470
8471 return IPR_RC_JOB_CONTINUE;
8472}
8473
8474/**
8475 * ipr_unit_check_no_data - Log a unit check/no data error log
8476 * @ioa_cfg: ioa config struct
8477 *
8478 * Logs an error indicating the adapter unit checked, but for some
8479 * reason, we were unable to fetch the unit check buffer.
8480 *
8481 * Return value:
8482 * nothing
8483 **/
8484static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8485{
8486 ioa_cfg->errors_logged++;
8487 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8488}
8489
8490/**
8491 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8492 * @ioa_cfg: ioa config struct
8493 *
8494 * Fetches the unit check buffer from the adapter by clocking the data
8495 * through the mailbox register.
8496 *
8497 * Return value:
8498 * nothing
8499 **/
8500static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8501{
8502 unsigned long mailbox;
8503 struct ipr_hostrcb *hostrcb;
8504 struct ipr_uc_sdt sdt;
8505 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008506 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008507
8508 mailbox = readl(ioa_cfg->ioa_mailbox);
8509
Wayne Boyerdcbad002010-02-19 13:24:14 -08008510 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008511 ipr_unit_check_no_data(ioa_cfg);
8512 return;
8513 }
8514
8515 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8516 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8517 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8518
Wayne Boyerdcbad002010-02-19 13:24:14 -08008519 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8520 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8521 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008522 ipr_unit_check_no_data(ioa_cfg);
8523 return;
8524 }
8525
8526 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008527 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8528 length = be32_to_cpu(sdt.entry[0].end_token);
8529 else
8530 length = (be32_to_cpu(sdt.entry[0].end_token) -
8531 be32_to_cpu(sdt.entry[0].start_token)) &
8532 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008533
8534 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8535 struct ipr_hostrcb, queue);
Brian Kingafc3f832016-08-24 12:56:51 -05008536 list_del_init(&hostrcb->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008537 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8538
8539 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008540 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008541 (__be32 *)&hostrcb->hcam,
8542 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8543
Brian King65f56472007-04-26 16:00:12 -05008544 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008545 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008546 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008547 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8548 ioa_cfg->sdt_state == GET_DUMP)
8549 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8550 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008551 ipr_unit_check_no_data(ioa_cfg);
8552
8553 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8554}
8555
8556/**
Wayne Boyer110def82010-11-04 09:36:16 -07008557 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8558 * @ipr_cmd: ipr command struct
8559 *
8560 * Description: This function retrieves the unit check buffer from the IOA.
8561 *
8562 * Return value:
8563 * IPR_RC_JOB_RETURN
8564 **/
8565static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8566{
8567 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8568
8569 ENTER;
8570 ioa_cfg->ioa_unit_checked = 0;
8571 ipr_get_unit_check_buffer(ioa_cfg);
8572 ipr_cmd->job_step = ipr_reset_alert;
8573 ipr_reset_start_timer(ipr_cmd, 0);
8574
8575 LEAVE;
8576 return IPR_RC_JOB_RETURN;
8577}
8578
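/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to become stable
 * @ipr_cmd: ipr command struct
 *
 * Description: Waits for the SIS64 mailbox register to become stable (or for
 * the wait time to expire) before moving the dump state to READ_DUMP and
 * scheduling the dump worker. Non-SIS64 adapters proceed immediately, and
 * nothing is done if a dump is no longer being requested.
 *
 * Return value:
 *	IPR_RC_JOB_RETURN
 **/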
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008579static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8580{
8581 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8582
8583 ENTER;
8584
8585 if (ioa_cfg->sdt_state != GET_DUMP)
8586 return IPR_RC_JOB_RETURN;
8587
8588 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8589 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8590 IPR_PCII_MAILBOX_STABLE)) {
8591
8592 if (!ipr_cmd->u.time_left)
8593 dev_err(&ioa_cfg->pdev->dev,
8594 "Timed out waiting for Mailbox register.\n");
8595
8596 ioa_cfg->sdt_state = READ_DUMP;
8597 ioa_cfg->dump_timeout = 0;
8598 if (ioa_cfg->sis64)
8599 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8600 else
8601 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8602 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8603 schedule_work(&ioa_cfg->work_q);
8604
8605 } else {
8606 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8607 ipr_reset_start_timer(ipr_cmd,
8608 IPR_CHECK_FOR_RESET_TIMEOUT);
8609 }
8610
8611 LEAVE;
8612 return IPR_RC_JOB_RETURN;
8613}
8614
Wayne Boyer110def82010-11-04 09:36:16 -07008615/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008616 * ipr_reset_restore_cfg_space - Restore PCI config space.
8617 * @ipr_cmd: ipr command struct
8618 *
8619 * Description: This function restores the saved PCI config space of
8620 * the adapter, fails all outstanding ops back to the callers, and
8621 * fetches the dump/unit check if applicable to this reset.
8622 *
8623 * Return value:
8624 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8625 **/
8626static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8627{
8628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008629 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008630
8631 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008632 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008633 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008634
8635 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008636 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008637 return IPR_RC_JOB_CONTINUE;
8638 }
8639
8640 ipr_fail_all_ops(ioa_cfg);
8641
Wayne Boyer8701f182010-06-04 10:26:50 -07008642 if (ioa_cfg->sis64) {
8643 /* Set the adapter to the correct endian mode. */
8644 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8645 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8646 }
8647
Linus Torvalds1da177e2005-04-16 15:20:36 -07008648 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008649 if (ioa_cfg->sis64) {
8650 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8651 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8652 return IPR_RC_JOB_RETURN;
8653 } else {
8654 ioa_cfg->ioa_unit_checked = 0;
8655 ipr_get_unit_check_buffer(ioa_cfg);
8656 ipr_cmd->job_step = ipr_reset_alert;
8657 ipr_reset_start_timer(ipr_cmd, 0);
8658 return IPR_RC_JOB_RETURN;
8659 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008660 }
8661
8662 if (ioa_cfg->in_ioa_bringdown) {
8663 ipr_cmd->job_step = ipr_ioa_bringdown_done;
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008664 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8665 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8666 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008667 } else {
8668 ipr_cmd->job_step = ipr_reset_enable_ioa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008669 }
8670
Wayne Boyer438b0332010-05-10 09:13:00 -07008671 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008672 return IPR_RC_JOB_CONTINUE;
8673}
8674
8675/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008676 * ipr_reset_bist_done - BIST has completed on the adapter.
8677 * @ipr_cmd: ipr command struct
8678 *
8679 * Description: Unblock config space and resume the reset process.
8680 *
8681 * Return value:
8682 * IPR_RC_JOB_CONTINUE
8683 **/
8684static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8685{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008686 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8687
Brian Kinge619e1a2007-01-23 11:25:37 -06008688 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008689 if (ioa_cfg->cfg_locked)
8690 pci_cfg_access_unlock(ioa_cfg->pdev);
8691 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008692 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8693 LEAVE;
8694 return IPR_RC_JOB_CONTINUE;
8695}
8696
8697/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008698 * ipr_reset_start_bist - Run BIST on the adapter.
8699 * @ipr_cmd: ipr command struct
8700 *
8701 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8702 *
8703 * Return value:
8704 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8705 **/
8706static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8707{
8708 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008709 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008710
8711 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008712 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8713 writel(IPR_UPROCI_SIS64_START_BIST,
8714 ioa_cfg->regs.set_uproc_interrupt_reg32);
8715 else
8716 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8717
8718 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008719 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008720 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8721 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008722 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008723 if (ioa_cfg->cfg_locked)
8724 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8725 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008726 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8727 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008728 }
8729
8730 LEAVE;
8731 return rc;
8732}
8733
8734/**
Brian King463fc692007-05-07 17:09:05 -05008735 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8736 * @ipr_cmd: ipr command struct
8737 *
8738 * Description: This clears PCI reset to the adapter and delays two seconds.
8739 *
8740 * Return value:
8741 * IPR_RC_JOB_RETURN
8742 **/
8743static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8744{
8745 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008746 ipr_cmd->job_step = ipr_reset_bist_done;
8747 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8748 LEAVE;
8749 return IPR_RC_JOB_RETURN;
8750}
8751
8752/**
Brian King2796ca52015-03-26 11:23:52 -05008753 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8754 * @work: work struct
8755 *
8756 * Description: This pulses a warm reset to the adapter's PCI slot.
8757 *
8758 **/
8759static void ipr_reset_reset_work(struct work_struct *work)
8760{
8761 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8762 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8763 struct pci_dev *pdev = ioa_cfg->pdev;
8764 unsigned long lock_flags = 0;
8765
8766 ENTER;
8767 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8768 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8769 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8770
8771 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8772 if (ioa_cfg->reset_cmd == ipr_cmd)
8773 ipr_reset_ioa_job(ipr_cmd);
8774 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8775 LEAVE;
8776}
8777
8778/**
Brian King463fc692007-05-07 17:09:05 -05008779 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8780 * @ipr_cmd: ipr command struct
8781 *
8782 * Description: This asserts PCI reset to the adapter.
8783 *
8784 * Return value:
8785 * IPR_RC_JOB_RETURN
8786 **/
8787static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8788{
8789 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Brian King463fc692007-05-07 17:09:05 -05008790
8791 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05008792 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8793 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
Brian King463fc692007-05-07 17:09:05 -05008794 ipr_cmd->job_step = ipr_reset_slot_reset_done;
Brian King463fc692007-05-07 17:09:05 -05008795 LEAVE;
8796 return IPR_RC_JOB_RETURN;
8797}
8798
8799/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008800 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8801 * @ipr_cmd: ipr command struct
8802 *
8803 * Description: This attempts to block config access to the IOA.
8804 *
8805 * Return value:
8806 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8807 **/
8808static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8809{
8810 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8811 int rc = IPR_RC_JOB_CONTINUE;
8812
8813 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8814 ioa_cfg->cfg_locked = 1;
8815 ipr_cmd->job_step = ioa_cfg->reset;
8816 } else {
8817 if (ipr_cmd->u.time_left) {
8818 rc = IPR_RC_JOB_RETURN;
8819 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8820 ipr_reset_start_timer(ipr_cmd,
8821 IPR_CHECK_FOR_RESET_TIMEOUT);
8822 } else {
8823 ipr_cmd->job_step = ioa_cfg->reset;
8824 dev_err(&ioa_cfg->pdev->dev,
8825 "Timed out waiting to lock config access. Resetting anyway.\n");
8826 }
8827 }
8828
8829 return rc;
8830}
8831
8832/**
8833 * ipr_reset_block_config_access - Block config access to the IOA
8834 * @ipr_cmd: ipr command struct
8835 *
8836 * Description: This attempts to block config access to the IOA
8837 *
8838 * Return value:
8839 * IPR_RC_JOB_CONTINUE
8840 **/
8841static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8842{
8843 ipr_cmd->ioa_cfg->cfg_locked = 0;
8844 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8845 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8846 return IPR_RC_JOB_CONTINUE;
8847}
8848
8849/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008850 * ipr_reset_allowed - Query whether or not IOA can be reset
8851 * @ioa_cfg: ioa config struct
8852 *
8853 * Return value:
8854 * 0 if reset not allowed / non-zero if reset is allowed
8855 **/
8856static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8857{
8858 volatile u32 temp_reg;
8859
8860 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8861 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8862}
8863
8864/**
8865 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8866 * @ipr_cmd: ipr command struct
8867 *
8868 * Description: This function waits for adapter permission to run BIST,
8869 * then runs BIST. If the adapter does not give permission after a
8870 * reasonable time, we will reset the adapter anyway. The impact of
8871 * resetting the adapter without warning the adapter is the risk of
8872 * losing the persistent error log on the adapter. If the adapter is
8873 * reset while it is writing to the flash on the adapter, the flash
8874 * segment will have bad ECC and be zeroed.
8875 *
8876 * Return value:
8877 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8878 **/
8879static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8880{
8881 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8882 int rc = IPR_RC_JOB_RETURN;
8883
8884 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8885 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8886 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8887 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008888 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008889 rc = IPR_RC_JOB_CONTINUE;
8890 }
8891
8892 return rc;
8893}
8894
8895/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008896 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008897 * @ipr_cmd: ipr command struct
8898 *
8899 * Description: This function alerts the adapter that it will be reset.
8900 * If memory space is not currently enabled, proceed directly
8901 * to running BIST on the adapter. The timer must always be started
8902 * so we guarantee we do not run BIST from ipr_isr.
8903 *
8904 * Return value:
8905 * IPR_RC_JOB_RETURN
8906 **/
8907static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8908{
8909 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8910 u16 cmd_reg;
8911 int rc;
8912
8913 ENTER;
8914 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8915
8916 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8917 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008918 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008919 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8920 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008921 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008922 }
8923
8924 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8925 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8926
8927 LEAVE;
8928 return IPR_RC_JOB_RETURN;
8929}
8930
8931/**
Brian King4fdd7c72015-03-26 11:23:50 -05008932 * ipr_reset_quiesce_done - Complete IOA disconnect
8933 * @ipr_cmd: ipr command struct
8934 *
8935 * Description: Freeze the adapter to complete quiesce processing
8936 *
8937 * Return value:
8938 * IPR_RC_JOB_CONTINUE
8939 **/
8940static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8941{
8942 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8943
8944 ENTER;
8945 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8946 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8947 LEAVE;
8948 return IPR_RC_JOB_CONTINUE;
8949}
8950
8951/**
8952 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8953 * @ipr_cmd: ipr command struct
8954 *
8955 * Description: Ensure nothing is outstanding to the IOA and
8956 * proceed with IOA disconnect. Otherwise reset the IOA.
8957 *
8958 * Return value:
8959 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8960 **/
8961static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8962{
8963 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8964 struct ipr_cmnd *loop_cmd;
8965 struct ipr_hrr_queue *hrrq;
8966 int rc = IPR_RC_JOB_CONTINUE;
8967 int count = 0;
8968
8969 ENTER;
8970 ipr_cmd->job_step = ipr_reset_quiesce_done;
8971
8972 for_each_hrrq(hrrq, ioa_cfg) {
8973 spin_lock(&hrrq->_lock);
8974 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8975 count++;
8976 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8977 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8978 rc = IPR_RC_JOB_RETURN;
8979 break;
8980 }
8981 spin_unlock(&hrrq->_lock);
8982
8983 if (count)
8984 break;
8985 }
8986
8987 LEAVE;
8988 return rc;
8989}
8990
8991/**
8992 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8993 * @ipr_cmd: ipr command struct
8994 *
8995 * Description: Cancel any outstanding HCAMs to the IOA.
8996 *
8997 * Return value:
8998 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8999 **/
9000static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9001{
9002 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9003 int rc = IPR_RC_JOB_CONTINUE;
9004 struct ipr_cmd_pkt *cmd_pkt;
9005 struct ipr_cmnd *hcam_cmd;
9006 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9007
9008 ENTER;
9009 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9010
9011 if (!hrrq->ioa_is_dead) {
9012 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9013 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9014 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9015 continue;
9016
9017 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9018 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9019 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9020 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9021 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9022 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
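				/*
				 * Encode the 64-bit IOARCB address of the HCAM
				 * being cancelled: upper 32 bits in CDB bytes
				 * 10-13, lower 32 bits in CDB bytes 2-5.
				 */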
9023 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9024 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9025 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9026 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9027 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9028 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9029 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9030 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9031
9032 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9033 IPR_CANCEL_TIMEOUT);
9034
9035 rc = IPR_RC_JOB_RETURN;
9036 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9037 break;
9038 }
9039 }
9040 } else
9041 ipr_cmd->job_step = ipr_reset_alert;
9042
9043 LEAVE;
9044 return rc;
9045}
9046
9047/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009048 * ipr_reset_ucode_download_done - Microcode download completion
9049 * @ipr_cmd: ipr command struct
9050 *
9051 * Description: This function unmaps the microcode download buffer.
9052 *
9053 * Return value:
9054 * IPR_RC_JOB_CONTINUE
9055 **/
9056static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9057{
9058 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9059 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9060
Anton Blanchardd73341b2014-10-30 17:27:08 -05009061 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009062 sglist->num_sg, DMA_TO_DEVICE);
9063
9064 ipr_cmd->job_step = ipr_reset_alert;
9065 return IPR_RC_JOB_CONTINUE;
9066}
9067
9068/**
9069 * ipr_reset_ucode_download - Download microcode to the adapter
9070 * @ipr_cmd: ipr command struct
9071 *
9072 * Description: This function checks to see if there is microcode
9073 * to download to the adapter. If there is, a download is performed.
9074 *
9075 * Return value:
9076 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9077 **/
9078static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9079{
9080 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9081 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9082
9083 ENTER;
9084 ipr_cmd->job_step = ipr_reset_alert;
9085
9086 if (!sglist)
9087 return IPR_RC_JOB_CONTINUE;
9088
9089 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9090 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9091 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9092 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9093 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9094 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9095 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9096
Wayne Boyera32c0552010-02-19 13:23:36 -08009097 if (ioa_cfg->sis64)
9098 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9099 else
9100 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009101 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9102
9103 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9104 IPR_WRITE_BUFFER_TIMEOUT);
9105
9106 LEAVE;
9107 return IPR_RC_JOB_RETURN;
9108}
9109
9110/**
9111 * ipr_reset_shutdown_ioa - Shutdown the adapter
9112 * @ipr_cmd: ipr command struct
9113 *
9114 * Description: This function issues an adapter shutdown of the
9115 * specified type to the specified adapter as part of the
9116 * adapter reset job.
9117 *
9118 * Return value:
9119 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9120 **/
9121static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9122{
9123 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9124 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9125 unsigned long timeout;
9126 int rc = IPR_RC_JOB_CONTINUE;
9127
9128 ENTER;
Brian King4fdd7c72015-03-26 11:23:50 -05009129 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9130 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9131 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009132 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009133 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9134 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9135 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9136 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9137
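		/*
		 * Pick the shutdown timeout based on the shutdown type and
		 * whether this is a dual adapter RAID configuration.
		 */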
Brian Kingac09c342007-04-26 16:00:16 -05009138 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9139 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009140 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9141 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05009142 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9143 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009144 else
Brian Kingac09c342007-04-26 16:00:16 -05009145 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009146
9147 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9148
9149 rc = IPR_RC_JOB_RETURN;
9150 ipr_cmd->job_step = ipr_reset_ucode_download;
9151 } else
9152 ipr_cmd->job_step = ipr_reset_alert;
9153
9154 LEAVE;
9155 return rc;
9156}
9157
9158/**
9159 * ipr_reset_ioa_job - Adapter reset job
9160 * @ipr_cmd: ipr command struct
9161 *
9162 * Description: This function is the job router for the adapter reset job.
9163 *
9164 * Return value:
9165 * none
9166 **/
9167static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9168{
9169 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009170 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9171
9172 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07009173 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009174
9175 if (ioa_cfg->reset_cmd != ipr_cmd) {
9176 /*
9177 * We are doing nested adapter resets and this is
9178 * not the current reset job.
9179 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009180 list_add_tail(&ipr_cmd->queue,
9181 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009182 return;
9183 }
9184
9185 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009186 rc = ipr_cmd->job_step_failed(ipr_cmd);
9187 if (rc == IPR_RC_JOB_RETURN)
9188 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009189 }
9190
9191 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009192 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009193 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009194 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009195}
9196
9197/**
9198 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9199 * @ioa_cfg: ioa config struct
9200 * @job_step: first job step of reset job
9201 * @shutdown_type: shutdown type
9202 *
9203 * Description: This function will initiate the reset of the given adapter
9204 * starting at the selected job step.
9205 * If the caller needs to wait on the completion of the reset,
9206 * the caller must sleep on the reset_wait_q.
9207 *
9208 * Return value:
9209 * none
9210 **/
9211static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9212 int (*job_step) (struct ipr_cmnd *),
9213 enum ipr_shutdown_type shutdown_type)
9214{
9215 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009216 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009217
9218 ioa_cfg->in_reset_reload = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009219 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9220 spin_lock(&ioa_cfg->hrrq[i]._lock);
9221 ioa_cfg->hrrq[i].allow_cmds = 0;
9222 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9223 }
9224 wmb();
Brian Kingb0e17a92017-08-01 10:21:30 -05009225 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9226 ioa_cfg->scsi_unblock = 0;
9227 ioa_cfg->scsi_blocked = 1;
Brian Kingbfae7822013-01-30 23:45:08 -06009228 scsi_block_requests(ioa_cfg->host);
Brian Kingb0e17a92017-08-01 10:21:30 -05009229 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009230
9231 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9232 ioa_cfg->reset_cmd = ipr_cmd;
9233 ipr_cmd->job_step = job_step;
9234 ipr_cmd->u.shutdown_type = shutdown_type;
9235
9236 ipr_reset_ioa_job(ipr_cmd);
9237}
9238
9239/**
9240 * ipr_initiate_ioa_reset - Initiate an adapter reset
9241 * @ioa_cfg: ioa config struct
9242 * @shutdown_type: shutdown type
9243 *
9244 * Description: This function will initiate the reset of the given adapter.
9245 * If the caller needs to wait on the completion of the reset,
9246 * the caller must sleep on the reset_wait_q.
9247 *
9248 * Return value:
9249 * none
9250 **/
9251static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9252 enum ipr_shutdown_type shutdown_type)
9253{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009254 int i;
9255
9256 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009257 return;
9258
Brian King41e9a692011-09-21 08:51:11 -05009259 if (ioa_cfg->in_reset_reload) {
9260 if (ioa_cfg->sdt_state == GET_DUMP)
9261 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9262 else if (ioa_cfg->sdt_state == READ_DUMP)
9263 ioa_cfg->sdt_state = ABORT_DUMP;
9264 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009265
9266 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9267 dev_err(&ioa_cfg->pdev->dev,
9268 "IOA taken offline - error recovery failed\n");
9269
9270 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009271 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9272 spin_lock(&ioa_cfg->hrrq[i]._lock);
9273 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9274 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9275 }
9276 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009277
9278 if (ioa_cfg->in_ioa_bringdown) {
9279 ioa_cfg->reset_cmd = NULL;
9280 ioa_cfg->in_reset_reload = 0;
9281 ipr_fail_all_ops(ioa_cfg);
9282 wake_up_all(&ioa_cfg->reset_wait_q);
9283
Brian Kingbfae7822013-01-30 23:45:08 -06009284 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
Brian Kingb0e17a92017-08-01 10:21:30 -05009285 ioa_cfg->scsi_unblock = 1;
9286 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06009287 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009288 return;
9289 } else {
9290 ioa_cfg->in_ioa_bringdown = 1;
9291 shutdown_type = IPR_SHUTDOWN_NONE;
9292 }
9293 }
9294
9295 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9296 shutdown_type);
9297}
9298
9299/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009300 * ipr_reset_freeze - Hold off all I/O activity
9301 * @ipr_cmd: ipr command struct
9302 *
9303 * Description: If the PCI slot is frozen, hold off all I/O
9304 * activity; then, as soon as the slot is available again,
9305 * initiate an adapter reset.
9306 */
9307static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9308{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009309 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9310 int i;
9311
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009312 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009313 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9314 spin_lock(&ioa_cfg->hrrq[i]._lock);
9315 ioa_cfg->hrrq[i].allow_interrupts = 0;
9316 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9317 }
9318 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009319 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009320 ipr_cmd->done = ipr_reset_ioa_job;
9321 return IPR_RC_JOB_RETURN;
9322}
9323
9324/**
Brian King6270e592014-01-21 12:16:41 -06009325 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9326 * @pdev: PCI device struct
9327 *
9328 * Description: This routine is called to tell us that the MMIO
9329 * access to the IOA has been restored
9330 */
9331static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9332{
9333 unsigned long flags = 0;
9334 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9335
9336 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9337 if (!ioa_cfg->probe_done)
9338 pci_save_state(pdev);
9339 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9340 return PCI_ERS_RESULT_NEED_RESET;
9341}
9342
9343/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009344 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9345 * @pdev: PCI device struct
9346 *
9347 * Description: This routine is called to tell us that the PCI bus
9348 * is down. Can't do anything here, except put the device driver
9349 * into a holding pattern, waiting for the PCI bus to come back.
9350 */
9351static void ipr_pci_frozen(struct pci_dev *pdev)
9352{
9353 unsigned long flags = 0;
9354 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9355
9356 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009357 if (ioa_cfg->probe_done)
9358 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009359 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9360}
9361
9362/**
9363 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9364 * @pdev: PCI device struct
9365 *
9366 * Description: This routine is called by the pci error recovery
9367 * code after the PCI slot has been reset, just before we
9368 * should resume normal operations.
9369 */
9370static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9371{
9372 unsigned long flags = 0;
9373 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9374
9375 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009376 if (ioa_cfg->probe_done) {
9377 if (ioa_cfg->needs_warm_reset)
9378 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9379 else
9380 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9381 IPR_SHUTDOWN_NONE);
9382 } else
9383 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9385 return PCI_ERS_RESULT_RECOVERED;
9386}
9387
9388/**
9389 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9390 * @pdev: PCI device struct
9391 *
9392 * Description: This routine is called when the PCI bus has
9393 * permanently failed.
9394 */
9395static void ipr_pci_perm_failure(struct pci_dev *pdev)
9396{
9397 unsigned long flags = 0;
9398 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009399 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009400
9401 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009402 if (ioa_cfg->probe_done) {
9403 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9404 ioa_cfg->sdt_state = ABORT_DUMP;
9405 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9406 ioa_cfg->in_ioa_bringdown = 1;
9407 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9408 spin_lock(&ioa_cfg->hrrq[i]._lock);
9409 ioa_cfg->hrrq[i].allow_cmds = 0;
9410 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9411 }
9412 wmb();
9413 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9414 } else
9415 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009416 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9417}
9418
9419/**
9420 * ipr_pci_error_detected - Called when a PCI error is detected.
9421 * @pdev: PCI device struct
9422 * @state: PCI channel state
9423 *
9424 * Description: Called when a PCI error is detected.
9425 *
9426 * Return value:
9427 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9428 */
9429static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9430 pci_channel_state_t state)
9431{
9432 switch (state) {
9433 case pci_channel_io_frozen:
9434 ipr_pci_frozen(pdev);
Brian King6270e592014-01-21 12:16:41 -06009435 return PCI_ERS_RESULT_CAN_RECOVER;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009436 case pci_channel_io_perm_failure:
9437 ipr_pci_perm_failure(pdev);
9438 return PCI_ERS_RESULT_DISCONNECT;
9439 break;
9440 default:
9441 break;
9442 }
9443 return PCI_ERS_RESULT_NEED_RESET;
9444}
9445
9446/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009447 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9448 * @ioa_cfg: ioa cfg struct
9449 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08009450 * Description: This is the second phase of adapter initialization.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009451 * This function takes care of initializing the adapter to the point
9452 * where it can accept new commands.
9453 *
9454 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02009455 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009456 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009457static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009458{
9459 int rc = 0;
9460 unsigned long host_lock_flags = 0;
9461
9462 ENTER;
9463 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9464 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
Brian King6270e592014-01-21 12:16:41 -06009465 ioa_cfg->probe_done = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009466 if (ioa_cfg->needs_hard_reset) {
9467 ioa_cfg->needs_hard_reset = 0;
9468 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9469 } else
9470 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9471 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009472 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009473
9474 LEAVE;
9475 return rc;
9476}
9477
9478/**
9479 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9480 * @ioa_cfg: ioa config struct
9481 *
9482 * Return value:
9483 * none
9484 **/
9485static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9486{
9487 int i;
9488
Brian Kinga65e8f12015-03-26 11:23:55 -05009489 if (ioa_cfg->ipr_cmnd_list) {
9490 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9491 if (ioa_cfg->ipr_cmnd_list[i])
9492 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9493 ioa_cfg->ipr_cmnd_list[i],
9494 ioa_cfg->ipr_cmnd_list_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009495
Brian Kinga65e8f12015-03-26 11:23:55 -05009496 ioa_cfg->ipr_cmnd_list[i] = NULL;
9497 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009498 }
9499
9500 if (ioa_cfg->ipr_cmd_pool)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009501 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009502
Brian King89aad422012-03-14 21:20:10 -05009503 kfree(ioa_cfg->ipr_cmnd_list);
9504 kfree(ioa_cfg->ipr_cmnd_list_dma);
9505 ioa_cfg->ipr_cmnd_list = NULL;
9506 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009507 ioa_cfg->ipr_cmd_pool = NULL;
9508}
9509
9510/**
9511 * ipr_free_mem - Frees memory allocated for an adapter
9512 * @ioa_cfg: ioa cfg struct
9513 *
9514 * Return value:
9515 * nothing
9516 **/
9517static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9518{
9519 int i;
9520
9521 kfree(ioa_cfg->res_entries);
Anton Blanchardd73341b2014-10-30 17:27:08 -05009522 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9523 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009524 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009525
9526 for (i = 0; i < ioa_cfg->hrrq_num; i++)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009527 dma_free_coherent(&ioa_cfg->pdev->dev,
9528 sizeof(u32) * ioa_cfg->hrrq[i].size,
9529 ioa_cfg->hrrq[i].host_rrq,
9530 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009531
Anton Blanchardd73341b2014-10-30 17:27:08 -05009532 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9533 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009534
Brian Kingafc3f832016-08-24 12:56:51 -05009535 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009536 dma_free_coherent(&ioa_cfg->pdev->dev,
9537 sizeof(struct ipr_hostrcb),
9538 ioa_cfg->hostrcb[i],
9539 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009540 }
9541
9542 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009543 kfree(ioa_cfg->trace);
9544}
9545
9546/**
Brian King2796ca52015-03-26 11:23:52 -05009547 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9548 * @ioa_cfg: ipr cfg struct
9549 *
9550 * This function frees all allocated IRQs for the
9551 * specified adapter.
9552 *
9553 * Return value:
9554 * none
9555 **/
9556static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9557{
9558 struct pci_dev *pdev = ioa_cfg->pdev;
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009559 int i;
Brian King2796ca52015-03-26 11:23:52 -05009560
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009561 for (i = 0; i < ioa_cfg->nvectors; i++)
9562 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9563 pci_free_irq_vectors(pdev);
Brian King2796ca52015-03-26 11:23:52 -05009564}
9565
9566/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009567 * ipr_free_all_resources - Free all allocated resources for an adapter.
9568 * @ioa_cfg: ioa config struct
9569 *
9570 * This function frees all allocated resources for the
9571 * specified adapter.
9572 *
9573 * Return value:
9574 * none
9575 **/
9576static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9577{
9578 struct pci_dev *pdev = ioa_cfg->pdev;
9579
9580 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05009581 ipr_free_irqs(ioa_cfg);
9582 if (ioa_cfg->reset_work_q)
9583 destroy_workqueue(ioa_cfg->reset_work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009584 iounmap(ioa_cfg->hdw_dma_regs);
9585 pci_release_regions(pdev);
9586 ipr_free_mem(ioa_cfg);
9587 scsi_host_put(ioa_cfg->host);
9588 pci_disable_device(pdev);
9589 LEAVE;
9590}
9591
9592/**
9593 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9594 * @ioa_cfg: ioa config struct
9595 *
9596 * Return value:
9597 * 0 on success / -ENOMEM on allocation failure
9598 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009599static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009600{
9601 struct ipr_cmnd *ipr_cmd;
9602 struct ipr_ioarcb *ioarcb;
9603 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009604 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009605
Anton Blanchardd73341b2014-10-30 17:27:08 -05009606 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009607 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009608
9609 if (!ioa_cfg->ipr_cmd_pool)
9610 return -ENOMEM;
9611
Brian King89aad422012-03-14 21:20:10 -05009612 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9613 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9614
9615 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9616 ipr_free_cmd_blks(ioa_cfg);
9617 return -ENOMEM;
9618 }
9619
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009620 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9621 if (ioa_cfg->hrrq_num > 1) {
9622 if (i == 0) {
9623 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9624 ioa_cfg->hrrq[i].min_cmd_id = 0;
Colin Ian Kingb82378e2017-12-01 13:33:27 +00009625 ioa_cfg->hrrq[i].max_cmd_id =
9626 (entries_each_hrrq - 1);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009627 } else {
9628 entries_each_hrrq =
9629 IPR_NUM_BASE_CMD_BLKS/
9630 (ioa_cfg->hrrq_num - 1);
9631 ioa_cfg->hrrq[i].min_cmd_id =
9632 IPR_NUM_INTERNAL_CMD_BLKS +
9633 (i - 1) * entries_each_hrrq;
9634 ioa_cfg->hrrq[i].max_cmd_id =
9635 (IPR_NUM_INTERNAL_CMD_BLKS +
9636 i * entries_each_hrrq - 1);
9637 }
9638 } else {
9639 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9640 ioa_cfg->hrrq[i].min_cmd_id = 0;
9641 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9642 }
9643 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9644 }
9645
9646 BUG_ON(ioa_cfg->hrrq_num == 0);
9647
9648 i = IPR_NUM_CMD_BLKS -
9649 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9650 if (i > 0) {
9651 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9652 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9653 }
9654
Linus Torvalds1da177e2005-04-16 15:20:36 -07009655 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Souptick Joarder8b1bb6d2018-03-08 18:41:57 +05309656 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9657 GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009658
9659 if (!ipr_cmd) {
9660 ipr_free_cmd_blks(ioa_cfg);
9661 return -ENOMEM;
9662 }
9663
Linus Torvalds1da177e2005-04-16 15:20:36 -07009664 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9665 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9666
9667 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08009668 ipr_cmd->dma_addr = dma_addr;
9669 if (ioa_cfg->sis64)
9670 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9671 else
9672 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9673
Linus Torvalds1da177e2005-04-16 15:20:36 -07009674 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08009675 if (ioa_cfg->sis64) {
9676 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9677 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9678 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009679 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009680 } else {
9681 ioarcb->write_ioadl_addr =
9682 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9683 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9684 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009685 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009686 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009687 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9688 ipr_cmd->cmd_index = i;
9689 ipr_cmd->ioa_cfg = ioa_cfg;
9690 ipr_cmd->sense_buffer_dma = dma_addr +
9691 offsetof(struct ipr_cmnd, sense_buffer);
9692
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009693 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9694 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9695 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9696 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9697 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009698 }
9699
9700 return 0;
9701}
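/*
 * Illustrative sketch (not from this driver): the general dma_pool lifecycle
 * that ipr_alloc_cmd_blks()/ipr_free_cmd_blks() are built around, reduced to
 * its essentials.  The names "example", my_dev, BLK_SIZE and BLK_ALIGN are
 * placeholders invented for this sketch only.
 *
 *	#include <linux/dmapool.h>
 *
 *	struct dma_pool *pool;
 *	void *blk;
 *	dma_addr_t blk_dma;
 *
 *	pool = dma_pool_create("example", &my_dev->dev, BLK_SIZE, BLK_ALIGN, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	blk = dma_pool_zalloc(pool, GFP_KERNEL, &blk_dma);
 *	if (!blk) {
 *		dma_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *
 *	(use blk from the CPU, hand blk_dma to the adapter)
 *
 *	dma_pool_free(pool, blk, blk_dma);
 *	dma_pool_destroy(pool);
 */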
9702
9703/**
9704 * ipr_alloc_mem - Allocate memory for an adapter
9705 * @ioa_cfg: ioa config struct
9706 *
9707 * Return value:
9708 * 0 on success / non-zero for error
9709 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009710static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009711{
9712 struct pci_dev *pdev = ioa_cfg->pdev;
9713 int i, rc = -ENOMEM;
9714
9715 ENTER;
Kees Cook6396bb22018-06-12 14:03:40 -07009716 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9717 sizeof(struct ipr_resource_entry),
9718 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009719
9720 if (!ioa_cfg->res_entries)
9721 goto out;
9722
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009723 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009724 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009725 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9726 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009727
Anton Blanchardd73341b2014-10-30 17:27:08 -05009728 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9729 sizeof(struct ipr_misc_cbs),
9730 &ioa_cfg->vpd_cbs_dma,
9731 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009732
9733 if (!ioa_cfg->vpd_cbs)
9734 goto out_free_res_entries;
9735
9736 if (ipr_alloc_cmd_blks(ioa_cfg))
9737 goto out_free_vpd_cbs;
9738
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009739 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009740 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009741 sizeof(u32) * ioa_cfg->hrrq[i].size,
Anton Blanchardd73341b2014-10-30 17:27:08 -05009742 &ioa_cfg->hrrq[i].host_rrq_dma,
9743 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009744
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009745 if (!ioa_cfg->hrrq[i].host_rrq) {
9746			while (--i >= 0)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009747 dma_free_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009748 sizeof(u32) * ioa_cfg->hrrq[i].size,
9749 ioa_cfg->hrrq[i].host_rrq,
9750 ioa_cfg->hrrq[i].host_rrq_dma);
9751 goto out_ipr_free_cmd_blocks;
9752 }
9753 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9754 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009755
Anton Blanchardd73341b2014-10-30 17:27:08 -05009756 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9757 ioa_cfg->cfg_table_size,
9758 &ioa_cfg->cfg_table_dma,
9759 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009760
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009761 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009762 goto out_free_host_rrq;
9763
Brian Kingafc3f832016-08-24 12:56:51 -05009764 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009765 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9766 sizeof(struct ipr_hostrcb),
9767 &ioa_cfg->hostrcb_dma[i],
9768 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009769
9770 if (!ioa_cfg->hostrcb[i])
9771 goto out_free_hostrcb_dma;
9772
9773 ioa_cfg->hostrcb[i]->hostrcb_dma =
9774 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009775 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009776 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9777 }
9778
Kees Cook6396bb22018-06-12 14:03:40 -07009779 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9780 sizeof(struct ipr_trace_entry),
9781 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009782
9783 if (!ioa_cfg->trace)
9784 goto out_free_hostrcb_dma;
9785
Linus Torvalds1da177e2005-04-16 15:20:36 -07009786 rc = 0;
9787out:
9788 LEAVE;
9789 return rc;
9790
9791out_free_hostrcb_dma:
9792 while (i-- > 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009793 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9794 ioa_cfg->hostrcb[i],
9795 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009796 }
Anton Blanchardd73341b2014-10-30 17:27:08 -05009797 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9798 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009799out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009800 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009801 dma_free_coherent(&pdev->dev,
9802 sizeof(u32) * ioa_cfg->hrrq[i].size,
9803 ioa_cfg->hrrq[i].host_rrq,
9804 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009805 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009806out_ipr_free_cmd_blocks:
9807 ipr_free_cmd_blks(ioa_cfg);
9808out_free_vpd_cbs:
Anton Blanchardd73341b2014-10-30 17:27:08 -05009809 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9810 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009811out_free_res_entries:
9812 kfree(ioa_cfg->res_entries);
9813 goto out;
9814}
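/*
 * Illustrative sketch (not from this driver): the goto-based unwind pattern
 * ipr_alloc_mem() follows for its coherent DMA buffers - each allocation that
 * fails frees everything allocated before it.  The example_alloc(), struct
 * example, CFG_SIZE and RRQ_SIZE names are invented for this sketch.
 *
 *	static int example_alloc(struct pci_dev *pdev, struct example *ex)
 *	{
 *		ex->cfg = dma_alloc_coherent(&pdev->dev, CFG_SIZE,
 *					     &ex->cfg_dma, GFP_KERNEL);
 *		if (!ex->cfg)
 *			return -ENOMEM;
 *
 *		ex->rrq = dma_alloc_coherent(&pdev->dev, RRQ_SIZE,
 *					     &ex->rrq_dma, GFP_KERNEL);
 *		if (!ex->rrq)
 *			goto out_free_cfg;
 *
 *		return 0;
 *
 *	out_free_cfg:
 *		dma_free_coherent(&pdev->dev, CFG_SIZE, ex->cfg, ex->cfg_dma);
 *		return -ENOMEM;
 *	}
 */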
9815
9816/**
9817 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9818 * @ioa_cfg: ioa config struct
9819 *
9820 * Return value:
9821 * none
9822 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009823static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009824{
9825 int i;
9826
9827 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9828 ioa_cfg->bus_attr[i].bus = i;
9829 ioa_cfg->bus_attr[i].qas_enabled = 0;
9830 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9831 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9832 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9833 else
9834 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9835 }
9836}
9837
9838/**
Brian King6270e592014-01-21 12:16:41 -06009839 * ipr_init_regs - Initialize IOA registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07009840 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009841 *
9842 * Return value:
Brian King6270e592014-01-21 12:16:41 -06009843 * none
Linus Torvalds1da177e2005-04-16 15:20:36 -07009844 **/
Brian King6270e592014-01-21 12:16:41 -06009845static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009846{
9847 const struct ipr_interrupt_offsets *p;
9848 struct ipr_interrupts *t;
9849 void __iomem *base;
9850
Linus Torvalds1da177e2005-04-16 15:20:36 -07009851 p = &ioa_cfg->chip_cfg->regs;
9852 t = &ioa_cfg->regs;
9853 base = ioa_cfg->hdw_dma_regs;
9854
9855 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9856 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009857 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009858 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009859 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009860 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009861 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009862 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009863 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009864 t->ioarrin_reg = base + p->ioarrin_reg;
9865 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009866 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009867 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009868 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009869 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009870 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009871
9872 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009873 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009874 t->dump_addr_reg = base + p->dump_addr_reg;
9875 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009876 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009877 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009878}
9879
9880/**
Brian King6270e592014-01-21 12:16:41 -06009881 * ipr_init_ioa_cfg - Initialize IOA config struct
9882 * @ioa_cfg: ioa config struct
9883 * @host: scsi host struct
9884 * @pdev: PCI dev struct
9885 *
9886 * Return value:
9887 * none
9888 **/
9889static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9890 struct Scsi_Host *host, struct pci_dev *pdev)
9891{
9892 int i;
9893
9894 ioa_cfg->host = host;
9895 ioa_cfg->pdev = pdev;
9896 ioa_cfg->log_level = ipr_log_level;
9897 ioa_cfg->doorbell = IPR_DOORBELL;
9898 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9899 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9900 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9901 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9902 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9903 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9904
9905 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9906 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
Brian Kingafc3f832016-08-24 12:56:51 -05009907 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
Brian King6270e592014-01-21 12:16:41 -06009908 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9909 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9910 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9911 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9912 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9913 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9914 ioa_cfg->sdt_state = INACTIVE;
9915
9916 ipr_initialize_bus_attr(ioa_cfg);
9917 ioa_cfg->max_devs_supported = ipr_max_devs;
9918
9919 if (ioa_cfg->sis64) {
9920 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9921 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9922 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9923 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9924 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9925 + ((sizeof(struct ipr_config_table_entry64)
9926 * ioa_cfg->max_devs_supported)));
9927 } else {
9928 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9929 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9930 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9931 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9932 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9933 + ((sizeof(struct ipr_config_table_entry)
9934 * ioa_cfg->max_devs_supported)));
9935 }
9936
Brian Kingf688f962014-12-02 12:47:37 -06009937 host->max_channel = IPR_VSET_BUS;
Brian King6270e592014-01-21 12:16:41 -06009938 host->unique_id = host->host_no;
9939 host->max_cmd_len = IPR_MAX_CDB_LEN;
9940 host->can_queue = ioa_cfg->max_cmds;
9941 pci_set_drvdata(pdev, ioa_cfg);
9942
9943 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9944 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9945 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9946 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9947 if (i == 0)
9948 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9949 else
9950 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9951 }
9952}
9953
9954/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009955 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009956 * @dev_id: PCI device id struct
9957 *
9958 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009959 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009960 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009961static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009962ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009963{
9964 int i;
9965
Linus Torvalds1da177e2005-04-16 15:20:36 -07009966 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9967 if (ipr_chip[i].vendor == dev_id->vendor &&
9968 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009969 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009970 return NULL;
9971}
9972
Brian King6270e592014-01-21 12:16:41 -06009973/**
9974 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9975 * during probe time
9976 * @ioa_cfg: ioa config struct
9977 *
9978 * Return value:
9979 * None
9980 **/
9981static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9982{
9983 struct pci_dev *pdev = ioa_cfg->pdev;
9984
9985 if (pci_channel_offline(pdev)) {
9986 wait_event_timeout(ioa_cfg->eeh_wait_q,
9987 !pci_channel_offline(pdev),
9988 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9989 pci_restore_state(pdev);
9990 }
9991}
9992
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009993static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9994{
9995 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9996
9997 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9998 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9999 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10000 ioa_cfg->vectors_info[vec_idx].
10001 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10002 }
10003}
10004
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010005static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10006 struct pci_dev *pdev)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010007{
10008 int i, rc;
10009
10010 for (i = 1; i < ioa_cfg->nvectors; i++) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010011 rc = request_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010012 ipr_isr_mhrrq,
10013 0,
10014 ioa_cfg->vectors_info[i].desc,
10015 &ioa_cfg->hrrq[i]);
10016 if (rc) {
10017 while (--i >= 0)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010018 free_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010019 &ioa_cfg->hrrq[i]);
10020 return rc;
10021 }
10022 }
10023 return 0;
10024}
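/*
 * Illustrative sketch (not from this driver): the pci_alloc_irq_vectors() /
 * pci_irq_vector() pattern that ipr_request_other_msi_irqs() and
 * ipr_free_irqs() pair up to implement.  my_isr, my_dev, queue[] and
 * MAX_VECS are placeholders invented for this sketch.
 *
 *	int i, rc, nvec;
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, MAX_VECS,
 *				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return nvec;
 *
 *	for (i = 0; i < nvec; i++) {
 *		rc = request_irq(pci_irq_vector(pdev, i), my_isr, 0,
 *				 "example", &my_dev->queue[i]);
 *		if (rc) {
 *			while (--i >= 0)
 *				free_irq(pci_irq_vector(pdev, i),
 *					 &my_dev->queue[i]);
 *			pci_free_irq_vectors(pdev);
 *			return rc;
 *		}
 *	}
 *
 *	Teardown mirrors ipr_free_irqs(): free_irq() per vector, then
 *	pci_free_irq_vectors(pdev).
 */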
10025
Linus Torvalds1da177e2005-04-16 15:20:36 -070010026/**
Wayne Boyer95fecd92009-06-16 15:13:28 -070010027 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10028 * @irq:	interrupt number
 * @devp:	pointer to the ioa config struct
 *
10030 * Description: Simply sets the msi_received flag to 1, indicating that
10031 * Message Signaled Interrupts are supported.
10032 *
10033 * Return value:
10034 * 	IRQ_HANDLED
10035 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010036static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010037{
10038 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10039 unsigned long lock_flags = 0;
10040 irqreturn_t rc = IRQ_HANDLED;
10041
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010042 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010043 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10044
10045 ioa_cfg->msi_received = 1;
10046 wake_up(&ioa_cfg->msi_wait_q);
10047
10048 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10049 return rc;
10050}
10051
10052/**
10053 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10054 * @ioa_cfg:	ioa config struct
 * @pdev:	PCI device struct
 *
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010056 * Description: This routine sets up and initiates a test interrupt to determine
Wayne Boyer95fecd92009-06-16 15:13:28 -070010057 * if the interrupt is received via the ipr_test_intr() service routine.
10058 * If the test fails, the driver will fall back to legacy (INTx) interrupts.
10059 *
10060 * Return value:
10061 * 0 on success / non-zero on failure
10062 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010063static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010064{
10065 int rc;
10066 volatile u32 int_reg;
10067 unsigned long lock_flags = 0;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010068 int irq = pci_irq_vector(pdev, 0);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010069
10070 ENTER;
10071
10072 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10073 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10074 ioa_cfg->msi_received = 0;
10075 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -080010076 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010077 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10078 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10079
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010080 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010081 if (rc) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010082 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010083 return rc;
10084 } else if (ipr_debug)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010085 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010086
Wayne Boyer214777b2010-02-19 13:24:26 -080010087 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010088 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10089 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010090 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010091 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10092
Wayne Boyer95fecd92009-06-16 15:13:28 -070010093 if (!ioa_cfg->msi_received) {
10094 /* MSI test failed */
10095 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10096 rc = -EOPNOTSUPP;
10097 } else if (ipr_debug)
10098 dev_info(&pdev->dev, "MSI test succeeded.\n");
10099
10100 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10101
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010102 free_irq(irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010103
10104 LEAVE;
10105
10106 return rc;
10107}
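/*
 * Illustrative sketch (not from this driver): the flag-plus-waitqueue
 * handshake ipr_test_msi() uses.  A throwaway ISR sets a flag and wakes a
 * waitqueue while the probe path sleeps on it with a timeout.  The ex_*
 * names are invented for this sketch.
 *
 *	struct ex_ctx {
 *		wait_queue_head_t wait_q;
 *		int irq_seen;
 *	};
 *
 *	static irqreturn_t ex_test_isr(int irq, void *devp)
 *	{
 *		struct ex_ctx *ctx = devp;
 *
 *		ctx->irq_seen = 1;
 *		wake_up(&ctx->wait_q);
 *		return IRQ_HANDLED;
 *	}
 *
 *	In probe context:
 *
 *	init_waitqueue_head(&ctx->wait_q);
 *	ctx->irq_seen = 0;
 *	if (request_irq(irq, ex_test_isr, 0, "ex_test", ctx))
 *		return -EIO;
 *	(poke the hardware so it raises one interrupt)
 *	wait_event_timeout(ctx->wait_q, ctx->irq_seen, HZ);
 *	free_irq(irq, ctx);
 *	if (!ctx->irq_seen)
 *		return -EOPNOTSUPP;	(caller falls back to legacy interrupts)
 */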
10108
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010109/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -070010110 * @pdev: PCI device struct
10111 * @dev_id: PCI device id struct
10112 *
10113 * Return value:
10114 * 0 on success / non-zero on failure
10115 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010116static int ipr_probe_ioa(struct pci_dev *pdev,
10117 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010118{
10119 struct ipr_ioa_cfg *ioa_cfg;
10120 struct Scsi_Host *host;
10121 unsigned long ipr_regs_pci;
10122 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -070010123 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -050010124 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010125 unsigned long lock_flags, driver_lock_flags;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010126 unsigned int irq_flag;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010127
10128 ENTER;
10129
Linus Torvalds1da177e2005-04-16 15:20:36 -070010130 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010131 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10132
10133 if (!host) {
10134 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10135 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010136 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010137 }
10138
10139 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10140 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d12012-07-09 21:06:08 -070010141 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010142
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010143 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010144
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010145 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010146 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10147 dev_id->vendor, dev_id->device);
10148 goto out_scsi_host_put;
10149 }
10150
Wayne Boyera32c0552010-02-19 13:23:36 -080010151 /* set SIS 32 or SIS 64 */
10152 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010153 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -050010154 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -050010155 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010156
Brian King5469cb52007-03-29 12:42:40 -050010157 if (ipr_transop_timeout)
10158 ioa_cfg->transop_timeout = ipr_transop_timeout;
10159 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10160 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10161 else
10162 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10163
Auke Kok44c10132007-06-08 15:46:36 -070010164 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -050010165
Brian King6270e592014-01-21 12:16:41 -060010166 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10167
Linus Torvalds1da177e2005-04-16 15:20:36 -070010168 ipr_regs_pci = pci_resource_start(pdev, 0);
10169
10170 rc = pci_request_regions(pdev, IPR_NAME);
10171 if (rc < 0) {
10172 dev_err(&pdev->dev,
10173 "Couldn't register memory range of registers\n");
10174 goto out_scsi_host_put;
10175 }
10176
Brian King6270e592014-01-21 12:16:41 -060010177 rc = pci_enable_device(pdev);
10178
10179 if (rc || pci_channel_offline(pdev)) {
10180 if (pci_channel_offline(pdev)) {
10181 ipr_wait_for_pci_err_recovery(ioa_cfg);
10182 rc = pci_enable_device(pdev);
10183 }
10184
10185 if (rc) {
10186 dev_err(&pdev->dev, "Cannot enable adapter\n");
10187 ipr_wait_for_pci_err_recovery(ioa_cfg);
10188 goto out_release_regions;
10189 }
10190 }
10191
Arjan van de Ven25729a72008-09-28 16:18:02 -070010192 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010193
10194 if (!ipr_regs) {
10195 dev_err(&pdev->dev,
10196 "Couldn't map memory range of registers\n");
10197 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010198 goto out_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010199 }
10200
10201 ioa_cfg->hdw_dma_regs = ipr_regs;
10202 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10203 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10204
Brian King6270e592014-01-21 12:16:41 -060010205 ipr_init_regs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010206
Wayne Boyera32c0552010-02-19 13:23:36 -080010207 if (ioa_cfg->sis64) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010208 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Wayne Boyera32c0552010-02-19 13:23:36 -080010209 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010210 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10211 rc = dma_set_mask_and_coherent(&pdev->dev,
10212 DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010213 }
Wayne Boyera32c0552010-02-19 13:23:36 -080010214 } else
Anton Blanchard869404c2014-10-30 17:27:09 -050010215 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010216
Linus Torvalds1da177e2005-04-16 15:20:36 -070010217 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010218 dev_err(&pdev->dev, "Failed to set DMA mask\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070010219 goto cleanup_nomem;
10220 }
10221
10222 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10223 ioa_cfg->chip_cfg->cache_line_size);
10224
10225 if (rc != PCIBIOS_SUCCESSFUL) {
10226 dev_err(&pdev->dev, "Write of cache line size failed\n");
Brian King6270e592014-01-21 12:16:41 -060010227 ipr_wait_for_pci_err_recovery(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010228 rc = -EIO;
10229 goto cleanup_nomem;
10230 }
10231
Brian King6270e592014-01-21 12:16:41 -060010232 /* Issue MMIO read to ensure card is not in EEH */
10233 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10234 ipr_wait_for_pci_err_recovery(ioa_cfg);
10235
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010236 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10237 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10238 IPR_MAX_MSIX_VECTORS);
10239 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10240 }
10241
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010242 irq_flag = PCI_IRQ_LEGACY;
10243 if (ioa_cfg->ipr_chip->has_msi)
10244 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10245 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10246 if (rc < 0) {
10247 ipr_wait_for_pci_err_recovery(ioa_cfg);
10248 goto cleanup_nomem;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010249 }
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010250 ioa_cfg->nvectors = rc;
10251
10252 if (!pdev->msi_enabled && !pdev->msix_enabled)
10253 ioa_cfg->clear_isr = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010254
Brian King6270e592014-01-21 12:16:41 -060010255 pci_set_master(pdev);
10256
10257 if (pci_channel_offline(pdev)) {
10258 ipr_wait_for_pci_err_recovery(ioa_cfg);
10259 pci_set_master(pdev);
10260 if (pci_channel_offline(pdev)) {
10261 rc = -EIO;
10262 goto out_msi_disable;
10263 }
10264 }
10265
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010266 if (pdev->msi_enabled || pdev->msix_enabled) {
Wayne Boyer95fecd92009-06-16 15:13:28 -070010267 rc = ipr_test_msi(ioa_cfg, pdev);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010268 switch (rc) {
10269 case 0:
10270 dev_info(&pdev->dev,
10271 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10272 pdev->msix_enabled ? "-X" : "");
10273 break;
10274 case -EOPNOTSUPP:
Brian King6270e592014-01-21 12:16:41 -060010275 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010276 pci_free_irq_vectors(pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010277
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010278 ioa_cfg->nvectors = 1;
Benjamin Herrenschmidt9dadfb92016-11-30 15:28:55 -060010279 ioa_cfg->clear_isr = 1;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010280 break;
10281 default:
Wayne Boyer95fecd92009-06-16 15:13:28 -070010282 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010283 }
10284 }
10285
10286 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10287 (unsigned int)num_online_cpus(),
10288 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010289
Linus Torvalds1da177e2005-04-16 15:20:36 -070010290 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010291 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010292
10293 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010294 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010295
10296 rc = ipr_alloc_mem(ioa_cfg);
10297 if (rc < 0) {
10298 dev_err(&pdev->dev,
10299 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -070010300 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010301 }
10302
Brian King6270e592014-01-21 12:16:41 -060010303 /* Save away PCI config space for use following IOA reset */
10304 rc = pci_save_state(pdev);
10305
10306 if (rc != PCIBIOS_SUCCESSFUL) {
10307 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10308 rc = -EIO;
10309 goto cleanup_nolog;
10310 }
10311
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010312 /*
10313 * If HRRQ updated interrupt is not masked, or reset alert is set,
10314 * the card is in an unknown state and needs a hard reset
10315 */
Wayne Boyer214777b2010-02-19 13:24:26 -080010316 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10317 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10318 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010319 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10320 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +100010321 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -050010322 ioa_cfg->needs_hard_reset = 1;
10323 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10324 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010325
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010326 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010327 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010328 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010329
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010330 if (pdev->msi_enabled || pdev->msix_enabled) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010331 name_msi_vectors(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010332 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010333 ioa_cfg->vectors_info[0].desc,
10334 &ioa_cfg->hrrq[0]);
10335 if (!rc)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010336 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010337 } else {
10338 rc = request_irq(pdev->irq, ipr_isr,
10339 IRQF_SHARED,
10340 IPR_NAME, &ioa_cfg->hrrq[0]);
10341 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010342 if (rc) {
10343 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10344 pdev->irq, rc);
10345 goto cleanup_nolog;
10346 }
10347
Brian King463fc692007-05-07 17:09:05 -050010348 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10349 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10350 ioa_cfg->needs_warm_reset = 1;
10351 ioa_cfg->reset = ipr_reset_slot_reset;
Brian King2796ca52015-03-26 11:23:52 -050010352
10353 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10354 WQ_MEM_RECLAIM, host->host_no);
10355
10356 if (!ioa_cfg->reset_work_q) {
10357 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
Wei Yongjunc8e18ac2016-07-29 16:00:45 +000010358 rc = -ENOMEM;
Brian King2796ca52015-03-26 11:23:52 -050010359 goto out_free_irq;
10360 }
Brian King463fc692007-05-07 17:09:05 -050010361 } else
10362 ioa_cfg->reset = ipr_reset_start_bist;
10363
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010364 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010365 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010366 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010367
10368 LEAVE;
10369out:
10370 return rc;
10371
Brian King2796ca52015-03-26 11:23:52 -050010372out_free_irq:
10373 ipr_free_irqs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010374cleanup_nolog:
10375 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010376out_msi_disable:
Brian King6270e592014-01-21 12:16:41 -060010377 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010378 pci_free_irq_vectors(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -070010379cleanup_nomem:
10380 iounmap(ipr_regs);
Brian King6270e592014-01-21 12:16:41 -060010381out_disable:
10382 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010383out_release_regions:
10384 pci_release_regions(pdev);
10385out_scsi_host_put:
10386 scsi_host_put(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010387 goto out;
10388}
10389
10390/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010391 * ipr_initiate_ioa_bringdown - Bring down an adapter
10392 * @ioa_cfg: ioa config struct
10393 * @shutdown_type: shutdown type
10394 *
10395 * Description: This function will initiate bringing down the adapter.
10396 * This consists of issuing an IOA shutdown to the adapter
10397 * to flush the cache, and running BIST.
10398 * If the caller needs to wait on the completion of the reset,
10399 * the caller must sleep on the reset_wait_q.
10400 *
10401 * Return value:
10402 * none
10403 **/
10404static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10405 enum ipr_shutdown_type shutdown_type)
10406{
10407 ENTER;
10408 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10409 ioa_cfg->sdt_state = ABORT_DUMP;
10410 ioa_cfg->reset_retries = 0;
10411 ioa_cfg->in_ioa_bringdown = 1;
10412 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10413 LEAVE;
10414}
10415
10416/**
10417 * __ipr_remove - Remove a single adapter
10418 * @pdev: pci device struct
10419 *
10420 * Adapter hot plug remove entry point.
10421 *
10422 * Return value:
10423 * none
10424 **/
10425static void __ipr_remove(struct pci_dev *pdev)
10426{
10427 unsigned long host_lock_flags = 0;
10428 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Brian Kingbfae7822013-01-30 23:45:08 -060010429 int i;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010430 unsigned long driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010431 ENTER;
10432
10433 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010434 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10436 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10437 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10438 }
10439
Brian Kingbfae7822013-01-30 23:45:08 -060010440 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10441 spin_lock(&ioa_cfg->hrrq[i]._lock);
10442 ioa_cfg->hrrq[i].removing_ioa = 1;
10443 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10444 }
10445 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -070010446 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10447
10448 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10449 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -070010450 flush_work(&ioa_cfg->work_q);
Brian King2796ca52015-03-26 11:23:52 -050010451 if (ioa_cfg->reset_work_q)
10452 flush_workqueue(ioa_cfg->reset_work_q);
wenxiong@linux.vnet.ibm.com9077a942013-03-14 13:52:24 -050010453 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010454 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10455
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010456 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010457 list_del(&ioa_cfg->queue);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010458 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010459
10460 if (ioa_cfg->sdt_state == ABORT_DUMP)
10461 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10462 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10463
10464 ipr_free_all_resources(ioa_cfg);
10465
10466 LEAVE;
10467}
10468
10469/**
10470 * ipr_remove - IOA hot plug remove entry point
10471 * @pdev: pci device struct
10472 *
10473 * Adapter hot plug remove entry point.
10474 *
10475 * Return value:
10476 * none
10477 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010478static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010479{
10480 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10481
10482 ENTER;
10483
Tony Jonesee959b02008-02-22 00:13:36 +010010484 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010485 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +010010486 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010487 &ipr_dump_attr);
Brian Kingafc3f832016-08-24 12:56:51 -050010488 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10489 &ipr_ioa_async_err_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010490 scsi_remove_host(ioa_cfg->host);
10491
10492 __ipr_remove(pdev);
10493
10494 LEAVE;
10495}
10496
10497/**
10498 * ipr_probe - Adapter hot plug add entry point
10499 * @pdev:	PCI device struct
 * @dev_id:	PCI device id struct
 *
10500 * Return value:
10501 * 0 on success / non-zero on failure
10502 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010503static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010504{
10505 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingb195d5e2016-07-15 14:48:03 -050010506 unsigned long flags;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010507 int rc, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010508
10509 rc = ipr_probe_ioa(pdev, dev_id);
10510
10511 if (rc)
10512 return rc;
10513
10514 ioa_cfg = pci_get_drvdata(pdev);
10515 rc = ipr_probe_ioa_part2(ioa_cfg);
10516
10517 if (rc) {
10518 __ipr_remove(pdev);
10519 return rc;
10520 }
10521
10522 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10523
10524 if (rc) {
10525 __ipr_remove(pdev);
10526 return rc;
10527 }
10528
Tony Jonesee959b02008-02-22 00:13:36 +010010529 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010530 &ipr_trace_attr);
10531
10532 if (rc) {
10533 scsi_remove_host(ioa_cfg->host);
10534 __ipr_remove(pdev);
10535 return rc;
10536 }
10537
Brian Kingafc3f832016-08-24 12:56:51 -050010538 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10539 &ipr_ioa_async_err_log);
10540
10541 if (rc) {
10542 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10543 &ipr_dump_attr);
10544 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10545 &ipr_trace_attr);
10546 scsi_remove_host(ioa_cfg->host);
10547 __ipr_remove(pdev);
10548 return rc;
10549 }
10550
Tony Jonesee959b02008-02-22 00:13:36 +010010551 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010552 &ipr_dump_attr);
10553
10554 if (rc) {
Brian Kingafc3f832016-08-24 12:56:51 -050010555 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10556 &ipr_ioa_async_err_log);
Tony Jonesee959b02008-02-22 00:13:36 +010010557 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010558 &ipr_trace_attr);
10559 scsi_remove_host(ioa_cfg->host);
10560 __ipr_remove(pdev);
10561 return rc;
10562 }
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010563 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10564 ioa_cfg->scan_enabled = 1;
10565 schedule_work(&ioa_cfg->work_q);
10566 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010567
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010568 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10569
Jens Axboe89f8b332014-03-13 09:38:42 -060010570 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010571 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010572 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010573 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010574 }
10575 }
10576
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010577 scsi_scan_host(ioa_cfg->host);
10578
Linus Torvalds1da177e2005-04-16 15:20:36 -070010579 return 0;
10580}
10581
10582/**
10583 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010584 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -070010585 *
10586 * This function is invoked upon system shutdown/reboot. It will issue
10587 * an adapter shutdown to the adapter to flush the write cache.
10588 *
10589 * Return value:
10590 * none
10591 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010592static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010593{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010594 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010595 unsigned long lock_flags = 0;
Brian King4fdd7c72015-03-26 11:23:50 -050010596 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010597 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010598
10599 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Jens Axboe89f8b332014-03-13 09:38:42 -060010600 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010601 ioa_cfg->iopoll_weight = 0;
10602 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010603 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010604 }
10605
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010606 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010607 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10608 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10609 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10610 }
10611
Brian King4fdd7c72015-03-26 11:23:50 -050010612 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10613 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10614
10615 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010616 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10617 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Brian King4fdd7c72015-03-26 11:23:50 -050010618 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
Brian King2796ca52015-03-26 11:23:52 -050010619 ipr_free_irqs(ioa_cfg);
Brian King4fdd7c72015-03-26 11:23:50 -050010620 pci_disable_device(ioa_cfg->pdev);
10621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010622}
10623
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010624static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010625 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010626 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010627 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010628 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010629 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010630 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010631 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010632 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010633 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010634 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010635 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010636 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010637 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010638 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010639 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -050010640 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10641 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010642 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010643 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010644 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010645 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10646 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010647 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010648 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10649 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010650 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010651 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010652 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010653 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10654 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -060010655 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010656 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10657 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010658 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -050010659 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10660 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -050010661 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -050010662 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10663 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -070010664 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10665 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -050010666 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -050010667 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010668 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -060010669 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010670 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -060010671 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010672 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10674 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010675 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010676 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10677 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -080010678 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10682 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10683 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
Wayne Boyer32622bd2010-10-18 20:24:34 -070010684 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010685 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
10686 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer5a918352011-10-27 11:58:21 -070010687 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
10688 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
Wayne Boyer32622bd2010-10-18 20:24:34 -070010689 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010690 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010691 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010692 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010693 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010694 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010695 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
Wayne Boyercd9b3d02012-02-23 11:54:55 -080010696 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10697 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
10698 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wayne Boyerd7b46272010-02-19 13:24:38 -080010699 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
wenxiong@linux.vnet.ibm.comb8d5d562013-01-11 17:43:47 -060010700 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10701 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
10702 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10703 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
10704 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10705 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
10706 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10707 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010708 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10709 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
10710 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
Wendy Xiongf94d9962014-01-21 12:16:40 -060010711 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
10712 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
wenxiong@linux.vnet.ibm.com43c5fda2013-07-10 10:46:27 -050010713 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
10714 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10715 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
10716 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10717 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
10718 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10719 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
10720 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10721 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
10722 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10723 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
Wendy Xiong5eeac3e2014-03-12 16:08:52 -050010724 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10725 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
10726 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10727 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
10728 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
10729 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
Wen Xiong00da9ff2016-07-12 16:02:07 -050010730 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10731 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
10732 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
10733 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010734 { }
10735};
10736MODULE_DEVICE_TABLE(pci, ipr_pci_table);
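/*
 * The all-zero entry above terminates the ID table.  MODULE_DEVICE_TABLE()
 * exports these vendor/device/subsystem IDs in the module alias data so
 * userspace (udev/modprobe) can autoload the driver when a matching
 * adapter is discovered.
 */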
10737
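/*
 * PCI error recovery callbacks: invoked by the PCI error recovery core
 * (for example EEH on Power systems) when a fatal error is detected on
 * the adapter's slot, allowing the driver to quiesce, re-enable MMIO,
 * and reinitialize after a slot reset.
 */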
Stephen Hemmingera55b2d22012-09-07 09:33:16 -070010738static const struct pci_error_handlers ipr_err_handler = {
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010739 .error_detected = ipr_pci_error_detected,
Brian King6270e592014-01-21 12:16:41 -060010740 .mmio_enabled = ipr_pci_mmio_enabled,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010741 .slot_reset = ipr_pci_slot_reset,
10742};
10743
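/*
 * PCI driver registration: probe/remove run as matching adapters are
 * added or removed, shutdown runs at system shutdown, and err_handler
 * points at the recovery callbacks above.  Registered from ipr_init().
 */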
Linus Torvalds1da177e2005-04-16 15:20:36 -070010744static struct pci_driver ipr_driver = {
10745 .name = IPR_NAME,
10746 .id_table = ipr_pci_table,
10747 .probe = ipr_probe,
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010748 .remove = ipr_remove,
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010749 .shutdown = ipr_shutdown,
Linas Vepstasf8a88b192006-02-03 16:52:42 -060010750 .err_handler = &ipr_err_handler,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010751};
10752
10753/**
Wayne Boyerf72919e2010-02-19 13:24:21 -080010754 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
10755 *
10756 * Return value:
10757 * none
10758 **/
10759static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
10760{
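	/* Shutdown prepare completed; return the command block to its
	 * HRRQ free queue. */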
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010761 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010762}
10763
10764/**
10765 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:	notifier block
 * @event:	reboot notifier event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
 * @buf:	unused notifier data
10766 *
10767 * Return value:
10768 * NOTIFY_OK on success / NOTIFY_DONE if the event is not handled
10769 **/
10770static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
10771{
10772 struct ipr_cmnd *ipr_cmd;
10773 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010774 unsigned long flags = 0, driver_lock_flags;
Wayne Boyerf72919e2010-02-19 13:24:21 -080010775
10776 if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
10777 return NOTIFY_DONE;
10778
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010779 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010780
10781 list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
10782 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
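		/*
		 * Skip adapters that are not currently accepting commands,
		 * and skip the shutdown prepare entirely on SIS-64 adapters
		 * when ipr_fast_reboot is enabled and the system is restarting.
		 */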
Brian King4fdd7c72015-03-26 11:23:50 -050010783 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
10784 (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
Wayne Boyerf72919e2010-02-19 13:24:21 -080010785 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10786 continue;
10787 }
10788
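		/*
		 * Send an IOA shutdown (prepare for normal) to the adapter
		 * itself; ipr_halt_done() reclaims the command block when
		 * the request completes.
		 */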
10789 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
10790 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
10791 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
10792 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
10793 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
10794
10795 ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
10796 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
10797 }
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010798 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Wayne Boyerf72919e2010-02-19 13:24:21 -080010799
10800 return NOTIFY_OK;
10801}
10802
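/*
 * Reboot notifier block (positional initializers: .notifier_call, .next,
 * .priority).  Registered in ipr_init() so ipr_halt() runs before the
 * system halts, reboots, or powers off.
 */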
10803static struct notifier_block ipr_notifier = {
10804 ipr_halt, NULL, 0
10805};
10806
10807/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010808 * ipr_init - Module entry point
10809 *
10810 * Return value:
10811 * 0 on success / negative value on failure
10812 **/
10813static int __init ipr_init(void)
10814{
10815 ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
10816 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
10817
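	/*
	 * Register the reboot notifier first so ipr_halt() can issue a
	 * shutdown prepare to every adapter on halt/reboot, then register
	 * the PCI driver; its return value (0 or a negative errno) becomes
	 * the module init result.
	 */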
Wayne Boyerf72919e2010-02-19 13:24:21 -080010818 register_reboot_notifier(&ipr_notifier);
Henrik Kretzschmardcbccbde2006-09-25 16:58:58 -070010819 return pci_register_driver(&ipr_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010820}
10821
10822/**
10823 * ipr_exit - Module unload
10824 *
10825 * Module unload entry point.
10826 *
10827 * Return value:
10828 * none
10829 **/
10830static void __exit ipr_exit(void)
10831{
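	/*
	 * Tear down in the reverse order of ipr_init(): drop the reboot
	 * notifier, then unregister the PCI driver, which detaches from
	 * every bound adapter via ipr_remove().
	 */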
Wayne Boyerf72919e2010-02-19 13:24:21 -080010832 unregister_reboot_notifier(&ipr_notifier);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010833 pci_unregister_driver(&ipr_driver);
10834}
10835
10836module_init(ipr_init);
10837module_exit(ipr_exit);