/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

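/*
 * Supported adapter chips, matched by PCI vendor/device ID and mapped to an
 * entry in ipr_chip_cfg[] above.
 */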
static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

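/* Bus speeds indexed by the max_speed module parameter: 0 = 80 MB/s, 1 = U160, 2 = U320 */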
static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default:16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/* A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x060B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4086: SAS Adapter Hardware Configuration Error"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 *	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 *	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
	struct ipr_cmnd *ipr_cmd = NULL;

	if (likely(!list_empty(&hrrq->hrrq_free_q))) {
		ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
			struct ipr_cmnd, queue);
		list_del(&ipr_cmd->queue);
	}

	return ipr_cmd;
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd =
		__ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
	ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:	interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 *	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;
	int i;

	/* Stop new interrupts */
	for (i = 0; i < ioa_cfg->hrrq_num; i++) {
		spin_lock(&ioa_cfg->hrrq[i]._lock);
		ioa_cfg->hrrq[i].allow_interrupts = 0;
		spin_unlock(&ioa_cfg->hrrq[i]._lock);
	}
	wmb();

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 *	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * __ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	ata_qc_complete(qc);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
	unsigned long hrrq_flags;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_sata_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	if (ipr_cmd->eh_comp)
		complete(ipr_cmd->eh_comp);
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 *	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long hrrq_flags;
	struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;

	spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
	__ipr_scsi_eh_done(ipr_cmd);
	spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 *	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;
	struct ipr_hrr_queue *hrrq;

	ENTER;
	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		list_for_each_entry_safe(ipr_cmd,
					temp, &hrrq->hrrq_pending_q, queue) {
			list_del(&ipr_cmd->queue);

			ipr_cmd->s.ioasa.hdr.ioasc =
				cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
			ipr_cmd->s.ioasa.hdr.ilid =
				cpu_to_be32(IPR_DRIVER_ILID);

			if (ipr_cmd->scsi_cmd)
				ipr_cmd->done = __ipr_scsi_eh_done;
			else if (ipr_cmd->qc)
				ipr_cmd->done = __ipr_sata_eh_done;

			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
				     IPR_IOASC_IOA_WAS_RESET);
			del_timer(&ipr_cmd->timer);
			ipr_cmd->done(ipr_cmd);
		}
		spin_unlock(&hrrq->_lock);
	}
	LEAVE;
}

/**
 * ipr_send_command - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 *	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req - Send driver initiated requests.
 * @ipr_cmd:	ipr command struct
 * @done:	done function
 * @timeout_func:	timeout function
 * @timeout:	timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 *	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct timer_list *), u32 timeout)
{
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

	ipr_send_command(ipr_cmd);
}

1011/**
1012 * ipr_internal_cmd_done - Op done function for an internally generated op.
1013 * @ipr_cmd: ipr command struct
1014 *
1015 * This function is the op done function for an internally generated,
1016 * blocking op. It simply wakes the sleeping thread.
1017 *
1018 * Return value:
1019 * none
1020 **/
1021static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1022{
1023 if (ipr_cmd->sibling)
1024 ipr_cmd->sibling = NULL;
1025 else
1026 complete(&ipr_cmd->completion);
1027}
1028
1029/**
Wayne Boyera32c0552010-02-19 13:23:36 -08001030 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1031 * @ipr_cmd: ipr command struct
1032 * @dma_addr: dma address
1033 * @len: transfer length
1034 * @flags: ioadl flag value
1035 *
1036 * This function initializes an ioadl in the case where there is only a single
1037 * descriptor.
1038 *
1039 * Return value:
1040 * nothing
1041 **/
1042static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1043 u32 len, int flags)
1044{
1045 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1046 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1047
1048 ipr_cmd->dma_use_sg = 1;
1049
1050 if (ipr_cmd->ioa_cfg->sis64) {
1051 ioadl64->flags = cpu_to_be32(flags);
1052 ioadl64->data_len = cpu_to_be32(len);
1053 ioadl64->address = cpu_to_be64(dma_addr);
1054
1055 ipr_cmd->ioarcb.ioadl_len =
1056 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1057 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1058 } else {
1059 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1060 ioadl->address = cpu_to_be32(dma_addr);
1061
1062 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1063 ipr_cmd->ioarcb.read_ioadl_len =
1064 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1065 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1066 } else {
1067 ipr_cmd->ioarcb.ioadl_len =
1068 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1069 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1070 }
1071 }
1072}
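/*
 * Example usage, taken from ipr_send_hcam() below: the HCAM buffer is a
 * single contiguous DMA region the adapter writes into, so one read-last
 * descriptor covers the whole transfer:
 *
 *	ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
 *		       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 */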
1073
1074/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1076 * @ipr_cmd: ipr command struct
1077 * @timeout_func: function to invoke if command times out
1078 * @timeout: timeout
1079 *
1080 * Return value:
1081 * none
1082 **/
1083static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
Kees Cook738c6ec2017-08-18 16:53:24 -07001084 void (*timeout_func) (struct timer_list *),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001085 u32 timeout)
1086{
1087 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1088
1089 init_completion(&ipr_cmd->completion);
1090 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1091
1092 spin_unlock_irq(ioa_cfg->host->host_lock);
1093 wait_for_completion(&ipr_cmd->completion);
1094 spin_lock_irq(ioa_cfg->host->host_lock);
1095}
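/*
 * Note (descriptive only): the caller is expected to hold host->host_lock
 * with interrupts disabled. The lock is dropped across wait_for_completion()
 * so the command can be completed (presumably from the adapter's interrupt
 * path via ipr_internal_cmd_done()), then re-acquired before returning.
 */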
1096
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001097static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1098{
Brian King3f1c0582015-07-14 11:41:33 -05001099 unsigned int hrrq;
1100
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001101 if (ioa_cfg->hrrq_num == 1)
Brian King3f1c0582015-07-14 11:41:33 -05001102 hrrq = 0;
1103 else {
1104 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1105 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1106 }
1107 return hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001108}
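/*
 * Illustrative note, not part of the driver source: when more than one HRRQ
 * exists, index 0 is skipped (it is the IPR_INIT_HRRQ used for internal
 * commands) and I/O is round-robined over the remaining queues. With
 * ioa_cfg->hrrq_num == 4, successive counter values map as:
 *
 *	(1 % 3) + 1 = 2,  (2 % 3) + 1 = 3,  (3 % 3) + 1 = 1,
 *	(4 % 3) + 1 = 2,  (5 % 3) + 1 = 3,  ...
 */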
1109
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110/**
1111 * ipr_send_hcam - Send an HCAM to the adapter.
1112 * @ioa_cfg: ioa config struct
1113 * @type: HCAM type
1114 * @hostrcb: hostrcb struct
1115 *
1116 * This function will send a Host Controlled Async command to the adapter.
1117 * If HCAMs are currently not allowed to be issued to the adapter, it will
1118 * place the hostrcb on the free queue.
1119 *
1120 * Return value:
1121 * none
1122 **/
1123static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1124 struct ipr_hostrcb *hostrcb)
1125{
1126 struct ipr_cmnd *ipr_cmd;
1127 struct ipr_ioarcb *ioarcb;
1128
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001129 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001131 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001132 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1133
1134 ipr_cmd->u.hostrcb = hostrcb;
1135 ioarcb = &ipr_cmd->ioarcb;
1136
1137 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1138 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1139 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1140 ioarcb->cmd_pkt.cdb[1] = type;
1141 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1142 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1143
Wayne Boyera32c0552010-02-19 13:23:36 -08001144 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1145 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001146
1147 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1148 ipr_cmd->done = ipr_process_ccn;
1149 else
1150 ipr_cmd->done = ipr_process_error;
1151
1152 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1153
Wayne Boyera32c0552010-02-19 13:23:36 -08001154 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155 } else {
1156 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1157 }
1158}
1159
1160/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001161 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001163 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 *
1165 * Return value:
1166 * none
1167 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001168static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001170 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001171 case IPR_PROTO_SATA:
1172 case IPR_PROTO_SAS_STP:
1173 res->ata_class = ATA_DEV_ATA;
1174 break;
1175 case IPR_PROTO_SATA_ATAPI:
1176 case IPR_PROTO_SAS_STP_ATAPI:
1177 res->ata_class = ATA_DEV_ATAPI;
1178 break;
1179 default:
1180 res->ata_class = ATA_DEV_UNKNOWN;
1181 break;
1182	}
1183}
1184
1185/**
1186 * ipr_init_res_entry - Initialize a resource entry struct.
1187 * @res: resource entry struct
1188 * @cfgtew: config table entry wrapper struct
1189 *
1190 * Return value:
1191 * none
1192 **/
1193static void ipr_init_res_entry(struct ipr_resource_entry *res,
1194 struct ipr_config_table_entry_wrapper *cfgtew)
1195{
1196 int found = 0;
1197 unsigned int proto;
1198 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1199 struct ipr_resource_entry *gscsi_res = NULL;
1200
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001201 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 res->in_erp = 0;
1203 res->add_to_ml = 0;
1204 res->del_from_ml = 0;
1205 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06001206 res->reset_occurred = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001208 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001209
1210 if (ioa_cfg->sis64) {
1211 proto = cfgtew->u.cfgte64->proto;
Brian King359d96e2015-06-11 20:45:20 -05001212 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1213 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001214 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001215 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001216
1217 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1218 sizeof(res->res_path));
1219
1220 res->bus = 0;
Wayne Boyer0cb992ed2010-11-04 09:35:58 -07001221 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1222 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001223 res->lun = scsilun_to_int(&res->dev_lun);
1224
1225 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1226 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1227 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1228 found = 1;
1229 res->target = gscsi_res->target;
1230 break;
1231 }
1232 }
1233 if (!found) {
1234 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1235 ioa_cfg->max_devs_supported);
1236 set_bit(res->target, ioa_cfg->target_ids);
1237 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001238 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1239 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1240 res->target = 0;
1241 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1242 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1243 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1244 ioa_cfg->max_devs_supported);
1245 set_bit(res->target, ioa_cfg->array_ids);
1246 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1247 res->bus = IPR_VSET_VIRTUAL_BUS;
1248 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1249 ioa_cfg->max_devs_supported);
1250 set_bit(res->target, ioa_cfg->vset_ids);
1251 } else {
1252 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1253 ioa_cfg->max_devs_supported);
1254 set_bit(res->target, ioa_cfg->target_ids);
1255 }
1256 } else {
1257 proto = cfgtew->u.cfgte->proto;
1258 res->qmodel = IPR_QUEUEING_MODEL(res);
1259 res->flags = cfgtew->u.cfgte->flags;
1260 if (res->flags & IPR_IS_IOA_RESOURCE)
1261 res->type = IPR_RES_TYPE_IOAFP;
1262 else
1263 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1264
1265 res->bus = cfgtew->u.cfgte->res_addr.bus;
1266 res->target = cfgtew->u.cfgte->res_addr.target;
1267 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001268 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001269 }
1270
1271 ipr_update_ata_class(res, proto);
1272}
1273
1274/**
1275 * ipr_is_same_device - Determine if two devices are the same.
1276 * @res: resource entry struct
1277 * @cfgtew: config table entry wrapper struct
1278 *
1279 * Return value:
1280 * 1 if the devices are the same / 0 otherwise
1281 **/
1282static int ipr_is_same_device(struct ipr_resource_entry *res,
1283 struct ipr_config_table_entry_wrapper *cfgtew)
1284{
1285 if (res->ioa_cfg->sis64) {
1286 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1287 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992ed2010-11-04 09:35:58 -07001288 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001289 sizeof(cfgtew->u.cfgte64->lun))) {
1290 return 1;
1291 }
1292 } else {
1293 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1294 res->target == cfgtew->u.cfgte->res_addr.target &&
1295 res->lun == cfgtew->u.cfgte->res_addr.lun)
1296 return 1;
1297 }
1298
1299 return 0;
1300}
1301
1302/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001303 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001304 * @res_path: resource path
1305 * @buffer: buffer
Brian Kingb3b3b402013-01-11 17:43:49 -06001306 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001307 *
1308 * Return value:
1309 * pointer to buffer
1310 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001311static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001312{
1313 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001314 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001315
Wayne Boyer46d74562010-08-11 07:15:17 -07001316 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001317 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1318 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1319 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001320
1321 return buffer;
1322}
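/*
 * Example, sketch only: a sis64 resource path is a byte array terminated by
 * 0xff, so res_path = { 0x00, 0x02, 0x04, 0xff, ... } is rendered into the
 * caller's buffer as "00-02-04".
 */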
1323
1324/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001325 * ipr_format_res_path - Format the resource path for printing.
1326 * @ioa_cfg: ioa config struct
1327 * @res_path: resource path
1328 * @buffer: buffer
1329 * @len: length of buffer provided
1330 *
1331 * Return value:
1332 * pointer to buffer
1333 **/
1334static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1335 u8 *res_path, char *buffer, int len)
1336{
1337 char *p = buffer;
1338
1339 *p = '\0';
1340 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1341	__ipr_format_res_path(res_path, p, len - (p - buffer));
1342 return buffer;
1343}
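/*
 * Example, sketch only: this wrapper additionally prefixes the SCSI host
 * number, so on host 2 the path above would be rendered as "2/00-02-04",
 * matching the "Resource path: ..." messages logged elsewhere in this file.
 */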
1344
1345/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001346 * ipr_update_res_entry - Update the resource entry.
1347 * @res: resource entry struct
1348 * @cfgtew: config table entry wrapper struct
1349 *
1350 * Return value:
1351 * none
1352 **/
1353static void ipr_update_res_entry(struct ipr_resource_entry *res,
1354 struct ipr_config_table_entry_wrapper *cfgtew)
1355{
1356 char buffer[IPR_MAX_RES_PATH_LENGTH];
1357 unsigned int proto;
1358 int new_path = 0;
1359
1360 if (res->ioa_cfg->sis64) {
Brian King359d96e2015-06-11 20:45:20 -05001361 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1362 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer75576bb2010-07-14 10:50:14 -07001363 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001364
1365 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1366 sizeof(struct ipr_std_inq_data));
1367
1368 res->qmodel = IPR_QUEUEING_MODEL64(res);
1369 proto = cfgtew->u.cfgte64->proto;
1370 res->res_handle = cfgtew->u.cfgte64->res_handle;
1371 res->dev_id = cfgtew->u.cfgte64->dev_id;
1372
1373 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1374 sizeof(res->dev_lun.scsi_lun));
1375
1376 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1377 sizeof(res->res_path))) {
1378 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1379 sizeof(res->res_path));
1380 new_path = 1;
1381 }
1382
1383 if (res->sdev && new_path)
1384 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001385 ipr_format_res_path(res->ioa_cfg,
1386 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001387 } else {
1388 res->flags = cfgtew->u.cfgte->flags;
1389 if (res->flags & IPR_IS_IOA_RESOURCE)
1390 res->type = IPR_RES_TYPE_IOAFP;
1391 else
1392 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1393
1394 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1395 sizeof(struct ipr_std_inq_data));
1396
1397 res->qmodel = IPR_QUEUEING_MODEL(res);
1398 proto = cfgtew->u.cfgte->proto;
1399 res->res_handle = cfgtew->u.cfgte->res_handle;
1400 }
1401
1402 ipr_update_ata_class(res, proto);
1403}
1404
1405/**
1406 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1407 * for the resource.
1408 * @res: resource entry struct
1410 *
1411 * Return value:
1412 * none
1413 **/
1414static void ipr_clear_res_target(struct ipr_resource_entry *res)
1415{
1416 struct ipr_resource_entry *gscsi_res = NULL;
1417 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1418
1419 if (!ioa_cfg->sis64)
1420 return;
1421
1422 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1423 clear_bit(res->target, ioa_cfg->array_ids);
1424 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1425 clear_bit(res->target, ioa_cfg->vset_ids);
1426 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1427 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1428 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1429 return;
1430 clear_bit(res->target, ioa_cfg->target_ids);
1431
1432 } else if (res->bus == 0)
1433 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434}
1435
1436/**
1437 * ipr_handle_config_change - Handle a config change from the adapter
1438 * @ioa_cfg: ioa config struct
1439 * @hostrcb: hostrcb
1440 *
1441 * Return value:
1442 * none
1443 **/
1444static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001445 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446{
1447 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001448 struct ipr_config_table_entry_wrapper cfgtew;
1449 __be32 cc_res_handle;
1450
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 u32 is_ndn = 1;
1452
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001453 if (ioa_cfg->sis64) {
1454 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1455 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1456 } else {
1457 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1458 cc_res_handle = cfgtew.u.cfgte->res_handle;
1459 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460
1461 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001462 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 is_ndn = 0;
1464 break;
1465 }
1466 }
1467
1468 if (is_ndn) {
1469 if (list_empty(&ioa_cfg->free_res_q)) {
1470 ipr_send_hcam(ioa_cfg,
1471 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1472 hostrcb);
1473 return;
1474 }
1475
1476 res = list_entry(ioa_cfg->free_res_q.next,
1477 struct ipr_resource_entry, queue);
1478
1479 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001480 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1482 }
1483
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001484 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485
1486 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1487 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001489 res->res_handle = IPR_INVALID_RES_HANDLE;
Brian Kingf688f962014-12-02 12:47:37 -06001490 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001491 } else {
1492 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001494 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001495 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 res->add_to_ml = 1;
Brian Kingf688f962014-12-02 12:47:37 -06001497 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 }
1499
1500 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1501}
1502
1503/**
1504 * ipr_process_ccn - Op done function for a CCN.
1505 * @ipr_cmd: ipr command struct
1506 *
1507 * This function is the op done function for a configuration
1508 * change notification host controlled async from the adapter.
1509 *
1510 * Return value:
1511 * none
1512 **/
1513static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1514{
1515 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1516 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001517 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518
Brian Kingafc3f832016-08-24 12:56:51 -05001519 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001520 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521
1522 if (ioasc) {
Brian King4fdd7c72015-03-26 11:23:50 -05001523 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1524 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001525 dev_err(&ioa_cfg->pdev->dev,
1526 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1527
1528 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1529 } else {
1530 ipr_handle_config_change(ioa_cfg, hostrcb);
1531 }
1532}
1533
1534/**
Brian King8cf093e2007-04-26 16:00:14 -05001535 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1536 * @i: index into buffer
1537 * @buf: string to modify
1538 *
1539 * This function will strip all trailing whitespace, pad the end
1540 * of the string with a single space, and NULL terminate the string.
1541 *
1542 * Return value:
1543 * new length of string
1544 **/
1545static int strip_and_pad_whitespace(int i, char *buf)
1546{
1547 while (i && buf[i] == ' ')
1548 i--;
1549 buf[i+1] = ' ';
1550 buf[i+2] = '\0';
1551 return i + 2;
1552}
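/*
 * Example, sketch only: with buf = "IBM     " (trailing blanks) and i
 * indexing the last character, the blanks are dropped, a single space and a
 * NUL are appended, and the offset just past the pad space is returned:
 *
 *	i = strip_and_pad_whitespace(7, buf);	// buf becomes "IBM ", i == 4
 */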
1553
1554/**
1555 * ipr_log_vpd_compact - Log the passed VPD compactly.
1556 * @prefix: string to print at start of printk
1557 * @hostrcb: hostrcb pointer
1558 * @vpd: vendor/product id/sn struct
1559 *
1560 * Return value:
1561 * none
1562 **/
1563static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1564 struct ipr_vpd *vpd)
1565{
1566 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1567 int i = 0;
1568
1569 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1570 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1571
1572 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1573 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1574
1575 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1576 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1577
1578 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1579}
1580
1581/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001583 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584 *
1585 * Return value:
1586 * none
1587 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001588static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589{
1590 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1591 + IPR_SERIAL_NUM_LEN];
1592
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001593 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1594 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 IPR_PROD_ID_LEN);
1596 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1597 ipr_err("Vendor/Product ID: %s\n", buffer);
1598
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001599 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1601 ipr_err(" Serial Number: %s\n", buffer);
1602}
1603
1604/**
Brian King8cf093e2007-04-26 16:00:14 -05001605 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1606 * @prefix: string to print at start of printk
1607 * @hostrcb: hostrcb pointer
1608 * @vpd: vendor/product id/sn/wwn struct
1609 *
1610 * Return value:
1611 * none
1612 **/
1613static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1614 struct ipr_ext_vpd *vpd)
1615{
1616 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1617 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1618 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1619}
1620
1621/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001622 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1623 * @vpd: vendor/product id/sn/wwn struct
1624 *
1625 * Return value:
1626 * none
1627 **/
1628static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1629{
1630 ipr_log_vpd(&vpd->vpd);
1631 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1632 be32_to_cpu(vpd->wwid[1]));
1633}
1634
1635/**
1636 * ipr_log_enhanced_cache_error - Log a cache error.
1637 * @ioa_cfg: ioa config struct
1638 * @hostrcb: hostrcb struct
1639 *
1640 * Return value:
1641 * none
1642 **/
1643static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1644 struct ipr_hostrcb *hostrcb)
1645{
Wayne Boyer4565e372010-02-19 13:24:07 -08001646 struct ipr_hostrcb_type_12_error *error;
1647
1648 if (ioa_cfg->sis64)
1649 error = &hostrcb->hcam.u.error64.u.type_12_error;
1650 else
1651 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001652
1653 ipr_err("-----Current Configuration-----\n");
1654 ipr_err("Cache Directory Card Information:\n");
1655 ipr_log_ext_vpd(&error->ioa_vpd);
1656 ipr_err("Adapter Card Information:\n");
1657 ipr_log_ext_vpd(&error->cfc_vpd);
1658
1659 ipr_err("-----Expected Configuration-----\n");
1660 ipr_err("Cache Directory Card Information:\n");
1661 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1662 ipr_err("Adapter Card Information:\n");
1663 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1664
1665 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1666 be32_to_cpu(error->ioa_data[0]),
1667 be32_to_cpu(error->ioa_data[1]),
1668 be32_to_cpu(error->ioa_data[2]));
1669}
1670
1671/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001672 * ipr_log_cache_error - Log a cache error.
1673 * @ioa_cfg: ioa config struct
1674 * @hostrcb: hostrcb struct
1675 *
1676 * Return value:
1677 * none
1678 **/
1679static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1680 struct ipr_hostrcb *hostrcb)
1681{
1682 struct ipr_hostrcb_type_02_error *error =
1683 &hostrcb->hcam.u.error.u.type_02_error;
1684
1685 ipr_err("-----Current Configuration-----\n");
1686 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001687 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001689 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
1691 ipr_err("-----Expected Configuration-----\n");
1692 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001693 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001695 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696
1697 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1698 be32_to_cpu(error->ioa_data[0]),
1699 be32_to_cpu(error->ioa_data[1]),
1700 be32_to_cpu(error->ioa_data[2]));
1701}
1702
1703/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001704 * ipr_log_enhanced_config_error - Log a configuration error.
1705 * @ioa_cfg: ioa config struct
1706 * @hostrcb: hostrcb struct
1707 *
1708 * Return value:
1709 * none
1710 **/
1711static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1712 struct ipr_hostrcb *hostrcb)
1713{
1714 int errors_logged, i;
1715 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1716 struct ipr_hostrcb_type_13_error *error;
1717
1718 error = &hostrcb->hcam.u.error.u.type_13_error;
1719 errors_logged = be32_to_cpu(error->errors_logged);
1720
1721 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1722 be32_to_cpu(error->errors_detected), errors_logged);
1723
1724 dev_entry = error->dev;
1725
1726 for (i = 0; i < errors_logged; i++, dev_entry++) {
1727 ipr_err_separator;
1728
1729 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1730 ipr_log_ext_vpd(&dev_entry->vpd);
1731
1732 ipr_err("-----New Device Information-----\n");
1733 ipr_log_ext_vpd(&dev_entry->new_vpd);
1734
1735 ipr_err("Cache Directory Card Information:\n");
1736 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1737
1738 ipr_err("Adapter Card Information:\n");
1739 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1740 }
1741}
1742
1743/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001744 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1745 * @ioa_cfg: ioa config struct
1746 * @hostrcb: hostrcb struct
1747 *
1748 * Return value:
1749 * none
1750 **/
1751static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1752 struct ipr_hostrcb *hostrcb)
1753{
1754 int errors_logged, i;
1755 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1756 struct ipr_hostrcb_type_23_error *error;
1757 char buffer[IPR_MAX_RES_PATH_LENGTH];
1758
1759 error = &hostrcb->hcam.u.error64.u.type_23_error;
1760 errors_logged = be32_to_cpu(error->errors_logged);
1761
1762 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1763 be32_to_cpu(error->errors_detected), errors_logged);
1764
1765 dev_entry = error->dev;
1766
1767 for (i = 0; i < errors_logged; i++, dev_entry++) {
1768 ipr_err_separator;
1769
1770 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001771 __ipr_format_res_path(dev_entry->res_path,
1772 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001773 ipr_log_ext_vpd(&dev_entry->vpd);
1774
1775 ipr_err("-----New Device Information-----\n");
1776 ipr_log_ext_vpd(&dev_entry->new_vpd);
1777
1778 ipr_err("Cache Directory Card Information:\n");
1779 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1780
1781 ipr_err("Adapter Card Information:\n");
1782 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1783 }
1784}
1785
1786/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 * ipr_log_config_error - Log a configuration error.
1788 * @ioa_cfg: ioa config struct
1789 * @hostrcb: hostrcb struct
1790 *
1791 * Return value:
1792 * none
1793 **/
1794static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1795 struct ipr_hostrcb *hostrcb)
1796{
1797 int errors_logged, i;
1798 struct ipr_hostrcb_device_data_entry *dev_entry;
1799 struct ipr_hostrcb_type_03_error *error;
1800
1801 error = &hostrcb->hcam.u.error.u.type_03_error;
1802 errors_logged = be32_to_cpu(error->errors_logged);
1803
1804 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1805 be32_to_cpu(error->errors_detected), errors_logged);
1806
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001807 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808
1809 for (i = 0; i < errors_logged; i++, dev_entry++) {
1810 ipr_err_separator;
1811
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001812 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001813 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
1815 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001816 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817
1818 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001819 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
1821 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001822 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823
1824 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1825 be32_to_cpu(dev_entry->ioa_data[0]),
1826 be32_to_cpu(dev_entry->ioa_data[1]),
1827 be32_to_cpu(dev_entry->ioa_data[2]),
1828 be32_to_cpu(dev_entry->ioa_data[3]),
1829 be32_to_cpu(dev_entry->ioa_data[4]));
1830 }
1831}
1832
1833/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001834 * ipr_log_enhanced_array_error - Log an array configuration error.
1835 * @ioa_cfg: ioa config struct
1836 * @hostrcb: hostrcb struct
1837 *
1838 * Return value:
1839 * none
1840 **/
1841static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1842 struct ipr_hostrcb *hostrcb)
1843{
1844 int i, num_entries;
1845 struct ipr_hostrcb_type_14_error *error;
1846 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1847 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1848
1849 error = &hostrcb->hcam.u.error.u.type_14_error;
1850
1851 ipr_err_separator;
1852
1853 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1854 error->protection_level,
1855 ioa_cfg->host->host_no,
1856 error->last_func_vset_res_addr.bus,
1857 error->last_func_vset_res_addr.target,
1858 error->last_func_vset_res_addr.lun);
1859
1860 ipr_err_separator;
1861
1862 array_entry = error->array_member;
1863 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001864 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001865
1866 for (i = 0; i < num_entries; i++, array_entry++) {
1867 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1868 continue;
1869
1870 if (be32_to_cpu(error->exposed_mode_adn) == i)
1871 ipr_err("Exposed Array Member %d:\n", i);
1872 else
1873 ipr_err("Array Member %d:\n", i);
1874
1875 ipr_log_ext_vpd(&array_entry->vpd);
1876 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1877 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1878 "Expected Location");
1879
1880 ipr_err_separator;
1881 }
1882}
1883
1884/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001885 * ipr_log_array_error - Log an array configuration error.
1886 * @ioa_cfg: ioa config struct
1887 * @hostrcb: hostrcb struct
1888 *
1889 * Return value:
1890 * none
1891 **/
1892static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1893 struct ipr_hostrcb *hostrcb)
1894{
1895 int i;
1896 struct ipr_hostrcb_type_04_error *error;
1897 struct ipr_hostrcb_array_data_entry *array_entry;
1898 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1899
1900 error = &hostrcb->hcam.u.error.u.type_04_error;
1901
1902 ipr_err_separator;
1903
1904 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1905 error->protection_level,
1906 ioa_cfg->host->host_no,
1907 error->last_func_vset_res_addr.bus,
1908 error->last_func_vset_res_addr.target,
1909 error->last_func_vset_res_addr.lun);
1910
1911 ipr_err_separator;
1912
1913 array_entry = error->array_member;
1914
1915 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001916 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 continue;
1918
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001919 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001921 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001924 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001925
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001926 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1927 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1928 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
1930 ipr_err_separator;
1931
1932 if (i == 9)
1933 array_entry = error->array_member2;
1934 else
1935 array_entry++;
1936 }
1937}
1938
1939/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001940 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001941 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001942 * @data: IOA error data
1943 * @len: data length
1944 *
1945 * Return value:
1946 * none
1947 **/
Brian King359d96e2015-06-11 20:45:20 -05001948static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001949{
1950 int i;
1951
1952 if (len == 0)
1953 return;
1954
Brian Kingac719ab2006-11-21 10:28:42 -06001955 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1956 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1957
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001958 for (i = 0; i < len / 4; i += 4) {
1959 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1960 be32_to_cpu(data[i]),
1961 be32_to_cpu(data[i+1]),
1962 be32_to_cpu(data[i+2]),
1963 be32_to_cpu(data[i+3]));
1964 }
1965}
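/*
 * Example output, with made-up data words: each line shows a byte offset
 * followed by four big-endian words, and dumps are clipped to
 * IPR_DEFAULT_MAX_ERROR_DUMP unless a more verbose log_level is configured:
 *
 *	00000000: 01234567 89ABCDEF 00000000 00000001
 *	00000010: 00000002 00000003 00000004 00000005
 */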
1966
1967/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001968 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1969 * @ioa_cfg: ioa config struct
1970 * @hostrcb: hostrcb struct
1971 *
1972 * Return value:
1973 * none
1974 **/
1975static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1976 struct ipr_hostrcb *hostrcb)
1977{
1978 struct ipr_hostrcb_type_17_error *error;
1979
Wayne Boyer4565e372010-02-19 13:24:07 -08001980 if (ioa_cfg->sis64)
1981 error = &hostrcb->hcam.u.error64.u.type_17_error;
1982 else
1983 error = &hostrcb->hcam.u.error.u.type_17_error;
1984
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001985 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001986 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001987
Brian King8cf093e2007-04-26 16:00:14 -05001988 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1989 be32_to_cpu(hostrcb->hcam.u.error.prc));
1990 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001991 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001992 be32_to_cpu(hostrcb->hcam.length) -
1993 (offsetof(struct ipr_hostrcb_error, u) +
1994 offsetof(struct ipr_hostrcb_type_17_error, data)));
1995}
1996
1997/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001998 * ipr_log_dual_ioa_error - Log a dual adapter error.
1999 * @ioa_cfg: ioa config struct
2000 * @hostrcb: hostrcb struct
2001 *
2002 * Return value:
2003 * none
2004 **/
2005static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2006 struct ipr_hostrcb *hostrcb)
2007{
2008 struct ipr_hostrcb_type_07_error *error;
2009
2010 error = &hostrcb->hcam.u.error.u.type_07_error;
2011 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08002012 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002013
Brian King8cf093e2007-04-26 16:00:14 -05002014 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2015 be32_to_cpu(hostrcb->hcam.u.error.prc));
2016 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06002017 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002018 be32_to_cpu(hostrcb->hcam.length) -
2019 (offsetof(struct ipr_hostrcb_error, u) +
2020 offsetof(struct ipr_hostrcb_type_07_error, data)));
2021}
2022
Brian King49dc6a12006-11-21 10:28:35 -06002023static const struct {
2024 u8 active;
2025 char *desc;
2026} path_active_desc[] = {
2027 { IPR_PATH_NO_INFO, "Path" },
2028 { IPR_PATH_ACTIVE, "Active path" },
2029 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2030};
2031
2032static const struct {
2033 u8 state;
2034 char *desc;
2035} path_state_desc[] = {
2036 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2037 { IPR_PATH_HEALTHY, "is healthy" },
2038 { IPR_PATH_DEGRADED, "is degraded" },
2039 { IPR_PATH_FAILED, "is failed" }
2040};
2041
2042/**
2043 * ipr_log_fabric_path - Log a fabric path error
2044 * @hostrcb: hostrcb struct
2045 * @fabric: fabric descriptor
2046 *
2047 * Return value:
2048 * none
2049 **/
2050static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2051 struct ipr_hostrcb_fabric_desc *fabric)
2052{
2053 int i, j;
2054 u8 path_state = fabric->path_state;
2055 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2056 u8 state = path_state & IPR_PATH_STATE_MASK;
2057
2058 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2059 if (path_active_desc[i].active != active)
2060 continue;
2061
2062 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2063 if (path_state_desc[j].state != state)
2064 continue;
2065
2066 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2067 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2068 path_active_desc[i].desc, path_state_desc[j].desc,
2069 fabric->ioa_port);
2070 } else if (fabric->cascaded_expander == 0xff) {
2071 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2072 path_active_desc[i].desc, path_state_desc[j].desc,
2073 fabric->ioa_port, fabric->phy);
2074 } else if (fabric->phy == 0xff) {
2075 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2076 path_active_desc[i].desc, path_state_desc[j].desc,
2077 fabric->ioa_port, fabric->cascaded_expander);
2078 } else {
2079 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2080 path_active_desc[i].desc, path_state_desc[j].desc,
2081 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2082 }
2083 return;
2084 }
2085 }
2086
2087 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2088 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2089}
2090
Wayne Boyer4565e372010-02-19 13:24:07 -08002091/**
2092 * ipr_log64_fabric_path - Log a fabric path error
2093 * @hostrcb: hostrcb struct
2094 * @fabric: fabric descriptor
2095 *
2096 * Return value:
2097 * none
2098 **/
2099static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2100 struct ipr_hostrcb64_fabric_desc *fabric)
2101{
2102 int i, j;
2103 u8 path_state = fabric->path_state;
2104 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2105 u8 state = path_state & IPR_PATH_STATE_MASK;
2106 char buffer[IPR_MAX_RES_PATH_LENGTH];
2107
2108 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2109 if (path_active_desc[i].active != active)
2110 continue;
2111
2112 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2113 if (path_state_desc[j].state != state)
2114 continue;
2115
2116 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2117 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002118 ipr_format_res_path(hostrcb->ioa_cfg,
2119 fabric->res_path,
2120 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002121 return;
2122 }
2123 }
2124
2125 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002126 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2127 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002128}
2129
Brian King49dc6a12006-11-21 10:28:35 -06002130static const struct {
2131 u8 type;
2132 char *desc;
2133} path_type_desc[] = {
2134 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2135 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2136 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2137 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2138};
2139
2140static const struct {
2141 u8 status;
2142 char *desc;
2143} path_status_desc[] = {
2144 { IPR_PATH_CFG_NO_PROB, "Functional" },
2145 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2146 { IPR_PATH_CFG_FAILED, "Failed" },
2147 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2148 { IPR_PATH_NOT_DETECTED, "Missing" },
2149 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2150};
2151
2152static const char *link_rate[] = {
2153 "unknown",
2154 "disabled",
2155 "phy reset problem",
2156 "spinup hold",
2157 "port selector",
2158 "unknown",
2159 "unknown",
2160 "unknown",
2161 "1.5Gbps",
2162 "3.0Gbps",
2163 "unknown",
2164 "unknown",
2165 "unknown",
2166 "unknown",
2167 "unknown",
2168 "unknown"
2169};
2170
2171/**
2172 * ipr_log_path_elem - Log a fabric path element.
2173 * @hostrcb: hostrcb struct
2174 * @cfg: fabric path element struct
2175 *
2176 * Return value:
2177 * none
2178 **/
2179static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2180 struct ipr_hostrcb_config_element *cfg)
2181{
2182 int i, j;
2183 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2184 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2185
2186 if (type == IPR_PATH_CFG_NOT_EXIST)
2187 return;
2188
2189 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2190 if (path_type_desc[i].type != type)
2191 continue;
2192
2193 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2194 if (path_status_desc[j].status != status)
2195 continue;
2196
2197 if (type == IPR_PATH_CFG_IOA_PORT) {
2198 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2199 path_status_desc[j].desc, path_type_desc[i].desc,
2200 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2201 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2202 } else {
2203 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2204 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2205 path_status_desc[j].desc, path_type_desc[i].desc,
2206 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2207 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2208 } else if (cfg->cascaded_expander == 0xff) {
2209 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2210 "WWN=%08X%08X\n", path_status_desc[j].desc,
2211 path_type_desc[i].desc, cfg->phy,
2212 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2213 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2214 } else if (cfg->phy == 0xff) {
2215 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2216 "WWN=%08X%08X\n", path_status_desc[j].desc,
2217 path_type_desc[i].desc, cfg->cascaded_expander,
2218 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2219 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2220 } else {
2221 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2222 "WWN=%08X%08X\n", path_status_desc[j].desc,
2223 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2224 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2225 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2226 }
2227 }
2228 return;
2229 }
2230 }
2231
2232 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2233 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2234 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2235 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2236}
2237
2238/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002239 * ipr_log64_path_elem - Log a fabric path element.
2240 * @hostrcb: hostrcb struct
2241 * @cfg: fabric path element struct
2242 *
2243 * Return value:
2244 * none
2245 **/
2246static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2247 struct ipr_hostrcb64_config_element *cfg)
2248{
2249 int i, j;
2250 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2251 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2252 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2253 char buffer[IPR_MAX_RES_PATH_LENGTH];
2254
2255 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2256 return;
2257
2258 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2259 if (path_type_desc[i].type != type)
2260 continue;
2261
2262 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2263 if (path_status_desc[j].status != status)
2264 continue;
2265
2266 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2267 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002268 ipr_format_res_path(hostrcb->ioa_cfg,
2269 cfg->res_path, buffer, sizeof(buffer)),
2270 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2271 be32_to_cpu(cfg->wwid[0]),
2272 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002273 return;
2274 }
2275 }
2276 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2277 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002278 ipr_format_res_path(hostrcb->ioa_cfg,
2279 cfg->res_path, buffer, sizeof(buffer)),
2280 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2281 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002282}
2283
2284/**
Brian King49dc6a12006-11-21 10:28:35 -06002285 * ipr_log_fabric_error - Log a fabric error.
2286 * @ioa_cfg: ioa config struct
2287 * @hostrcb: hostrcb struct
2288 *
2289 * Return value:
2290 * none
2291 **/
2292static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2293 struct ipr_hostrcb *hostrcb)
2294{
2295 struct ipr_hostrcb_type_20_error *error;
2296 struct ipr_hostrcb_fabric_desc *fabric;
2297 struct ipr_hostrcb_config_element *cfg;
2298 int i, add_len;
2299
2300 error = &hostrcb->hcam.u.error.u.type_20_error;
2301 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2302 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2303
2304 add_len = be32_to_cpu(hostrcb->hcam.length) -
2305 (offsetof(struct ipr_hostrcb_error, u) +
2306 offsetof(struct ipr_hostrcb_type_20_error, desc));
2307
2308 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2309 ipr_log_fabric_path(hostrcb, fabric);
2310 for_each_fabric_cfg(fabric, cfg)
2311 ipr_log_path_elem(hostrcb, cfg);
2312
2313 add_len -= be16_to_cpu(fabric->length);
2314 fabric = (struct ipr_hostrcb_fabric_desc *)
2315 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2316 }
2317
Brian King359d96e2015-06-11 20:45:20 -05002318 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002319}
2320
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002321/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002322 * ipr_log_sis64_array_error - Log a sis64 array error.
2323 * @ioa_cfg: ioa config struct
2324 * @hostrcb: hostrcb struct
2325 *
2326 * Return value:
2327 * none
2328 **/
2329static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2330 struct ipr_hostrcb *hostrcb)
2331{
2332 int i, num_entries;
2333 struct ipr_hostrcb_type_24_error *error;
2334 struct ipr_hostrcb64_array_data_entry *array_entry;
2335 char buffer[IPR_MAX_RES_PATH_LENGTH];
2336 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2337
2338 error = &hostrcb->hcam.u.error64.u.type_24_error;
2339
2340 ipr_err_separator;
2341
2342 ipr_err("RAID %s Array Configuration: %s\n",
2343 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002344 ipr_format_res_path(ioa_cfg, error->last_res_path,
2345 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002346
2347 ipr_err_separator;
2348
2349 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002350 num_entries = min_t(u32, error->num_entries,
2351 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002352
2353 for (i = 0; i < num_entries; i++, array_entry++) {
2354
2355 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2356 continue;
2357
2358 if (error->exposed_mode_adn == i)
2359 ipr_err("Exposed Array Member %d:\n", i);
2360 else
2361 ipr_err("Array Member %d:\n", i);
2362
2363 ipr_err("Array Member %d:\n", i);
2364 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002365 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002366 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2367 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002368 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002369 ipr_format_res_path(ioa_cfg,
2370 array_entry->expected_res_path,
2371 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002372
2373 ipr_err_separator;
2374 }
2375}
2376
2377/**
2378 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2379 * @ioa_cfg: ioa config struct
2380 * @hostrcb: hostrcb struct
2381 *
2382 * Return value:
2383 * none
2384 **/
2385static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2386 struct ipr_hostrcb *hostrcb)
2387{
2388 struct ipr_hostrcb_type_30_error *error;
2389 struct ipr_hostrcb64_fabric_desc *fabric;
2390 struct ipr_hostrcb64_config_element *cfg;
2391 int i, add_len;
2392
2393 error = &hostrcb->hcam.u.error64.u.type_30_error;
2394
2395 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2396 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2397
2398 add_len = be32_to_cpu(hostrcb->hcam.length) -
2399 (offsetof(struct ipr_hostrcb64_error, u) +
2400 offsetof(struct ipr_hostrcb_type_30_error, desc));
2401
2402 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2403 ipr_log64_fabric_path(hostrcb, fabric);
2404 for_each_fabric_cfg(fabric, cfg)
2405 ipr_log64_path_elem(hostrcb, cfg);
2406
2407 add_len -= be16_to_cpu(fabric->length);
2408 fabric = (struct ipr_hostrcb64_fabric_desc *)
2409 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2410 }
2411
Brian King359d96e2015-06-11 20:45:20 -05002412 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Wayne Boyer4565e372010-02-19 13:24:07 -08002413}
2414
2415/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002416 * ipr_log_generic_error - Log an adapter error.
2417 * @ioa_cfg: ioa config struct
2418 * @hostrcb: hostrcb struct
2419 *
2420 * Return value:
2421 * none
2422 **/
2423static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2424 struct ipr_hostrcb *hostrcb)
2425{
Brian Kingac719ab2006-11-21 10:28:42 -06002426 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002427 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002428}
2429
2430/**
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002431 * ipr_log_sis64_device_error - Log a sis64 device error.
2432 * @ioa_cfg: ioa config struct
2433 * @hostrcb: hostrcb struct
2434 *
2435 * Return value:
2436 * none
2437 **/
2438static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2439 struct ipr_hostrcb *hostrcb)
2440{
2441 struct ipr_hostrcb_type_21_error *error;
2442 char buffer[IPR_MAX_RES_PATH_LENGTH];
2443
2444 error = &hostrcb->hcam.u.error64.u.type_21_error;
2445
2446 ipr_err("-----Failing Device Information-----\n");
2447 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2448 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2449 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2450 ipr_err("Device Resource Path: %s\n",
2451 __ipr_format_res_path(error->res_path,
2452 buffer, sizeof(buffer)));
2453 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2454 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2455 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2456 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2457 ipr_err("SCSI Sense Data:\n");
2458 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2459 ipr_err("SCSI Command Descriptor Block: \n");
2460 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2461
2462 ipr_err("Additional IOA Data:\n");
2463 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2464}
2465
2466/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2468 * @ioasc: IOASC
2469 *
2470 * This function will return the index into the ipr_error_table
2471 * for the specified IOASC. If the IOASC is not in the table,
2472 * 0 will be returned, which points to the entry used for unknown errors.
2473 *
2474 * Return value:
2475 * index into the ipr_error_table
2476 **/
2477static u32 ipr_get_error(u32 ioasc)
2478{
2479 int i;
2480
2481 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002482 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483 return i;
2484
2485 return 0;
2486}
2487
2488/**
2489 * ipr_handle_log_data - Log an adapter error.
2490 * @ioa_cfg: ioa config struct
2491 * @hostrcb: hostrcb struct
2492 *
2493 * This function logs an adapter error to the system.
2494 *
2495 * Return value:
2496 * none
2497 **/
2498static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2499 struct ipr_hostrcb *hostrcb)
2500{
2501 u32 ioasc;
2502 int error_index;
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002503 struct ipr_hostrcb_type_21_error *error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002504
2505 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2506 return;
2507
2508 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2509 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2510
Wayne Boyer4565e372010-02-19 13:24:07 -08002511 if (ioa_cfg->sis64)
2512 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2513 else
2514 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002515
Wayne Boyer4565e372010-02-19 13:24:07 -08002516 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2517 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2519 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002520 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 }
2522
2523 error_index = ipr_get_error(ioasc);
2524
2525 if (!ipr_error_table[error_index].log_hcam)
2526 return;
2527
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002528 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2529 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2530 error = &hostrcb->hcam.u.error64.u.type_21_error;
2531
2532 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2533 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2534 return;
2535 }
2536
Brian King49dc6a12006-11-21 10:28:35 -06002537 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
2539 /* Set indication we have logged an error */
2540 ioa_cfg->errors_logged++;
2541
Brian King933916f2007-03-29 12:43:30 -05002542 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002544 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2545 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546
2547 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548 case IPR_HOST_RCB_OVERLAY_ID_2:
2549 ipr_log_cache_error(ioa_cfg, hostrcb);
2550 break;
2551 case IPR_HOST_RCB_OVERLAY_ID_3:
2552 ipr_log_config_error(ioa_cfg, hostrcb);
2553 break;
2554 case IPR_HOST_RCB_OVERLAY_ID_4:
2555 case IPR_HOST_RCB_OVERLAY_ID_6:
2556 ipr_log_array_error(ioa_cfg, hostrcb);
2557 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002558 case IPR_HOST_RCB_OVERLAY_ID_7:
2559 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2560 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002561 case IPR_HOST_RCB_OVERLAY_ID_12:
2562 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2563 break;
2564 case IPR_HOST_RCB_OVERLAY_ID_13:
2565 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2566 break;
2567 case IPR_HOST_RCB_OVERLAY_ID_14:
2568 case IPR_HOST_RCB_OVERLAY_ID_16:
2569 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2570 break;
2571 case IPR_HOST_RCB_OVERLAY_ID_17:
2572 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2573 break;
Brian King49dc6a12006-11-21 10:28:35 -06002574 case IPR_HOST_RCB_OVERLAY_ID_20:
2575 ipr_log_fabric_error(ioa_cfg, hostrcb);
2576 break;
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002577 case IPR_HOST_RCB_OVERLAY_ID_21:
2578 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2579 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002580 case IPR_HOST_RCB_OVERLAY_ID_23:
2581 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2582 break;
2583 case IPR_HOST_RCB_OVERLAY_ID_24:
2584 case IPR_HOST_RCB_OVERLAY_ID_26:
2585 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2586 break;
2587 case IPR_HOST_RCB_OVERLAY_ID_30:
2588 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2589 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002590 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002593 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002594 break;
2595 }
2596}
2597
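/**
 * ipr_get_free_hostrcb - Get a free hostrcb to send back to the adapter
 * @ioa:	ioa config struct
 *
 * Takes the first hostrcb off the free queue. If the free queue is empty,
 * an already reported hostrcb is reclaimed from the report queue instead.
 *
 * Return value:
 * 	pointer to hostrcb
 **/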
Brian Kingafc3f832016-08-24 12:56:51 -05002598static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2599{
2600 struct ipr_hostrcb *hostrcb;
2601
2602 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2603 struct ipr_hostrcb, queue);
2604
2605 if (unlikely(!hostrcb)) {
2606 dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.");
2607 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2608 struct ipr_hostrcb, queue);
2609 }
2610
2611 list_del_init(&hostrcb->queue);
2612 return hostrcb;
2613}
2614
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615/**
2616 * ipr_process_error - Op done function for an adapter error log.
2617 * @ipr_cmd: ipr command struct
2618 *
2619 * This function is the op done function for an error log host
2620 * controlled async from the adapter. It will log the error and
2621 * send the HCAM back to the adapter.
2622 *
2623 * Return value:
2624 * none
2625 **/
2626static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2627{
2628 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2629 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002630 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002631 u32 fd_ioasc;
2632
2633 if (ioa_cfg->sis64)
2634 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2635 else
2636 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637
Brian Kingafc3f832016-08-24 12:56:51 -05002638 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002639 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640
2641 if (!ioasc) {
2642 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002643 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2644 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Brian King4fdd7c72015-03-26 11:23:50 -05002645 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2646 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 dev_err(&ioa_cfg->pdev->dev,
2648 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2649 }
2650
Brian Kingafc3f832016-08-24 12:56:51 -05002651 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
Brian King8a4236a2016-10-13 14:45:24 -05002652 schedule_work(&ioa_cfg->work_q);
Brian Kingafc3f832016-08-24 12:56:51 -05002653 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
Brian Kingafc3f832016-08-24 12:56:51 -05002654
Linus Torvalds1da177e2005-04-16 15:20:36 -07002655 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2656}
2657
2658/**
2659 * ipr_timeout - An internally generated op has timed out.
2660 * @t: timer context used to fetch the ipr command struct
2661 *
2662 * This function blocks host requests and initiates an
2663 * adapter reset.
2664 *
2665 * Return value:
2666 * none
2667 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002668static void ipr_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669{
Kees Cook738c6ec2017-08-18 16:53:24 -07002670 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671 unsigned long lock_flags = 0;
2672 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2673
2674 ENTER;
2675 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2676
2677 ioa_cfg->errors_logged++;
2678 dev_err(&ioa_cfg->pdev->dev,
2679 "Adapter being reset due to command timeout.\n");
2680
2681 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2682 ioa_cfg->sdt_state = GET_DUMP;
2683
2684 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2685 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2686
2687 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2688 LEAVE;
2689}
2690
2691/**
2692 * ipr_oper_timeout - Adapter timed out transitioning to operational
2693 * @t: timer context used to fetch the ipr command struct
2694 *
2695 * This function blocks host requests and initiates an
2696 * adapter reset.
2697 *
2698 * Return value:
2699 * none
2700 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002701static void ipr_oper_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702{
Kees Cook738c6ec2017-08-18 16:53:24 -07002703 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 unsigned long lock_flags = 0;
2705 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2706
2707 ENTER;
2708 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2709
2710 ioa_cfg->errors_logged++;
2711 dev_err(&ioa_cfg->pdev->dev,
2712 "Adapter timed out transitioning to operational.\n");
2713
2714 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2715 ioa_cfg->sdt_state = GET_DUMP;
2716
2717 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2718 if (ipr_fastfail)
2719 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2720 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2721 }
2722
2723 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2724 LEAVE;
2725}
2726
2727/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 * ipr_find_ses_entry - Find matching SES in SES table
2729 * @res: resource entry struct of SES
2730 *
2731 * Return value:
2732 * pointer to SES table entry / NULL on failure
2733 **/
2734static const struct ipr_ses_table_entry *
2735ipr_find_ses_entry(struct ipr_resource_entry *res)
2736{
2737 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002738 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2740
2741 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2742 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2743 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002744 vpids = &res->std_inq_data.vpids;
2745 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 matches++;
2747 else
2748 break;
2749 } else
2750 matches++;
2751 }
2752
2753 if (matches == IPR_PROD_ID_LEN)
2754 return ste;
2755 }
2756
2757 return NULL;
2758}
2759
2760/**
2761 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2762 * @ioa_cfg: ioa config struct
2763 * @bus: SCSI bus
2764 * @bus_width: bus width
2765 *
2766 * Return value:
2767 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2768 * For a 2-byte wide SCSI bus, the maximum transfer speed is
2769 * twice the maximum transfer rate (e.g. for a wide enabled bus,
2770 * max 160MHz = max 320MB/sec).
2771 **/
2772static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2773{
2774 struct ipr_resource_entry *res;
2775 const struct ipr_ses_table_entry *ste;
2776 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2777
2778 /* Loop through each config table entry in the config table buffer */
2779 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002780 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 continue;
2782
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002783 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 continue;
2785
2786 if (!(ste = ipr_find_ses_entry(res)))
2787 continue;
2788
2789 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2790 }
2791
2792 return max_xfer_rate;
2793}
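/*
 * Worked example (illustrative, assuming ste->max_bus_speed_limit is
 * expressed in MB/sec): for a 16-bit wide bus (bus_width = 16) and an
 * SES entry that limits the bus to 320 MB/sec, the expression above is
 *
 *	(320 * 10) / (16 / 8) = 1600
 *
 * i.e. 160 MHz in the 100KHz units described in the function header.
 */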
2794
2795/**
2796 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2797 * @ioa_cfg: ioa config struct
2798 * @max_delay: max delay in micro-seconds to wait
2799 *
2800 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2801 *
2802 * Return value:
2803 * 0 on success / other on failure
2804 **/
2805static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2806{
2807 volatile u32 pcii_reg;
2808 int delay = 1;
2809
2810 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2811 while (delay < max_delay) {
2812 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2813
2814 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2815 return 0;
2816
2817 /* udelay cannot be used if delay is more than a few milliseconds */
2818 if ((delay / 1000) > MAX_UDELAY_MS)
2819 mdelay(delay / 1000);
2820 else
2821 udelay(delay);
2822
2823 delay += delay;
2824 }
2825 return -EIO;
2826}
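/*
 * Note on the loop above: the delay doubles on every unsuccessful pass
 * (1, 2, 4, 8, ... microseconds), so the total time spent polling before
 * returning -EIO is just under twice max_delay, and any single delay
 * longer than MAX_UDELAY_MS milliseconds is issued with mdelay() instead
 * of udelay().
 */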
2827
2828/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002829 * ipr_get_sis64_dump_data_section - Dump IOA memory
2830 * @ioa_cfg: ioa config struct
2831 * @start_addr: adapter address to dump
2832 * @dest: destination kernel buffer
2833 * @length_in_words: length to dump in 4 byte words
2834 *
2835 * Return value:
2836 * 0 on success
2837 **/
2838static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2839 u32 start_addr,
2840 __be32 *dest, u32 length_in_words)
2841{
2842 int i;
2843
2844 for (i = 0; i < length_in_words; i++) {
2845 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2846 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2847 dest++;
2848 }
2849
2850 return 0;
2851}
2852
2853/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002854 * ipr_get_ldump_data_section - Dump IOA memory
2855 * @ioa_cfg: ioa config struct
2856 * @start_addr: adapter address to dump
2857 * @dest: destination kernel buffer
2858 * @length_in_words: length to dump in 4 byte words
2859 *
2860 * Return value:
2861 * 0 on success / -EIO on failure
2862 **/
2863static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2864 u32 start_addr,
2865 __be32 *dest, u32 length_in_words)
2866{
2867 volatile u32 temp_pcii_reg;
2868 int i, delay = 0;
2869
Wayne Boyerdcbad002010-02-19 13:24:14 -08002870 if (ioa_cfg->sis64)
2871 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2872 dest, length_in_words);
2873
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874 /* Write IOA interrupt reg starting LDUMP state */
2875 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002876 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
2878 /* Wait for IO debug acknowledge */
2879 if (ipr_wait_iodbg_ack(ioa_cfg,
2880 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2881 dev_err(&ioa_cfg->pdev->dev,
2882 "IOA dump long data transfer timeout\n");
2883 return -EIO;
2884 }
2885
2886 /* Signal LDUMP interlocked - clear IO debug ack */
2887 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2888 ioa_cfg->regs.clr_interrupt_reg);
2889
2890 /* Write Mailbox with starting address */
2891 writel(start_addr, ioa_cfg->ioa_mailbox);
2892
2893 /* Signal address valid - clear IOA Reset alert */
2894 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002895 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896
2897 for (i = 0; i < length_in_words; i++) {
2898 /* Wait for IO debug acknowledge */
2899 if (ipr_wait_iodbg_ack(ioa_cfg,
2900 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2901 dev_err(&ioa_cfg->pdev->dev,
2902 "IOA dump short data transfer timeout\n");
2903 return -EIO;
2904 }
2905
2906 /* Read data from mailbox and increment destination pointer */
2907 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2908 dest++;
2909
2910 /* For all but the last word of data, signal data received */
2911 if (i < (length_in_words - 1)) {
2912 /* Signal dump data received - Clear IO debug Ack */
2913 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2914 ioa_cfg->regs.clr_interrupt_reg);
2915 }
2916 }
2917
2918 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2919 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002920 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921
2922 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002923 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002924
2925 /* Signal dump data received - Clear IO debug Ack */
2926 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2927 ioa_cfg->regs.clr_interrupt_reg);
2928
2929 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2930 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2931 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002932 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002933
2934 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2935 return 0;
2936
2937 udelay(10);
2938 delay += 10;
2939 }
2940
2941 return 0;
2942}
2943
2944#ifdef CONFIG_SCSI_IPR_DUMP
2945/**
2946 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2947 * @ioa_cfg: ioa config struct
2948 * @pci_address: adapter address
2949 * @length: length of data to copy
2950 *
2951 * Copy data from PCI adapter to kernel buffer.
2952 * Note: length MUST be a 4 byte multiple
2953 * Return value:
2954 * 0 on success / other on failure
2955 **/
2956static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2957 unsigned long pci_address, u32 length)
2958{
2959 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002960 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 __be32 *page;
2962 unsigned long lock_flags = 0;
2963 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2964
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002965 if (ioa_cfg->sis64)
2966 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2967 else
2968 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2969
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002971 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002972 if (ioa_dump->page_offset >= PAGE_SIZE ||
2973 ioa_dump->page_offset == 0) {
2974 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2975
2976 if (!page) {
2977 ipr_trace;
2978 return bytes_copied;
2979 }
2980
2981 ioa_dump->page_offset = 0;
2982 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2983 ioa_dump->next_page_index++;
2984 } else
2985 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2986
2987 rem_len = length - bytes_copied;
2988 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2989 cur_len = min(rem_len, rem_page_len);
2990
2991 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2992 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2993 rc = -EIO;
2994 } else {
2995 rc = ipr_get_ldump_data_section(ioa_cfg,
2996 pci_address + bytes_copied,
2997 &page[ioa_dump->page_offset / 4],
2998 (cur_len / sizeof(u32)));
2999 }
3000 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3001
3002 if (!rc) {
3003 ioa_dump->page_offset += cur_len;
3004 bytes_copied += cur_len;
3005 } else {
3006 ipr_trace;
3007 break;
3008 }
3009 schedule();
3010 }
3011
3012 return bytes_copied;
3013}
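/*
 * Illustrative arithmetic (hypothetical sizes): with a 4 KB PAGE_SIZE, a
 * 10000 byte dump section (length is always a 4 byte multiple) is copied
 * as 4096 + 4096 + 1808 bytes, each chunk landing in its own
 * __get_free_page() buffer and fetched through
 * ipr_get_ldump_data_section() while the host lock is held.
 */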
3014
3015/**
3016 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3017 * @hdr: dump entry header struct
3018 *
3019 * Return value:
3020 * nothing
3021 **/
3022static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3023{
3024 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3025 hdr->num_elems = 1;
3026 hdr->offset = sizeof(*hdr);
3027 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3028}
3029
3030/**
3031 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3032 * @ioa_cfg: ioa config struct
3033 * @driver_dump: driver dump struct
3034 *
3035 * Return value:
3036 * nothing
3037 **/
3038static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3039 struct ipr_driver_dump *driver_dump)
3040{
3041 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3042
3043 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3044 driver_dump->ioa_type_entry.hdr.len =
3045 sizeof(struct ipr_dump_ioa_type_entry) -
3046 sizeof(struct ipr_dump_entry_header);
3047 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3048 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3049 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3050 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3051 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3052 ucode_vpd->minor_release[1];
3053 driver_dump->hdr.num_entries++;
3054}
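/*
 * Worked example (hypothetical VPD values): fw_version above packs the
 * microcode VPD into one 32-bit word, major release in the top byte,
 * then card type, then the two minor release bytes; a major release of
 * 0x02, card type 0x05 and minor bytes 0x03 0x01 encode as 0x02050301.
 */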
3055
3056/**
3057 * ipr_dump_version_data - Fill in the driver version in the dump.
3058 * @ioa_cfg: ioa config struct
3059 * @driver_dump: driver dump struct
3060 *
3061 * Return value:
3062 * nothing
3063 **/
3064static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3065 struct ipr_driver_dump *driver_dump)
3066{
3067 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3068 driver_dump->version_entry.hdr.len =
3069 sizeof(struct ipr_dump_version_entry) -
3070 sizeof(struct ipr_dump_entry_header);
3071 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3072 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3073 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3074 driver_dump->hdr.num_entries++;
3075}
3076
3077/**
3078 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3079 * @ioa_cfg: ioa config struct
3080 * @driver_dump: driver dump struct
3081 *
3082 * Return value:
3083 * nothing
3084 **/
3085static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3086 struct ipr_driver_dump *driver_dump)
3087{
3088 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3089 driver_dump->trace_entry.hdr.len =
3090 sizeof(struct ipr_dump_trace_entry) -
3091 sizeof(struct ipr_dump_entry_header);
3092 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3093 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3094 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3095 driver_dump->hdr.num_entries++;
3096}
3097
3098/**
3099 * ipr_dump_location_data - Fill in the IOA location in the dump.
3100 * @ioa_cfg: ioa config struct
3101 * @driver_dump: driver dump struct
3102 *
3103 * Return value:
3104 * nothing
3105 **/
3106static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3107 struct ipr_driver_dump *driver_dump)
3108{
3109 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3110 driver_dump->location_entry.hdr.len =
3111 sizeof(struct ipr_dump_location_entry) -
3112 sizeof(struct ipr_dump_entry_header);
3113 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3114 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01003115 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003116 driver_dump->hdr.num_entries++;
3117}
3118
3119/**
3120 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3121 * @ioa_cfg: ioa config struct
3122 * @dump: dump struct
3123 *
3124 * Return value:
3125 * nothing
3126 **/
3127static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3128{
3129 unsigned long start_addr, sdt_word;
3130 unsigned long lock_flags = 0;
3131 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3132 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003133 u32 num_entries, max_num_entries, start_off, end_off;
3134 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08003136 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003137 int i;
3138
3139 ENTER;
3140
3141 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3142
Brian King41e9a692011-09-21 08:51:11 -05003143 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003144 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3145 return;
3146 }
3147
Wayne Boyer110def82010-11-04 09:36:16 -07003148 if (ioa_cfg->sis64) {
3149 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3150 ssleep(IPR_DUMP_DELAY_SECONDS);
3151 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3152 }
3153
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 start_addr = readl(ioa_cfg->ioa_mailbox);
3155
Wayne Boyerdcbad002010-02-19 13:24:14 -08003156 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157 dev_err(&ioa_cfg->pdev->dev,
3158 "Invalid dump table format: %lx\n", start_addr);
3159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3160 return;
3161 }
3162
3163 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3164
3165 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3166
3167 /* Initialize the overall dump header */
3168 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3169 driver_dump->hdr.num_entries = 1;
3170 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3171 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3172 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3173 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3174
3175 ipr_dump_version_data(ioa_cfg, driver_dump);
3176 ipr_dump_location_data(ioa_cfg, driver_dump);
3177 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3178 ipr_dump_trace_data(ioa_cfg, driver_dump);
3179
3180 /* Update dump_header */
3181 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3182
3183 /* IOA Dump entry */
3184 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185 ioa_dump->hdr.len = 0;
3186 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3187 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3188
3189 /* First entries in sdt are actually a list of dump addresses and
3190 lengths to gather the real dump data. sdt represents the pointer
3191 to the ioa generated dump table. Dump data will be extracted based
3192 on entries in this table */
3193 sdt = &ioa_dump->sdt;
3194
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003195 if (ioa_cfg->sis64) {
3196 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3197 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3198 } else {
3199 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3200 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3201 }
3202
3203 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3204 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003206 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003207
3208 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003209 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3210 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 dev_err(&ioa_cfg->pdev->dev,
3212 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3213 rc, be32_to_cpu(sdt->hdr.state));
3214 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3215 ioa_cfg->sdt_state = DUMP_OBTAINED;
3216 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3217 return;
3218 }
3219
3220 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3221
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003222 if (num_entries > max_num_entries)
3223 num_entries = max_num_entries;
3224
3225 /* Update dump length to the actual data to be copied */
3226 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3227 if (ioa_cfg->sis64)
3228 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3229 else
3230 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003231
3232 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233
3234 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003235 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003236 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3237 break;
3238 }
3239
3240 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003241 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3242 if (ioa_cfg->sis64)
3243 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3244 else {
3245 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3246 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003247
Wayne Boyerdcbad002010-02-19 13:24:14 -08003248 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3249 bytes_to_copy = end_off - start_off;
3250 else
3251 valid = 0;
3252 }
3253 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003254 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003255 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3256 continue;
3257 }
3258
3259 /* Copy data from adapter to driver buffers */
3260 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3261 bytes_to_copy);
3262
3263 ioa_dump->hdr.len += bytes_copied;
3264
3265 if (bytes_copied != bytes_to_copy) {
3266 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3267 break;
3268 }
3269 }
3270 }
3271 }
3272
3273 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3274
3275 /* Update dump_header */
3276 driver_dump->hdr.len += ioa_dump->hdr.len;
3277 wmb();
3278 ioa_cfg->sdt_state = DUMP_OBTAINED;
3279 LEAVE;
3280}
3281
3282#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003283#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284#endif
3285
3286/**
3287 * ipr_release_dump - Free adapter dump memory
3288 * @kref: kref struct
3289 *
3290 * Return value:
3291 * nothing
3292 **/
3293static void ipr_release_dump(struct kref *kref)
3294{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003295 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003296 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3297 unsigned long lock_flags = 0;
3298 int i;
3299
3300 ENTER;
3301 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3302 ioa_cfg->dump = NULL;
3303 ioa_cfg->sdt_state = INACTIVE;
3304 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3305
3306 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3307 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3308
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003309 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310 kfree(dump);
3311 LEAVE;
3312}
3313
3314/**
3315 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003316 * @work: work struct used to obtain the ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003317 *
3318 * Called at task level from a work thread. This function takes care
3319 * of adding and removing device from the mid-layer as configuration
3320 * changes are detected by the adapter.
3321 *
3322 * Return value:
3323 * nothing
3324 **/
David Howellsc4028952006-11-22 14:57:56 +00003325static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326{
3327 unsigned long lock_flags;
3328 struct ipr_resource_entry *res;
3329 struct scsi_device *sdev;
3330 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003331 struct ipr_ioa_cfg *ioa_cfg =
3332 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333 u8 bus, target, lun;
3334 int did_work;
3335
3336 ENTER;
3337 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3338
Brian King41e9a692011-09-21 08:51:11 -05003339 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 dump = ioa_cfg->dump;
3341 if (!dump) {
3342 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3343 return;
3344 }
3345 kref_get(&dump->kref);
3346 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3347 ipr_get_ioa_dump(ioa_cfg, dump);
3348 kref_put(&dump->kref, ipr_release_dump);
3349
3350 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003351 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003352 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3353 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3354 return;
3355 }
3356
Brian Kingb0e17a92017-08-01 10:21:30 -05003357 if (ioa_cfg->scsi_unblock) {
3358 ioa_cfg->scsi_unblock = 0;
3359 ioa_cfg->scsi_blocked = 0;
3360 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3361 scsi_unblock_requests(ioa_cfg->host);
3362 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3363 if (ioa_cfg->scsi_blocked)
3364 scsi_block_requests(ioa_cfg->host);
3365 }
3366
Brian Kingb195d5e2016-07-15 14:48:03 -05003367 if (!ioa_cfg->scan_enabled) {
3368 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3369 return;
3370 }
3371
Linus Torvalds1da177e2005-04-16 15:20:36 -07003372restart:
3373 do {
3374 did_work = 0;
Brian Kingf688f962014-12-02 12:47:37 -06003375 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003376 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3377 return;
3378 }
3379
3380 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3381 if (res->del_from_ml && res->sdev) {
3382 did_work = 1;
3383 sdev = res->sdev;
3384 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003385 if (!res->add_to_ml)
3386 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3387 else
3388 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3390 scsi_remove_device(sdev);
3391 scsi_device_put(sdev);
3392 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3393 }
3394 break;
3395 }
3396 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003397 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
3399 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3400 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003401 bus = res->bus;
3402 target = res->target;
3403 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003404 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003405 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3406 scsi_add_device(ioa_cfg->host, bus, target, lun);
3407 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3408 goto restart;
3409 }
3410 }
3411
Brian Kingf688f962014-12-02 12:47:37 -06003412 ioa_cfg->scan_done = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003414 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003415 LEAVE;
3416}
3417
3418#ifdef CONFIG_SCSI_IPR_TRACE
3419/**
3420 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003421 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003423 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424 * @buf: buffer
3425 * @off: offset
3426 * @count: buffer size
3427 *
3428 * Return value:
3429 * number of bytes printed to buffer
3430 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003431static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003432 struct bin_attribute *bin_attr,
3433 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434{
Tony Jonesee959b02008-02-22 00:13:36 +01003435 struct device *dev = container_of(kobj, struct device, kobj);
3436 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003437 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3438 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003439 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003440
3441 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003442 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3443 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003445
3446 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003447}
3448
3449static struct bin_attribute ipr_trace_attr = {
3450 .attr = {
3451 .name = "trace",
3452 .mode = S_IRUGO,
3453 },
3454 .size = 0,
3455 .read = ipr_read_trace,
3456};
3457#endif
3458
3459/**
3460 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003461 * @dev: class device struct
3462 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003463 *
3464 * Return value:
3465 * number of bytes printed to buffer
3466 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003467static ssize_t ipr_show_fw_version(struct device *dev,
3468 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469{
Tony Jonesee959b02008-02-22 00:13:36 +01003470 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003471 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3472 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3473 unsigned long lock_flags = 0;
3474 int len;
3475
3476 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3477 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3478 ucode_vpd->major_release, ucode_vpd->card_type,
3479 ucode_vpd->minor_release[0],
3480 ucode_vpd->minor_release[1]);
3481 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482 return len;
3483}
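/*
 * Illustrative usage (host number is an example): reading this attribute
 * through sysfs, typically /sys/class/scsi_host/host0/fw_version, returns
 * the same four VPD bytes as a single hex string such as "02050301",
 * matching the fw_version packing used in ipr_dump_ioa_type_data() above.
 */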
3484
Tony Jonesee959b02008-02-22 00:13:36 +01003485static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003486 .attr = {
3487 .name = "fw_version",
3488 .mode = S_IRUGO,
3489 },
3490 .show = ipr_show_fw_version,
3491};
3492
3493/**
3494 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003495 * @dev: class device struct
3496 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003497 *
3498 * Return value:
3499 * number of bytes printed to buffer
3500 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003501static ssize_t ipr_show_log_level(struct device *dev,
3502 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503{
Tony Jonesee959b02008-02-22 00:13:36 +01003504 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003505 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3506 unsigned long lock_flags = 0;
3507 int len;
3508
3509 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3510 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3511 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3512 return len;
3513}
3514
3515/**
3516 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003517 * @dev: class device struct
3518 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003519 *
3520 * Return value:
3521 * number of bytes consumed from buffer
3522 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003523static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003524 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003525 const char *buf, size_t count)
3526{
Tony Jonesee959b02008-02-22 00:13:36 +01003527 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003528 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3529 unsigned long lock_flags = 0;
3530
3531 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3532 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3533 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3534 return strlen(buf);
3535}
3536
Tony Jonesee959b02008-02-22 00:13:36 +01003537static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003538 .attr = {
3539 .name = "log_level",
3540 .mode = S_IRUGO | S_IWUSR,
3541 },
3542 .show = ipr_show_log_level,
3543 .store = ipr_store_log_level
3544};
3545
3546/**
3547 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003548 * @dev: device struct
3549 * @buf: buffer
3550 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551 *
3552 * This function will reset the adapter and wait a reasonable
3553 * amount of time for any errors that the adapter might log.
3554 *
3555 * Return value:
3556 * count on success / other on failure
3557 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003558static ssize_t ipr_store_diagnostics(struct device *dev,
3559 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003560 const char *buf, size_t count)
3561{
Tony Jonesee959b02008-02-22 00:13:36 +01003562 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003563 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3564 unsigned long lock_flags = 0;
3565 int rc = count;
3566
3567 if (!capable(CAP_SYS_ADMIN))
3568 return -EACCES;
3569
Linus Torvalds1da177e2005-04-16 15:20:36 -07003570 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003571 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003572 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3573 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3574 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3575 }
3576
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577 ioa_cfg->errors_logged = 0;
3578 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3579
3580 if (ioa_cfg->in_reset_reload) {
3581 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3582 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3583
3584 /* Wait for a second for any errors to be logged */
3585 msleep(1000);
3586 } else {
3587 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3588 return -EIO;
3589 }
3590
3591 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3592 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3593 rc = -EIO;
3594 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3595
3596 return rc;
3597}
3598
Tony Jonesee959b02008-02-22 00:13:36 +01003599static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600 .attr = {
3601 .name = "run_diagnostics",
3602 .mode = S_IWUSR,
3603 },
3604 .store = ipr_store_diagnostics
3605};
3606
3607/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003608 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003609 * @dev: device struct
3610 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003611 *
3612 * Return value:
3613 * number of bytes printed to buffer
3614 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003615static ssize_t ipr_show_adapter_state(struct device *dev,
3616 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003617{
Tony Jonesee959b02008-02-22 00:13:36 +01003618 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003619 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3620 unsigned long lock_flags = 0;
3621 int len;
3622
3623 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003624 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003625 len = snprintf(buf, PAGE_SIZE, "offline\n");
3626 else
3627 len = snprintf(buf, PAGE_SIZE, "online\n");
3628 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3629 return len;
3630}
3631
3632/**
3633 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003634 * @dev: device struct
3635 * @buf: buffer
3636 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003637 *
3638 * This function will change the adapter's state.
3639 *
3640 * Return value:
3641 * count on success / other on failure
3642 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003643static ssize_t ipr_store_adapter_state(struct device *dev,
3644 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003645 const char *buf, size_t count)
3646{
Tony Jonesee959b02008-02-22 00:13:36 +01003647 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003648 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3649 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003650 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003651
3652 if (!capable(CAP_SYS_ADMIN))
3653 return -EACCES;
3654
3655 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003656 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3657 !strncmp(buf, "online", 6)) {
3658 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3659 spin_lock(&ioa_cfg->hrrq[i]._lock);
3660 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3661 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3662 }
3663 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003664 ioa_cfg->reset_retries = 0;
3665 ioa_cfg->in_ioa_bringdown = 0;
3666 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3667 }
3668 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3669 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3670
3671 return result;
3672}
3673
Tony Jonesee959b02008-02-22 00:13:36 +01003674static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003675 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003676 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003677 .mode = S_IRUGO | S_IWUSR,
3678 },
3679 .show = ipr_show_adapter_state,
3680 .store = ipr_store_adapter_state
3681};
3682
3683/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003684 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003685 * @dev: device struct
3686 * @buf: buffer
3687 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003688 *
3689 * This function will reset the adapter.
3690 *
3691 * Return value:
3692 * count on success / other on failure
3693 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003694static ssize_t ipr_store_reset_adapter(struct device *dev,
3695 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003696 const char *buf, size_t count)
3697{
Tony Jonesee959b02008-02-22 00:13:36 +01003698 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003699 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3700 unsigned long lock_flags;
3701 int result = count;
3702
3703 if (!capable(CAP_SYS_ADMIN))
3704 return -EACCES;
3705
3706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3707 if (!ioa_cfg->in_reset_reload)
3708 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3709 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3710 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3711
3712 return result;
3713}
3714
Tony Jonesee959b02008-02-22 00:13:36 +01003715static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716 .attr = {
3717 .name = "reset_host",
3718 .mode = S_IWUSR,
3719 },
3720 .store = ipr_store_reset_adapter
3721};
3722
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003723static int ipr_iopoll(struct irq_poll *iop, int budget);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003724/**
3725 * ipr_show_iopoll_weight - Show ipr polling mode
3726 * @dev: class device struct
3727 * @buf: buffer
3728 *
3729 * Return value:
3730 * number of bytes printed to buffer
3731 **/
3732static ssize_t ipr_show_iopoll_weight(struct device *dev,
3733 struct device_attribute *attr, char *buf)
3734{
3735 struct Scsi_Host *shost = class_to_shost(dev);
3736 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3737 unsigned long lock_flags = 0;
3738 int len;
3739
3740 spin_lock_irqsave(shost->host_lock, lock_flags);
3741 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3742 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3743
3744 return len;
3745}
3746
3747/**
3748 * ipr_store_iopoll_weight - Change the adapter's polling mode
3749 * @dev: class device struct
3750 * @buf: buffer
3751 *
3752 * Return value:
3753 * number of bytes consumed from buffer
3754 **/
3755static ssize_t ipr_store_iopoll_weight(struct device *dev,
3756 struct device_attribute *attr,
3757 const char *buf, size_t count)
3758{
3759 struct Scsi_Host *shost = class_to_shost(dev);
3760 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3761 unsigned long user_iopoll_weight;
3762 unsigned long lock_flags = 0;
3763 int i;
3764
3765 if (!ioa_cfg->sis64) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003766 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003767 return -EINVAL;
3768 }
3769 if (kstrtoul(buf, 10, &user_iopoll_weight))
3770 return -EINVAL;
3771
3772 if (user_iopoll_weight > 256) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003773		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003774 return -EINVAL;
3775 }
3776
3777 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003778		dev_info(&ioa_cfg->pdev->dev, "Requested irq_poll weight matches the current setting\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003779 return strlen(buf);
3780 }
3781
Jens Axboe89f8b332014-03-13 09:38:42 -06003782 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003783 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003784 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003785 }
3786
3787 spin_lock_irqsave(shost->host_lock, lock_flags);
3788 ioa_cfg->iopoll_weight = user_iopoll_weight;
Jens Axboe89f8b332014-03-13 09:38:42 -06003789 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003790 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003791 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003792 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003793 }
3794 }
3795 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3796
3797 return strlen(buf);
3798}
3799
3800static struct device_attribute ipr_iopoll_weight_attr = {
3801 .attr = {
3802 .name = "iopoll_weight",
3803 .mode = S_IRUGO | S_IWUSR,
3804 },
3805 .show = ipr_show_iopoll_weight,
3806 .store = ipr_store_iopoll_weight
3807};
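/*
 * Illustrative usage (path and value are examples only): on a sis64
 * adapter with more than one interrupt vector, writing a weight between
 * 1 and 256 to /sys/class/scsi_host/hostN/iopoll_weight switches the
 * secondary HRRQs to irq_poll based completion handling; writing 0
 * disables polling and returns them to plain interrupt handling.
 */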
3808
Linus Torvalds1da177e2005-04-16 15:20:36 -07003809/**
3810 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3811 * @buf_len: buffer length
3812 *
3813 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3814 * list to use for microcode download
3815 *
3816 * Return value:
3817 * pointer to sglist / NULL on failure
3818 **/
3819static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3820{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003821 int sg_size, order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003823
3824 /* Get the minimum size per scatter/gather element */
3825 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3826
3827 /* Get the actual size per element */
3828 order = get_order(sg_size);
3829
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 /* Allocate a scatter/gather list for the DMA */
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003831 sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832 if (sglist == NULL) {
3833 ipr_trace;
3834 return NULL;
3835 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003836 sglist->order = order;
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003837 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3838 &sglist->num_sg);
3839 if (!sglist->scatterlist) {
3840 kfree(sglist);
3841 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003842 }
3843
3844 return sglist;
3845}
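/*
 * Worked example (illustrative; the real IPR_MAX_SGLIST value comes from
 * ipr.h): with a limit of 64 scatter/gather elements, a 1 MB microcode
 * image gives sg_size = 1048576 / 63 = 16644, get_order() rounds that up
 * to order 3 (32 KB chunks with 4 KB pages), and sgl_alloc_order() then
 * carves the buffer into 32 KB pieces.
 */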
3846
3847/**
3848 * ipr_free_ucode_buffer - Frees a microcode download buffer
3849 * @p_dnld: scatter/gather list pointer
3850 *
3851 * Free a DMA'able ucode download buffer previously allocated with
3852 * ipr_alloc_ucode_buffer
3853 *
3854 * Return value:
3855 * nothing
3856 **/
3857static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3858{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003859 sgl_free_order(sglist->scatterlist, sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003860 kfree(sglist);
3861}
3862
3863/**
3864 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3865 * @sglist: scatter/gather list pointer
3866 * @buffer: buffer pointer
3867 * @len: buffer length
3868 *
3869 * Copy a microcode image from a user buffer into a buffer allocated by
3870 * ipr_alloc_ucode_buffer
3871 *
3872 * Return value:
3873 * 0 on success / other on failure
3874 **/
3875static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3876 u8 *buffer, u32 len)
3877{
3878 int bsize_elem, i, result = 0;
3879 struct scatterlist *scatterlist;
3880 void *kaddr;
3881
3882 /* Determine the actual number of bytes per element */
3883 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3884
3885 scatterlist = sglist->scatterlist;
3886
3887 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003888 struct page *page = sg_page(&scatterlist[i]);
3889
3890 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003892 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893
3894 scatterlist[i].length = bsize_elem;
3895
3896 if (result != 0) {
3897 ipr_trace;
3898 return result;
3899 }
3900 }
3901
3902 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003903 struct page *page = sg_page(&scatterlist[i]);
3904
3905 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003907 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908
3909 scatterlist[i].length = len % bsize_elem;
3910 }
3911
3912 sglist->buffer_len = len;
3913 return result;
3914}
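/*
 * Continuing the example above (hypothetical sizes): with order-3
 * elements, bsize_elem = PAGE_SIZE * 8 = 32768, so a 1 MB image fills
 * exactly 32 elements; an image of 1 MB + 100 bytes would spill the last
 * 100 bytes into a 33rd element, whose scatterlist length is trimmed to
 * the remainder by the final branch above.
 */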
3915
3916/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003917 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3918 * @ipr_cmd: ipr command struct
3919 * @sglist: scatter/gather list
3920 *
3921 * Builds a microcode download IOA data list (IOADL).
3922 *
3923 **/
3924static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3925 struct ipr_sglist *sglist)
3926{
3927 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3928 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3929 struct scatterlist *scatterlist = sglist->scatterlist;
3930 int i;
3931
3932 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3933 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3934 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3935
3936 ioarcb->ioadl_len =
3937 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3938 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3939 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3940 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3941 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3942 }
3943
3944 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3945}
3946
3947/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003948 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 * @ipr_cmd: ipr command struct
3950 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003952 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003955static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3956 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003959 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960 struct scatterlist *scatterlist = sglist->scatterlist;
3961 int i;
3962
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003963 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003964 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003965 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3966
3967 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3969
3970 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3971 ioadl[i].flags_and_data_len =
3972 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3973 ioadl[i].address =
3974 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3975 }
3976
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003977 ioadl[i-1].flags_and_data_len |=
3978 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3979}
3980
3981/**
3982 * ipr_update_ioa_ucode - Update IOA's microcode
3983 * @ioa_cfg: ioa config struct
3984 * @sglist: scatter/gather list
3985 *
3986 * Initiate an adapter reset to update the IOA's microcode
3987 *
3988 * Return value:
3989 * 0 on success / -EIO on failure
3990 **/
3991static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3992 struct ipr_sglist *sglist)
3993{
3994 unsigned long lock_flags;
3995
3996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003997 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003998 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3999 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4000 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4001 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004002
4003 if (ioa_cfg->ucode_sglist) {
4004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4005 dev_err(&ioa_cfg->pdev->dev,
4006 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 return -EIO;
4008 }
4009
Anton Blanchardd73341b2014-10-30 17:27:08 -05004010 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4011 sglist->scatterlist, sglist->num_sg,
4012 DMA_TO_DEVICE);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004013
4014 if (!sglist->num_dma_sg) {
4015 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4016 dev_err(&ioa_cfg->pdev->dev,
4017 "Failed to map microcode download buffer!\n");
4018 return -EIO;
4019 }
4020
4021 ioa_cfg->ucode_sglist = sglist;
4022 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4023 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4024 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4025
4026 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4027 ioa_cfg->ucode_sglist = NULL;
4028 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029 return 0;
4030}
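
/*
 * Download flow used above: any reset/reload already in progress is
 * allowed to finish, a concurrent download attempt is rejected, the
 * scatter/gather list is DMA mapped and published via
 * ioa_cfg->ucode_sglist, and a normal-shutdown adapter reset is
 * initiated.  The buffer itself is consumed during that reset sequence
 * (handled elsewhere in the driver); once the reset/reload completes,
 * ucode_sglist is cleared again.
 */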
4031
4032/**
4033 * ipr_store_update_fw - Update the firmware on the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01004034 * @dev: device struct
4035 * @buf: buffer
4036 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037 *
4038 * This function will update the firmware on the adapter.
4039 *
4040 * Return value:
4041 * count on success / other on failure
4042 **/
Tony Jonesee959b02008-02-22 00:13:36 +01004043static ssize_t ipr_store_update_fw(struct device *dev,
4044 struct device_attribute *attr,
4045 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046{
Tony Jonesee959b02008-02-22 00:13:36 +01004047 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004048 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4049 struct ipr_ucode_image_header *image_hdr;
4050 const struct firmware *fw_entry;
4051 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 char fname[100];
4053 char *src;
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004054 char *endline;
Insu Yund63c7dd2016-01-06 12:44:01 -05004055 int result, dnld_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056
4057 if (!capable(CAP_SYS_ADMIN))
4058 return -EACCES;
4059
Insu Yund63c7dd2016-01-06 12:44:01 -05004060 snprintf(fname, sizeof(fname), "%s", buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004061
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004062 endline = strchr(fname, '\n');
4063 if (endline)
4064 *endline = '\0';
4065
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004066 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004067 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4068 return -EIO;
4069 }
4070
4071 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4072
Linus Torvalds1da177e2005-04-16 15:20:36 -07004073 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4074 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4075 sglist = ipr_alloc_ucode_buffer(dnld_size);
4076
4077 if (!sglist) {
4078 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4079 release_firmware(fw_entry);
4080 return -ENOMEM;
4081 }
4082
4083 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4084
4085 if (result) {
4086 dev_err(&ioa_cfg->pdev->dev,
4087 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004088 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089 }
4090
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07004091 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4092
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004093 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004095 if (!result)
4096 result = count;
4097out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098 ipr_free_ucode_buffer(sglist);
4099 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004100 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101}
4102
Tony Jonesee959b02008-02-22 00:13:36 +01004103static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104 .attr = {
4105 .name = "update_fw",
4106 .mode = S_IWUSR,
4107 },
4108 .store = ipr_store_update_fw
4109};
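
/*
 * The update_fw attribute is registered through ipr_ioa_attrs below, so
 * it is typically visible as /sys/class/scsi_host/hostN/update_fw.
 * Writing a firmware file name (looked up via request_firmware(), so
 * relative to the system firmware search path) starts the download, for
 * example (host number and file name are illustrative only):
 *
 *   echo ibm-ipr-ucode.bin > /sys/class/scsi_host/host0/update_fw
 *
 * Any trailing newline added by echo is stripped before the lookup.
 */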
4110
Wayne Boyer75576bb2010-07-14 10:50:14 -07004111/**
4112 * ipr_show_fw_type - Show the adapter's firmware type.
4113 * @dev: class device struct
4114 * @buf: buffer
4115 *
4116 * Return value:
4117 * number of bytes printed to buffer
4118 **/
4119static ssize_t ipr_show_fw_type(struct device *dev,
4120 struct device_attribute *attr, char *buf)
4121{
4122 struct Scsi_Host *shost = class_to_shost(dev);
4123 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4124 unsigned long lock_flags = 0;
4125 int len;
4126
4127 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4128 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4129 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4130 return len;
4131}
4132
4133static struct device_attribute ipr_ioa_fw_type_attr = {
4134 .attr = {
4135 .name = "fw_type",
4136 .mode = S_IRUGO,
4137 },
4138 .show = ipr_show_fw_type
4139};
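
/*
 * fw_type reports ioa_cfg->sis64 directly: "1" indicates an adapter
 * using the 64-bit SIS interface (sis64), "0" a legacy 32-bit SIS
 * adapter.
 */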
4140
Brian Kingafc3f832016-08-24 12:56:51 -05004141static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4142 struct bin_attribute *bin_attr, char *buf,
4143 loff_t off, size_t count)
4144{
4145 struct device *cdev = container_of(kobj, struct device, kobj);
4146 struct Scsi_Host *shost = class_to_shost(cdev);
4147 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4148 struct ipr_hostrcb *hostrcb;
4149 unsigned long lock_flags = 0;
4150 int ret;
4151
4152 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4153 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4154 struct ipr_hostrcb, queue);
4155 if (!hostrcb) {
4156 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4157 return 0;
4158 }
4159 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4160 sizeof(hostrcb->hcam));
4161 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4162 return ret;
4163}
4164
4165static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4166 struct bin_attribute *bin_attr, char *buf,
4167 loff_t off, size_t count)
4168{
4169 struct device *cdev = container_of(kobj, struct device, kobj);
4170 struct Scsi_Host *shost = class_to_shost(cdev);
4171 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4172 struct ipr_hostrcb *hostrcb;
4173 unsigned long lock_flags = 0;
4174
4175 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4176 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4177 struct ipr_hostrcb, queue);
4178 if (!hostrcb) {
4179 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4180 return count;
4181 }
4182
4183 /* Reclaim hostrcb before exit */
4184 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4185 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4186 return count;
4187}
4188
4189static struct bin_attribute ipr_ioa_async_err_log = {
4190 .attr = {
4191 .name = "async_err_log",
4192 .mode = S_IRUGO | S_IWUSR,
4193 },
4194 .size = 0,
4195 .read = ipr_read_async_err_log,
4196 .write = ipr_next_async_err_log
4197};
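
/*
 * async_err_log exposes the oldest host RCB on hostrcb_report_q as a
 * binary blob.  A userspace consumer reads the HCAM data and then
 * writes to the same attribute to pop that entry and expose the next
 * one, for example (assuming the usual scsi_host sysfs location):
 *
 *   dd if=/sys/class/scsi_host/host0/async_err_log of=hcam.bin
 *   echo 1 > /sys/class/scsi_host/host0/async_err_log
 */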
4198
Tony Jonesee959b02008-02-22 00:13:36 +01004199static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200 &ipr_fw_version_attr,
4201 &ipr_log_level_attr,
4202 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004203 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004204 &ipr_ioa_reset_attr,
4205 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004206 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004207 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 NULL,
4209};
4210
4211#ifdef CONFIG_SCSI_IPR_DUMP
4212/**
4213 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004214 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004216 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217 * @buf: buffer
4218 * @off: offset
4219 * @count: buffer size
4220 *
4221 * Return value:
4222 * number of bytes printed to buffer
4223 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004224static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004225 struct bin_attribute *bin_attr,
4226 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227{
Tony Jonesee959b02008-02-22 00:13:36 +01004228 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229 struct Scsi_Host *shost = class_to_shost(cdev);
4230 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4231 struct ipr_dump *dump;
4232 unsigned long lock_flags = 0;
4233 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004234 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004235 size_t rc = count;
4236
4237 if (!capable(CAP_SYS_ADMIN))
4238 return -EACCES;
4239
4240 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4241 dump = ioa_cfg->dump;
4242
4243 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4244 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4245 return 0;
4246 }
4247 kref_get(&dump->kref);
4248 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4249
4250 if (off > dump->driver_dump.hdr.len) {
4251 kref_put(&dump->kref, ipr_release_dump);
4252 return 0;
4253 }
4254
4255 if (off + count > dump->driver_dump.hdr.len) {
4256 count = dump->driver_dump.hdr.len - off;
4257 rc = count;
4258 }
4259
4260 if (count && off < sizeof(dump->driver_dump)) {
4261 if (off + count > sizeof(dump->driver_dump))
4262 len = sizeof(dump->driver_dump) - off;
4263 else
4264 len = count;
4265 src = (u8 *)&dump->driver_dump + off;
4266 memcpy(buf, src, len);
4267 buf += len;
4268 off += len;
4269 count -= len;
4270 }
4271
4272 off -= sizeof(dump->driver_dump);
4273
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004274 if (ioa_cfg->sis64)
4275 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4276 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4277 sizeof(struct ipr_sdt_entry));
4278 else
4279 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4280 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4281
4282 if (count && off < sdt_end) {
4283 if (off + count > sdt_end)
4284 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285 else
4286 len = count;
4287 src = (u8 *)&dump->ioa_dump + off;
4288 memcpy(buf, src, len);
4289 buf += len;
4290 off += len;
4291 count -= len;
4292 }
4293
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004294 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295
4296 while (count) {
4297 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4298 len = PAGE_ALIGN(off) - off;
4299 else
4300 len = count;
4301 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4302 src += off & ~PAGE_MASK;
4303 memcpy(buf, src, len);
4304 buf += len;
4305 off += len;
4306 count -= len;
4307 }
4308
4309 kref_put(&dump->kref, ipr_release_dump);
4310 return rc;
4311}
4312
4313/**
4314 * ipr_alloc_dump - Prepare for adapter dump
4315 * @ioa_cfg: ioa config struct
4316 *
4317 * Return value:
4318 * 0 on success / other on failure
4319 **/
4320static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4321{
4322 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004323 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004324 unsigned long lock_flags = 0;
4325
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004326 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004327
4328 if (!dump) {
4329 ipr_err("Dump memory allocation failed\n");
4330 return -ENOMEM;
4331 }
4332
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004333 if (ioa_cfg->sis64)
Kees Cook42bc47b2018-06-12 14:27:11 -07004334 ioa_data = vmalloc(array_size(IPR_FMT3_MAX_NUM_DUMP_PAGES,
4335 sizeof(__be32 *)));
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004336 else
Kees Cook42bc47b2018-06-12 14:27:11 -07004337 ioa_data = vmalloc(array_size(IPR_FMT2_MAX_NUM_DUMP_PAGES,
4338 sizeof(__be32 *)));
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004339
4340 if (!ioa_data) {
4341 ipr_err("Dump memory allocation failed\n");
4342 kfree(dump);
4343 return -ENOMEM;
4344 }
4345
4346 dump->ioa_dump.ioa_data = ioa_data;
4347
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 kref_init(&dump->kref);
4349 dump->ioa_cfg = ioa_cfg;
4350
4351 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4352
4353 if (INACTIVE != ioa_cfg->sdt_state) {
4354 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004355 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356 kfree(dump);
4357 return 0;
4358 }
4359
4360 ioa_cfg->dump = dump;
4361 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004362 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363 ioa_cfg->dump_taken = 1;
4364 schedule_work(&ioa_cfg->work_q);
4365 }
4366 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4367
Linus Torvalds1da177e2005-04-16 15:20:36 -07004368 return 0;
4369}
4370
4371/**
4372 * ipr_free_dump - Free adapter dump memory
4373 * @ioa_cfg: ioa config struct
4374 *
4375 * Return value:
4376 * 0 on success / other on failure
4377 **/
4378static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4379{
4380 struct ipr_dump *dump;
4381 unsigned long lock_flags = 0;
4382
4383 ENTER;
4384
4385 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4386 dump = ioa_cfg->dump;
4387 if (!dump) {
4388 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4389 return 0;
4390 }
4391
4392 ioa_cfg->dump = NULL;
4393 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4394
4395 kref_put(&dump->kref, ipr_release_dump);
4396
4397 LEAVE;
4398 return 0;
4399}
4400
4401/**
4402 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004403 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004405 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406 * @buf: buffer
4407 * @off: offset
4408 * @count: buffer size
4409 *
4410 * Return value:
4411 * count on success / other on failure
4412 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004413static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004414 struct bin_attribute *bin_attr,
4415 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416{
Tony Jonesee959b02008-02-22 00:13:36 +01004417 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004418 struct Scsi_Host *shost = class_to_shost(cdev);
4419 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4420 int rc;
4421
4422 if (!capable(CAP_SYS_ADMIN))
4423 return -EACCES;
4424
4425 if (buf[0] == '1')
4426 rc = ipr_alloc_dump(ioa_cfg);
4427 else if (buf[0] == '0')
4428 rc = ipr_free_dump(ioa_cfg);
4429 else
4430 return -EINVAL;
4431
4432 if (rc)
4433 return rc;
4434 else
4435 return count;
4436}
4437
4438static struct bin_attribute ipr_dump_attr = {
4439 .attr = {
4440 .name = "dump",
4441 .mode = S_IRUSR | S_IWUSR,
4442 },
4443 .size = 0,
4444 .read = ipr_read_dump,
4445 .write = ipr_write_dump
4446};
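
/*
 * The dump attribute is a two step interface: writing '1' allocates the
 * dump buffers and arms dump collection (ipr_alloc_dump), the dump can
 * be read back from the same attribute once it has been obtained, and
 * writing '0' releases the memory (ipr_free_dump).  For example,
 * assuming the usual scsi_host sysfs location:
 *
 *   echo 1 > /sys/class/scsi_host/host0/dump
 *   ...wait for the adapter dump to be collected...
 *   dd if=/sys/class/scsi_host/host0/dump of=ipr_dump.bin
 *   echo 0 > /sys/class/scsi_host/host0/dump
 */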
4447#else
4448static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4449#endif
4450
4451/**
4452 * ipr_change_queue_depth - Change the device's queue depth
4453 * @sdev: scsi device struct
4454 * @qdepth: depth to set
Linus Torvalds1da177e2005-04-16 15:20:36 -07004456 *
4457 * Return value:
4458 * actual depth set
4459 **/
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004460static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004461{
Brian King35a39692006-09-25 12:39:20 -05004462 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4463 struct ipr_resource_entry *res;
4464 unsigned long lock_flags = 0;
4465
4466 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4467 res = (struct ipr_resource_entry *)sdev->hostdata;
4468
4469 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4470 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4471 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4472
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004473 scsi_change_queue_depth(sdev, qdepth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474 return sdev->queue_depth;
4475}
4476
4477/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004478 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4479 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004480 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004481 * @buf: buffer
4482 *
4483 * Return value:
4484 * number of bytes printed to buffer
4485 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004486static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004487{
4488 struct scsi_device *sdev = to_scsi_device(dev);
4489 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4490 struct ipr_resource_entry *res;
4491 unsigned long lock_flags = 0;
4492 ssize_t len = -ENXIO;
4493
4494 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4495 res = (struct ipr_resource_entry *)sdev->hostdata;
4496 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004497 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4499 return len;
4500}
4501
4502static struct device_attribute ipr_adapter_handle_attr = {
4503 .attr = {
4504 .name = "adapter_handle",
4505 .mode = S_IRUSR,
4506 },
4507 .show = ipr_show_adapter_handle
4508};
4509
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004510/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004511 * ipr_show_resource_path - Show the resource path or the resource address for
4512 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004513 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004514 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004515 * @buf: buffer
4516 *
4517 * Return value:
4518 * number of bytes printed to buffer
4519 **/
4520static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4521{
4522 struct scsi_device *sdev = to_scsi_device(dev);
4523 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4524 struct ipr_resource_entry *res;
4525 unsigned long lock_flags = 0;
4526 ssize_t len = -ENXIO;
4527 char buffer[IPR_MAX_RES_PATH_LENGTH];
4528
4529 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4530 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004531 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004532 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004533 __ipr_format_res_path(res->res_path, buffer,
4534 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004535 else if (res)
4536 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4537 res->bus, res->target, res->lun);
4538
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004539 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4540 return len;
4541}
4542
4543static struct device_attribute ipr_resource_path_attr = {
4544 .attr = {
4545 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004546 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004547 },
4548 .show = ipr_show_resource_path
4549};
4550
Wayne Boyer75576bb2010-07-14 10:50:14 -07004551/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004552 * ipr_show_device_id - Show the device_id for this device.
4553 * @dev: device struct
4554 * @attr: device attribute structure
4555 * @buf: buffer
4556 *
4557 * Return value:
4558 * number of bytes printed to buffer
4559 **/
4560static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4561{
4562 struct scsi_device *sdev = to_scsi_device(dev);
4563 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4564 struct ipr_resource_entry *res;
4565 unsigned long lock_flags = 0;
4566 ssize_t len = -ENXIO;
4567
4568 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4569 res = (struct ipr_resource_entry *)sdev->hostdata;
4570 if (res && ioa_cfg->sis64)
Wen Xiongbb8647e2015-06-11 20:45:18 -05004571 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
Wayne Boyer46d74562010-08-11 07:15:17 -07004572 else if (res)
4573 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4574
4575 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4576 return len;
4577}
4578
4579static struct device_attribute ipr_device_id_attr = {
4580 .attr = {
4581 .name = "device_id",
4582 .mode = S_IRUGO,
4583 },
4584 .show = ipr_show_device_id
4585};
4586
4587/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004588 * ipr_show_resource_type - Show the resource type for this device.
4589 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004590 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004591 * @buf: buffer
4592 *
4593 * Return value:
4594 * number of bytes printed to buffer
4595 **/
4596static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4597{
4598 struct scsi_device *sdev = to_scsi_device(dev);
4599 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4600 struct ipr_resource_entry *res;
4601 unsigned long lock_flags = 0;
4602 ssize_t len = -ENXIO;
4603
4604 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4605 res = (struct ipr_resource_entry *)sdev->hostdata;
4606
4607 if (res)
4608 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4609
4610 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4611 return len;
4612}
4613
4614static struct device_attribute ipr_resource_type_attr = {
4615 .attr = {
4616 .name = "resource_type",
4617 .mode = S_IRUGO,
4618 },
4619 .show = ipr_show_resource_type
4620};
4621
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004622/**
4623 * ipr_show_raw_mode - Show the adapter's raw mode
4624 * @dev: class device struct
4625 * @buf: buffer
4626 *
4627 * Return value:
4628 * number of bytes printed to buffer
4629 **/
4630static ssize_t ipr_show_raw_mode(struct device *dev,
4631 struct device_attribute *attr, char *buf)
4632{
4633 struct scsi_device *sdev = to_scsi_device(dev);
4634 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4635 struct ipr_resource_entry *res;
4636 unsigned long lock_flags = 0;
4637 ssize_t len;
4638
4639 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4640 res = (struct ipr_resource_entry *)sdev->hostdata;
4641 if (res)
4642 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4643 else
4644 len = -ENXIO;
4645 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4646 return len;
4647}
4648
4649/**
4650 * ipr_store_raw_mode - Change the adapter's raw mode
4651 * @dev: class device struct
4652 * @buf: buffer
4653 *
4654 * Return value:
4655 * number of bytes consumed on success / error code on failure
4656 **/
4657static ssize_t ipr_store_raw_mode(struct device *dev,
4658 struct device_attribute *attr,
4659 const char *buf, size_t count)
4660{
4661 struct scsi_device *sdev = to_scsi_device(dev);
4662 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4663 struct ipr_resource_entry *res;
4664 unsigned long lock_flags = 0;
4665 ssize_t len;
4666
4667 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4668 res = (struct ipr_resource_entry *)sdev->hostdata;
4669 if (res) {
Gabriel Krisman Bertazie35d7f272015-08-19 11:47:06 -03004670 if (ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004671 res->raw_mode = simple_strtoul(buf, NULL, 10);
4672 len = strlen(buf);
4673 if (res->sdev)
4674 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4675 res->raw_mode ? "enabled" : "disabled");
4676 } else
4677 len = -EINVAL;
4678 } else
4679 len = -ENXIO;
4680 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4681 return len;
4682}
4683
4684static struct device_attribute ipr_raw_mode_attr = {
4685 .attr = {
4686 .name = "raw_mode",
4687 .mode = S_IRUGO | S_IWUSR,
4688 },
4689 .show = ipr_show_raw_mode,
4690 .store = ipr_store_raw_mode
4691};
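
/*
 * raw_mode is a per-device attribute (see ipr_dev_attrs below), so it
 * normally appears in the SCSI device's sysfs directory.  It can only
 * be changed for AF DASD devices; any non-zero value enables raw mode
 * and 0 disables it, for example (device address is illustrative):
 *
 *   echo 1 > /sys/class/scsi_device/0:0:1:0/device/raw_mode
 */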
4692
Linus Torvalds1da177e2005-04-16 15:20:36 -07004693static struct device_attribute *ipr_dev_attrs[] = {
4694 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004695 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004696 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004697 &ipr_resource_type_attr,
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004698 &ipr_raw_mode_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004699 NULL,
4700};
4701
4702/**
4703 * ipr_biosparam - Return the HSC mapping
4704 * @sdev: scsi device struct
4705 * @block_device: block device pointer
4706 * @capacity: capacity of the device
4707 * @parm: Array containing returned HSC values.
4708 *
4709 * This function generates the HSC parms that fdisk uses.
4710 * We want to make sure we return something that places partitions
4711 * on 4k boundaries for best performance with the IOA.
4712 *
4713 * Return value:
4714 * 0 on success
4715 **/
4716static int ipr_biosparam(struct scsi_device *sdev,
4717 struct block_device *block_device,
4718 sector_t capacity, int *parm)
4719{
4720 int heads, sectors;
4721 sector_t cylinders;
4722
4723 heads = 128;
4724 sectors = 32;
4725
4726 cylinders = capacity;
4727 sector_div(cylinders, (128 * 32));
4728
4729 /* return result */
4730 parm[0] = heads;
4731 parm[1] = sectors;
4732 parm[2] = cylinders;
4733
4734 return 0;
4735}
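
/*
 * Worked example for the geometry above: 128 heads * 32 sectors/track
 * = 4096 sectors per cylinder, i.e. 2 MB with 512 byte sectors.
 * Partitioning tools that align partitions on cylinder boundaries
 * therefore place every partition at a multiple of 4096 sectors, which
 * is always a 4k byte boundary, as intended by the kernel-doc comment
 * above.
 */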
4736
4737/**
Brian King35a39692006-09-25 12:39:20 -05004738 * ipr_find_starget - Find target based on bus/target.
4739 * @starget: scsi target struct
4740 *
4741 * Return value:
4742 * resource entry pointer if found / NULL if not found
4743 **/
4744static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4745{
4746 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4747 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4748 struct ipr_resource_entry *res;
4749
4750 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004751 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004752 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004753 return res;
4754 }
4755 }
4756
4757 return NULL;
4758}
4759
4760static struct ata_port_info sata_port_info;
4761
4762/**
4763 * ipr_target_alloc - Prepare for commands to a SCSI target
4764 * @starget: scsi target struct
4765 *
4766 * If the device is a SATA device, this function allocates an
4767 * ATA port with libata, else it does nothing.
4768 *
4769 * Return value:
4770 * 0 on success / non-0 on failure
4771 **/
4772static int ipr_target_alloc(struct scsi_target *starget)
4773{
4774 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4775 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4776 struct ipr_sata_port *sata_port;
4777 struct ata_port *ap;
4778 struct ipr_resource_entry *res;
4779 unsigned long lock_flags;
4780
4781 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4782 res = ipr_find_starget(starget);
4783 starget->hostdata = NULL;
4784
4785 if (res && ipr_is_gata(res)) {
4786 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4787 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4788 if (!sata_port)
4789 return -ENOMEM;
4790
4791 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4792 if (ap) {
4793 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4794 sata_port->ioa_cfg = ioa_cfg;
4795 sata_port->ap = ap;
4796 sata_port->res = res;
4797
4798 res->sata_port = sata_port;
4799 ap->private_data = sata_port;
4800 starget->hostdata = sata_port;
4801 } else {
4802 kfree(sata_port);
4803 return -ENOMEM;
4804 }
4805 }
4806 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4807
4808 return 0;
4809}
4810
4811/**
4812 * ipr_target_destroy - Destroy a SCSI target
4813 * @starget: scsi target struct
4814 *
4815 * If the device was a SATA device, this function frees the libata
4816 * ATA port, else it does nothing.
4817 *
4818 **/
4819static void ipr_target_destroy(struct scsi_target *starget)
4820{
4821 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004822 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4823 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4824
4825 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004826 if (!ipr_find_starget(starget)) {
4827 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4828 clear_bit(starget->id, ioa_cfg->array_ids);
4829 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4830 clear_bit(starget->id, ioa_cfg->vset_ids);
4831 else if (starget->channel == 0)
4832 clear_bit(starget->id, ioa_cfg->target_ids);
4833 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004834 }
Brian King35a39692006-09-25 12:39:20 -05004835
4836 if (sata_port) {
4837 starget->hostdata = NULL;
4838 ata_sas_port_destroy(sata_port->ap);
4839 kfree(sata_port);
4840 }
4841}
4842
4843/**
4844 * ipr_find_sdev - Find device based on bus/target/lun.
4845 * @sdev: scsi device struct
4846 *
4847 * Return value:
4848 * resource entry pointer if found / NULL if not found
4849 **/
4850static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4851{
4852 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4853 struct ipr_resource_entry *res;
4854
4855 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004856 if ((res->bus == sdev->channel) &&
4857 (res->target == sdev->id) &&
4858 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004859 return res;
4860 }
4861
4862 return NULL;
4863}
4864
4865/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866 * ipr_slave_destroy - Unconfigure a SCSI device
4867 * @sdev: scsi device struct
4868 *
4869 * Return value:
4870 * nothing
4871 **/
4872static void ipr_slave_destroy(struct scsi_device *sdev)
4873{
4874 struct ipr_resource_entry *res;
4875 struct ipr_ioa_cfg *ioa_cfg;
4876 unsigned long lock_flags = 0;
4877
4878 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4879
4880 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4881 res = (struct ipr_resource_entry *) sdev->hostdata;
4882 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004883 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004884 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004885 sdev->hostdata = NULL;
4886 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004887 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004888 }
4889 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4890}
4891
4892/**
4893 * ipr_slave_configure - Configure a SCSI device
4894 * @sdev: scsi device struct
4895 *
4896 * This function configures the specified scsi device.
4897 *
4898 * Return value:
4899 * 0 on success
4900 **/
4901static int ipr_slave_configure(struct scsi_device *sdev)
4902{
4903 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4904 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004905 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004906 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004907 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004908
4909 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4910 res = sdev->hostdata;
4911 if (res) {
4912 if (ipr_is_af_dasd_device(res))
4913 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004914 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004915 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004916 sdev->no_uld_attach = 1;
4917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004918 if (ipr_is_vset_device(res)) {
Brian King60654e22014-12-02 12:47:46 -06004919 sdev->scsi_level = SCSI_SPC_3;
Brian King723cd772017-08-18 16:17:32 -05004920 sdev->no_report_opcodes = 1;
Jens Axboe242f9dc2008-09-14 05:55:09 -07004921 blk_queue_rq_timeout(sdev->request_queue,
4922 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004923 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004924 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004925 if (ipr_is_gata(res) && res->sata_port)
4926 ap = res->sata_port->ap;
4927 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4928
4929 if (ap) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004930 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004931 ata_sas_slave_configure(sdev, ap);
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004932 }
4933
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004934 if (ioa_cfg->sis64)
4935 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004936 ipr_format_res_path(ioa_cfg,
4937 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004938 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004939 }
4940 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4941 return 0;
4942}
4943
4944/**
Brian King35a39692006-09-25 12:39:20 -05004945 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4946 * @sdev: scsi device struct
4947 *
4948 * This function initializes an ATA port so that future commands
4949 * sent through queuecommand will work.
4950 *
4951 * Return value:
4952 * 0 on success
4953 **/
4954static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4955{
4956 struct ipr_sata_port *sata_port = NULL;
4957 int rc = -ENXIO;
4958
4959 ENTER;
4960 if (sdev->sdev_target)
4961 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004962 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004963 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004964 if (rc == 0)
4965 rc = ata_sas_sync_probe(sata_port->ap);
4966 }
4967
Brian King35a39692006-09-25 12:39:20 -05004968 if (rc)
4969 ipr_slave_destroy(sdev);
4970
4971 LEAVE;
4972 return rc;
4973}
4974
4975/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004976 * ipr_slave_alloc - Prepare for commands to a device.
4977 * @sdev: scsi device struct
4978 *
4979 * This function saves a pointer to the resource entry
4980 * in the scsi device struct if the device exists. We
4981 * can then use this pointer in ipr_queuecommand when
4982 * handling new commands.
4983 *
4984 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004985 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004986 **/
4987static int ipr_slave_alloc(struct scsi_device *sdev)
4988{
4989 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4990 struct ipr_resource_entry *res;
4991 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004992 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004993
4994 sdev->hostdata = NULL;
4995
4996 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4997
Brian King35a39692006-09-25 12:39:20 -05004998 res = ipr_find_sdev(sdev);
4999 if (res) {
5000 res->sdev = sdev;
5001 res->add_to_ml = 0;
5002 res->in_erp = 0;
5003 sdev->hostdata = res;
5004 if (!ipr_is_naca_model(res))
5005 res->needs_sync_complete = 1;
5006 rc = 0;
5007 if (ipr_is_gata(res)) {
5008 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5009 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005010 }
5011 }
5012
5013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5014
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005015 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005016}
5017
Brian King6cdb0812014-10-30 17:27:10 -05005018/**
5019 * ipr_match_lun - Match function for specified LUN
5020 * @ipr_cmd: ipr command struct
5021 * @device: device to match (sdev)
5022 *
5023 * Returns:
5024 * 1 if command matches sdev / 0 if command does not match sdev
5025 **/
5026static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5027{
5028 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5029 return 1;
5030 return 0;
5031}
5032
5033/**
Brian King439ae282017-03-15 16:58:39 -05005034 * ipr_cmnd_is_free - Check if a command is free or not
5035 * @ipr_cmd: ipr command struct
5036 *
5037 * Returns:
5038 * true / false
5039 **/
5040static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5041{
5042 struct ipr_cmnd *loop_cmd;
5043
5044 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5045 if (loop_cmd == ipr_cmd)
5046 return true;
5047 }
5048
5049 return false;
5050}
5051
5052/**
Brian Kingef97d8a2017-03-15 16:58:41 -05005053 * ipr_match_res - Match function for specified resource entry
5054 * @ipr_cmd: ipr command struct
5055 * @resource: resource entry to match
5056 *
5057 * Returns:
5058 * 1 if command matches the resource entry / 0 if it does not
5059 **/
5060static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5061{
5062 struct ipr_resource_entry *res = resource;
5063
5064 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5065 return 1;
5066 return 0;
5067}
5068
5069/**
Brian King6cdb0812014-10-30 17:27:10 -05005070 * ipr_wait_for_ops - Wait for matching commands to complete
5071 * @ioa_cfg: ioa config struct
5072 * @device: device to match (sdev)
5073 * @match: match function to use
5074 *
5075 * Returns:
5076 * SUCCESS / FAILED
5077 **/
5078static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5079 int (*match)(struct ipr_cmnd *, void *))
5080{
5081 struct ipr_cmnd *ipr_cmd;
Brian King439ae282017-03-15 16:58:39 -05005082 int wait, i;
Brian King6cdb0812014-10-30 17:27:10 -05005083 unsigned long flags;
5084 struct ipr_hrr_queue *hrrq;
5085 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5086 DECLARE_COMPLETION_ONSTACK(comp);
5087
5088 ENTER;
5089 do {
5090 wait = 0;
5091
5092 for_each_hrrq(hrrq, ioa_cfg) {
5093 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005094 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5095 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5096 if (!ipr_cmnd_is_free(ipr_cmd)) {
5097 if (match(ipr_cmd, device)) {
5098 ipr_cmd->eh_comp = &comp;
5099 wait++;
5100 }
Brian King6cdb0812014-10-30 17:27:10 -05005101 }
5102 }
5103 spin_unlock_irqrestore(hrrq->lock, flags);
5104 }
5105
5106 if (wait) {
5107 timeout = wait_for_completion_timeout(&comp, timeout);
5108
5109 if (!timeout) {
5110 wait = 0;
5111
5112 for_each_hrrq(hrrq, ioa_cfg) {
5113 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005114 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5115 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5116 if (!ipr_cmnd_is_free(ipr_cmd)) {
5117 if (match(ipr_cmd, device)) {
5118 ipr_cmd->eh_comp = NULL;
5119 wait++;
5120 }
Brian King6cdb0812014-10-30 17:27:10 -05005121 }
5122 }
5123 spin_unlock_irqrestore(hrrq->lock, flags);
5124 }
5125
5126 if (wait)
5127 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5128 LEAVE;
5129 return wait ? FAILED : SUCCESS;
5130 }
5131 }
5132 } while (wait);
5133
5134 LEAVE;
5135 return SUCCESS;
5136}
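
/*
 * Note on the loop above: the first pass arms the on-stack completion
 * (eh_comp) in every outstanding command that matches, then waits for
 * those commands to finish.  If the wait times out, a second pass
 * disarms eh_comp again and only reports FAILED if matching commands
 * are still outstanding, so a command that completed just after the
 * timeout does not cause a spurious failure.  The outer loop repeats
 * until no matching commands remain.
 */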
5137
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005138static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139{
5140 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005141 unsigned long lock_flags = 0;
5142 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143
5144 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005145 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5146 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005147
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05005148 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005149 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005150 dev_err(&ioa_cfg->pdev->dev,
5151 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005153 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5154 ioa_cfg->sdt_state = GET_DUMP;
5155 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005157 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5158 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5159 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005160
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005161 /* If we got hit with a host reset while we were already resetting
5162 the adapter for some reason, and that reset failed, return FAILED. */
5163 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5164 ipr_trace;
5165 rc = FAILED;
5166 }
5167
5168 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005169 LEAVE;
5170 return rc;
5171}
5172
5173/**
Brian Kingc6513092006-03-29 09:37:43 -06005174 * ipr_device_reset - Reset the device
5175 * @ioa_cfg: ioa config struct
5176 * @res: resource entry struct
5177 *
5178 * This function issues a device reset to the affected device.
5179 * If the device is a SCSI device, a LUN reset will be sent
5180 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05005181 * will be sent. If the device is a SATA device, a PHY reset will
5182 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06005183 *
5184 * Return value:
5185 * 0 on success / non-zero on failure
5186 **/
5187static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5188 struct ipr_resource_entry *res)
5189{
5190 struct ipr_cmnd *ipr_cmd;
5191 struct ipr_ioarcb *ioarcb;
5192 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05005193 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06005194 u32 ioasc;
5195
5196 ENTER;
5197 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5198 ioarcb = &ipr_cmd->ioarcb;
5199 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08005200
5201 if (ipr_cmd->ioa_cfg->sis64) {
5202 regs = &ipr_cmd->i.ata_ioadl.regs;
5203 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5204 } else
5205 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06005206
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005207 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06005208 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5209 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05005210 if (ipr_is_gata(res)) {
5211 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08005212 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05005213 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5214 }
Brian Kingc6513092006-03-29 09:37:43 -06005215
5216 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005217 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005218 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005219 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5220 if (ipr_cmd->ioa_cfg->sis64)
5221 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5222 sizeof(struct ipr_ioasa_gata));
5223 else
5224 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5225 sizeof(struct ipr_ioasa_gata));
5226 }
Brian Kingc6513092006-03-29 09:37:43 -06005227
5228 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005229 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06005230}
5231
5232/**
Brian King35a39692006-09-25 12:39:20 -05005233 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09005234 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05005235 * @classes: class of the attached device
5236 *
Tejun Heocc0680a2007-08-06 18:36:23 +09005237 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05005238 *
5239 * Return value:
5240 * 0 on success / non-zero on failure
5241 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09005242static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07005243 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05005244{
Tejun Heocc0680a2007-08-06 18:36:23 +09005245 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05005246 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5247 struct ipr_resource_entry *res;
5248 unsigned long lock_flags = 0;
Brian Kingef97d8a2017-03-15 16:58:41 -05005249 int rc = -ENXIO, ret;
Brian King35a39692006-09-25 12:39:20 -05005250
5251 ENTER;
5252 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005253 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06005254 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5255 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5256 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5257 }
5258
Brian King35a39692006-09-25 12:39:20 -05005259 res = sata_port->res;
5260 if (res) {
5261 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005262 *classes = res->ata_class;
Brian Kingef97d8a2017-03-15 16:58:41 -05005263 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King35a39692006-09-25 12:39:20 -05005264
Brian Kingef97d8a2017-03-15 16:58:41 -05005265 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5266 if (ret != SUCCESS) {
5267 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5268 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5269 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5270
5271 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5272 }
5273 } else
5274 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5275
Brian King35a39692006-09-25 12:39:20 -05005276 LEAVE;
5277 return rc;
5278}
5279
5280/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005281 * ipr_eh_dev_reset - Reset the device
5282 * @scsi_cmd: scsi command struct
5283 *
5284 * This function issues a device reset to the affected device.
5285 * A LUN reset will be sent to the device first. If that does
5286 * not work, a target reset will be sent.
5287 *
5288 * Return value:
5289 * SUCCESS / FAILED
5290 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005291static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005292{
5293 struct ipr_cmnd *ipr_cmd;
5294 struct ipr_ioa_cfg *ioa_cfg;
5295 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05005296 struct ata_port *ap;
Brian King439ae282017-03-15 16:58:39 -05005297 int rc = 0, i;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005298 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005299
5300 ENTER;
5301 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5302 res = scsi_cmd->device->hostdata;
5303
Linus Torvalds1da177e2005-04-16 15:20:36 -07005304 /*
5305 * If we are currently going through reset/reload, return failed. This will force the
5306 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5307 * reset to complete
5308 */
5309 if (ioa_cfg->in_reset_reload)
5310 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005311 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005312 return FAILED;
5313
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005314 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005315 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005316 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5317 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5318
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005319 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Brian King960e9642017-03-15 16:58:37 -05005320 if (!ipr_cmd->qc)
5321 continue;
Brian King439ae282017-03-15 16:58:39 -05005322 if (ipr_cmnd_is_free(ipr_cmd))
5323 continue;
Brian King960e9642017-03-15 16:58:37 -05005324
5325 ipr_cmd->done = ipr_sata_eh_done;
5326 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005327 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5328 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5329 }
Brian King7402ece2006-11-21 10:28:23 -06005330 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005331 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005332 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005333 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005335 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005336
5337 if (ipr_is_gata(res) && res->sata_port) {
5338 ap = res->sata_port->ap;
5339 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005340 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005341 spin_lock_irq(scsi_cmd->device->host->host_lock);
5342 } else
5343 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005344 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06005345 res->reset_occurred = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005346
Linus Torvalds1da177e2005-04-16 15:20:36 -07005347 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005348 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005349}
5350
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005351static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005352{
5353 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005354 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingef97d8a2017-03-15 16:58:41 -05005355 struct ipr_resource_entry *res;
Brian King6cdb0812014-10-30 17:27:10 -05005356
5357 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
Brian Kingef97d8a2017-03-15 16:58:41 -05005358 res = cmd->device->hostdata;
5359
5360 if (!res)
5361 return FAILED;
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005362
5363 spin_lock_irq(cmd->device->host->host_lock);
5364 rc = __ipr_eh_dev_reset(cmd);
5365 spin_unlock_irq(cmd->device->host->host_lock);
5366
Brian Kingef97d8a2017-03-15 16:58:41 -05005367 if (rc == SUCCESS) {
5368 if (ipr_is_gata(res) && res->sata_port)
5369 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5370 else
5371 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5372 }
Brian King6cdb0812014-10-30 17:27:10 -05005373
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005374 return rc;
5375}
5376
Linus Torvalds1da177e2005-04-16 15:20:36 -07005377/**
5378 * ipr_bus_reset_done - Op done function for bus reset.
5379 * @ipr_cmd: ipr command struct
5380 *
5381 * This function is the op done function for a bus reset
5382 *
5383 * Return value:
5384 * none
5385 **/
5386static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5387{
5388 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5389 struct ipr_resource_entry *res;
5390
5391 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005392 if (!ioa_cfg->sis64)
5393 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5394 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5395 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5396 break;
5397 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005398 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005399
5400 /*
5401 * If abort has not completed, indicate the reset has, else call the
5402 * abort's done function to wake the sleeping eh thread
5403 */
5404 if (ipr_cmd->sibling->sibling)
5405 ipr_cmd->sibling->sibling = NULL;
5406 else
5407 ipr_cmd->sibling->done(ipr_cmd->sibling);
5408
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005409 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005410 LEAVE;
5411}
5412
5413/**
5414 * ipr_abort_timeout - An abort task has timed out
5415 * @t: timer context used to fetch the ipr command struct
5416 *
5417 * This function handles when an abort task times out. If this
5418 * happens we issue a bus reset since we have resources tied
5419 * up that must be freed before returning to the midlayer.
5420 *
5421 * Return value:
5422 * none
5423 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07005424static void ipr_abort_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005425{
Kees Cook738c6ec2017-08-18 16:53:24 -07005426 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005427 struct ipr_cmnd *reset_cmd;
5428 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5429 struct ipr_cmd_pkt *cmd_pkt;
5430 unsigned long lock_flags = 0;
5431
5432 ENTER;
5433 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5434 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5435 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5436 return;
5437 }
5438
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005439 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005440 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5441 ipr_cmd->sibling = reset_cmd;
5442 reset_cmd->sibling = ipr_cmd;
5443 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5444 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5445 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5446 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5447 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5448
5449 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5451 LEAVE;
5452}
5453
5454/**
5455 * ipr_cancel_op - Cancel specified op
5456 * @scsi_cmd: scsi command struct
5457 *
5458 * This function cancels specified op.
5459 *
5460 * Return value:
5461 * SUCCESS / FAILED
5462 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005463static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005464{
5465 struct ipr_cmnd *ipr_cmd;
5466 struct ipr_ioa_cfg *ioa_cfg;
5467 struct ipr_resource_entry *res;
5468 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005469 u32 ioasc, int_reg;
Brian King439ae282017-03-15 16:58:39 -05005470 int i, op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005471 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005472
5473 ENTER;
5474 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5475 res = scsi_cmd->device->hostdata;
5476
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005477 /* If we are currently going through reset/reload, return failed.
5478 * This will force the mid-layer to call ipr_eh_host_reset,
5479 * which will then go to sleep and wait for the reset to complete
5480 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005481 if (ioa_cfg->in_reset_reload ||
5482 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005483 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005484 if (!res)
5485 return FAILED;
5486
5487 /*
5488 * If we are aborting a timed out op, chances are that the timeout was caused
5489 * by an EEH error that has not yet been detected. In such cases, reading a register will
5490 * trigger the EEH recovery infrastructure.
5491 */
5492 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5493
5494 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005495 return FAILED;
5496
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005497 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005498 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005499 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5500 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5501 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5502 op_found = 1;
5503 break;
5504 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005505 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005507 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005508 }
5509
5510 if (!op_found)
5511 return SUCCESS;
5512
5513 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005514 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005515 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5516 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5517 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5518 ipr_cmd->u.sdev = scsi_cmd->device;
5519
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005520 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5521 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005522 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005523 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005524
5525 /*
5526 * If the abort task timed out and we sent a bus reset, we will get
5527 * one of the following responses to the abort
5528 */
5529 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5530 ioasc = 0;
5531 ipr_trace;
5532 }
5533
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005534 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005535 if (!ipr_is_naca_model(res))
5536 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005537
5538 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005539 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005540}
5541
5542/**
5543 * ipr_scan_finished - Report whether the device scan has finished
5544 * @shost: scsi host struct
 * @elapsed_time: elapsed scan time in jiffies
5545 *
5546 * Return value:
Brian Kingf688f962014-12-02 12:47:37 -06005547 * 0 if scan in progress / 1 if scan is complete
5548 **/
5549static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5550{
5551 unsigned long lock_flags;
5552 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5553 int rc = 0;
5554
5555 spin_lock_irqsave(shost->host_lock, lock_flags);
5556 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5557 rc = 1;
5558 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5559 rc = 1;
5560 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5561 return rc;
5562}
5563
5564/**
5565 * ipr_eh_abort - Abort a single op
5566 * @scsi_cmd: scsi command struct
5567 *
5568 * Return value:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005569 * SUCCESS / FAILED
5570 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005571static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005572{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005573 unsigned long flags;
5574 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005575 struct ipr_ioa_cfg *ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005576
5577 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005578
Brian King6cdb0812014-10-30 17:27:10 -05005579 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5580
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005581 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5582 rc = ipr_cancel_op(scsi_cmd);
5583 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005584
Brian King6cdb0812014-10-30 17:27:10 -05005585 if (rc == SUCCESS)
5586 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005588 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005589}
5590
5591/**
5592 * ipr_handle_other_interrupt - Handle "other" interrupts
5593 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005594 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005595 *
5596 * Return value:
5597 * IRQ_NONE / IRQ_HANDLED
5598 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005599static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005600 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005601{
5602 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005603 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005604
Wayne Boyer7dacb642011-04-12 10:29:02 -07005605 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5606 int_reg &= ~int_mask_reg;
5607
5608	 /* If no adapter operational interrupt is pending, ignore it, unless
5609	 * this is SIS-64, where we first check for an IPL stage change interrupt.
5610 */
5611 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5612 if (ioa_cfg->sis64) {
5613 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5614 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5615 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5616
5617 /* clear stage change */
5618 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5619 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5620 list_del(&ioa_cfg->reset_cmd->queue);
5621 del_timer(&ioa_cfg->reset_cmd->timer);
5622 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5623 return IRQ_HANDLED;
5624 }
5625 }
5626
5627 return IRQ_NONE;
5628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629
5630 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5631 /* Mask the interrupt */
5632 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5634
5635 list_del(&ioa_cfg->reset_cmd->queue);
5636 del_timer(&ioa_cfg->reset_cmd->timer);
5637 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005638 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005639 if (ioa_cfg->clear_isr) {
5640 if (ipr_debug && printk_ratelimit())
5641 dev_err(&ioa_cfg->pdev->dev,
5642 "Spurious interrupt detected. 0x%08X\n", int_reg);
5643 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5644 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5645 return IRQ_NONE;
5646 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647 } else {
5648 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5649 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005650 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5651 dev_err(&ioa_cfg->pdev->dev,
5652 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005653 else
5654 dev_err(&ioa_cfg->pdev->dev,
5655 "Permanent IOA failure. 0x%08X\n", int_reg);
5656
5657 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5658 ioa_cfg->sdt_state = GET_DUMP;
5659
5660 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5661 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5662 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005663
Linus Torvalds1da177e2005-04-16 15:20:36 -07005664 return rc;
5665}
5666
5667/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005668 * ipr_isr_eh - Interrupt service routine error handler
5669 * @ioa_cfg: ioa config struct
5670 * @msg: message to log
 * @number: value logged along with @msg
5671 *
5672 * Return value:
5673 * none
5674 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005675static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005676{
5677 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005678 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005679
5680 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5681 ioa_cfg->sdt_state = GET_DUMP;
5682
5683 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5684}
5685
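/**
 * ipr_process_hrrq - Process responses on a host request response queue
 * @hrr_queue:	HRR queue to drain
 * @budget:	maximum number of responses to process, or -1 for no limit
 * @doneq:	list to which completed commands are moved
 *
 * Walks the HRRQ while the toggle bit of the current entry matches the
 * host's expected value, moving each completed command onto @doneq.
 *
 * Return value:
 * 	number of responses processed
 **/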
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005686static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005687 struct list_head *doneq)
5688{
5689 u32 ioasc;
5690 u16 cmd_index;
5691 struct ipr_cmnd *ipr_cmd;
5692 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5693 int num_hrrq = 0;
5694
5695 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005696 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005697 return 0;
5698
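	/*
	 * An HRRQ entry is new only while its toggle bit matches the value in
	 * hrr_queue->toggle_bit; the expected value is flipped each time the
	 * queue wraps, so stale entries from the previous pass are ignored.
	 */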
5699 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5700 hrr_queue->toggle_bit) {
5701
5702 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5703 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5704 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5705
5706 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5707 cmd_index < hrr_queue->min_cmd_id)) {
5708 ipr_isr_eh(ioa_cfg,
5709 "Invalid response handle from IOA: ",
5710 cmd_index);
5711 break;
5712 }
5713
5714 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5715 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5716
5717 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5718
5719 list_move_tail(&ipr_cmd->queue, doneq);
5720
5721 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5722 hrr_queue->hrrq_curr++;
5723 } else {
5724 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5725 hrr_queue->toggle_bit ^= 1u;
5726 }
5727 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005728 if (budget > 0 && num_hrrq >= budget)
5729 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005730 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005731
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005732 return num_hrrq;
5733}
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005734
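/**
 * ipr_iopoll - irq_poll handler used for interrupt mitigation
 * @iop:	irq_poll structure embedded in the HRR queue
 * @budget:	maximum number of completions to process per poll
 *
 * Drains up to @budget responses from the HRR queue under the queue lock,
 * completes the poll when fewer than @budget were found, then runs the
 * fast done handlers for the completed commands outside the lock.
 *
 * Return value:
 * 	number of completed ops
 **/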
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005735static int ipr_iopoll(struct irq_poll *iop, int budget)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005736{
5737 struct ipr_ioa_cfg *ioa_cfg;
5738 struct ipr_hrr_queue *hrrq;
5739 struct ipr_cmnd *ipr_cmd, *temp;
5740 unsigned long hrrq_flags;
5741 int completed_ops;
5742 LIST_HEAD(doneq);
5743
5744 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5745 ioa_cfg = hrrq->ioa_cfg;
5746
5747 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5748 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5749
5750 if (completed_ops < budget)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005751 irq_poll_complete(iop);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005752 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5753
5754 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5755 list_del(&ipr_cmd->queue);
5756 del_timer(&ipr_cmd->timer);
5757 ipr_cmd->fast_done(ipr_cmd);
5758 }
5759
5760 return completed_ops;
5761}
5762
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005763/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005764 * ipr_isr - Interrupt service routine
5765 * @irq: irq number
5766 * @devp: pointer to the HRR queue being serviced
Linus Torvalds1da177e2005-04-16 15:20:36 -07005767 *
5768 * Return value:
5769 * IRQ_NONE / IRQ_HANDLED
5770 **/
David Howells7d12e782006-10-05 14:55:46 +01005771static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005772{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005773 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5774 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005775 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005776 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005777 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005778 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005779 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005781 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005782
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005783 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005784 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005785 if (!hrrq->allow_interrupts) {
5786 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005787 return IRQ_NONE;
5788 }
5789
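	/*
	 * Drain the HRRQ and re-clear the interrupt until no new responses
	 * arrive. If HRRQ_UPDATED will not clear after IPR_MAX_HRRQ_RETRIES
	 * attempts, ipr_isr_eh() is called below to reset the adapter.
	 */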
Linus Torvalds1da177e2005-04-16 15:20:36 -07005790 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005791 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5792 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005793
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005794 if (!ioa_cfg->clear_isr)
5795 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005796
Linus Torvalds1da177e2005-04-16 15:20:36 -07005797 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005798 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005799 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005800 writel(IPR_PCII_HRRQ_UPDATED,
5801 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005802 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005803 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005804 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005805
Wayne Boyer7dacb642011-04-12 10:29:02 -07005806 } else if (rc == IRQ_NONE && irq_none == 0) {
5807 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5808 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005809 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5810 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005811 ipr_isr_eh(ioa_cfg,
5812 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005813 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005814 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005815 } else
5816 break;
5817 }
5818
5819 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005820 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005821
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005822 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005823 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5824 list_del(&ipr_cmd->queue);
5825 del_timer(&ipr_cmd->timer);
5826 ipr_cmd->fast_done(ipr_cmd);
5827 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005828 return rc;
5829}
Brian King172cd6e2012-07-17 08:14:40 -05005830
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005831/**
5832 * ipr_isr_mhrrq - Interrupt service routine
5833 * @irq: irq number
5834 * @devp: pointer to the HRR queue being serviced
5835 *
5836 * Return value:
5837 * IRQ_NONE / IRQ_HANDLED
5838 **/
5839static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5840{
5841 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005842 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005843 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005844 struct ipr_cmnd *ipr_cmd, *temp;
5845 irqreturn_t rc = IRQ_NONE;
5846 LIST_HEAD(doneq);
5847
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005848 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005849
5850 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005851 if (!hrrq->allow_interrupts) {
5852 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005853 return IRQ_NONE;
5854 }
5855
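	/*
	 * With iopoll enabled on SIS-64 and multiple vectors, only schedule
	 * the irq_poll handler when a new response is visible; otherwise
	 * drain the queue directly in interrupt context below.
	 */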
Jens Axboe89f8b332014-03-13 09:38:42 -06005856 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005857 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5858 hrrq->toggle_bit) {
Christoph Hellwigea511902015-12-07 06:41:11 -08005859 irq_poll_sched(&hrrq->iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005860 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5861 return IRQ_HANDLED;
5862 }
5863 } else {
5864 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5865 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005866
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005867 if (ipr_process_hrrq(hrrq, -1, &doneq))
5868 rc = IRQ_HANDLED;
5869 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005870
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005871 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005872
5873 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5874 list_del(&ipr_cmd->queue);
5875 del_timer(&ipr_cmd->timer);
5876 ipr_cmd->fast_done(ipr_cmd);
5877 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005878 return rc;
5879}
5880
5881/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005882 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005883 * @ioa_cfg: ioa config struct
5884 * @ipr_cmd: ipr command struct
5885 *
5886 * Return value:
5887 * 0 on success / -1 on failure
5888 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005889static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5890 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005891{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005892 int i, nseg;
5893 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005894 u32 length;
5895 u32 ioadl_flags = 0;
5896 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5897 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005898 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005899
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005900 length = scsi_bufflen(scsi_cmd);
5901 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005902 return 0;
5903
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005904 nseg = scsi_dma_map(scsi_cmd);
5905 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005906 if (printk_ratelimit())
Anton Blanchardd73341b2014-10-30 17:27:08 -05005907 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005908 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005909 }
5910
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005911 ipr_cmd->dma_use_sg = nseg;
5912
Wayne Boyer438b0332010-05-10 09:13:00 -07005913 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005914 ioarcb->ioadl_len =
5915 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005916
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005917 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5918 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5919 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005920 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5921 ioadl_flags = IPR_IOADL_FLAGS_READ;
5922
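	/* One IOADL64 descriptor (flags, length, 64-bit DMA address) is built
	 * per scatter/gather element; the last descriptor is tagged with
	 * IPR_IOADL_FLAGS_LAST below to terminate the list.
	 */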
5923 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5924 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5925 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5926 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5927 }
5928
5929 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5930 return 0;
5931}
5932
5933/**
5934 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5935 * @ioa_cfg: ioa config struct
5936 * @ipr_cmd: ipr command struct
5937 *
5938 * Return value:
5939 * 0 on success / -1 on failure
5940 **/
5941static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5942 struct ipr_cmnd *ipr_cmd)
5943{
5944 int i, nseg;
5945 struct scatterlist *sg;
5946 u32 length;
5947 u32 ioadl_flags = 0;
5948 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5949 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5950 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5951
5952 length = scsi_bufflen(scsi_cmd);
5953 if (!length)
5954 return 0;
5955
5956 nseg = scsi_dma_map(scsi_cmd);
5957 if (nseg < 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05005958 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
Wayne Boyera32c0552010-02-19 13:23:36 -08005959 return -1;
5960 }
5961
5962 ipr_cmd->dma_use_sg = nseg;
5963
5964 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5965 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5966 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5967 ioarcb->data_transfer_length = cpu_to_be32(length);
5968 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005969 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5970 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5971 ioadl_flags = IPR_IOADL_FLAGS_READ;
5972 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5973 ioarcb->read_ioadl_len =
5974 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5975 }
5976
Wayne Boyera32c0552010-02-19 13:23:36 -08005977 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5978 ioadl = ioarcb->u.add_data.u.ioadl;
5979 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5980 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005981 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5982 }
5983
5984 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5985 ioadl[i].flags_and_data_len =
5986 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5987 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5988 }
5989
5990 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5991 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005992}
5993
5994/**
Brian Kingf646f322017-03-15 16:58:39 -05005995 * __ipr_erp_done - Process completion of ERP for a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005996 * @ipr_cmd: ipr command struct
5997 *
5998 * This function copies the sense buffer into the scsi_cmd
5999 * struct and calls the scsi_done function.
6000 *
6001 * Return value:
6002 * nothing
6003 **/
Brian Kingf646f322017-03-15 16:58:39 -05006004static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006005{
6006 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6007 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006008 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006009
6010 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6011 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06006012 scmd_printk(KERN_ERR, scsi_cmd,
6013 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006014 } else {
6015 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6016 SCSI_SENSE_BUFFERSIZE);
6017 }
6018
6019 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006020 if (!ipr_is_naca_model(res))
6021 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006022 res->in_erp = 0;
6023 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006024 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006025 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006026 if (ipr_cmd->eh_comp)
6027 complete(ipr_cmd->eh_comp);
6028 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006029}
6030
6031/**
Brian Kingf646f322017-03-15 16:58:39 -05006032 * ipr_erp_done - Process completion of ERP for a device
6033 * @ipr_cmd: ipr command struct
6034 *
6035 * This function grabs the HRR queue lock and calls __ipr_erp_done to
6036 * copy the sense buffer into the scsi_cmd struct and complete the command.
6037 *
6038 * Return value:
6039 * nothing
6040 **/
6041static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6042{
6043 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6044 unsigned long hrrq_flags;
6045
6046 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6047 __ipr_erp_done(ipr_cmd);
6048 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006049}
6050
6051/**
6052 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6053 * @ipr_cmd: ipr command struct
6054 *
6055 * Return value:
6056 * none
6057 **/
6058static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6059{
Brian King51b1c7e2007-03-29 12:43:50 -05006060 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006061 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08006062 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006063
6064 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08006065 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006066 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006067 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006068 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006069 ioasa->hdr.ioasc = 0;
6070 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006071
6072 if (ipr_cmd->ioa_cfg->sis64)
6073 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6074 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6075 else {
6076 ioarcb->write_ioadl_addr =
6077 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6078 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6079 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080}
6081
6082/**
Brian Kingf646f322017-03-15 16:58:39 -05006083 * __ipr_erp_request_sense - Send request sense to a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006084 * @ipr_cmd: ipr command struct
6085 *
6086 * This function sends a request sense to a device as a result
6087 * of a check condition.
6088 *
6089 * Return value:
6090 * nothing
6091 **/
Brian Kingf646f322017-03-15 16:58:39 -05006092static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006093{
6094 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006095 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006096
6097 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
Brian Kingf646f322017-03-15 16:58:39 -05006098 __ipr_erp_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006099 return;
6100 }
6101
6102 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6103
6104 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6105 cmd_pkt->cdb[0] = REQUEST_SENSE;
6106 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6107 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6108 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6109 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6110
Wayne Boyera32c0552010-02-19 13:23:36 -08006111 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6112 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006113
6114 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6115 IPR_REQUEST_SENSE_TIMEOUT * 2);
6116}
6117
6118/**
Brian Kingf646f322017-03-15 16:58:39 -05006119 * ipr_erp_request_sense - Send request sense to a device
6120 * @ipr_cmd: ipr command struct
6121 *
6122 * This function grabs the HRR queue lock and calls __ipr_erp_request_sense
6123 * to send a request sense to the device after a check condition.
6124 *
6125 * Return value:
6126 * nothing
6127 **/
6128static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6129{
6130 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6131 unsigned long hrrq_flags;
6132
6133 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6134 __ipr_erp_request_sense(ipr_cmd);
6135 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6136}
6137
6138/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006139 * ipr_erp_cancel_all - Send cancel all to a device
6140 * @ipr_cmd: ipr command struct
6141 *
6142 * This function sends a cancel all to a device to clear the
6143 * queue. If we are running TCQ on the device, QERR is set to 1,
6144 * which means all outstanding ops have been dropped on the floor.
6145 * Cancel all will return them to us.
6146 *
6147 * Return value:
6148 * nothing
6149 **/
6150static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6151{
6152 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6153 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6154 struct ipr_cmd_pkt *cmd_pkt;
6155
6156 res->in_erp = 1;
6157
6158 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6159
Christoph Hellwig17ea0122014-11-24 15:36:20 +01006160 if (!scsi_cmd->device->simple_tags) {
Brian Kingf646f322017-03-15 16:58:39 -05006161 __ipr_erp_request_sense(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006162 return;
6163 }
6164
6165 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6166 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6167 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6168
6169 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6170 IPR_CANCEL_ALL_TIMEOUT);
6171}
6172
6173/**
6174 * ipr_dump_ioasa - Dump contents of IOASA
6175 * @ioa_cfg: ioa config struct
6176 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06006177 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006178 *
6179 * This function is invoked by the interrupt handler when ops
6180 * fail. It will log the IOASA if appropriate. Only called
6181 * for GPDD ops.
6182 *
6183 * Return value:
6184 * none
6185 **/
6186static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06006187 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006188{
6189 int i;
6190 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05006191 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006192 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006193 __be32 *ioasa_data = (__be32 *)ioasa;
6194 int error_index;
6195
Wayne Boyer96d21f02010-05-10 09:13:27 -07006196 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6197 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006198
6199 if (0 == ioasc)
6200 return;
6201
6202 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6203 return;
6204
Brian Kingb0692dd2007-03-29 12:43:09 -05006205 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6206 error_index = ipr_get_error(fd_ioasc);
6207 else
6208 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006209
6210 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6211 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07006212 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006213 return;
6214
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006215 if (!ipr_is_gscsi(res))
6216 return;
6217
Linus Torvalds1da177e2005-04-16 15:20:36 -07006218 if (ipr_error_table[error_index].log_ioasa == 0)
6219 return;
6220 }
6221
Brian Kingfe964d02006-03-29 09:37:29 -06006222 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006223
Wayne Boyer96d21f02010-05-10 09:13:27 -07006224 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6225 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6226 data_len = sizeof(struct ipr_ioasa64);
6227 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006228 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006229
6230 ipr_err("IOASA Dump:\n");
6231
6232 for (i = 0; i < data_len / 4; i += 4) {
6233 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6234 be32_to_cpu(ioasa_data[i]),
6235 be32_to_cpu(ioasa_data[i+1]),
6236 be32_to_cpu(ioasa_data[i+2]),
6237 be32_to_cpu(ioasa_data[i+3]));
6238 }
6239}
6240
6241/**
6242 * ipr_gen_sense - Generate SCSI sense data from an IOASA
6243 * @ipr_cmd: ipr command struct
6245 *
6246 * Return value:
6247 * none
6248 **/
6249static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6250{
6251 u32 failing_lba;
6252 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6253 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006254 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6255 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006256
6257 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6258
6259 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6260 return;
6261
6262 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6263
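	/* Media errors on a vset device with a 64-bit failing LBA are reported
	 * in descriptor-format sense (response code 0x72); all other cases use
	 * fixed-format sense (0x70) built in the else branch.
	 */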
6264 if (ipr_is_vset_device(res) &&
6265 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6266 ioasa->u.vset.failing_lba_hi != 0) {
6267 sense_buf[0] = 0x72;
6268 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6269 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6270 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6271
6272 sense_buf[7] = 12;
6273 sense_buf[8] = 0;
6274 sense_buf[9] = 0x0A;
6275 sense_buf[10] = 0x80;
6276
6277 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6278
6279 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6280 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6281 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6282 sense_buf[15] = failing_lba & 0x000000ff;
6283
6284 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6285
6286 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6287 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6288 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6289 sense_buf[19] = failing_lba & 0x000000ff;
6290 } else {
6291 sense_buf[0] = 0x70;
6292 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6293 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6294 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6295
6296 /* Illegal request */
6297 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07006298 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006299 sense_buf[7] = 10; /* additional length */
6300
6301 /* IOARCB was in error */
6302 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6303 sense_buf[15] = 0xC0;
6304 else /* Parameter data was invalid */
6305 sense_buf[15] = 0x80;
6306
6307 sense_buf[16] =
6308 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006309 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006310 sense_buf[17] =
6311 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006312 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006313 } else {
6314 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6315 if (ipr_is_vset_device(res))
6316 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6317 else
6318 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6319
6320 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6321 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6322 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6323 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6324 sense_buf[6] = failing_lba & 0x000000ff;
6325 }
6326
6327 sense_buf[7] = 6; /* additional length */
6328 }
6329 }
6330}
6331
6332/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006333 * ipr_get_autosense - Copy autosense data to sense buffer
6334 * @ipr_cmd: ipr command struct
6335 *
6336 * This function copies the autosense buffer to the buffer
6337 * in the scsi_cmd, if there is autosense available.
6338 *
6339 * Return value:
6340 * 1 if autosense was available / 0 if not
6341 **/
6342static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6343{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006344 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6345 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006346
Wayne Boyer96d21f02010-05-10 09:13:27 -07006347 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006348 return 0;
6349
Wayne Boyer96d21f02010-05-10 09:13:27 -07006350 if (ipr_cmd->ioa_cfg->sis64)
6351 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6352 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6353 SCSI_SENSE_BUFFERSIZE));
6354 else
6355 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6356 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6357 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006358 return 1;
6359}
6360
6361/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006362 * ipr_erp_start - Process an error response for a SCSI op
6363 * @ioa_cfg: ioa config struct
6364 * @ipr_cmd: ipr command struct
6365 *
6366 * This function determines whether or not to initiate ERP
6367 * on the affected device.
6368 *
6369 * Return value:
6370 * nothing
6371 **/
6372static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6373 struct ipr_cmnd *ipr_cmd)
6374{
6375 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6376 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006377 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05006378 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006379
6380 if (!res) {
Brian Kingf646f322017-03-15 16:58:39 -05006381 __ipr_scsi_eh_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382 return;
6383 }
6384
Brian King8a048992007-04-26 16:00:10 -05006385 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006386 ipr_gen_sense(ipr_cmd);
6387
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006388 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6389
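	/*
	 * Map the masked IOASC onto a mid-layer result (retry, no connect,
	 * pass-through or error) and decide whether this device needs a
	 * sync complete before its next command.
	 */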
Brian King8a048992007-04-26 16:00:10 -05006390 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006391 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006392 if (ipr_is_naca_model(res))
6393 scsi_cmd->result |= (DID_ABORT << 16);
6394 else
6395 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006396 break;
6397 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006398 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006399 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6400 break;
6401 case IPR_IOASC_HW_SEL_TIMEOUT:
6402 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006403 if (!ipr_is_naca_model(res))
6404 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006405 break;
6406 case IPR_IOASC_SYNC_REQUIRED:
6407 if (!res->in_erp)
6408 res->needs_sync_complete = 1;
6409 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6410 break;
6411 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006412 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Mauricio Faria de Oliveira785a4702017-04-11 11:46:04 -03006413 /*
6414 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6415 * so SCSI mid-layer and upper layers handle it accordingly.
6416 */
6417 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6418 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006419 break;
6420 case IPR_IOASC_BUS_WAS_RESET:
6421 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6422 /*
6423 * Report the bus reset and ask for a retry. The device
6424 * will give CC/UA the next command.
6425 */
6426 if (!res->resetting_device)
6427 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6428 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006429 if (!ipr_is_naca_model(res))
6430 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006431 break;
6432 case IPR_IOASC_HW_DEV_BUS_STATUS:
6433 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6434 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006435 if (!ipr_get_autosense(ipr_cmd)) {
6436 if (!ipr_is_naca_model(res)) {
6437 ipr_erp_cancel_all(ipr_cmd);
6438 return;
6439 }
6440 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006441 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006442 if (!ipr_is_naca_model(res))
6443 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006444 break;
6445 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6446 break;
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006447 case IPR_IOASC_IR_NON_OPTIMIZED:
6448 if (res->raw_mode) {
6449 res->raw_mode = 0;
6450 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6451 } else
6452 scsi_cmd->result |= (DID_ERROR << 16);
6453 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006454 default:
Brian King5b7304f2006-08-02 14:57:51 -05006455 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6456 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006457 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006458 res->needs_sync_complete = 1;
6459 break;
6460 }
6461
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006462 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006463 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006464 if (ipr_cmd->eh_comp)
6465 complete(ipr_cmd->eh_comp);
6466 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006467}
6468
6469/**
6470 * ipr_scsi_done - mid-layer done function
6471 * @ipr_cmd: ipr command struct
6472 *
6473 * This function is invoked by the interrupt handler for
6474 * ops generated by the SCSI mid-layer
6475 *
6476 * Return value:
6477 * none
6478 **/
6479static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6480{
6481 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6482 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006483 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King36b8e182015-07-14 11:41:29 -05006484 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006485
Wayne Boyer96d21f02010-05-10 09:13:27 -07006486 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006487
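	/* Fast path: no sense data, so complete the command under the per-HRRQ
	 * lock only. The error path must also take the host lock because
	 * ipr_erp_start() may issue new commands.
	 */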
6488 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006489 scsi_dma_unmap(scsi_cmd);
6490
Brian King36b8e182015-07-14 11:41:29 -05006491 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006492 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006493 if (ipr_cmd->eh_comp)
6494 complete(ipr_cmd->eh_comp);
6495 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King36b8e182015-07-14 11:41:29 -05006496 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006497 } else {
Brian King36b8e182015-07-14 11:41:29 -05006498 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6499 spin_lock(&ipr_cmd->hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006500 ipr_erp_start(ioa_cfg, ipr_cmd);
Brian King36b8e182015-07-14 11:41:29 -05006501 spin_unlock(&ipr_cmd->hrrq->_lock);
6502 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006503 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006504}
6505
6506/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006507 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006508 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006509 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006510 *
6511 * This function queues a request generated by the mid-layer.
6512 *
6513 * Return value:
6514 * 0 on success
6515 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6516 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6517 **/
Brian King00bfef22012-07-17 08:13:52 -05006518static int ipr_queuecommand(struct Scsi_Host *shost,
6519 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006520{
6521 struct ipr_ioa_cfg *ioa_cfg;
6522 struct ipr_resource_entry *res;
6523 struct ipr_ioarcb *ioarcb;
6524 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006525 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006526 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006527 struct ipr_hrr_queue *hrrq;
6528 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006529
Brian King00bfef22012-07-17 08:13:52 -05006530 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6531
Linus Torvalds1da177e2005-04-16 15:20:36 -07006532 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006533 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006534
6535 if (ipr_is_gata(res) && res->sata_port) {
6536 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6537 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6538 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6539 return rc;
6540 }
6541
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006542 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6543 hrrq = &ioa_cfg->hrrq[hrrq_id];
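	/* Select one of the adapter's HRR queues for this command; all locking
	 * below is against that queue's lock rather than the host lock.
	 */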
Linus Torvalds1da177e2005-04-16 15:20:36 -07006544
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006545 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006546 /*
6547 * We are currently blocking all devices due to a host reset
6548 * We have told the host to stop giving us new requests, but
6549 * ERP ops don't count. FIXME
6550 */
Brian Kingbfae7822013-01-30 23:45:08 -06006551 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006552 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006553 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006554 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006555
6556 /*
6557 * FIXME - Create scsi_set_host_offline interface
6558 * and the ioa_is_dead check can be removed
6559 */
Brian Kingbfae7822013-01-30 23:45:08 -06006560 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006561 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006562 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006563 }
6564
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006565 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6566 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006567 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006568 return SCSI_MLQUEUE_HOST_BUSY;
6569 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006570 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006571
Brian King172cd6e2012-07-17 08:14:40 -05006572 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006573 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574
6575 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6576 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006577 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006578
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006579 if (ipr_is_gscsi(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006580 if (scsi_cmd->underflow == 0)
6581 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6582
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006583 if (res->reset_occurred) {
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006584 res->reset_occurred = 0;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006585 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006586 }
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006587 }
6588
6589 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6590 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6591
Linus Torvalds1da177e2005-04-16 15:20:36 -07006592 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
Christoph Hellwig50668632014-10-30 14:30:06 +01006593 if (scsi_cmd->flags & SCMD_TAGGED)
6594 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6595 else
6596 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597 }
6598
6599 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006600 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006602 }
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006603 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006604 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006605
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006606 if (scsi_cmd->underflow == 0)
6607 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6608 }
6609
Dan Carpenterd12f1572012-07-30 11:18:22 +03006610 if (ioa_cfg->sis64)
6611 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6612 else
6613 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006614
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006615 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6616 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006617 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006618 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006619 if (!rc)
6620 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006621 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006622 }
6623
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006624 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006625 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006626 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006627 scsi_dma_unmap(scsi_cmd);
6628 goto err_nodev;
6629 }
6630
6631 ioarcb->res_handle = res->res_handle;
6632 if (res->needs_sync_complete) {
6633 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6634 res->needs_sync_complete = 0;
6635 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006636 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006637 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006638 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006639 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006640 return 0;
6641
6642err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006643 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006644 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6645 scsi_cmd->result = (DID_NO_CONNECT << 16);
6646 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006647 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006648 return 0;
6649}
6650
6651/**
Brian King35a39692006-09-25 12:39:20 -05006652 * ipr_ioctl - IOCTL handler
6653 * @sdev: scsi device struct
6654 * @cmd: IOCTL cmd
6655 * @arg: IOCTL arg
6656 *
6657 * Return value:
6658 * 0 on success / other on failure
6659 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006660static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006661{
6662 struct ipr_resource_entry *res;
6663
6664 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006665 if (res && ipr_is_gata(res)) {
6666 if (cmd == HDIO_GET_IDENTITY)
6667 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006668 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006669 }
Brian King35a39692006-09-25 12:39:20 -05006670
6671 return -EINVAL;
6672}
6673
6674/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006675 * ipr_ioa_info - Get information about the card/driver
 6676 * @host: scsi host struct
6677 *
6678 * Return value:
6679 * pointer to buffer with description string
6680 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006681static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006682{
6683 static char buffer[512];
6684 struct ipr_ioa_cfg *ioa_cfg;
6685 unsigned long lock_flags = 0;
6686
6687 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6688
6689 spin_lock_irqsave(host->host_lock, lock_flags);
6690 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6691 spin_unlock_irqrestore(host->host_lock, lock_flags);
6692
6693 return buffer;
6694}
6695
6696static struct scsi_host_template driver_template = {
6697 .module = THIS_MODULE,
6698 .name = "IPR",
6699 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006700 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006701 .queuecommand = ipr_queuecommand,
6702 .eh_abort_handler = ipr_eh_abort,
6703 .eh_device_reset_handler = ipr_eh_dev_reset,
6704 .eh_host_reset_handler = ipr_eh_host_reset,
6705 .slave_alloc = ipr_slave_alloc,
6706 .slave_configure = ipr_slave_configure,
6707 .slave_destroy = ipr_slave_destroy,
Brian Kingf688f962014-12-02 12:47:37 -06006708 .scan_finished = ipr_scan_finished,
Brian King35a39692006-09-25 12:39:20 -05006709 .target_alloc = ipr_target_alloc,
6710 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006711 .change_queue_depth = ipr_change_queue_depth,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006712 .bios_param = ipr_biosparam,
6713 .can_queue = IPR_MAX_COMMANDS,
6714 .this_id = -1,
6715 .sg_tablesize = IPR_MAX_SGLIST,
6716 .max_sectors = IPR_IOA_MAX_SECTORS,
6717 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6718 .use_clustering = ENABLE_CLUSTERING,
6719 .shost_attrs = ipr_ioa_attrs,
6720 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen54b2b502013-10-23 06:25:40 -04006721 .proc_name = IPR_NAME,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006722};
6723
Brian King35a39692006-09-25 12:39:20 -05006724/**
6725 * ipr_ata_phy_reset - libata phy_reset handler
6726 * @ap: ata port to reset
6727 *
6728 **/
6729static void ipr_ata_phy_reset(struct ata_port *ap)
6730{
6731 unsigned long flags;
6732 struct ipr_sata_port *sata_port = ap->private_data;
6733 struct ipr_resource_entry *res = sata_port->res;
6734 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6735 int rc;
6736
6737 ENTER;
6738 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
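	/* Wait for any in-progress adapter reset/reload to finish before resetting the port */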
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006739 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006740 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6741 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6742 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6743 }
6744
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006745 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006746 goto out_unlock;
6747
6748 rc = ipr_device_reset(ioa_cfg, res);
6749
6750 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006751 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006752 goto out_unlock;
6753 }
6754
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006755 ap->link.device[0].class = res->ata_class;
6756 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006757 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006758
6759out_unlock:
6760 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6761 LEAVE;
6762}
6763
6764/**
6765 * ipr_ata_post_internal - Cleanup after an internal command
6766 * @qc: ATA queued command
6767 *
6768 * Return value:
6769 * none
6770 **/
6771static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6772{
6773 struct ipr_sata_port *sata_port = qc->ap->private_data;
6774 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6775 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006776 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006777 unsigned long flags;
6778
6779 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006780 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006781 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6782 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6783 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6784 }
6785
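	/* Find the internal command on any HRRQ pending queue and reset the device to clean it up */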
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006786 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006787 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006788 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6789 if (ipr_cmd->qc == qc) {
6790 ipr_device_reset(ioa_cfg, sata_port->res);
6791 break;
6792 }
Brian King35a39692006-09-25 12:39:20 -05006793 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006794 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006795 }
6796 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6797}
6798
6799/**
Brian King35a39692006-09-25 12:39:20 -05006800 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6801 * @regs: destination
6802 * @tf: source ATA taskfile
6803 *
6804 * Return value:
6805 * none
6806 **/
6807static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6808 struct ata_taskfile *tf)
6809{
6810 regs->feature = tf->feature;
6811 regs->nsect = tf->nsect;
6812 regs->lbal = tf->lbal;
6813 regs->lbam = tf->lbam;
6814 regs->lbah = tf->lbah;
6815 regs->device = tf->device;
6816 regs->command = tf->command;
6817 regs->hob_feature = tf->hob_feature;
6818 regs->hob_nsect = tf->hob_nsect;
6819 regs->hob_lbal = tf->hob_lbal;
6820 regs->hob_lbam = tf->hob_lbam;
6821 regs->hob_lbah = tf->hob_lbah;
6822 regs->ctl = tf->ctl;
6823}
6824
6825/**
6826 * ipr_sata_done - done function for SATA commands
6827 * @ipr_cmd: ipr command struct
6828 *
6829 * This function is invoked by the interrupt handler for
6830 * ops generated by the SCSI mid-layer to SATA devices
6831 *
6832 * Return value:
6833 * none
6834 **/
6835static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6836{
6837 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6838 struct ata_queued_cmd *qc = ipr_cmd->qc;
6839 struct ipr_sata_port *sata_port = qc->ap->private_data;
6840 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006841 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006842
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006843 spin_lock(&ipr_cmd->hrrq->_lock);
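	/* Save the returned ATA registers so ipr_qc_fill_rtf() can build the result taskfile */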
Wayne Boyer96d21f02010-05-10 09:13:27 -07006844 if (ipr_cmd->ioa_cfg->sis64)
6845 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6846 sizeof(struct ipr_ioasa_gata));
6847 else
6848 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6849 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006850 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6851
Wayne Boyer96d21f02010-05-10 09:13:27 -07006852 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006853 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006854
6855 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006856 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006857 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006858 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006859 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006860 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006861 ata_qc_complete(qc);
6862}
6863
6864/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006865 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6866 * @ipr_cmd: ipr command struct
6867 * @qc: ATA queued command
6868 *
6869 **/
6870static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6871 struct ata_queued_cmd *qc)
6872{
6873 u32 ioadl_flags = 0;
6874 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006875 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006876 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6877 int len = qc->nbytes;
6878 struct scatterlist *sg;
6879 unsigned int si;
6880 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6881
6882 if (len == 0)
6883 return;
6884
6885 if (qc->dma_dir == DMA_TO_DEVICE) {
6886 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6887 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6888 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6889 ioadl_flags = IPR_IOADL_FLAGS_READ;
6890
6891 ioarcb->data_transfer_length = cpu_to_be32(len);
6892 ioarcb->ioadl_len =
6893 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6894 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006895 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006896
6897 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6898 ioadl64->flags = cpu_to_be32(ioadl_flags);
6899 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6900 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6901
6902 last_ioadl64 = ioadl64;
6903 ioadl64++;
6904 }
6905
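	/* Flag the final descriptor so the adapter knows where the scatter/gather chain ends */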
6906 if (likely(last_ioadl64))
6907 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6908}
6909
6910/**
Brian King35a39692006-09-25 12:39:20 -05006911 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6912 * @ipr_cmd: ipr command struct
6913 * @qc: ATA queued command
6914 *
6915 **/
6916static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6917 struct ata_queued_cmd *qc)
6918{
6919 u32 ioadl_flags = 0;
6920 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006921 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006922 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006923 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006924 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006925 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006926
6927 if (len == 0)
6928 return;
6929
6930 if (qc->dma_dir == DMA_TO_DEVICE) {
6931 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6932 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006933 ioarcb->data_transfer_length = cpu_to_be32(len);
6934 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006935 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6936 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6937 ioadl_flags = IPR_IOADL_FLAGS_READ;
6938 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6939 ioarcb->read_ioadl_len =
6940 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6941 }
6942
Tejun Heoff2aeb12007-12-05 16:43:11 +09006943 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006944 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6945 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006946
6947 last_ioadl = ioadl;
6948 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006949 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006950
6951 if (likely(last_ioadl))
6952 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006953}
6954
6955/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006956 * ipr_qc_defer - Get a free ipr_cmd
6957 * @qc: queued command
6958 *
6959 * Return value:
 6960 * 0 if the command can be issued / ATA_DEFER_LINK if it must be deferred
6961 **/
6962static int ipr_qc_defer(struct ata_queued_cmd *qc)
6963{
6964 struct ata_port *ap = qc->ap;
6965 struct ipr_sata_port *sata_port = ap->private_data;
6966 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6967 struct ipr_cmnd *ipr_cmd;
6968 struct ipr_hrr_queue *hrrq;
6969 int hrrq_id;
6970
6971 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6972 hrrq = &ioa_cfg->hrrq[hrrq_id];
6973
6974 qc->lldd_task = NULL;
6975 spin_lock(&hrrq->_lock);
6976 if (unlikely(hrrq->ioa_is_dead)) {
6977 spin_unlock(&hrrq->_lock);
6978 return 0;
6979 }
6980
6981 if (unlikely(!hrrq->allow_cmds)) {
6982 spin_unlock(&hrrq->_lock);
6983 return ATA_DEFER_LINK;
6984 }
6985
6986 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6987 if (ipr_cmd == NULL) {
6988 spin_unlock(&hrrq->_lock);
6989 return ATA_DEFER_LINK;
6990 }
6991
6992 qc->lldd_task = ipr_cmd;
6993 spin_unlock(&hrrq->_lock);
6994 return 0;
6995}
6996
6997/**
Brian King35a39692006-09-25 12:39:20 -05006998 * ipr_qc_issue - Issue a SATA qc to a device
6999 * @qc: queued command
7000 *
7001 * Return value:
7002 * 0 if success
7003 **/
7004static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7005{
7006 struct ata_port *ap = qc->ap;
7007 struct ipr_sata_port *sata_port = ap->private_data;
7008 struct ipr_resource_entry *res = sata_port->res;
7009 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7010 struct ipr_cmnd *ipr_cmd;
7011 struct ipr_ioarcb *ioarcb;
7012 struct ipr_ioarcb_ata_regs *regs;
7013
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007014 if (qc->lldd_task == NULL)
7015 ipr_qc_defer(qc);
7016
7017 ipr_cmd = qc->lldd_task;
7018 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05007019 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05007020
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007021 qc->lldd_task = NULL;
7022 spin_lock(&ipr_cmd->hrrq->_lock);
7023 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7024 ipr_cmd->hrrq->ioa_is_dead)) {
7025 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7026 spin_unlock(&ipr_cmd->hrrq->_lock);
7027 return AC_ERR_SYSTEM;
7028 }
7029
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007030 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05007031 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05007032
Wayne Boyera32c0552010-02-19 13:23:36 -08007033 if (ioa_cfg->sis64) {
7034 regs = &ipr_cmd->i.ata_ioadl.regs;
7035 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7036 } else
7037 regs = &ioarcb->u.add_data.u.regs;
7038
7039 memset(regs, 0, sizeof(*regs));
7040 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05007041
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007042 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05007043 ipr_cmd->qc = qc;
7044 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007045 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05007046 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7047 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7048 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01007049 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05007050
Wayne Boyera32c0552010-02-19 13:23:36 -08007051 if (ioa_cfg->sis64)
7052 ipr_build_ata_ioadl64(ipr_cmd, qc);
7053 else
7054 ipr_build_ata_ioadl(ipr_cmd, qc);
7055
Brian King35a39692006-09-25 12:39:20 -05007056 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7057 ipr_copy_sata_tf(regs, &qc->tf);
7058 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007059 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05007060
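	/* Translate the libata taskfile protocol into passthrough request flags */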
7061 switch (qc->tf.protocol) {
7062 case ATA_PROT_NODATA:
7063 case ATA_PROT_PIO:
7064 break;
7065
7066 case ATA_PROT_DMA:
7067 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7068 break;
7069
Tejun Heo0dc36882007-12-18 16:34:43 -05007070 case ATAPI_PROT_PIO:
7071 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05007072 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7073 break;
7074
Tejun Heo0dc36882007-12-18 16:34:43 -05007075 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05007076 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7077 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7078 break;
7079
7080 default:
7081 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007082 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05007083 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05007084 }
7085
Wayne Boyera32c0552010-02-19 13:23:36 -08007086 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007087 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08007088
Brian King35a39692006-09-25 12:39:20 -05007089 return 0;
7090}
7091
7092/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007093 * ipr_qc_fill_rtf - Read result TF
7094 * @qc: ATA queued command
7095 *
7096 * Return value:
7097 * true
7098 **/
7099static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7100{
7101 struct ipr_sata_port *sata_port = qc->ap->private_data;
7102 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7103 struct ata_taskfile *tf = &qc->result_tf;
7104
7105 tf->feature = g->error;
7106 tf->nsect = g->nsect;
7107 tf->lbal = g->lbal;
7108 tf->lbam = g->lbam;
7109 tf->lbah = g->lbah;
7110 tf->device = g->device;
7111 tf->command = g->status;
7112 tf->hob_nsect = g->hob_nsect;
7113 tf->hob_lbal = g->hob_lbal;
7114 tf->hob_lbam = g->hob_lbam;
7115 tf->hob_lbah = g->hob_lbah;
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007116
7117 return true;
7118}
7119
Brian King35a39692006-09-25 12:39:20 -05007120static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05007121 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09007122 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05007123 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05007124 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007125 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05007126 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007127 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05007128 .port_start = ata_sas_port_start,
7129 .port_stop = ata_sas_port_stop
7130};
7131
7132static struct ata_port_info sata_port_info = {
Shaohua Li5067c042015-03-12 10:32:18 -07007133 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7134 ATA_FLAG_SAS_HOST,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03007135 .pio_mask = ATA_PIO4_ONLY,
7136 .mwdma_mask = ATA_MWDMA2,
7137 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05007138 .port_ops = &ipr_sata_ops
7139};
7140
Linus Torvalds1da177e2005-04-16 15:20:36 -07007141#ifdef CONFIG_PPC_PSERIES
7142static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007143 PVR_NORTHSTAR,
7144 PVR_PULSAR,
7145 PVR_POWER4,
7146 PVR_ICESTAR,
7147 PVR_SSTAR,
7148 PVR_POWER4p,
7149 PVR_630,
7150 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07007151};
7152
7153/**
7154 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7155 * @ioa_cfg: ioa cfg struct
7156 *
7157 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7158 * certain pSeries hardware. This function determines if the given
 7159 * adapter is in one of these configurations or not.
7160 *
7161 * Return value:
7162 * 1 if adapter is not supported / 0 if adapter is supported
7163 **/
7164static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7165{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007166 int i;
7167
Auke Kok44c10132007-06-08 15:46:36 -07007168 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007169 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007170 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07007171 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007172 }
7173 }
7174 return 0;
7175}
7176#else
7177#define ipr_invalid_adapter(ioa_cfg) 0
7178#endif
7179
7180/**
7181 * ipr_ioa_bringdown_done - IOA bring down completion.
7182 * @ipr_cmd: ipr command struct
7183 *
7184 * This function processes the completion of an adapter bring down.
7185 * It wakes any reset sleepers.
7186 *
7187 * Return value:
7188 * IPR_RC_JOB_RETURN
7189 **/
7190static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7191{
7192 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007193 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007194
7195 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06007196 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7197 ipr_trace;
Brian Kingb0e17a92017-08-01 10:21:30 -05007198 ioa_cfg->scsi_unblock = 1;
7199 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06007200 }
7201
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202 ioa_cfg->in_reset_reload = 0;
7203 ioa_cfg->reset_retries = 0;
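	/* Mark every HRRQ dead under its lock so no further commands are accepted */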
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007204 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7205 spin_lock(&ioa_cfg->hrrq[i]._lock);
7206 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7207 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7208 }
7209 wmb();
7210
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007211 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007213 LEAVE;
7214
7215 return IPR_RC_JOB_RETURN;
7216}
7217
7218/**
7219 * ipr_ioa_reset_done - IOA reset completion.
7220 * @ipr_cmd: ipr command struct
7221 *
7222 * This function processes the completion of an adapter reset.
7223 * It schedules any necessary mid-layer add/removes and
7224 * wakes any reset sleepers.
7225 *
7226 * Return value:
7227 * IPR_RC_JOB_RETURN
7228 **/
7229static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7230{
7231 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7232 struct ipr_resource_entry *res;
Brian Kingafc3f832016-08-24 12:56:51 -05007233 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007234
7235 ENTER;
7236 ioa_cfg->in_reset_reload = 0;
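	/* Re-enable command acceptance on every HRRQ now that the reset has completed */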
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007237 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7238 spin_lock(&ioa_cfg->hrrq[j]._lock);
7239 ioa_cfg->hrrq[j].allow_cmds = 1;
7240 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7241 }
7242 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007243 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06007244 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007245
7246 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Brian Kingf688f962014-12-02 12:47:37 -06007247 if (res->add_to_ml || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007248 ipr_trace;
7249 break;
7250 }
7251 }
7252 schedule_work(&ioa_cfg->work_q);
7253
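	/* Repost the HCAM buffers: the first IPR_NUM_LOG_HCAMS as log-data HCAMs, the rest as config-change HCAMs */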
Brian Kingafc3f832016-08-24 12:56:51 -05007254 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7255 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7256 if (j < IPR_NUM_LOG_HCAMS)
7257 ipr_send_hcam(ioa_cfg,
7258 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7259 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007260 else
Brian Kingafc3f832016-08-24 12:56:51 -05007261 ipr_send_hcam(ioa_cfg,
7262 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7263 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007264 }
7265
Brian King6bb04172007-04-26 16:00:08 -05007266 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007267 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7268
7269 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007270 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007271 wake_up_all(&ioa_cfg->reset_wait_q);
7272
Brian Kingb0e17a92017-08-01 10:21:30 -05007273 ioa_cfg->scsi_unblock = 1;
Brian Kingf688f962014-12-02 12:47:37 -06007274 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007275 LEAVE;
7276 return IPR_RC_JOB_RETURN;
7277}
7278
7279/**
7280 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7281 * @supported_dev: supported device struct
7282 * @vpids: vendor product id struct
7283 *
7284 * Return value:
7285 * none
7286 **/
7287static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7288 struct ipr_std_inq_vpids *vpids)
7289{
7290 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7291 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7292 supported_dev->num_records = 1;
7293 supported_dev->data_length =
7294 cpu_to_be16(sizeof(struct ipr_supported_device));
7295 supported_dev->reserved = 0;
7296}
7297
7298/**
7299 * ipr_set_supported_devs - Send Set Supported Devices for a device
7300 * @ipr_cmd: ipr command struct
7301 *
Wayne Boyera32c0552010-02-19 13:23:36 -08007302 * This function sends a Set Supported Devices to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07007303 *
7304 * Return value:
7305 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7306 **/
7307static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7308{
7309 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7310 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007311 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7312 struct ipr_resource_entry *res = ipr_cmd->u.res;
7313
7314 ipr_cmd->job_step = ipr_ioa_reset_done;
7315
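	/* Send a Set Supported Devices for the next disk; on older (non-SIS64) adapters the job step re-enters here until every disk has been covered */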
7316 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06007317 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007318 continue;
7319
7320 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007321 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007322
7323 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7324 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7325 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7326
7327 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007328 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007329 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7330 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7331
Wayne Boyera32c0552010-02-19 13:23:36 -08007332 ipr_init_ioadl(ipr_cmd,
7333 ioa_cfg->vpd_cbs_dma +
7334 offsetof(struct ipr_misc_cbs, supp_dev),
7335 sizeof(struct ipr_supported_device),
7336 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007337
7338 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7339 IPR_SET_SUP_DEVICE_TIMEOUT);
7340
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007341 if (!ioa_cfg->sis64)
7342 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007343 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007344 return IPR_RC_JOB_RETURN;
7345 }
7346
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007347 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007348 return IPR_RC_JOB_CONTINUE;
7349}
7350
7351/**
7352 * ipr_get_mode_page - Locate specified mode page
7353 * @mode_pages: mode page buffer
7354 * @page_code: page code to find
7355 * @len: minimum required length for mode page
7356 *
7357 * Return value:
7358 * pointer to mode page / NULL on failure
7359 **/
7360static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7361 u32 page_code, u32 len)
7362{
7363 struct ipr_mode_page_hdr *mode_hdr;
7364 u32 page_length;
7365 u32 length;
7366
7367 if (!mode_pages || (mode_pages->hdr.length == 0))
7368 return NULL;
7369
7370 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7371 mode_hdr = (struct ipr_mode_page_hdr *)
7372 (mode_pages->data + mode_pages->hdr.block_desc_len);
7373
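	/* Step through the mode page headers by each page's length until the requested page code is found */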
7374 while (length) {
7375 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7376 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7377 return mode_hdr;
7378 break;
7379 } else {
7380 page_length = (sizeof(struct ipr_mode_page_hdr) +
7381 mode_hdr->page_length);
7382 length -= page_length;
7383 mode_hdr = (struct ipr_mode_page_hdr *)
7384 ((unsigned long)mode_hdr + page_length);
7385 }
7386 }
7387 return NULL;
7388}
7389
7390/**
7391 * ipr_check_term_power - Check for term power errors
7392 * @ioa_cfg: ioa config struct
7393 * @mode_pages: IOAFP mode pages buffer
7394 *
7395 * Check the IOAFP's mode page 28 for term power errors
7396 *
7397 * Return value:
7398 * nothing
7399 **/
7400static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7401 struct ipr_mode_pages *mode_pages)
7402{
7403 int i;
7404 int entry_length;
7405 struct ipr_dev_bus_entry *bus;
7406 struct ipr_mode_page28 *mode_page;
7407
7408 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7409 sizeof(struct ipr_mode_page28));
7410
7411 entry_length = mode_page->entry_length;
7412
7413 bus = mode_page->bus;
7414
7415 for (i = 0; i < mode_page->num_entries; i++) {
7416 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7417 dev_err(&ioa_cfg->pdev->dev,
7418 "Term power is absent on scsi bus %d\n",
7419 bus->res_addr.bus);
7420 }
7421
7422 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7423 }
7424}
7425
7426/**
7427 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7428 * @ioa_cfg: ioa config struct
7429 *
7430 * Looks through the config table checking for SES devices. If
7431 * the SES device is in the SES table indicating a maximum SCSI
7432 * bus speed, the speed is limited for the bus.
7433 *
7434 * Return value:
7435 * none
7436 **/
7437static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7438{
7439 u32 max_xfer_rate;
7440 int i;
7441
7442 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7443 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7444 ioa_cfg->bus_attr[i].bus_width);
7445
7446 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7447 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7448 }
7449}
7450
7451/**
7452 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7453 * @ioa_cfg: ioa config struct
7454 * @mode_pages: mode page 28 buffer
7455 *
7456 * Updates mode page 28 based on driver configuration
7457 *
7458 * Return value:
7459 * none
7460 **/
7461static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007462 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007463{
7464 int i, entry_length;
7465 struct ipr_dev_bus_entry *bus;
7466 struct ipr_bus_attributes *bus_attr;
7467 struct ipr_mode_page28 *mode_page;
7468
7469 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7470 sizeof(struct ipr_mode_page28));
7471
7472 entry_length = mode_page->entry_length;
7473
7474 /* Loop for each device bus entry */
7475 for (i = 0, bus = mode_page->bus;
7476 i < mode_page->num_entries;
7477 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7478 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7479 dev_err(&ioa_cfg->pdev->dev,
7480 "Invalid resource address reported: 0x%08X\n",
7481 IPR_GET_PHYS_LOC(bus->res_addr));
7482 continue;
7483 }
7484
7485 bus_attr = &ioa_cfg->bus_attr[i];
7486 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7487 bus->bus_width = bus_attr->bus_width;
7488 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7489 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7490 if (bus_attr->qas_enabled)
7491 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7492 else
7493 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7494 }
7495}
7496
7497/**
7498 * ipr_build_mode_select - Build a mode select command
7499 * @ipr_cmd: ipr command struct
7500 * @res_handle: resource handle to send command to
 7501 * @parm: Byte 1 of the Mode Select CDB
7502 * @dma_addr: DMA buffer address
7503 * @xfer_len: data transfer length
7504 *
7505 * Return value:
7506 * none
7507 **/
7508static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007509 __be32 res_handle, u8 parm,
7510 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007511{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007512 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7513
7514 ioarcb->res_handle = res_handle;
7515 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7516 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7517 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7518 ioarcb->cmd_pkt.cdb[1] = parm;
7519 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7520
Wayne Boyera32c0552010-02-19 13:23:36 -08007521 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007522}
7523
7524/**
7525 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7526 * @ipr_cmd: ipr command struct
7527 *
7528 * This function sets up the SCSI bus attributes and sends
7529 * a Mode Select for Page 28 to activate them.
7530 *
7531 * Return value:
7532 * IPR_RC_JOB_RETURN
7533 **/
7534static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7535{
7536 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7537 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7538 int length;
7539
7540 ENTER;
Brian King47338042006-02-08 20:57:42 -06007541 ipr_scsi_bus_speed_limit(ioa_cfg);
7542 ipr_check_term_power(ioa_cfg, mode_pages);
7543 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
7544 length = mode_pages->hdr.length + 1;
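	/* The mode data length byte is reserved in MODE SELECT data, so zero it before sending the pages back */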
7545 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007546
7547 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7548 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7549 length);
7550
Wayne Boyerf72919e2010-02-19 13:24:21 -08007551 ipr_cmd->job_step = ipr_set_supported_devs;
7552 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7553 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007554 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7555
7556 LEAVE;
7557 return IPR_RC_JOB_RETURN;
7558}
7559
7560/**
7561 * ipr_build_mode_sense - Builds a mode sense command
7562 * @ipr_cmd: ipr command struct
 7563 * @res_handle: resource handle to send command to
7564 * @parm: Byte 2 of mode sense command
7565 * @dma_addr: DMA address of mode sense buffer
7566 * @xfer_len: Size of DMA buffer
7567 *
7568 * Return value:
7569 * none
7570 **/
7571static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7572 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007573 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007574{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007575 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7576
7577 ioarcb->res_handle = res_handle;
7578 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7579 ioarcb->cmd_pkt.cdb[2] = parm;
7580 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7581 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7582
Wayne Boyera32c0552010-02-19 13:23:36 -08007583 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007584}
7585
7586/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007587 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7588 * @ipr_cmd: ipr command struct
7589 *
7590 * This function handles the failure of an IOA bringup command.
7591 *
7592 * Return value:
7593 * IPR_RC_JOB_RETURN
7594 **/
7595static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7596{
7597 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007598 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007599
7600 dev_err(&ioa_cfg->pdev->dev,
7601 "0x%02X failed with IOASC: 0x%08X\n",
7602 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7603
7604 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007605 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007606 return IPR_RC_JOB_RETURN;
7607}
7608
7609/**
7610 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7611 * @ipr_cmd: ipr command struct
7612 *
7613 * This function handles the failure of a Mode Sense to the IOAFP.
7614 * Some adapters do not handle all mode pages.
7615 *
7616 * Return value:
7617 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7618 **/
7619static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7620{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007621 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007622 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007623
7624 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007625 ipr_cmd->job_step = ipr_set_supported_devs;
7626 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7627 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007628 return IPR_RC_JOB_CONTINUE;
7629 }
7630
7631 return ipr_reset_cmd_failed(ipr_cmd);
7632}
7633
7634/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007635 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7636 * @ipr_cmd: ipr command struct
7637 *
 7638 * This function sends a Page 28 mode sense to the IOA to
7639 * retrieve SCSI bus attributes.
7640 *
7641 * Return value:
7642 * IPR_RC_JOB_RETURN
7643 **/
7644static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7645{
7646 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7647
7648 ENTER;
7649 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7650 0x28, ioa_cfg->vpd_cbs_dma +
7651 offsetof(struct ipr_misc_cbs, mode_pages),
7652 sizeof(struct ipr_mode_pages));
7653
7654 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007655 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007656
7657 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7658
7659 LEAVE;
7660 return IPR_RC_JOB_RETURN;
7661}
7662
7663/**
Brian Kingac09c342007-04-26 16:00:16 -05007664 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7665 * @ipr_cmd: ipr command struct
7666 *
7667 * This function enables dual IOA RAID support if possible.
7668 *
7669 * Return value:
7670 * IPR_RC_JOB_RETURN
7671 **/
7672static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7673{
7674 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7675 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7676 struct ipr_mode_page24 *mode_page;
7677 int length;
7678
7679 ENTER;
7680 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7681 sizeof(struct ipr_mode_page24));
7682
7683 if (mode_page)
7684 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7685
7686 length = mode_pages->hdr.length + 1;
7687 mode_pages->hdr.length = 0;
7688
7689 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7690 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7691 length);
7692
7693 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7694 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7695
7696 LEAVE;
7697 return IPR_RC_JOB_RETURN;
7698}
7699
7700/**
7701 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7702 * @ipr_cmd: ipr command struct
7703 *
7704 * This function handles the failure of a Mode Sense to the IOAFP.
7705 * Some adapters do not handle all mode pages.
7706 *
7707 * Return value:
7708 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7709 **/
7710static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7711{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007712 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007713
7714 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7715 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7716 return IPR_RC_JOB_CONTINUE;
7717 }
7718
7719 return ipr_reset_cmd_failed(ipr_cmd);
7720}
7721
7722/**
7723 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7724 * @ipr_cmd: ipr command struct
7725 *
 7726 * This function sends a mode sense to the IOA to retrieve
7727 * the IOA Advanced Function Control mode page.
7728 *
7729 * Return value:
7730 * IPR_RC_JOB_RETURN
7731 **/
7732static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7733{
7734 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7735
7736 ENTER;
7737 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7738 0x24, ioa_cfg->vpd_cbs_dma +
7739 offsetof(struct ipr_misc_cbs, mode_pages),
7740 sizeof(struct ipr_mode_pages));
7741
7742 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7743 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7744
7745 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7746
7747 LEAVE;
7748 return IPR_RC_JOB_RETURN;
7749}
7750
7751/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007752 * ipr_init_res_table - Initialize the resource table
7753 * @ipr_cmd: ipr command struct
7754 *
7755 * This function looks through the existing resource table, comparing
7756 * it with the config table. This function will take care of old/new
7757 * devices and schedule adding/removing them from the mid-layer
7758 * as appropriate.
7759 *
7760 * Return value:
7761 * IPR_RC_JOB_CONTINUE
7762 **/
7763static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7764{
7765 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7766 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007767 struct ipr_config_table_entry_wrapper cfgtew;
7768 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007769 LIST_HEAD(old_res);
7770
7771 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007772 if (ioa_cfg->sis64)
7773 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7774 else
7775 flag = ioa_cfg->u.cfg_table->hdr.flags;
7776
7777 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007778 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7779
7780 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7781 list_move_tail(&res->queue, &old_res);
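	/* Park every known resource on old_res; entries still present in the new config table are moved back below */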
7782
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007783 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007784 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007785 else
7786 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7787
7788 for (i = 0; i < entries; i++) {
7789 if (ioa_cfg->sis64)
7790 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7791 else
7792 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007793 found = 0;
7794
7795 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007796 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007797 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7798 found = 1;
7799 break;
7800 }
7801 }
7802
7803 if (!found) {
7804 if (list_empty(&ioa_cfg->free_res_q)) {
7805 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7806 break;
7807 }
7808
7809 found = 1;
7810 res = list_entry(ioa_cfg->free_res_q.next,
7811 struct ipr_resource_entry, queue);
7812 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007813 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007814 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007815 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7816 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007817
7818 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007819 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007820 }
7821
7822 list_for_each_entry_safe(res, temp, &old_res, queue) {
7823 if (res->sdev) {
7824 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007825 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007826 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007827 }
7828 }
7829
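	/* Anything left on old_res is no longer in the config table; clear its target and return it to the free list */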
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007830 list_for_each_entry_safe(res, temp, &old_res, queue) {
7831 ipr_clear_res_target(res);
7832 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7833 }
7834
Brian Kingac09c342007-04-26 16:00:16 -05007835 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7836 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7837 else
7838 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007839
7840 LEAVE;
7841 return IPR_RC_JOB_CONTINUE;
7842}
7843
7844/**
7845 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7846 * @ipr_cmd: ipr command struct
7847 *
7848 * This function sends a Query IOA Configuration command
7849 * to the adapter to retrieve the IOA configuration table.
7850 *
7851 * Return value:
7852 * IPR_RC_JOB_RETURN
7853 **/
7854static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7855{
7856 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7857 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007858 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007859 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007860
7861 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007862 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7863 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007864 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7865 ucode_vpd->major_release, ucode_vpd->card_type,
7866 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7867 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7868 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7869
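	/* The config table allocation length is a 24-bit value carried in CDB bytes 6-8 */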
7870 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007871 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007872 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7873 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007874
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007875 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007876 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007877
7878 ipr_cmd->job_step = ipr_init_res_table;
7879
7880 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7881
7882 LEAVE;
7883 return IPR_RC_JOB_RETURN;
7884}
7885
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02007886static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7887{
7888 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7889
7890 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7891 return IPR_RC_JOB_CONTINUE;
7892
7893 return ipr_reset_cmd_failed(ipr_cmd);
7894}
7895
7896static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7897 __be32 res_handle, u8 sa_code)
7898{
7899 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7900
7901 ioarcb->res_handle = res_handle;
7902 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7903 ioarcb->cmd_pkt.cdb[1] = sa_code;
7904 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7905}
7906
7907/**
7908 * ipr_ioafp_set_caching_parameters - Issue Set Cache parameters service
7909 * action
 7910 * @ipr_cmd: ipr command struct
 7911 * Return value:
 7912 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7913 **/
7914static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7915{
7916 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7917 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7918 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7919
7920 ENTER;
7921
7922 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7923
7924 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7925 ipr_build_ioa_service_action(ipr_cmd,
7926 cpu_to_be32(IPR_IOA_RES_HANDLE),
7927 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7928
7929 ioarcb->cmd_pkt.cdb[2] = 0x40;
7930
7931 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7932 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7933 IPR_SET_SUP_DEVICE_TIMEOUT);
7934
7935 LEAVE;
7936 return IPR_RC_JOB_RETURN;
7937 }
7938
7939 LEAVE;
7940 return IPR_RC_JOB_CONTINUE;
7941}
7942
Linus Torvalds1da177e2005-04-16 15:20:36 -07007943/**
7944 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 7945 * @ipr_cmd: ipr command struct
 * @flags: inquiry flags (CDB byte 1)
 * @page: inquiry page code to request
 * @dma_addr: DMA address of the response buffer
 * @xfer_len: size of the response buffer
7946 *
7947 * This utility function sends an inquiry to the adapter.
7948 *
7949 * Return value:
7950 * none
7951 **/
7952static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007953 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007954{
7955 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007956
7957 ENTER;
7958 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7959 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7960
7961 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7962 ioarcb->cmd_pkt.cdb[1] = flags;
7963 ioarcb->cmd_pkt.cdb[2] = page;
7964 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7965
Wayne Boyera32c0552010-02-19 13:23:36 -08007966 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007967
7968 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7969 LEAVE;
7970}
7971
7972/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007973 * ipr_inquiry_page_supported - Is the given inquiry page supported
7974 * @page0: inquiry page 0 buffer
7975 * @page: page code.
7976 *
7977 * This function determines if the specified inquiry page is supported.
7978 *
7979 * Return value:
7980 * 1 if page is supported / 0 if not
7981 **/
7982static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7983{
7984 int i;
7985
7986 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7987 if (page0->page[i] == page)
7988 return 1;
7989
7990 return 0;
7991}
7992
7993/**
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02007994 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7995 * @ipr_cmd: ipr command struct
7996 *
7997 * This function sends a Page 0xC4 inquiry to the adapter
7998 * to retrieve software VPD information.
7999 *
8000 * Return value:
8001 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8002 **/
8003static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8004{
8005 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8006 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8007 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8008
8009 ENTER;
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02008010 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008011 memset(pageC4, 0, sizeof(*pageC4));
8012
8013 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8014 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8015 (ioa_cfg->vpd_cbs_dma
8016 + offsetof(struct ipr_misc_cbs,
8017 pageC4_data)),
8018 sizeof(struct ipr_inquiry_pageC4));
8019 return IPR_RC_JOB_RETURN;
8020 }
8021
8022 LEAVE;
8023 return IPR_RC_JOB_CONTINUE;
8024}
8025
8026/**
Brian Kingac09c342007-04-26 16:00:16 -05008027 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8028 * @ipr_cmd: ipr command struct
8029 *
8030 * This function sends a Page 0xD0 inquiry to the adapter
8031 * to retrieve adapter capabilities.
8032 *
8033 * Return value:
8034 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8035 **/
8036static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8037{
8038 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8039 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8040 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8041
8042 ENTER;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008043 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
Brian Kingac09c342007-04-26 16:00:16 -05008044 memset(cap, 0, sizeof(*cap));
8045
8046 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8047 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8048 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8049 sizeof(struct ipr_inquiry_cap));
8050 return IPR_RC_JOB_RETURN;
8051 }
8052
8053 LEAVE;
8054 return IPR_RC_JOB_CONTINUE;
8055}
8056
8057/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008058 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8059 * @ipr_cmd: ipr command struct
8060 *
8061 * This function sends a Page 3 inquiry to the adapter
8062 * to retrieve software VPD information.
8063 *
8064 * Return value:
8065 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8066 **/
8067static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8068{
8069 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008070
8071 ENTER;
8072
Brian Kingac09c342007-04-26 16:00:16 -05008073 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008074
8075 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8076 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8077 sizeof(struct ipr_inquiry_page3));
8078
8079 LEAVE;
8080 return IPR_RC_JOB_RETURN;
8081}
8082
8083/**
8084 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8085 * @ipr_cmd: ipr command struct
8086 *
8087 * This function sends a Page 0 inquiry to the adapter
8088 * to retrieve supported inquiry pages.
8089 *
8090 * Return value:
8091 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8092 **/
8093static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8094{
8095 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008096 char type[5];
8097
8098 ENTER;
8099
8100 /* Grab the type out of the VPD and store it away */
8101 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8102 type[4] = '\0';
8103 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8104
Brian Kingf688f962014-12-02 12:47:37 -06008105 if (ipr_invalid_adapter(ioa_cfg)) {
8106 dev_err(&ioa_cfg->pdev->dev,
8107 "Adapter not supported in this hardware configuration.\n");
8108
8109 if (!ipr_testmode) {
8110 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8111 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8112 list_add_tail(&ipr_cmd->queue,
8113 &ioa_cfg->hrrq->hrrq_free_q);
8114 return IPR_RC_JOB_RETURN;
8115 }
8116 }
8117
brking@us.ibm.com62275042005-11-01 17:01:14 -06008118 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008119
brking@us.ibm.com62275042005-11-01 17:01:14 -06008120 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8121 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8122 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008123
8124 LEAVE;
8125 return IPR_RC_JOB_RETURN;
8126}
8127
8128/**
8129 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8130 * @ipr_cmd: ipr command struct
8131 *
8132 * This function sends a standard inquiry to the adapter.
8133 *
8134 * Return value:
8135 * IPR_RC_JOB_RETURN
8136 **/
8137static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8138{
8139 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8140
8141 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008142 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008143
8144 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8145 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8146 sizeof(struct ipr_ioa_vpd));
8147
8148 LEAVE;
8149 return IPR_RC_JOB_RETURN;
8150}
8151
8152/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008153 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008154 * @ipr_cmd: ipr command struct
8155 *
 8156 * This function sends an Identify Host Request Response Queue
8157 * command to establish the HRRQ with the adapter.
8158 *
8159 * Return value:
8160 * IPR_RC_JOB_RETURN
8161 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08008162static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008163{
8164 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8165 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008166 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008167
8168 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008169 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Brian King87adbe02016-09-16 16:51:37 -05008170 if (ioa_cfg->identify_hrrq_index == 0)
8171 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008172
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008173 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8174 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008175
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008176 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8177 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008178
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008179 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8180 if (ioa_cfg->sis64)
8181 ioarcb->cmd_pkt.cdb[1] = 0x1;
8182
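		/* Enable HRRQ selection only when more than one interrupt
		 * vector is in use, so each response queue gets its own
		 * Identify command.
		 */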
8183 if (ioa_cfg->nvectors == 1)
8184 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8185 else
8186 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8187
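		/* CDB bytes 2-5 carry bits 31:0 of the host RRQ DMA address;
		 * bytes 7-8 carry the queue length in bytes.
		 */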
8188 ioarcb->cmd_pkt.cdb[2] =
8189 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8190 ioarcb->cmd_pkt.cdb[3] =
8191 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8192 ioarcb->cmd_pkt.cdb[4] =
8193 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8194 ioarcb->cmd_pkt.cdb[5] =
8195 ((u64) hrrq->host_rrq_dma) & 0xff;
8196 ioarcb->cmd_pkt.cdb[7] =
8197 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8198 ioarcb->cmd_pkt.cdb[8] =
8199 (sizeof(u32) * hrrq->size) & 0xff;
8200
8201 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008202 ioarcb->cmd_pkt.cdb[9] =
8203 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008204
8205 if (ioa_cfg->sis64) {
8206 ioarcb->cmd_pkt.cdb[10] =
8207 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8208 ioarcb->cmd_pkt.cdb[11] =
8209 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8210 ioarcb->cmd_pkt.cdb[12] =
8211 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8212 ioarcb->cmd_pkt.cdb[13] =
8213 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8214 }
8215
8216 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008217 ioarcb->cmd_pkt.cdb[14] =
8218 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008219
8220 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8221 IPR_INTERNAL_TIMEOUT);
8222
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008223 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8224 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008225
8226 LEAVE;
8227 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08008228 }
8229
Linus Torvalds1da177e2005-04-16 15:20:36 -07008230 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008231 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008232}
8233
8234/**
8235 * ipr_reset_timer_done - Adapter reset timer function
8236 * @ipr_cmd: ipr command struct
8237 *
8238 * Description: This function is used in adapter reset processing
8239 * for timing events. If the reset_cmd pointer in the IOA
 8240 * config struct does not point to this command, we are doing nested
8241 * resets and fail_all_ops will take care of freeing the
8242 * command block.
8243 *
8244 * Return value:
8245 * none
8246 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07008247static void ipr_reset_timer_done(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008248{
Kees Cook738c6ec2017-08-18 16:53:24 -07008249 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008250 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8251 unsigned long lock_flags = 0;
8252
8253 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8254
8255 if (ioa_cfg->reset_cmd == ipr_cmd) {
8256 list_del(&ipr_cmd->queue);
8257 ipr_cmd->done(ipr_cmd);
8258 }
8259
8260 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8261}
8262
8263/**
8264 * ipr_reset_start_timer - Start a timer for adapter reset job
8265 * @ipr_cmd: ipr command struct
8266 * @timeout: timeout value
8267 *
8268 * Description: This function is used in adapter reset processing
8269 * for timing events. If the reset_cmd pointer in the IOA
 8270 * config struct does not point to this command, we are doing nested
8271 * resets and fail_all_ops will take care of freeing the
8272 * command block.
8273 *
8274 * Return value:
8275 * none
8276 **/
8277static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8278 unsigned long timeout)
8279{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008280
8281 ENTER;
8282 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008283 ipr_cmd->done = ipr_reset_ioa_job;
8284
Linus Torvalds1da177e2005-04-16 15:20:36 -07008285 ipr_cmd->timer.expires = jiffies + timeout;
Kees Cook841b86f2017-10-23 09:40:42 +02008286 ipr_cmd->timer.function = ipr_reset_timer_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008287 add_timer(&ipr_cmd->timer);
8288}
8289
8290/**
8291 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8292 * @ioa_cfg: ioa cfg struct
8293 *
8294 * Return value:
8295 * nothing
8296 **/
8297static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8298{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008299 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008300
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008301 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008302 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008303 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8304
8305 /* Initialize Host RRQ pointers */
8306 hrrq->hrrq_start = hrrq->host_rrq;
8307 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8308 hrrq->hrrq_curr = hrrq->hrrq_start;
8309 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008310 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008311 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008312 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008313
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008314 ioa_cfg->identify_hrrq_index = 0;
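	/* Queue 0 handles internal adapter commands; when multiple HRRQs
	 * are configured, normal command dispatch starts at queue 1.
	 */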
8315 if (ioa_cfg->hrrq_num == 1)
8316 atomic_set(&ioa_cfg->hrrq_index, 0);
8317 else
8318 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008319
8320 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008321 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008322}
8323
8324/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008325 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8326 * @ipr_cmd: ipr command struct
8327 *
8328 * Return value:
8329 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8330 **/
8331static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8332{
8333 unsigned long stage, stage_time;
8334 u32 feedback;
8335 volatile u32 int_reg;
8336 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8337 u64 maskval = 0;
8338
8339 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8340 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8341 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8342
8343 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8344
8345 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07008346 if (stage_time == 0)
8347 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8348 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08008349 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8350 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8351 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8352
8353 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8354 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8355 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8356 stage_time = ioa_cfg->transop_timeout;
8357 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8358 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07008359 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8360 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8361 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8362 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8363 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8364 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8365 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8366 return IPR_RC_JOB_CONTINUE;
8367 }
Wayne Boyer214777b2010-02-19 13:24:26 -08008368 }
8369
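	/* Not yet operational: arm the operational timeout for this stage
	 * and wait for the next stage-change interrupt to drive the job.
	 */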
Wayne Boyer214777b2010-02-19 13:24:26 -08008370 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
Kees Cook841b86f2017-10-23 09:40:42 +02008371 ipr_cmd->timer.function = ipr_oper_timeout;
Wayne Boyer214777b2010-02-19 13:24:26 -08008372 ipr_cmd->done = ipr_reset_ioa_job;
8373 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008374
8375 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08008376
8377 return IPR_RC_JOB_RETURN;
8378}
8379
8380/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008381 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8382 * @ipr_cmd: ipr command struct
8383 *
8384 * This function reinitializes some control blocks and
8385 * enables destructive diagnostics on the adapter.
8386 *
8387 * Return value:
8388 * IPR_RC_JOB_RETURN
8389 **/
8390static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8391{
8392 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8393 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07008394 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008395 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008396
8397 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08008398 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008399 ipr_init_ioa_mem(ioa_cfg);
8400
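	/* Allow interrupt processing on each HRRQ again now that the
	 * queues have been reinitialized.
	 */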
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008401 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8402 spin_lock(&ioa_cfg->hrrq[i]._lock);
8403 ioa_cfg->hrrq[i].allow_interrupts = 1;
8404 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8405 }
8406 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07008407 if (ioa_cfg->sis64) {
8408 /* Set the adapter to the correct endian mode. */
8409 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8410 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8411 }
8412
Wayne Boyer7be96902010-05-10 09:14:07 -07008413 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008414
8415 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8416 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08008417 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008418 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8419 return IPR_RC_JOB_CONTINUE;
8420 }
8421
8422 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08008423 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008424
Wayne Boyer7be96902010-05-10 09:14:07 -07008425 if (ioa_cfg->sis64) {
8426 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8427 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8428 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8429 } else
8430 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08008431
Linus Torvalds1da177e2005-04-16 15:20:36 -07008432 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8433
8434 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8435
Wayne Boyer214777b2010-02-19 13:24:26 -08008436 if (ioa_cfg->sis64) {
8437 ipr_cmd->job_step = ipr_reset_next_stage;
8438 return IPR_RC_JOB_CONTINUE;
8439 }
8440
Brian King5469cb52007-03-29 12:42:40 -05008441 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Kees Cook841b86f2017-10-23 09:40:42 +02008442 ipr_cmd->timer.function = ipr_oper_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008443 ipr_cmd->done = ipr_reset_ioa_job;
8444 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008445 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008446
8447 LEAVE;
8448 return IPR_RC_JOB_RETURN;
8449}
8450
8451/**
 8452 * ipr_reset_wait_for_dump - Wait for a dump to time out.
8453 * @ipr_cmd: ipr command struct
8454 *
8455 * This function is invoked when an adapter dump has run out
8456 * of processing time.
8457 *
8458 * Return value:
8459 * IPR_RC_JOB_CONTINUE
8460 **/
8461static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8462{
8463 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8464
8465 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05008466 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8467 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008468 ioa_cfg->sdt_state = ABORT_DUMP;
8469
Brian King4c647e92011-10-15 09:08:56 -05008470 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008471 ipr_cmd->job_step = ipr_reset_alert;
8472
8473 return IPR_RC_JOB_CONTINUE;
8474}
8475
8476/**
8477 * ipr_unit_check_no_data - Log a unit check/no data error log
8478 * @ioa_cfg: ioa config struct
8479 *
8480 * Logs an error indicating the adapter unit checked, but for some
8481 * reason, we were unable to fetch the unit check buffer.
8482 *
8483 * Return value:
8484 * nothing
8485 **/
8486static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8487{
8488 ioa_cfg->errors_logged++;
8489 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8490}
8491
8492/**
8493 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8494 * @ioa_cfg: ioa config struct
8495 *
8496 * Fetches the unit check buffer from the adapter by clocking the data
8497 * through the mailbox register.
8498 *
8499 * Return value:
8500 * nothing
8501 **/
8502static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8503{
8504 unsigned long mailbox;
8505 struct ipr_hostrcb *hostrcb;
8506 struct ipr_uc_sdt sdt;
8507 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008508 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008509
8510 mailbox = readl(ioa_cfg->ioa_mailbox);
8511
Wayne Boyerdcbad002010-02-19 13:24:14 -08008512 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008513 ipr_unit_check_no_data(ioa_cfg);
8514 return;
8515 }
8516
8517 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8518 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8519 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8520
Wayne Boyerdcbad002010-02-19 13:24:14 -08008521 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8522 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8523 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008524 ipr_unit_check_no_data(ioa_cfg);
8525 return;
8526 }
8527
8528 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008529 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8530 length = be32_to_cpu(sdt.entry[0].end_token);
8531 else
8532 length = (be32_to_cpu(sdt.entry[0].end_token) -
8533 be32_to_cpu(sdt.entry[0].start_token)) &
8534 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008535
8536 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8537 struct ipr_hostrcb, queue);
Brian Kingafc3f832016-08-24 12:56:51 -05008538 list_del_init(&hostrcb->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008539 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8540
8541 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008542 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008543 (__be32 *)&hostrcb->hcam,
8544 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8545
Brian King65f56472007-04-26 16:00:12 -05008546 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008547 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008548 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008549 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8550 ioa_cfg->sdt_state == GET_DUMP)
8551 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8552 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008553 ipr_unit_check_no_data(ioa_cfg);
8554
8555 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8556}
8557
8558/**
Wayne Boyer110def82010-11-04 09:36:16 -07008559 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8560 * @ipr_cmd: ipr command struct
8561 *
 8562 * Description: This function retrieves the unit check buffer from the IOA.
8563 *
8564 * Return value:
8565 * IPR_RC_JOB_RETURN
8566 **/
8567static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8568{
8569 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8570
8571 ENTER;
8572 ioa_cfg->ioa_unit_checked = 0;
8573 ipr_get_unit_check_buffer(ioa_cfg);
8574 ipr_cmd->job_step = ipr_reset_alert;
8575 ipr_reset_start_timer(ipr_cmd, 0);
8576
8577 LEAVE;
8578 return IPR_RC_JOB_RETURN;
8579}
8580
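/**
 * ipr_dump_mailbox_wait - Wait for the mailbox register to become stable
 * @ipr_cmd: ipr command struct
 *
 * Description: On SIS64 adapters, poll until the mailbox register is stable
 * or the wait time is exhausted, then kick off the dump read. Other adapters
 * proceed to the dump read immediately.
 *
 * Return value:
 * IPR_RC_JOB_RETURN
 **/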
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008581static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8582{
8583 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8584
8585 ENTER;
8586
8587 if (ioa_cfg->sdt_state != GET_DUMP)
8588 return IPR_RC_JOB_RETURN;
8589
8590 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8591 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8592 IPR_PCII_MAILBOX_STABLE)) {
8593
8594 if (!ipr_cmd->u.time_left)
8595 dev_err(&ioa_cfg->pdev->dev,
8596 "Timed out waiting for Mailbox register.\n");
8597
8598 ioa_cfg->sdt_state = READ_DUMP;
8599 ioa_cfg->dump_timeout = 0;
8600 if (ioa_cfg->sis64)
8601 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8602 else
8603 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8604 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8605 schedule_work(&ioa_cfg->work_q);
8606
8607 } else {
8608 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8609 ipr_reset_start_timer(ipr_cmd,
8610 IPR_CHECK_FOR_RESET_TIMEOUT);
8611 }
8612
8613 LEAVE;
8614 return IPR_RC_JOB_RETURN;
8615}
8616
Wayne Boyer110def82010-11-04 09:36:16 -07008617/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008618 * ipr_reset_restore_cfg_space - Restore PCI config space.
8619 * @ipr_cmd: ipr command struct
8620 *
8621 * Description: This function restores the saved PCI config space of
8622 * the adapter, fails all outstanding ops back to the callers, and
8623 * fetches the dump/unit check if applicable to this reset.
8624 *
8625 * Return value:
8626 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8627 **/
8628static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8629{
8630 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008631 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008632
8633 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008634 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008635 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008636
8637 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008638 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008639 return IPR_RC_JOB_CONTINUE;
8640 }
8641
8642 ipr_fail_all_ops(ioa_cfg);
8643
Wayne Boyer8701f182010-06-04 10:26:50 -07008644 if (ioa_cfg->sis64) {
8645 /* Set the adapter to the correct endian mode. */
8646 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8647 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8648 }
8649
Linus Torvalds1da177e2005-04-16 15:20:36 -07008650 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008651 if (ioa_cfg->sis64) {
8652 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8653 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8654 return IPR_RC_JOB_RETURN;
8655 } else {
8656 ioa_cfg->ioa_unit_checked = 0;
8657 ipr_get_unit_check_buffer(ioa_cfg);
8658 ipr_cmd->job_step = ipr_reset_alert;
8659 ipr_reset_start_timer(ipr_cmd, 0);
8660 return IPR_RC_JOB_RETURN;
8661 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008662 }
8663
8664 if (ioa_cfg->in_ioa_bringdown) {
8665 ipr_cmd->job_step = ipr_ioa_bringdown_done;
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008666 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8667 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8668 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008669 } else {
8670 ipr_cmd->job_step = ipr_reset_enable_ioa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008671 }
8672
Wayne Boyer438b0332010-05-10 09:13:00 -07008673 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008674 return IPR_RC_JOB_CONTINUE;
8675}
8676
8677/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008678 * ipr_reset_bist_done - BIST has completed on the adapter.
8679 * @ipr_cmd: ipr command struct
8680 *
8681 * Description: Unblock config space and resume the reset process.
8682 *
8683 * Return value:
8684 * IPR_RC_JOB_CONTINUE
8685 **/
8686static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8687{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008688 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8689
Brian Kinge619e1a2007-01-23 11:25:37 -06008690 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008691 if (ioa_cfg->cfg_locked)
8692 pci_cfg_access_unlock(ioa_cfg->pdev);
8693 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008694 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8695 LEAVE;
8696 return IPR_RC_JOB_CONTINUE;
8697}
8698
8699/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008700 * ipr_reset_start_bist - Run BIST on the adapter.
8701 * @ipr_cmd: ipr command struct
8702 *
8703 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8704 *
8705 * Return value:
8706 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8707 **/
8708static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8709{
8710 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008711 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008712
8713 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008714 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8715 writel(IPR_UPROCI_SIS64_START_BIST,
8716 ioa_cfg->regs.set_uproc_interrupt_reg32);
8717 else
8718 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8719
8720 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008721 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008722 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8723 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008724 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008725 if (ioa_cfg->cfg_locked)
8726 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8727 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008728 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8729 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008730 }
8731
8732 LEAVE;
8733 return rc;
8734}
8735
8736/**
Brian King463fc692007-05-07 17:09:05 -05008737 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8738 * @ipr_cmd: ipr command struct
8739 *
8740 * Description: This clears PCI reset to the adapter and delays two seconds.
8741 *
8742 * Return value:
8743 * IPR_RC_JOB_RETURN
8744 **/
8745static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8746{
8747 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008748 ipr_cmd->job_step = ipr_reset_bist_done;
8749 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8750 LEAVE;
8751 return IPR_RC_JOB_RETURN;
8752}
8753
8754/**
Brian King2796ca52015-03-26 11:23:52 -05008755 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8756 * @work: work struct
8757 *
 8758 * Description: This pulses a warm reset to the adapter's PCI slot.
8759 *
8760 **/
8761static void ipr_reset_reset_work(struct work_struct *work)
8762{
8763 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8764 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8765 struct pci_dev *pdev = ioa_cfg->pdev;
8766 unsigned long lock_flags = 0;
8767
8768 ENTER;
8769 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8770 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8771 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8772
8773 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8774 if (ioa_cfg->reset_cmd == ipr_cmd)
8775 ipr_reset_ioa_job(ipr_cmd);
8776 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8777 LEAVE;
8778}
8779
8780/**
Brian King463fc692007-05-07 17:09:05 -05008781 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8782 * @ipr_cmd: ipr command struct
8783 *
 8784 * Description: This queues work to pulse a warm PCI reset to the adapter.
8785 *
8786 * Return value:
8787 * IPR_RC_JOB_RETURN
8788 **/
8789static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8790{
8791 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Brian King463fc692007-05-07 17:09:05 -05008792
8793 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05008794 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8795 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
Brian King463fc692007-05-07 17:09:05 -05008796 ipr_cmd->job_step = ipr_reset_slot_reset_done;
Brian King463fc692007-05-07 17:09:05 -05008797 LEAVE;
8798 return IPR_RC_JOB_RETURN;
8799}
8800
8801/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008802 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8803 * @ipr_cmd: ipr command struct
8804 *
 8805 * Description: This attempts to block config access to the IOA, retrying until the reset timeout expires.
8806 *
8807 * Return value:
8808 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8809 **/
8810static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8811{
8812 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8813 int rc = IPR_RC_JOB_CONTINUE;
8814
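	/* Try to take the PCI config access lock; if it is contended, retry
	 * until the reset timeout expires, then proceed anyway.
	 */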
8815 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8816 ioa_cfg->cfg_locked = 1;
8817 ipr_cmd->job_step = ioa_cfg->reset;
8818 } else {
8819 if (ipr_cmd->u.time_left) {
8820 rc = IPR_RC_JOB_RETURN;
8821 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8822 ipr_reset_start_timer(ipr_cmd,
8823 IPR_CHECK_FOR_RESET_TIMEOUT);
8824 } else {
8825 ipr_cmd->job_step = ioa_cfg->reset;
8826 dev_err(&ioa_cfg->pdev->dev,
8827 "Timed out waiting to lock config access. Resetting anyway.\n");
8828 }
8829 }
8830
8831 return rc;
8832}
8833
8834/**
8835 * ipr_reset_block_config_access - Block config access to the IOA
8836 * @ipr_cmd: ipr command struct
8837 *
8838 * Description: This attempts to block config access to the IOA
8839 *
8840 * Return value:
8841 * IPR_RC_JOB_CONTINUE
8842 **/
8843static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8844{
8845 ipr_cmd->ioa_cfg->cfg_locked = 0;
8846 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8847 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8848 return IPR_RC_JOB_CONTINUE;
8849}
8850
8851/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008852 * ipr_reset_allowed - Query whether or not IOA can be reset
8853 * @ioa_cfg: ioa config struct
8854 *
8855 * Return value:
8856 * 0 if reset not allowed / non-zero if reset is allowed
8857 **/
8858static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8859{
8860 volatile u32 temp_reg;
8861
8862 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8863 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8864}
8865
8866/**
8867 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8868 * @ipr_cmd: ipr command struct
8869 *
8870 * Description: This function waits for adapter permission to run BIST,
8871 * then runs BIST. If the adapter does not give permission after a
8872 * reasonable time, we will reset the adapter anyway. The impact of
8873 * resetting the adapter without warning the adapter is the risk of
8874 * losing the persistent error log on the adapter. If the adapter is
8875 * reset while it is writing to the flash on the adapter, the flash
8876 * segment will have bad ECC and be zeroed.
8877 *
8878 * Return value:
8879 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8880 **/
8881static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8882{
8883 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8884 int rc = IPR_RC_JOB_RETURN;
8885
8886 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8887 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8888 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8889 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008890 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008891 rc = IPR_RC_JOB_CONTINUE;
8892 }
8893
8894 return rc;
8895}
8896
8897/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008898 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008899 * @ipr_cmd: ipr command struct
8900 *
8901 * Description: This function alerts the adapter that it will be reset.
8902 * If memory space is not currently enabled, proceed directly
8903 * to running BIST on the adapter. The timer must always be started
8904 * so we guarantee we do not run BIST from ipr_isr.
8905 *
8906 * Return value:
8907 * IPR_RC_JOB_RETURN
8908 **/
8909static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8910{
8911 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8912 u16 cmd_reg;
8913 int rc;
8914
8915 ENTER;
8916 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8917
8918 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8919 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008920 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008921 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8922 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008923 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008924 }
8925
8926 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8927 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8928
8929 LEAVE;
8930 return IPR_RC_JOB_RETURN;
8931}
8932
8933/**
Brian King4fdd7c72015-03-26 11:23:50 -05008934 * ipr_reset_quiesce_done - Complete IOA disconnect
8935 * @ipr_cmd: ipr command struct
8936 *
8937 * Description: Freeze the adapter to complete quiesce processing
8938 *
8939 * Return value:
8940 * IPR_RC_JOB_CONTINUE
8941 **/
8942static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8943{
8944 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8945
8946 ENTER;
8947 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8948 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8949 LEAVE;
8950 return IPR_RC_JOB_CONTINUE;
8951}
8952
8953/**
8954 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8955 * @ipr_cmd: ipr command struct
8956 *
8957 * Description: Ensure nothing is outstanding to the IOA and
8958 * proceed with IOA disconnect. Otherwise reset the IOA.
8959 *
8960 * Return value:
8961 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8962 **/
8963static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8964{
8965 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8966 struct ipr_cmnd *loop_cmd;
8967 struct ipr_hrr_queue *hrrq;
8968 int rc = IPR_RC_JOB_CONTINUE;
8969 int count = 0;
8970
8971 ENTER;
8972 ipr_cmd->job_step = ipr_reset_quiesce_done;
8973
8974 for_each_hrrq(hrrq, ioa_cfg) {
8975 spin_lock(&hrrq->_lock);
8976 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8977 count++;
8978 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8979 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8980 rc = IPR_RC_JOB_RETURN;
8981 break;
8982 }
8983 spin_unlock(&hrrq->_lock);
8984
8985 if (count)
8986 break;
8987 }
8988
8989 LEAVE;
8990 return rc;
8991}
8992
8993/**
8994 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8995 * @ipr_cmd: ipr command struct
8996 *
 8997 * Description: Cancel any outstanding HCAMs to the IOA.
8998 *
8999 * Return value:
9000 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9001 **/
9002static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
9003{
9004 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9005 int rc = IPR_RC_JOB_CONTINUE;
9006 struct ipr_cmd_pkt *cmd_pkt;
9007 struct ipr_cmnd *hcam_cmd;
9008 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9009
9010 ENTER;
9011 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9012
9013 if (!hrrq->ioa_is_dead) {
9014 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9015 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9016 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9017 continue;
9018
9019 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9020 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9021 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9022 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9023 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9024 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
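				/* cdb[2-5] and cdb[10-13] together carry the 64-bit
				 * address of the HCAM's IOARCB being cancelled.
				 */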
9025 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9026 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9027 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9028 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9029 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9030 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9031 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9032 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9033
9034 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9035 IPR_CANCEL_TIMEOUT);
9036
9037 rc = IPR_RC_JOB_RETURN;
9038 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9039 break;
9040 }
9041 }
9042 } else
9043 ipr_cmd->job_step = ipr_reset_alert;
9044
9045 LEAVE;
9046 return rc;
9047}
9048
9049/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009050 * ipr_reset_ucode_download_done - Microcode download completion
9051 * @ipr_cmd: ipr command struct
9052 *
9053 * Description: This function unmaps the microcode download buffer.
9054 *
9055 * Return value:
9056 * IPR_RC_JOB_CONTINUE
9057 **/
9058static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9059{
9060 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9061 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9062
Anton Blanchardd73341b2014-10-30 17:27:08 -05009063 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009064 sglist->num_sg, DMA_TO_DEVICE);
9065
9066 ipr_cmd->job_step = ipr_reset_alert;
9067 return IPR_RC_JOB_CONTINUE;
9068}
9069
9070/**
9071 * ipr_reset_ucode_download - Download microcode to the adapter
9072 * @ipr_cmd: ipr command struct
9073 *
9074 * Description: This function checks to see if it there is microcode
9075 * to download to the adapter. If there is, a download is performed.
9076 *
9077 * Return value:
9078 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9079 **/
9080static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9081{
9082 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9083 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9084
9085 ENTER;
9086 ipr_cmd->job_step = ipr_reset_alert;
9087
9088 if (!sglist)
9089 return IPR_RC_JOB_CONTINUE;
9090
9091 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9092 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9093 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9094 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9095 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9096 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9097 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9098
Wayne Boyera32c0552010-02-19 13:23:36 -08009099 if (ioa_cfg->sis64)
9100 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9101 else
9102 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009103 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9104
9105 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9106 IPR_WRITE_BUFFER_TIMEOUT);
9107
9108 LEAVE;
9109 return IPR_RC_JOB_RETURN;
9110}
9111
9112/**
9113 * ipr_reset_shutdown_ioa - Shutdown the adapter
9114 * @ipr_cmd: ipr command struct
9115 *
9116 * Description: This function issues an adapter shutdown of the
9117 * specified type to the specified adapter as part of the
9118 * adapter reset job.
9119 *
9120 * Return value:
9121 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9122 **/
9123static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9124{
9125 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9126 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9127 unsigned long timeout;
9128 int rc = IPR_RC_JOB_CONTINUE;
9129
9130 ENTER;
Brian King4fdd7c72015-03-26 11:23:50 -05009131 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9132 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9133 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009134 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009135 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9136 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9137 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9138 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9139
Brian Kingac09c342007-04-26 16:00:16 -05009140 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9141 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009142 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9143 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05009144 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9145 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009146 else
Brian Kingac09c342007-04-26 16:00:16 -05009147 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009148
9149 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9150
9151 rc = IPR_RC_JOB_RETURN;
9152 ipr_cmd->job_step = ipr_reset_ucode_download;
9153 } else
9154 ipr_cmd->job_step = ipr_reset_alert;
9155
9156 LEAVE;
9157 return rc;
9158}
9159
9160/**
9161 * ipr_reset_ioa_job - Adapter reset job
9162 * @ipr_cmd: ipr command struct
9163 *
9164 * Description: This function is the job router for the adapter reset job.
9165 *
9166 * Return value:
9167 * none
9168 **/
9169static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9170{
9171 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009172 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9173
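	/* Walk the reset state machine: keep calling job steps while they
	 * return IPR_RC_JOB_CONTINUE; a step that must wait for an interrupt
	 * or timer returns IPR_RC_JOB_RETURN and the loop exits.
	 */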
9174 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07009175 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009176
9177 if (ioa_cfg->reset_cmd != ipr_cmd) {
9178 /*
9179 * We are doing nested adapter resets and this is
9180 * not the current reset job.
9181 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009182 list_add_tail(&ipr_cmd->queue,
9183 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009184 return;
9185 }
9186
9187 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009188 rc = ipr_cmd->job_step_failed(ipr_cmd);
9189 if (rc == IPR_RC_JOB_RETURN)
9190 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009191 }
9192
9193 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009194 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009195 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009196 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009197}
9198
9199/**
9200 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9201 * @ioa_cfg: ioa config struct
9202 * @job_step: first job step of reset job
9203 * @shutdown_type: shutdown type
9204 *
9205 * Description: This function will initiate the reset of the given adapter
9206 * starting at the selected job step.
9207 * If the caller needs to wait on the completion of the reset,
9208 * the caller must sleep on the reset_wait_q.
9209 *
9210 * Return value:
9211 * none
9212 **/
9213static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9214 int (*job_step) (struct ipr_cmnd *),
9215 enum ipr_shutdown_type shutdown_type)
9216{
9217 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009218 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009219
9220 ioa_cfg->in_reset_reload = 1;
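	/* Stop every HRRQ from accepting new commands before kicking off
	 * the reset job.
	 */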
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009221 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9222 spin_lock(&ioa_cfg->hrrq[i]._lock);
9223 ioa_cfg->hrrq[i].allow_cmds = 0;
9224 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9225 }
9226 wmb();
Brian Kingb0e17a92017-08-01 10:21:30 -05009227 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9228 ioa_cfg->scsi_unblock = 0;
9229 ioa_cfg->scsi_blocked = 1;
Brian Kingbfae7822013-01-30 23:45:08 -06009230 scsi_block_requests(ioa_cfg->host);
Brian Kingb0e17a92017-08-01 10:21:30 -05009231 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009232
9233 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9234 ioa_cfg->reset_cmd = ipr_cmd;
9235 ipr_cmd->job_step = job_step;
9236 ipr_cmd->u.shutdown_type = shutdown_type;
9237
9238 ipr_reset_ioa_job(ipr_cmd);
9239}
9240
9241/**
9242 * ipr_initiate_ioa_reset - Initiate an adapter reset
9243 * @ioa_cfg: ioa config struct
9244 * @shutdown_type: shutdown type
9245 *
9246 * Description: This function will initiate the reset of the given adapter.
9247 * If the caller needs to wait on the completion of the reset,
9248 * the caller must sleep on the reset_wait_q.
9249 *
9250 * Return value:
9251 * none
9252 **/
9253static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9254 enum ipr_shutdown_type shutdown_type)
9255{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009256 int i;
9257
9258 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009259 return;
9260
Brian King41e9a692011-09-21 08:51:11 -05009261 if (ioa_cfg->in_reset_reload) {
9262 if (ioa_cfg->sdt_state == GET_DUMP)
9263 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9264 else if (ioa_cfg->sdt_state == READ_DUMP)
9265 ioa_cfg->sdt_state = ABORT_DUMP;
9266 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009267
9268 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9269 dev_err(&ioa_cfg->pdev->dev,
9270 "IOA taken offline - error recovery failed\n");
9271
9272 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009273 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9274 spin_lock(&ioa_cfg->hrrq[i]._lock);
9275 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9276 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9277 }
9278 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009279
9280 if (ioa_cfg->in_ioa_bringdown) {
9281 ioa_cfg->reset_cmd = NULL;
9282 ioa_cfg->in_reset_reload = 0;
9283 ipr_fail_all_ops(ioa_cfg);
9284 wake_up_all(&ioa_cfg->reset_wait_q);
9285
Brian Kingbfae7822013-01-30 23:45:08 -06009286 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
Brian Kingb0e17a92017-08-01 10:21:30 -05009287 ioa_cfg->scsi_unblock = 1;
9288 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06009289 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009290 return;
9291 } else {
9292 ioa_cfg->in_ioa_bringdown = 1;
9293 shutdown_type = IPR_SHUTDOWN_NONE;
9294 }
9295 }
9296
9297 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9298 shutdown_type);
9299}
9300
9301/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009302 * ipr_reset_freeze - Hold off all I/O activity
9303 * @ipr_cmd: ipr command struct
9304 *
9305 * Description: If the PCI slot is frozen, hold off all I/O
9306 * activity; then, as soon as the slot is available again,
9307 * initiate an adapter reset.
9308 */
9309static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9310{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009311 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9312 int i;
9313
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009314 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009315 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9316 spin_lock(&ioa_cfg->hrrq[i]._lock);
9317 ioa_cfg->hrrq[i].allow_interrupts = 0;
9318 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9319 }
9320 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009321 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009322 ipr_cmd->done = ipr_reset_ioa_job;
9323 return IPR_RC_JOB_RETURN;
9324}
9325
9326/**
Brian King6270e592014-01-21 12:16:41 -06009327 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9328 * @pdev: PCI device struct
9329 *
9330 * Description: This routine is called to tell us that the MMIO
9331 * access to the IOA has been restored
9332 */
9333static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9334{
9335 unsigned long flags = 0;
9336 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9337
9338 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9339 if (!ioa_cfg->probe_done)
9340 pci_save_state(pdev);
9341 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9342 return PCI_ERS_RESULT_NEED_RESET;
9343}
9344
9345/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009346 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9347 * @pdev: PCI device struct
9348 *
9349 * Description: This routine is called to tell us that the PCI bus
9350 * is down. Can't do anything here, except put the device driver
9351 * into a holding pattern, waiting for the PCI bus to come back.
9352 */
9353static void ipr_pci_frozen(struct pci_dev *pdev)
9354{
9355 unsigned long flags = 0;
9356 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9357
9358 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009359 if (ioa_cfg->probe_done)
9360 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009361 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9362}
9363
9364/**
9365 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9366 * @pdev: PCI device struct
9367 *
9368 * Description: This routine is called by the pci error recovery
9369 * code after the PCI slot has been reset, just before we
9370 * should resume normal operations.
9371 */
9372static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9373{
9374 unsigned long flags = 0;
9375 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9376
9377 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009378 if (ioa_cfg->probe_done) {
9379 if (ioa_cfg->needs_warm_reset)
9380 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9381 else
9382 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9383 IPR_SHUTDOWN_NONE);
9384 } else
9385 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009386 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9387 return PCI_ERS_RESULT_RECOVERED;
9388}
9389
9390/**
9391 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9392 * @pdev: PCI device struct
9393 *
9394 * Description: This routine is called when the PCI bus has
9395 * permanently failed.
9396 */
9397static void ipr_pci_perm_failure(struct pci_dev *pdev)
9398{
9399 unsigned long flags = 0;
9400 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009401 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009402
9403 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009404 if (ioa_cfg->probe_done) {
9405 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9406 ioa_cfg->sdt_state = ABORT_DUMP;
9407 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9408 ioa_cfg->in_ioa_bringdown = 1;
9409 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9410 spin_lock(&ioa_cfg->hrrq[i]._lock);
9411 ioa_cfg->hrrq[i].allow_cmds = 0;
9412 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9413 }
9414 wmb();
9415 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9416 } else
9417 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009418 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9419}
9420
9421/**
9422 * ipr_pci_error_detected - Called when a PCI error is detected.
9423 * @pdev: PCI device struct
9424 * @state: PCI channel state
9425 *
9426 * Description: Called when a PCI error is detected.
9427 *
9428 * Return value:
9429 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9430 */
9431static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9432 pci_channel_state_t state)
9433{
9434 switch (state) {
9435 case pci_channel_io_frozen:
9436 ipr_pci_frozen(pdev);
Brian King6270e592014-01-21 12:16:41 -06009437 return PCI_ERS_RESULT_CAN_RECOVER;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009438 case pci_channel_io_perm_failure:
9439 ipr_pci_perm_failure(pdev);
9440 return PCI_ERS_RESULT_DISCONNECT;
9442 default:
9443 break;
9444 }
9445 return PCI_ERS_RESULT_NEED_RESET;
9446}
9447
9448/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009449 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9450 * @ioa_cfg: ioa cfg struct
9451 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08009452 * Description: This is the second phase of adapter initialization.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009453 * This function takes care of initializing the adapter to the point
9454 * where it can accept new commands.
 9455 *
9456 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02009457 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009458 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009459static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009460{
9461 int rc = 0;
9462 unsigned long host_lock_flags = 0;
9463
9464 ENTER;
9465 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9466 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
Brian King6270e592014-01-21 12:16:41 -06009467 ioa_cfg->probe_done = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009468 if (ioa_cfg->needs_hard_reset) {
9469 ioa_cfg->needs_hard_reset = 0;
9470 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9471 } else
9472 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9473 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009474 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009475
9476 LEAVE;
9477 return rc;
9478}
9479
9480/**
9481 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9482 * @ioa_cfg: ioa config struct
9483 *
9484 * Return value:
9485 * none
9486 **/
9487static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9488{
9489 int i;
9490
Brian Kinga65e8f12015-03-26 11:23:55 -05009491 if (ioa_cfg->ipr_cmnd_list) {
9492 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9493 if (ioa_cfg->ipr_cmnd_list[i])
9494 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9495 ioa_cfg->ipr_cmnd_list[i],
9496 ioa_cfg->ipr_cmnd_list_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009497
Brian Kinga65e8f12015-03-26 11:23:55 -05009498 ioa_cfg->ipr_cmnd_list[i] = NULL;
9499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009500 }
9501
9502 if (ioa_cfg->ipr_cmd_pool)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009503 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009504
Brian King89aad422012-03-14 21:20:10 -05009505 kfree(ioa_cfg->ipr_cmnd_list);
9506 kfree(ioa_cfg->ipr_cmnd_list_dma);
9507 ioa_cfg->ipr_cmnd_list = NULL;
9508 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009509 ioa_cfg->ipr_cmd_pool = NULL;
9510}
9511
9512/**
9513 * ipr_free_mem - Frees memory allocated for an adapter
9514 * @ioa_cfg: ioa cfg struct
9515 *
9516 * Return value:
9517 * nothing
9518 **/
9519static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9520{
9521 int i;
9522
9523 kfree(ioa_cfg->res_entries);
Anton Blanchardd73341b2014-10-30 17:27:08 -05009524 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9525 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009526 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009527
9528 for (i = 0; i < ioa_cfg->hrrq_num; i++)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009529 dma_free_coherent(&ioa_cfg->pdev->dev,
9530 sizeof(u32) * ioa_cfg->hrrq[i].size,
9531 ioa_cfg->hrrq[i].host_rrq,
9532 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009533
Anton Blanchardd73341b2014-10-30 17:27:08 -05009534 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9535 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009536
Brian Kingafc3f832016-08-24 12:56:51 -05009537 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009538 dma_free_coherent(&ioa_cfg->pdev->dev,
9539 sizeof(struct ipr_hostrcb),
9540 ioa_cfg->hostrcb[i],
9541 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009542 }
9543
9544 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009545 kfree(ioa_cfg->trace);
9546}
9547
9548/**
Brian King2796ca52015-03-26 11:23:52 -05009549 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9550 * @ioa_cfg: ipr cfg struct
9551 *
9552 * This function frees all allocated IRQs for the
9553 * specified adapter.
9554 *
9555 * Return value:
9556 * none
9557 **/
9558static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9559{
9560 struct pci_dev *pdev = ioa_cfg->pdev;
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009561 int i;
Brian King2796ca52015-03-26 11:23:52 -05009562
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009563 for (i = 0; i < ioa_cfg->nvectors; i++)
9564 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9565 pci_free_irq_vectors(pdev);
Brian King2796ca52015-03-26 11:23:52 -05009566}
9567
9568/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009569 * ipr_free_all_resources - Free all allocated resources for an adapter.
 9570 * @ioa_cfg: ioa config struct
9571 *
9572 * This function frees all allocated resources for the
9573 * specified adapter.
9574 *
9575 * Return value:
9576 * none
9577 **/
9578static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9579{
9580 struct pci_dev *pdev = ioa_cfg->pdev;
9581
9582 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05009583 ipr_free_irqs(ioa_cfg);
9584 if (ioa_cfg->reset_work_q)
9585 destroy_workqueue(ioa_cfg->reset_work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009586 iounmap(ioa_cfg->hdw_dma_regs);
9587 pci_release_regions(pdev);
9588 ipr_free_mem(ioa_cfg);
9589 scsi_host_put(ioa_cfg->host);
9590 pci_disable_device(pdev);
9591 LEAVE;
9592}
9593
9594/**
9595 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9596 * @ioa_cfg: ioa config struct
9597 *
9598 * Return value:
9599 * 0 on success / -ENOMEM on allocation failure
9600 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009601static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009602{
9603 struct ipr_cmnd *ipr_cmd;
9604 struct ipr_ioarcb *ioarcb;
9605 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009606 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009607
Anton Blanchardd73341b2014-10-30 17:27:08 -05009608 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009609 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009610
9611 if (!ioa_cfg->ipr_cmd_pool)
9612 return -ENOMEM;
9613
Brian King89aad422012-03-14 21:20:10 -05009614 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9615 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9616
9617 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9618 ipr_free_cmd_blks(ioa_cfg);
9619 return -ENOMEM;
9620 }
9621
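	/*
	 * Partition the command blocks among the HRRQs.  With multiple HRRQs,
	 * queue 0 is reserved for the driver's internal commands
	 * (IPR_NUM_INTERNAL_CMD_BLKS) and the base command blocks are split
	 * evenly across the remaining queues, each owning a contiguous
	 * [min_cmd_id, max_cmd_id] range.  With a single HRRQ it simply owns
	 * all IPR_NUM_CMD_BLKS entries.
	 */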
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009622 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9623 if (ioa_cfg->hrrq_num > 1) {
9624 if (i == 0) {
9625 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9626 ioa_cfg->hrrq[i].min_cmd_id = 0;
Colin Ian Kingb82378e2017-12-01 13:33:27 +00009627 ioa_cfg->hrrq[i].max_cmd_id =
9628 (entries_each_hrrq - 1);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009629 } else {
9630 entries_each_hrrq =
9631 IPR_NUM_BASE_CMD_BLKS/
9632 (ioa_cfg->hrrq_num - 1);
9633 ioa_cfg->hrrq[i].min_cmd_id =
9634 IPR_NUM_INTERNAL_CMD_BLKS +
9635 (i - 1) * entries_each_hrrq;
9636 ioa_cfg->hrrq[i].max_cmd_id =
9637 (IPR_NUM_INTERNAL_CMD_BLKS +
9638 i * entries_each_hrrq - 1);
9639 }
9640 } else {
9641 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9642 ioa_cfg->hrrq[i].min_cmd_id = 0;
9643 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9644 }
9645 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9646 }
9647
9648 BUG_ON(ioa_cfg->hrrq_num == 0);
9649
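	/*
	 * The integer division above can leave a few command blocks
	 * unassigned; fold any remainder into the last HRRQ so that all
	 * IPR_NUM_CMD_BLKS entries are usable.
	 */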
9650 i = IPR_NUM_CMD_BLKS -
9651 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9652 if (i > 0) {
9653 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9654 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9655 }
9656
Linus Torvalds1da177e2005-04-16 15:20:36 -07009657 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Souptick Joarder8b1bb6d2018-03-08 18:41:57 +05309658 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9659 GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009660
9661 if (!ipr_cmd) {
9662 ipr_free_cmd_blks(ioa_cfg);
9663 return -ENOMEM;
9664 }
9665
Linus Torvalds1da177e2005-04-16 15:20:36 -07009666 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9667 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9668
9669 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08009670 ipr_cmd->dma_addr = dma_addr;
9671 if (ioa_cfg->sis64)
9672 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9673 else
9674 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9675
Linus Torvalds1da177e2005-04-16 15:20:36 -07009676 ioarcb->host_response_handle = cpu_to_be32(i << 2);
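		/*
		 * Point each IOARCB at the IOADL and IOASA embedded in the
		 * same ipr_cmnd, using offsets from the block's DMA address;
		 * SIS-64 adapters use the 64-bit address fields, older
		 * adapters the 32-bit ones.
		 */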
Wayne Boyera32c0552010-02-19 13:23:36 -08009677 if (ioa_cfg->sis64) {
9678 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9679 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9680 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009681 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009682 } else {
9683 ioarcb->write_ioadl_addr =
9684 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9685 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9686 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009687 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009688 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009689 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9690 ipr_cmd->cmd_index = i;
9691 ipr_cmd->ioa_cfg = ioa_cfg;
9692 ipr_cmd->sense_buffer_dma = dma_addr +
9693 offsetof(struct ipr_cmnd, sense_buffer);
9694
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009695 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9696 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9697 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9698 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9699 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009700 }
9701
9702 return 0;
9703}
9704
9705/**
9706 * ipr_alloc_mem - Allocate memory for an adapter
9707 * @ioa_cfg: ioa config struct
9708 *
9709 * Return value:
9710 * 0 on success / non-zero for error
9711 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009712static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009713{
9714 struct pci_dev *pdev = ioa_cfg->pdev;
9715 int i, rc = -ENOMEM;
9716
9717 ENTER;
Kees Cook6396bb22018-06-12 14:03:40 -07009718 ioa_cfg->res_entries = kcalloc(ioa_cfg->max_devs_supported,
9719 sizeof(struct ipr_resource_entry),
9720 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009721
9722 if (!ioa_cfg->res_entries)
9723 goto out;
9724
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009725 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009726 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009727 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9728 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009729
Anton Blanchardd73341b2014-10-30 17:27:08 -05009730 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9731 sizeof(struct ipr_misc_cbs),
9732 &ioa_cfg->vpd_cbs_dma,
9733 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009734
9735 if (!ioa_cfg->vpd_cbs)
9736 goto out_free_res_entries;
9737
9738 if (ipr_alloc_cmd_blks(ioa_cfg))
9739 goto out_free_vpd_cbs;
9740
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009741 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009742 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009743 sizeof(u32) * ioa_cfg->hrrq[i].size,
Anton Blanchardd73341b2014-10-30 17:27:08 -05009744 &ioa_cfg->hrrq[i].host_rrq_dma,
9745 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009746
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009747 if (!ioa_cfg->hrrq[i].host_rrq) {
9748 while (--i >= 0)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009749 dma_free_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009750 sizeof(u32) * ioa_cfg->hrrq[i].size,
9751 ioa_cfg->hrrq[i].host_rrq,
9752 ioa_cfg->hrrq[i].host_rrq_dma);
9753 goto out_ipr_free_cmd_blocks;
9754 }
9755 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9756 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009757
Anton Blanchardd73341b2014-10-30 17:27:08 -05009758 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9759 ioa_cfg->cfg_table_size,
9760 &ioa_cfg->cfg_table_dma,
9761 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009762
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009763 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009764 goto out_free_host_rrq;
9765
Brian Kingafc3f832016-08-24 12:56:51 -05009766 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009767 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9768 sizeof(struct ipr_hostrcb),
9769 &ioa_cfg->hostrcb_dma[i],
9770 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009771
9772 if (!ioa_cfg->hostrcb[i])
9773 goto out_free_hostrcb_dma;
9774
9775 ioa_cfg->hostrcb[i]->hostrcb_dma =
9776 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009777 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009778 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9779 }
9780
Kees Cook6396bb22018-06-12 14:03:40 -07009781 ioa_cfg->trace = kcalloc(IPR_NUM_TRACE_ENTRIES,
9782 sizeof(struct ipr_trace_entry),
9783 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009784
9785 if (!ioa_cfg->trace)
9786 goto out_free_hostrcb_dma;
9787
Linus Torvalds1da177e2005-04-16 15:20:36 -07009788 rc = 0;
9789out:
9790 LEAVE;
9791 return rc;
9792
9793out_free_hostrcb_dma:
9794 while (i-- > 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009795 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9796 ioa_cfg->hostrcb[i],
9797 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009798 }
Anton Blanchardd73341b2014-10-30 17:27:08 -05009799 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9800 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009801out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009802 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009803 dma_free_coherent(&pdev->dev,
9804 sizeof(u32) * ioa_cfg->hrrq[i].size,
9805 ioa_cfg->hrrq[i].host_rrq,
9806 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009807 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009808out_ipr_free_cmd_blocks:
9809 ipr_free_cmd_blks(ioa_cfg);
9810out_free_vpd_cbs:
Anton Blanchardd73341b2014-10-30 17:27:08 -05009811 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9812 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009813out_free_res_entries:
9814 kfree(ioa_cfg->res_entries);
9815 goto out;
9816}
9817
9818/**
9819 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9820 * @ioa_cfg: ioa config struct
9821 *
9822 * Return value:
9823 * none
9824 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009825static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009826{
9827 int i;
9828
9829 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9830 ioa_cfg->bus_attr[i].bus = i;
9831 ioa_cfg->bus_attr[i].qas_enabled = 0;
9832 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9833 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9834 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9835 else
9836 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9837 }
9838}
9839
9840/**
Brian King6270e592014-01-21 12:16:41 -06009841 * ipr_init_regs - Initialize IOA registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07009842 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009843 *
9844 * Return value:
Brian King6270e592014-01-21 12:16:41 -06009845 * none
Linus Torvalds1da177e2005-04-16 15:20:36 -07009846 **/
Brian King6270e592014-01-21 12:16:41 -06009847static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009848{
9849 const struct ipr_interrupt_offsets *p;
9850 struct ipr_interrupts *t;
9851 void __iomem *base;
9852
Linus Torvalds1da177e2005-04-16 15:20:36 -07009853 p = &ioa_cfg->chip_cfg->regs;
9854 t = &ioa_cfg->regs;
9855 base = ioa_cfg->hdw_dma_regs;
9856
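	/*
	 * Translate the chip-specific register offsets into addresses within
	 * the ioremapped register BAR so the rest of the driver can use
	 * ioa_cfg->regs directly.
	 */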
9857 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9858 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009859 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009860 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009861 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009862 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009863 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009864 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009865 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009866 t->ioarrin_reg = base + p->ioarrin_reg;
9867 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009868 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009869 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009870 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009871 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009872 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009873
9874 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009875 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009876 t->dump_addr_reg = base + p->dump_addr_reg;
9877 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009878 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009880}
9881
9882/**
Brian King6270e592014-01-21 12:16:41 -06009883 * ipr_init_ioa_cfg - Initialize IOA config struct
9884 * @ioa_cfg: ioa config struct
9885 * @host: scsi host struct
9886 * @pdev: PCI dev struct
9887 *
9888 * Return value:
9889 * none
9890 **/
9891static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9892 struct Scsi_Host *host, struct pci_dev *pdev)
9893{
9894 int i;
9895
9896 ioa_cfg->host = host;
9897 ioa_cfg->pdev = pdev;
9898 ioa_cfg->log_level = ipr_log_level;
9899 ioa_cfg->doorbell = IPR_DOORBELL;
9900 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9901 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9902 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9903 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9904 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9905 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9906
9907 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9908 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
Brian Kingafc3f832016-08-24 12:56:51 -05009909 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
Brian King6270e592014-01-21 12:16:41 -06009910 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9911 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9912 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9913 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9914 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9915 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9916 ioa_cfg->sdt_state = INACTIVE;
9917
9918 ipr_initialize_bus_attr(ioa_cfg);
9919 ioa_cfg->max_devs_supported = ipr_max_devs;
9920
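	/*
	 * The config table is sized for one header plus one entry per
	 * supported device; SIS-64 adapters use the larger 64-bit header and
	 * entry formats and support more devices and LUNs per target.
	 */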
9921 if (ioa_cfg->sis64) {
9922 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9923 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9924 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9925 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9926 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9927 + ((sizeof(struct ipr_config_table_entry64)
9928 * ioa_cfg->max_devs_supported)));
9929 } else {
9930 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9931 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9932 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9933 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9934 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9935 + ((sizeof(struct ipr_config_table_entry)
9936 * ioa_cfg->max_devs_supported)));
9937 }
9938
Brian Kingf688f962014-12-02 12:47:37 -06009939 host->max_channel = IPR_VSET_BUS;
Brian King6270e592014-01-21 12:16:41 -06009940 host->unique_id = host->host_no;
9941 host->max_cmd_len = IPR_MAX_CDB_LEN;
9942 host->can_queue = ioa_cfg->max_cmds;
9943 pci_set_drvdata(pdev, ioa_cfg);
9944
9945 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9946 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9947 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9948 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9949 if (i == 0)
9950 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9951 else
9952 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9953 }
9954}
9955
9956/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009957 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009958 * @dev_id: PCI device id struct
9959 *
9960 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009961 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009962 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009963static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009964ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009965{
9966 int i;
9967
Linus Torvalds1da177e2005-04-16 15:20:36 -07009968 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9969 if (ipr_chip[i].vendor == dev_id->vendor &&
9970 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009971 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009972 return NULL;
9973}
9974
Brian King6270e592014-01-21 12:16:41 -06009975/**
9976 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9977 * during probe time
9978 * @ioa_cfg: ioa config struct
9979 *
9980 * Return value:
9981 * None
9982 **/
9983static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9984{
9985 struct pci_dev *pdev = ioa_cfg->pdev;
9986
9987 if (pci_channel_offline(pdev)) {
9988 wait_event_timeout(ioa_cfg->eeh_wait_q,
9989 !pci_channel_offline(pdev),
9990 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9991 pci_restore_state(pdev);
9992 }
9993}
9994
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009995static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9996{
9997 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9998
9999 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
10000 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
10001 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
10002 ioa_cfg->vectors_info[vec_idx].
10003 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
10004 }
10005}
10006
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010007static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10008 struct pci_dev *pdev)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010009{
10010 int i, rc;
10011
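	/*
	 * Vector 0 is requested separately (with ipr_isr) by ipr_probe_ioa();
	 * request the remaining MSI/MSI-X vectors here, one per additional
	 * HRRQ, and unwind the already-requested vectors if any request fails.
	 */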
10012 for (i = 1; i < ioa_cfg->nvectors; i++) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010013 rc = request_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010014 ipr_isr_mhrrq,
10015 0,
10016 ioa_cfg->vectors_info[i].desc,
10017 &ioa_cfg->hrrq[i]);
10018 if (rc) {
10019 while (--i >= 0)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010020 free_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010021 &ioa_cfg->hrrq[i]);
10022 return rc;
10023 }
10024 }
10025 return 0;
10026}
10027
Linus Torvalds1da177e2005-04-16 15:20:36 -070010028/**
Wayne Boyer95fecd92009-06-16 15:13:28 -070010029 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10030 * @irq: interrupt number
 * @devp: pointer to the ioa config struct (passed as void *)
10031 *
10032 * Description: Simply set the msi_received flag to 1 indicating that
10033 * Message Signaled Interrupts are supported.
10034 *
10035 * Return value:
10036 * IRQ_HANDLED
10037 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010038static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010039{
10040 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10041 unsigned long lock_flags = 0;
10042 irqreturn_t rc = IRQ_HANDLED;
10043
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010044 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010045 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10046
10047 ioa_cfg->msi_received = 1;
10048 wake_up(&ioa_cfg->msi_wait_q);
10049
10050 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10051 return rc;
10052}
10053
10054/**
10055 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10056 * @pdev: PCI device struct
10057 *
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010058 * Description: This routine sets up and initiates a test interrupt to determine
Wayne Boyer95fecd92009-06-16 15:13:28 -070010059 * if the interrupt is received via the ipr_test_intr() service routine.
10060 * If the test fails, the driver will fall back to LSI.
10061 *
10062 * Return value:
10063 * 0 on success / non-zero on failure
10064 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010065static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010066{
10067 int rc;
10068 volatile u32 int_reg;
10069 unsigned long lock_flags = 0;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010070 int irq = pci_irq_vector(pdev, 0);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010071
10072 ENTER;
10073
10074 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10075 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10076 ioa_cfg->msi_received = 0;
10077 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -080010078 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010079 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10080 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10081
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010082 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010083 if (rc) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010084 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010085 return rc;
10086 } else if (ipr_debug)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010087 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010088
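	/*
	 * Generate a test interrupt by writing the IO debug acknowledge bit,
	 * then wait up to one second for ipr_test_intr() to set msi_received.
	 * If the interrupt never arrives, MSI delivery is treated as broken,
	 * -EOPNOTSUPP is returned, and the probe path falls back to a single
	 * legacy interrupt.
	 */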
Wayne Boyer214777b2010-02-19 13:24:26 -080010089 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010090 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10091 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010092 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010093 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10094
Wayne Boyer95fecd92009-06-16 15:13:28 -070010095 if (!ioa_cfg->msi_received) {
10096 /* MSI test failed */
10097 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10098 rc = -EOPNOTSUPP;
10099 } else if (ipr_debug)
10100 dev_info(&pdev->dev, "MSI test succeeded.\n");
10101
10102 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10103
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010104 free_irq(irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010105
10106 LEAVE;
10107
10108 return rc;
10109}
10110
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010111/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -070010112 * @pdev: PCI device struct
10113 * @dev_id: PCI device id struct
10114 *
10115 * Return value:
10116 * 0 on success / non-zero on failure
10117 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010118static int ipr_probe_ioa(struct pci_dev *pdev,
10119 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010120{
10121 struct ipr_ioa_cfg *ioa_cfg;
10122 struct Scsi_Host *host;
10123 unsigned long ipr_regs_pci;
10124 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -070010125 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -050010126 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010127 unsigned long lock_flags, driver_lock_flags;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010128 unsigned int irq_flag;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010129
10130 ENTER;
10131
Linus Torvalds1da177e2005-04-16 15:20:36 -070010132 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010133 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10134
10135 if (!host) {
10136 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10137 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010138 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010139 }
10140
10141 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10142 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d12012-07-09 21:06:08 -070010143 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010144
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010145 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010146
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010147 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010148 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10149 dev_id->vendor, dev_id->device);
10150 goto out_scsi_host_put;
10151 }
10152
Wayne Boyera32c0552010-02-19 13:23:36 -080010153 /* set SIS 32 or SIS 64 */
10154 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010155 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -050010156 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -050010157 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010158
Brian King5469cb52007-03-29 12:42:40 -050010159 if (ipr_transop_timeout)
10160 ioa_cfg->transop_timeout = ipr_transop_timeout;
10161 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10162 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10163 else
10164 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10165
Auke Kok44c10132007-06-08 15:46:36 -070010166 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -050010167
Brian King6270e592014-01-21 12:16:41 -060010168 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10169
Linus Torvalds1da177e2005-04-16 15:20:36 -070010170 ipr_regs_pci = pci_resource_start(pdev, 0);
10171
10172 rc = pci_request_regions(pdev, IPR_NAME);
10173 if (rc < 0) {
10174 dev_err(&pdev->dev,
10175 "Couldn't register memory range of registers\n");
10176 goto out_scsi_host_put;
10177 }
10178
Brian King6270e592014-01-21 12:16:41 -060010179 rc = pci_enable_device(pdev);
10180
10181 if (rc || pci_channel_offline(pdev)) {
10182 if (pci_channel_offline(pdev)) {
10183 ipr_wait_for_pci_err_recovery(ioa_cfg);
10184 rc = pci_enable_device(pdev);
10185 }
10186
10187 if (rc) {
10188 dev_err(&pdev->dev, "Cannot enable adapter\n");
10189 ipr_wait_for_pci_err_recovery(ioa_cfg);
10190 goto out_release_regions;
10191 }
10192 }
10193
Arjan van de Ven25729a72008-09-28 16:18:02 -070010194 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010195
10196 if (!ipr_regs) {
10197 dev_err(&pdev->dev,
10198 "Couldn't map memory range of registers\n");
10199 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010200 goto out_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010201 }
10202
10203 ioa_cfg->hdw_dma_regs = ipr_regs;
10204 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10205 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10206
Brian King6270e592014-01-21 12:16:41 -060010207 ipr_init_regs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010208
Wayne Boyera32c0552010-02-19 13:23:36 -080010209 if (ioa_cfg->sis64) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010210 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Wayne Boyera32c0552010-02-19 13:23:36 -080010211 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010212 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10213 rc = dma_set_mask_and_coherent(&pdev->dev,
10214 DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010215 }
Wayne Boyera32c0552010-02-19 13:23:36 -080010216 } else
Anton Blanchard869404c2014-10-30 17:27:09 -050010217 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010218
Linus Torvalds1da177e2005-04-16 15:20:36 -070010219 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010220 dev_err(&pdev->dev, "Failed to set DMA mask\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070010221 goto cleanup_nomem;
10222 }
10223
10224 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10225 ioa_cfg->chip_cfg->cache_line_size);
10226
10227 if (rc != PCIBIOS_SUCCESSFUL) {
10228 dev_err(&pdev->dev, "Write of cache line size failed\n");
Brian King6270e592014-01-21 12:16:41 -060010229 ipr_wait_for_pci_err_recovery(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010230 rc = -EIO;
10231 goto cleanup_nomem;
10232 }
10233
Brian King6270e592014-01-21 12:16:41 -060010234 /* Issue MMIO read to ensure card is not in EEH */
10235 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10236 ipr_wait_for_pci_err_recovery(ioa_cfg);
10237
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010238 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10239 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10240 IPR_MAX_MSIX_VECTORS);
10241 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10242 }
10243
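	/*
	 * Request up to ipr_number_of_msix interrupt vectors, allowing MSI-X
	 * or MSI only on chips that advertise support and always permitting a
	 * single legacy INTx vector as the fallback.  The count actually
	 * granted caps how many HRRQs are used below.
	 */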
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010244 irq_flag = PCI_IRQ_LEGACY;
10245 if (ioa_cfg->ipr_chip->has_msi)
10246 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10247 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10248 if (rc < 0) {
10249 ipr_wait_for_pci_err_recovery(ioa_cfg);
10250 goto cleanup_nomem;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010251 }
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010252 ioa_cfg->nvectors = rc;
10253
10254 if (!pdev->msi_enabled && !pdev->msix_enabled)
10255 ioa_cfg->clear_isr = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010256
Brian King6270e592014-01-21 12:16:41 -060010257 pci_set_master(pdev);
10258
10259 if (pci_channel_offline(pdev)) {
10260 ipr_wait_for_pci_err_recovery(ioa_cfg);
10261 pci_set_master(pdev);
10262 if (pci_channel_offline(pdev)) {
10263 rc = -EIO;
10264 goto out_msi_disable;
10265 }
10266 }
10267
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010268 if (pdev->msi_enabled || pdev->msix_enabled) {
Wayne Boyer95fecd92009-06-16 15:13:28 -070010269 rc = ipr_test_msi(ioa_cfg, pdev);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010270 switch (rc) {
10271 case 0:
10272 dev_info(&pdev->dev,
10273 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10274 pdev->msix_enabled ? "-X" : "");
10275 break;
10276 case -EOPNOTSUPP:
Brian King6270e592014-01-21 12:16:41 -060010277 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010278 pci_free_irq_vectors(pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010279
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010280 ioa_cfg->nvectors = 1;
Benjamin Herrenschmidt9dadfb92016-11-30 15:28:55 -060010281 ioa_cfg->clear_isr = 1;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010282 break;
10283 default:
Wayne Boyer95fecd92009-06-16 15:13:28 -070010284 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010285 }
10286 }
10287
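	/* Use one HRRQ per granted vector, bounded by the number of online
	 * CPUs and the driver maximum. */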
10288 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10289 (unsigned int)num_online_cpus(),
10290 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010291
Linus Torvalds1da177e2005-04-16 15:20:36 -070010292 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010293 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010294
10295 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010296 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010297
10298 rc = ipr_alloc_mem(ioa_cfg);
10299 if (rc < 0) {
10300 dev_err(&pdev->dev,
10301 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -070010302 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010303 }
10304
Brian King6270e592014-01-21 12:16:41 -060010305 /* Save away PCI config space for use following IOA reset */
10306 rc = pci_save_state(pdev);
10307
10308 if (rc != PCIBIOS_SUCCESSFUL) {
10309 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10310 rc = -EIO;
10311 goto cleanup_nolog;
10312 }
10313
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010314 /*
10315 * If HRRQ updated interrupt is not masked, or reset alert is set,
10316 * the card is in an unknown state and needs a hard reset
10317 */
Wayne Boyer214777b2010-02-19 13:24:26 -080010318 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10319 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10320 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010321 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10322 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +100010323 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -050010324 ioa_cfg->needs_hard_reset = 1;
10325 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10326 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010327
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010328 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010329 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010330 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010331
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010332 if (pdev->msi_enabled || pdev->msix_enabled) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010333 name_msi_vectors(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010334 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010335 ioa_cfg->vectors_info[0].desc,
10336 &ioa_cfg->hrrq[0]);
10337 if (!rc)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010338 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010339 } else {
10340 rc = request_irq(pdev->irq, ipr_isr,
10341 IRQF_SHARED,
10342 IPR_NAME, &ioa_cfg->hrrq[0]);
10343 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010344 if (rc) {
10345 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10346 pdev->irq, rc);
10347 goto cleanup_nolog;
10348 }
10349
Brian King463fc692007-05-07 17:09:05 -050010350 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10351 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10352 ioa_cfg->needs_warm_reset = 1;
10353 ioa_cfg->reset = ipr_reset_slot_reset;
Brian King2796ca52015-03-26 11:23:52 -050010354
10355 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10356 WQ_MEM_RECLAIM, host->host_no);
10357
10358 if (!ioa_cfg->reset_work_q) {
10359 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
Wei Yongjunc8e18ac2016-07-29 16:00:45 +000010360 rc = -ENOMEM;
Brian King2796ca52015-03-26 11:23:52 -050010361 goto out_free_irq;
10362 }
Brian King463fc692007-05-07 17:09:05 -050010363 } else
10364 ioa_cfg->reset = ipr_reset_start_bist;
10365
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010366 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010367 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010368 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010369
10370 LEAVE;
10371out:
10372 return rc;
10373
Brian King2796ca52015-03-26 11:23:52 -050010374out_free_irq:
10375 ipr_free_irqs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010376cleanup_nolog:
10377 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010378out_msi_disable:
Brian King6270e592014-01-21 12:16:41 -060010379 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010380 pci_free_irq_vectors(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -070010381cleanup_nomem:
10382 iounmap(ipr_regs);
Brian King6270e592014-01-21 12:16:41 -060010383out_disable:
10384 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010385out_release_regions:
10386 pci_release_regions(pdev);
10387out_scsi_host_put:
10388 scsi_host_put(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010389 goto out;
10390}
10391
10392/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010393 * ipr_initiate_ioa_bringdown - Bring down an adapter
10394 * @ioa_cfg: ioa config struct
10395 * @shutdown_type: shutdown type
10396 *
10397 * Description: This function will initiate bringing down the adapter.
10398 * This consists of issuing an IOA shutdown to the adapter
10399 * to flush the cache, and running BIST.
10400 * If the caller needs to wait on the completion of the reset,
10401 * the caller must sleep on the reset_wait_q.
10402 *
10403 * Return value:
10404 * none
10405 **/
10406static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10407 enum ipr_shutdown_type shutdown_type)
10408{
10409 ENTER;
10410 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10411 ioa_cfg->sdt_state = ABORT_DUMP;
10412 ioa_cfg->reset_retries = 0;
10413 ioa_cfg->in_ioa_bringdown = 1;
10414 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10415 LEAVE;
10416}
10417
10418/**
10419 * __ipr_remove - Remove a single adapter
10420 * @pdev: pci device struct
10421 *
10422 * Adapter hot plug remove entry point.
10423 *
10424 * Return value:
10425 * none
10426 **/
10427static void __ipr_remove(struct pci_dev *pdev)
10428{
10429 unsigned long host_lock_flags = 0;
10430 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Brian Kingbfae7822013-01-30 23:45:08 -060010431 int i;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010432 unsigned long driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010433 ENTER;
10434
10435 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010436 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010437 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10438 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10439 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10440 }
10441
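	/*
	 * Mark every HRRQ as being removed before initiating the bringdown so
	 * no new commands are accepted; the wmb() orders those stores ahead
	 * of the shutdown that follows.
	 */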
Brian Kingbfae7822013-01-30 23:45:08 -060010442 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10443 spin_lock(&ioa_cfg->hrrq[i]._lock);
10444 ioa_cfg->hrrq[i].removing_ioa = 1;
10445 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10446 }
10447 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -070010448 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10449
10450 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10451 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -070010452 flush_work(&ioa_cfg->work_q);
Brian King2796ca52015-03-26 11:23:52 -050010453 if (ioa_cfg->reset_work_q)
10454 flush_workqueue(ioa_cfg->reset_work_q);
wenxiong@linux.vnet.ibm.com9077a942013-03-14 13:52:24 -050010455 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010456 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10457
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010458 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010459 list_del(&ioa_cfg->queue);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010460 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010461
10462 if (ioa_cfg->sdt_state == ABORT_DUMP)
10463 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10464 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10465
10466 ipr_free_all_resources(ioa_cfg);
10467
10468 LEAVE;
10469}
10470
10471/**
10472 * ipr_remove - IOA hot plug remove entry point
10473 * @pdev: pci device struct
10474 *
10475 * Adapter hot plug remove entry point.
10476 *
10477 * Return value:
10478 * none
10479 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010480static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010481{
10482 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10483
10484 ENTER;
10485
Tony Jonesee959b02008-02-22 00:13:36 +010010486 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010487 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +010010488 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010489 &ipr_dump_attr);
Brian Kingafc3f832016-08-24 12:56:51 -050010490 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10491 &ipr_ioa_async_err_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010492 scsi_remove_host(ioa_cfg->host);
10493
10494 __ipr_remove(pdev);
10495
10496 LEAVE;
10497}
10498
10499/**
10500 * ipr_probe - Adapter hot plug add entry point
10501 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
 *
10502 * Return value:
10503 * 0 on success / non-zero on failure
10504 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010505static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010506{
10507 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingb195d5e2016-07-15 14:48:03 -050010508 unsigned long flags;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010509 int rc, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010510
10511 rc = ipr_probe_ioa(pdev, dev_id);
10512
10513 if (rc)
10514 return rc;
10515
10516 ioa_cfg = pci_get_drvdata(pdev);
10517 rc = ipr_probe_ioa_part2(ioa_cfg);
10518
10519 if (rc) {
10520 __ipr_remove(pdev);
10521 return rc;
10522 }
10523
10524 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10525
10526 if (rc) {
10527 __ipr_remove(pdev);
10528 return rc;
10529 }
10530
Tony Jonesee959b02008-02-22 00:13:36 +010010531 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010532 &ipr_trace_attr);
10533
10534 if (rc) {
10535 scsi_remove_host(ioa_cfg->host);
10536 __ipr_remove(pdev);
10537 return rc;
10538 }
10539
Brian Kingafc3f832016-08-24 12:56:51 -050010540 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10541 &ipr_ioa_async_err_log);
10542
10543 if (rc) {
10544 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10545 &ipr_dump_attr);
10546 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10547 &ipr_trace_attr);
10548 scsi_remove_host(ioa_cfg->host);
10549 __ipr_remove(pdev);
10550 return rc;
10551 }
10552
Tony Jonesee959b02008-02-22 00:13:36 +010010553 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010554 &ipr_dump_attr);
10555
10556 if (rc) {
Brian Kingafc3f832016-08-24 12:56:51 -050010557 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10558 &ipr_ioa_async_err_log);
Tony Jonesee959b02008-02-22 00:13:36 +010010559 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010560 &ipr_trace_attr);
10561 scsi_remove_host(ioa_cfg->host);
10562 __ipr_remove(pdev);
10563 return rc;
10564 }
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010565 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10566 ioa_cfg->scan_enabled = 1;
10567 schedule_work(&ioa_cfg->work_q);
10568 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010569
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010570 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10571
Jens Axboe89f8b332014-03-13 09:38:42 -060010572 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010573 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010574 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010575 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010576 }
10577 }
10578
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010579 scsi_scan_host(ioa_cfg->host);
10580
Linus Torvalds1da177e2005-04-16 15:20:36 -070010581 return 0;
10582}
10583
10584/**
10585 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010586 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -070010587 *
10588 * This function is invoked upon system shutdown/reboot. It will issue
10589 * an adapter shutdown to the adapter to flush the write cache.
10590 *
10591 * Return value:
10592 * none
10593 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010594static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010595{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010596 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010597 unsigned long lock_flags = 0;
Brian King4fdd7c72015-03-26 11:23:50 -050010598 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010599 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010600
10601 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Jens Axboe89f8b332014-03-13 09:38:42 -060010602 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010603 ioa_cfg->iopoll_weight = 0;
10604 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010605 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010606 }
10607
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010608 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010609 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10610 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10611 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10612 }
10613
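	/*
	 * With ipr_fast_reboot set, a restarting SIS-64 adapter is only
	 * quiesced instead of fully shut down, and its IRQs and PCI device
	 * are released below to shorten the reboot path.
	 */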
Brian King4fdd7c72015-03-26 11:23:50 -050010614 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10615 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10616
10617 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010618 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10619 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Brian King4fdd7c72015-03-26 11:23:50 -050010620 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
Brian King2796ca52015-03-26 11:23:52 -050010621 ipr_free_irqs(ioa_cfg);
Brian King4fdd7c72015-03-26 11:23:50 -050010622 pci_disable_device(ioa_cfg->pdev);
10623 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010624}
10625
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010626static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010627 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010628 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010629 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010630 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010631 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010632 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010633 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010634 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010635 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010636 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010637 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010638 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010639 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010640 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010641 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -050010642 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10643 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010644 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010645 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010646 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010647 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10648 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010649 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010650 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10651 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010652 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010653 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010654 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010655 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10656 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -060010657 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010658 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10659 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010660 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -050010661 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10662 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -050010663 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -050010664 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10665 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -070010666 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10667 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -050010668 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -050010669 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010670 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -060010671 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010672 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -060010673 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010674 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10676 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010677 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010678 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10679 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -080010680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10681 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
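/*
 * Export the PCI ID table to userspace so udev/modprobe can autoload
 * this module when a matching adapter is discovered.
 */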
MODULE_DEVICE_TABLE(pci, ipr_pci_table);

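/*
 * PCI error recovery (EEH) callbacks. The PCI core typically invokes
 * these in the order error_detected -> mmio_enabled -> slot_reset,
 * depending on how the earlier steps report the slot can be recovered.
 */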
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};

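/*
 * Top-level PCI driver definition: binds the ID table above to the
 * probe/remove/shutdown entry points and the EEH error handlers.
 */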
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
	/* Return the completed command to its HRRQ free list */
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	reboot notifier event (SYS_RESTART, SYS_HALT, SYS_POWER_OFF)
 * @buf:	unused
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);

		/* Skip adapters that are not accepting commands, and skip the
		 * prepare on SIS-64 adapters when fast reboot is requested. */
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

		/* Build and issue a shutdown prepare command to the IOA */
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

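/*
 * Reboot notifier: once registered in ipr_init(), ipr_halt() runs on
 * SYS_RESTART, SYS_HALT and SYS_POWER_OFF so each adapter can prepare
 * for shutdown before the system goes down.
 */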
static struct notifier_block ipr_notifier = {
	.notifier_call = ipr_halt,
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

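/* Register ipr_init/ipr_exit as the module load and unload entry points */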
module_init(ipr_init);
module_exit(ipr_exit);