/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 * Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static unsigned int ipr_number_of_msix = 16;
static unsigned int ipr_fast_reboot;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.max_cmds = 100,
		.cache_line_size = 0x20,
		.clear_isr = 1,
		.iopoll_weight = 0,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.max_cmds = 1000,
		.cache_line_size = 0x20,
		.clear_isr = 0,
		.iopoll_weight = 64,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, true, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, false, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE, true, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};
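
/*
 * Each ipr_chip[] entry above ties a PCI vendor/device ID pair to the
 * adapter's SIS interface type (IPR_SIS32/IPR_SIS64), its reset access
 * method (IPR_PCI_CFG or IPR_MMIO) and the register layout to use from
 * ipr_chip_cfg[].  A probe-time lookup would typically resemble the
 * sketch below; the field names are illustrative assumptions and this is
 * not a function defined in this file:
 *
 *	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
 *		if (ipr_chip[i].vendor == pdev->vendor &&
 *		    ipr_chip[i].device == pdev->device)
 *			return &ipr_chip[i];
 */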

static int ipr_max_bus_speeds[] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
MODULE_PARM_DESC(number_of_msix, "Specify the number of MSIX interrupts to use on capable adapters (1 - 16). (default: 16)");
module_param_named(fast_reboot, ipr_fast_reboot, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fast_reboot, "Skip adapter shutdown during reboot. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
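
/*
 * Illustrative example only (not part of the driver): the parameters above
 * can be supplied at module load time, e.g.
 *
 *	modprobe ipr max_speed=1 log_level=2 number_of_msix=8 fast_reboot=1
 *
 * or, when the driver is built in, on the kernel command line as
 * ipr.<parameter>=<value>.  The values shown are arbitrary.
 */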

/* A constant array of IOASCs/URCs/Error Messages */
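/*
 * Each entry below is the IOASC value, a logging-control flag, the log
 * level at which the event is reported, and the user-visible message text.
 */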
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040100, 0, 0,
	"Logical Unit in process of becoming ready"},
	{0x02040200, 0, 0,
	"Initializing command required"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02040C00, 0, 0,
	"Logical unit not accessible, target port in unavailable state"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x02408500, 0, 0,
	"IOA microcode download required"},
	{0x02408600, 0, 0,
	"Device bus connection is prohibited by host"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x04679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4120: SAS cable VPD cannot be read"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x052C8100, 1, 0,
	"Illegal request, another cable connector was physically disabled"},
	{0x054E8000, 1, 0,
	"Illegal request, inconsistent group id/group count"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x060B0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4080: IOA exceeded maximum operating temperature"},
	{0x060B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4085: Service required"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x063F8300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4102: Device bus fabric performance degradation"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06679800, 0, IPR_DEFAULT_LOG_LEVEL,
	"4121: Configuration error, required cable is missing"},
	{0x06679900, 0, IPR_DEFAULT_LOG_LEVEL,
	"4122: Cable is not plugged into the correct location on remote IOA"},
	{0x06679A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4123: Configuration error, invalid cable vital product data"},
	{0x06679B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"4124: Configuration error, both cable ends are plugged into the same IOA"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEBUG_LOG_LEVEL,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x06808100, 0, IPR_DEBUG_LOG_LEVEL,
	"9083: Device raw mode enabled"},
	{0x06808200, 0, IPR_DEBUG_LOG_LEVEL,
	"9084: Device raw mode disabled"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x07279A00, 0, 0,
	"Data protect, other volume set problem"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B3F9000, 0, 0,
	"Target operating conditions have changed, dual adapter takeover"},
	{0x0B530200, 0, 0,
	"Aborted command, medium removal prevented"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"},
	{0x0B5B8000, 0, 0,
	"Aborted command, command terminated by host"}
};

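/*
 * Enclosure (SES) table: the first column is the inquiry product ID and the
 * last is the maximum bus speed, in MB/s, allowed for buses attached to that
 * enclosure.  The middle column appears to be a per-byte compare string in
 * which 'X' marks a byte that must match the product ID and any other
 * character (such as '*') is treated as a wildcard.
 */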
static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:	trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	unsigned int trace_index;

	trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
	trace_entry = &ioa_cfg->trace[trace_index];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
	wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
#endif
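
/*
 * ipr_trc_hook() above treats ioa_cfg->trace as a fixed-size ring buffer:
 * the atomically incremented trace_index is masked with
 * IPR_TRACE_INDEX_MASK, so once the buffer is full the oldest entries are
 * overwritten.
 */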

/**
 * ipr_lock_and_done - Acquire lock and complete command
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	none
 **/
static void ipr_lock_and_done(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
	ipr_cmd->done(ipr_cmd);
	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
}

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;
	int hrrq_id;

	hrrq_id = ioarcb->cmd_pkt.hrrq_id;
	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->cmd_pkt.hrrq_id = hrrq_id;
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 * @fast_done:	fast done function call-back
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
			      void (*fast_done) (struct ipr_cmnd *))
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	ipr_cmd->eh_comp = NULL;
	ipr_cmd->fast_done = fast_done;
	timer_setup(&ipr_cmd->timer, NULL, 0);
}

/**
 * __ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @hrrq:	hrr queue
703 *
704 * Return value:
705 * pointer to ipr command struct
706 **/
707static
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600708struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700709{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600710 struct ipr_cmnd *ipr_cmd = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600712 if (likely(!list_empty(&hrrq->hrrq_free_q))) {
713 ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
714 struct ipr_cmnd, queue);
715 list_del(&ipr_cmd->queue);
716 }
717
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718
719 return ipr_cmd;
720}
721
722/**
Brian King00bfef22012-07-17 08:13:52 -0500723 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block and initialize it
724 * @ioa_cfg: ioa config struct
725 *
726 * Return value:
727 * pointer to ipr command struct
728 **/
729static
730struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
731{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600732 struct ipr_cmnd *ipr_cmd =
733 __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
Brian King172cd6e2012-07-17 08:14:40 -0500734 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King00bfef22012-07-17 08:13:52 -0500735 return ipr_cmd;
736}
737
738/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700739 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
740 * @ioa_cfg: ioa config struct
741 * @clr_ints: interrupts to clear
742 *
743 * This function masks all interrupts on the adapter, then clears the
744 * interrupts specified in the mask
745 *
746 * Return value:
747 * none
748 **/
749static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
750 u32 clr_ints)
751{
752 volatile u32 int_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -0600753 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700754
755 /* Stop new interrupts */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -0600756 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
757 spin_lock(&ioa_cfg->hrrq[i]._lock);
758 ioa_cfg->hrrq[i].allow_interrupts = 0;
759 spin_unlock(&ioa_cfg->hrrq[i]._lock);
760 }
761 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762
763 /* Set interrupt mask to stop all new interrupts */
Wayne Boyer214777b2010-02-19 13:24:26 -0800764 if (ioa_cfg->sis64)
765 writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
766 else
767 writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768
769 /* Clear any pending interrupts */
Wayne Boyer214777b2010-02-19 13:24:26 -0800770 if (ioa_cfg->sis64)
771 writel(~0, ioa_cfg->regs.clr_interrupt_reg);
772 writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
774}
775
776/**
777 * ipr_save_pcix_cmd_reg - Save PCI-X command register
778 * @ioa_cfg: ioa config struct
779 *
780 * Return value:
781 * 0 on success / -EIO on failure
782 **/
783static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
784{
785 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
786
Brian King7dce0e12007-01-23 11:25:30 -0600787 if (pcix_cmd_reg == 0)
788 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789
790 if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
791 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
792 dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
793 return -EIO;
794 }
795
796 ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
797 return 0;
798}
799
800/**
801 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
802 * @ioa_cfg: ioa config struct
803 *
804 * Return value:
805 * 0 on success / -EIO on failure
806 **/
807static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
808{
809 int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
810
811 if (pcix_cmd_reg) {
812 if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
813 ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
814 dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
815 return -EIO;
816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817 }
818
819 return 0;
820}
821
822/**
Brian Kingf646f322017-03-15 16:58:39 -0500823 * __ipr_sata_eh_done - done function for aborted SATA commands
824 * @ipr_cmd: ipr command struct
825 *
826 * This function is invoked for ops generated to SATA
827 * devices which are being aborted.
828 *
829 * Return value:
830 * none
831 **/
832static void __ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
833{
834 struct ata_queued_cmd *qc = ipr_cmd->qc;
835 struct ipr_sata_port *sata_port = qc->ap->private_data;
836
837 qc->err_mask |= AC_ERR_OTHER;
838 sata_port->ioasa.status |= ATA_BUSY;
839 ata_qc_complete(qc);
840 if (ipr_cmd->eh_comp)
841 complete(ipr_cmd->eh_comp);
842 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
843}
844
845/**
Brian King35a39692006-09-25 12:39:20 -0500846 * ipr_sata_eh_done - done function for aborted SATA commands
847 * @ipr_cmd: ipr command struct
848 *
849 * This function is invoked for ops generated to SATA
850 * devices which are being aborted.
851 *
852 * Return value:
853 * none
854 **/
855static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
856{
Brian Kingf646f322017-03-15 16:58:39 -0500857 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
858 unsigned long hrrq_flags;
Brian King35a39692006-09-25 12:39:20 -0500859
Brian Kingf646f322017-03-15 16:58:39 -0500860 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
861 __ipr_sata_eh_done(ipr_cmd);
862 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
863}
864
865/**
866 * __ipr_scsi_eh_done - mid-layer done function for aborted ops
867 * @ipr_cmd: ipr command struct
868 *
869 * This function is invoked by the interrupt handler for
870 * ops generated by the SCSI mid-layer which are being aborted.
871 *
872 * Return value:
873 * none
874 **/
875static void __ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
876{
877 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
878
879 scsi_cmd->result |= (DID_ERROR << 16);
880
881 scsi_dma_unmap(ipr_cmd->scsi_cmd);
882 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -0500883 if (ipr_cmd->eh_comp)
884 complete(ipr_cmd->eh_comp);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600885 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King35a39692006-09-25 12:39:20 -0500886}
887
888/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 * ipr_scsi_eh_done - mid-layer done function for aborted ops
890 * @ipr_cmd: ipr command struct
891 *
892 * This function is invoked by the interrupt handler for
893 * ops generated by the SCSI mid-layer which are being aborted.
894 *
895 * Return value:
896 * none
897 **/
898static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
899{
Brian Kingf646f322017-03-15 16:58:39 -0500900 unsigned long hrrq_flags;
901 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902
Brian Kingf646f322017-03-15 16:58:39 -0500903 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
904 __ipr_scsi_eh_done(ipr_cmd);
905 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700906}
907
908/**
909 * ipr_fail_all_ops - Fails all outstanding ops.
910 * @ioa_cfg: ioa config struct
911 *
912 * This function fails all outstanding ops.
913 *
914 * Return value:
915 * none
916 **/
917static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
918{
919 struct ipr_cmnd *ipr_cmd, *temp;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600920 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700921
922 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600923 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -0600924 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600925 list_for_each_entry_safe(ipr_cmd,
926 temp, &hrrq->hrrq_pending_q, queue) {
927 list_del(&ipr_cmd->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600929 ipr_cmd->s.ioasa.hdr.ioasc =
930 cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
931 ipr_cmd->s.ioasa.hdr.ilid =
932 cpu_to_be32(IPR_DRIVER_ILID);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700933
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600934 if (ipr_cmd->scsi_cmd)
Brian Kingf646f322017-03-15 16:58:39 -0500935 ipr_cmd->done = __ipr_scsi_eh_done;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600936 else if (ipr_cmd->qc)
Brian Kingf646f322017-03-15 16:58:39 -0500937 ipr_cmd->done = __ipr_sata_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600939 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
940 IPR_IOASC_IOA_WAS_RESET);
941 del_timer(&ipr_cmd->timer);
942 ipr_cmd->done(ipr_cmd);
943 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -0600944 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700946 LEAVE;
947}
948
949/**
Wayne Boyera32c0552010-02-19 13:23:36 -0800950 * ipr_send_command - Send driver initiated requests.
951 * @ipr_cmd: ipr command struct
952 *
953 * This function sends a command to the adapter using the correct write call.
954 * In the case of sis64, calculate the ioarcb size required. Then or in the
955 * appropriate bits.
956 *
957 * Return value:
958 * none
959 **/
960static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
961{
962 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
963 dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
964
965 if (ioa_cfg->sis64) {
966 /* The default size is 256 bytes */
967 send_dma_addr |= 0x1;
968
969 /* If the number of ioadls * size of ioadl > 128 bytes,
970 then use a 512 byte ioarcb */
971 if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
972 send_dma_addr |= 0x4;
973 writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
974 } else
975 writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
976}
977
978/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700979 * ipr_do_req - Send driver initiated requests.
980 * @ipr_cmd: ipr command struct
981 * @done: done function
982 * @timeout_func: timeout function
983 * @timeout: timeout value
984 *
985 * This function sends the specified command to the adapter with the
986 * timeout given. The done function is invoked on command completion.
987 *
988 * Return value:
989 * none
990 **/
991static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
992 void (*done) (struct ipr_cmnd *),
Kees Cook738c6ec2017-08-18 16:53:24 -0700993 void (*timeout_func) (struct timer_list *), u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -0600995 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996
997 ipr_cmd->done = done;
998
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 ipr_cmd->timer.expires = jiffies + timeout;
Kees Cook841b86f2017-10-23 09:40:42 +02001000 ipr_cmd->timer.function = timeout_func;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001
1002 add_timer(&ipr_cmd->timer);
1003
1004 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
1005
Wayne Boyera32c0552010-02-19 13:23:36 -08001006 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001007}
1008
1009/**
1010 * ipr_internal_cmd_done - Op done function for an internally generated op.
1011 * @ipr_cmd: ipr command struct
1012 *
1013 * This function is the op done function for an internally generated,
1014 * blocking op. It simply wakes the sleeping thread.
1015 *
1016 * Return value:
1017 * none
1018 **/
1019static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
1020{
1021 if (ipr_cmd->sibling)
1022 ipr_cmd->sibling = NULL;
1023 else
1024 complete(&ipr_cmd->completion);
1025}
1026
1027/**
Wayne Boyera32c0552010-02-19 13:23:36 -08001028 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
1029 * @ipr_cmd: ipr command struct
1030 * @dma_addr: dma address
1031 * @len: transfer length
1032 * @flags: ioadl flag value
1033 *
1034 * This function initializes an ioadl in the case where there is only a single
1035 * descriptor.
1036 *
1037 * Return value:
1038 * nothing
1039 **/
1040static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
1041 u32 len, int flags)
1042{
1043 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
1044 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
1045
1046 ipr_cmd->dma_use_sg = 1;
1047
1048 if (ipr_cmd->ioa_cfg->sis64) {
1049 ioadl64->flags = cpu_to_be32(flags);
1050 ioadl64->data_len = cpu_to_be32(len);
1051 ioadl64->address = cpu_to_be64(dma_addr);
1052
1053 ipr_cmd->ioarcb.ioadl_len =
1054 cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
1055 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1056 } else {
1057 ioadl->flags_and_data_len = cpu_to_be32(flags | len);
1058 ioadl->address = cpu_to_be32(dma_addr);
1059
1060 if (flags == IPR_IOADL_FLAGS_READ_LAST) {
1061 ipr_cmd->ioarcb.read_ioadl_len =
1062 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1063 ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
1064 } else {
1065 ipr_cmd->ioarcb.ioadl_len =
1066 cpu_to_be32(sizeof(struct ipr_ioadl_desc));
1067 ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
1068 }
1069 }
1070}
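
/*
 * For a typical single-buffer use of ipr_init_ioadl(), see ipr_send_hcam()
 * below, which maps the hostrcb buffer for a read with
 * IPR_IOADL_FLAGS_READ_LAST.
 */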
1071
1072/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001073 * ipr_send_blocking_cmd - Send command and sleep on its completion.
1074 * @ipr_cmd: ipr command struct
1075 * @timeout_func: function to invoke if command times out
1076 * @timeout: timeout
1077 *
1078 * Return value:
1079 * none
1080 **/
1081static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
Kees Cook738c6ec2017-08-18 16:53:24 -07001082 void (*timeout_func) (struct timer_list *),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083 u32 timeout)
1084{
1085 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1086
1087 init_completion(&ipr_cmd->completion);
1088 ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
1089
1090 spin_unlock_irq(ioa_cfg->host->host_lock);
1091 wait_for_completion(&ipr_cmd->completion);
1092 spin_lock_irq(ioa_cfg->host->host_lock);
1093}
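
/*
 * ipr_send_blocking_cmd() drops and re-acquires host_lock around
 * wait_for_completion(), so the caller must already hold
 * ioa_cfg->host->host_lock (taken with spin_lock_irq) and must be in
 * process context.  A minimal sketch, assuming a caller-supplied timer
 * callback my_timeout_fn (a placeholder, not a symbol in this driver):
 *
 *	spin_lock_irq(ioa_cfg->host->host_lock);
 *	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 *	... fill in ipr_cmd->ioarcb for the desired op ...
 *	ipr_send_blocking_cmd(ipr_cmd, my_timeout_fn, timeout_in_jiffies);
 *	spin_unlock_irq(ioa_cfg->host->host_lock);
 */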
1094
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001095static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1096{
Brian King3f1c0582015-07-14 11:41:33 -05001097 unsigned int hrrq;
1098
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001099 if (ioa_cfg->hrrq_num == 1)
Brian King3f1c0582015-07-14 11:41:33 -05001100 hrrq = 0;
1101 else {
1102 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1103 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1104 }
1105 return hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001106}
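
/*
 * With more than one HRRQ, ipr_get_hrrq_index() skips queue 0 and
 * round-robins across queues 1..hrrq_num-1 via the shared atomic counter.
 * For example, with hrrq_num == 4 successive calls return 1, 2, 3, 1, 2,
 * 3, ...; queue 0 (IPR_INIT_HRRQ) is never handed out here and appears to
 * be reserved for driver-internal use.
 */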
1107
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108/**
1109 * ipr_send_hcam - Send an HCAM to the adapter.
1110 * @ioa_cfg: ioa config struct
1111 * @type: HCAM type
1112 * @hostrcb: hostrcb struct
1113 *
1114 * This function will send a Host Controlled Async command to the adapter.
1115 * If HCAMs are currently not allowed to be issued to the adapter, it will
1116 * place the hostrcb on the free queue.
1117 *
1118 * Return value:
1119 * none
1120 **/
1121static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
1122 struct ipr_hostrcb *hostrcb)
1123{
1124 struct ipr_cmnd *ipr_cmd;
1125 struct ipr_ioarcb *ioarcb;
1126
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06001127 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001128 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001129 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001130 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
1131
1132 ipr_cmd->u.hostrcb = hostrcb;
1133 ioarcb = &ipr_cmd->ioarcb;
1134
1135 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
1136 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
1137 ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
1138 ioarcb->cmd_pkt.cdb[1] = type;
1139 ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
1140 ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
1141
Wayne Boyera32c0552010-02-19 13:23:36 -08001142 ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
1143 sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001144
1145 if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
1146 ipr_cmd->done = ipr_process_ccn;
1147 else
1148 ipr_cmd->done = ipr_process_error;
1149
1150 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
1151
Wayne Boyera32c0552010-02-19 13:23:36 -08001152 ipr_send_command(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153 } else {
1154 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
1155 }
1156}
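
/*
 * Note that cdb[7] and cdb[8] above carry sizeof(hostrcb->hcam) as a
 * big-endian 16-bit length, and that when the adapter is not accepting
 * commands (allow_cmds clear) the hostrcb is simply parked on
 * hostrcb_free_q rather than being sent.
 */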
1157
1158/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001159 * ipr_update_ata_class - Update the ata class in the resource entry
Linus Torvalds1da177e2005-04-16 15:20:36 -07001160 * @res: resource entry struct
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001161 * @proto: cfgte device bus protocol value
Linus Torvalds1da177e2005-04-16 15:20:36 -07001162 *
1163 * Return value:
1164 * none
1165 **/
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001166static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03001168 switch (proto) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001169 case IPR_PROTO_SATA:
1170 case IPR_PROTO_SAS_STP:
1171 res->ata_class = ATA_DEV_ATA;
1172 break;
1173 case IPR_PROTO_SATA_ATAPI:
1174 case IPR_PROTO_SAS_STP_ATAPI:
1175 res->ata_class = ATA_DEV_ATAPI;
1176 break;
1177 default:
1178 res->ata_class = ATA_DEV_UNKNOWN;
1179 break;
1180 }
1181}
1182
1183/**
1184 * ipr_init_res_entry - Initialize a resource entry struct.
1185 * @res: resource entry struct
1186 * @cfgtew: config table entry wrapper struct
1187 *
1188 * Return value:
1189 * none
1190 **/
1191static void ipr_init_res_entry(struct ipr_resource_entry *res,
1192 struct ipr_config_table_entry_wrapper *cfgtew)
1193{
1194 int found = 0;
1195 unsigned int proto;
1196 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1197 struct ipr_resource_entry *gscsi_res = NULL;
1198
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06001199 res->needs_sync_complete = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 res->in_erp = 0;
1201 res->add_to_ml = 0;
1202 res->del_from_ml = 0;
1203 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06001204 res->reset_occurred = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05001206 res->sata_port = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001207
1208 if (ioa_cfg->sis64) {
1209 proto = cfgtew->u.cfgte64->proto;
Brian King359d96e2015-06-11 20:45:20 -05001210 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1211 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001212 res->qmodel = IPR_QUEUEING_MODEL64(res);
Wayne Boyer438b0332010-05-10 09:13:00 -07001213 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001214
1215 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1216 sizeof(res->res_path));
1217
1218 res->bus = 0;
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001219 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1220 sizeof(res->dev_lun.scsi_lun));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001221 res->lun = scsilun_to_int(&res->dev_lun);
1222
1223 if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1224 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1225 if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1226 found = 1;
1227 res->target = gscsi_res->target;
1228 break;
1229 }
1230 }
1231 if (!found) {
1232 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1233 ioa_cfg->max_devs_supported);
1234 set_bit(res->target, ioa_cfg->target_ids);
1235 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001236 } else if (res->type == IPR_RES_TYPE_IOAFP) {
1237 res->bus = IPR_IOAFP_VIRTUAL_BUS;
1238 res->target = 0;
1239 } else if (res->type == IPR_RES_TYPE_ARRAY) {
1240 res->bus = IPR_ARRAY_VIRTUAL_BUS;
1241 res->target = find_first_zero_bit(ioa_cfg->array_ids,
1242 ioa_cfg->max_devs_supported);
1243 set_bit(res->target, ioa_cfg->array_ids);
1244 } else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1245 res->bus = IPR_VSET_VIRTUAL_BUS;
1246 res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1247 ioa_cfg->max_devs_supported);
1248 set_bit(res->target, ioa_cfg->vset_ids);
1249 } else {
1250 res->target = find_first_zero_bit(ioa_cfg->target_ids,
1251 ioa_cfg->max_devs_supported);
1252 set_bit(res->target, ioa_cfg->target_ids);
1253 }
1254 } else {
1255 proto = cfgtew->u.cfgte->proto;
1256 res->qmodel = IPR_QUEUEING_MODEL(res);
1257 res->flags = cfgtew->u.cfgte->flags;
1258 if (res->flags & IPR_IS_IOA_RESOURCE)
1259 res->type = IPR_RES_TYPE_IOAFP;
1260 else
1261 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1262
1263 res->bus = cfgtew->u.cfgte->res_addr.bus;
1264 res->target = cfgtew->u.cfgte->res_addr.target;
1265 res->lun = cfgtew->u.cfgte->res_addr.lun;
Wayne Boyer46d74562010-08-11 07:15:17 -07001266 res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001267 }
1268
1269 ipr_update_ata_class(res, proto);
1270}
1271
1272/**
1273 * ipr_is_same_device - Determine if two devices are the same.
1274 * @res: resource entry struct
1275 * @cfgtew: config table entry wrapper struct
1276 *
1277 * Return value:
1278 * 1 if the devices are the same / 0 otherwise
1279 **/
1280static int ipr_is_same_device(struct ipr_resource_entry *res,
1281 struct ipr_config_table_entry_wrapper *cfgtew)
1282{
1283 if (res->ioa_cfg->sis64) {
1284 if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1285 sizeof(cfgtew->u.cfgte64->dev_id)) &&
Wayne Boyer0cb992e2010-11-04 09:35:58 -07001286 !memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001287 sizeof(cfgtew->u.cfgte64->lun))) {
1288 return 1;
1289 }
1290 } else {
1291 if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1292 res->target == cfgtew->u.cfgte->res_addr.target &&
1293 res->lun == cfgtew->u.cfgte->res_addr.lun)
1294 return 1;
1295 }
1296
1297 return 0;
1298}
1299
1300/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001301 * __ipr_format_res_path - Format the resource path for printing.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001302 * @res_path: resource path
1303 * @buf: buffer
Brian Kingb3b3b402013-01-11 17:43:49 -06001304 * @len: length of buffer provided
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001305 *
1306 * Return value:
1307 * pointer to buffer
1308 **/
Brian Kingb3b3b402013-01-11 17:43:49 -06001309static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001310{
1311 int i;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001312 char *p = buffer;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001313
Wayne Boyer46d74562010-08-11 07:15:17 -07001314 *p = '\0';
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07001315 p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
1316 for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
1317 p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001318
1319 return buffer;
1320}
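
/*
 * __ipr_format_res_path() prints each resource path byte as two upper-case
 * hex digits separated by '-', stopping at the 0xff terminator or when the
 * buffer is exhausted.  For example, a res_path of {0x02, 0x0a, 0xff}
 * formats as "02-0A".
 */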
1321
1322/**
Brian Kingb3b3b402013-01-11 17:43:49 -06001323 * ipr_format_res_path - Format the resource path for printing.
1324 * @ioa_cfg: ioa config struct
1325 * @res_path: resource path
1326 * @buf: buffer
1327 * @len: length of buffer provided
1328 *
1329 * Return value:
1330 * pointer to buffer
1331 **/
1332static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
1333 u8 *res_path, char *buffer, int len)
1334{
1335 char *p = buffer;
1336
1337 *p = '\0';
1338 p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
1339 __ipr_format_res_path(res_path, p, len - (p - buffer));
1340 return buffer;
1341}
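
/*
 * ipr_format_res_path() prefixes the same format with the SCSI host
 * number, so a res_path of {0x02, 0x0a, 0xff} on host 2 is rendered as
 * "2/02-0A".
 */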
1342
1343/**
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001344 * ipr_update_res_entry - Update the resource entry.
1345 * @res: resource entry struct
1346 * @cfgtew: config table entry wrapper struct
1347 *
1348 * Return value:
1349 * none
1350 **/
1351static void ipr_update_res_entry(struct ipr_resource_entry *res,
1352 struct ipr_config_table_entry_wrapper *cfgtew)
1353{
1354 char buffer[IPR_MAX_RES_PATH_LENGTH];
1355 unsigned int proto;
1356 int new_path = 0;
1357
1358 if (res->ioa_cfg->sis64) {
Brian King359d96e2015-06-11 20:45:20 -05001359 res->flags = be16_to_cpu(cfgtew->u.cfgte64->flags);
1360 res->res_flags = be16_to_cpu(cfgtew->u.cfgte64->res_flags);
Wayne Boyer75576bb2010-07-14 10:50:14 -07001361 res->type = cfgtew->u.cfgte64->res_type;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001362
1363 memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1364 sizeof(struct ipr_std_inq_data));
1365
1366 res->qmodel = IPR_QUEUEING_MODEL64(res);
1367 proto = cfgtew->u.cfgte64->proto;
1368 res->res_handle = cfgtew->u.cfgte64->res_handle;
1369 res->dev_id = cfgtew->u.cfgte64->dev_id;
1370
1371 memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1372 sizeof(res->dev_lun.scsi_lun));
1373
1374 if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1375 sizeof(res->res_path))) {
1376 memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1377 sizeof(res->res_path));
1378 new_path = 1;
1379 }
1380
1381 if (res->sdev && new_path)
1382 sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06001383 ipr_format_res_path(res->ioa_cfg,
1384 res->res_path, buffer, sizeof(buffer)));
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001385 } else {
1386 res->flags = cfgtew->u.cfgte->flags;
1387 if (res->flags & IPR_IS_IOA_RESOURCE)
1388 res->type = IPR_RES_TYPE_IOAFP;
1389 else
1390 res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1391
1392 memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1393 sizeof(struct ipr_std_inq_data));
1394
1395 res->qmodel = IPR_QUEUEING_MODEL(res);
1396 proto = cfgtew->u.cfgte->proto;
1397 res->res_handle = cfgtew->u.cfgte->res_handle;
1398 }
1399
1400 ipr_update_ata_class(res, proto);
1401}
1402
1403/**
1404 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1405 * for the resource.
1406 * @res: resource entry struct
1408 *
1409 * Return value:
1410 * none
1411 **/
1412static void ipr_clear_res_target(struct ipr_resource_entry *res)
1413{
1414 struct ipr_resource_entry *gscsi_res = NULL;
1415 struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1416
1417 if (!ioa_cfg->sis64)
1418 return;
1419
1420 if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1421 clear_bit(res->target, ioa_cfg->array_ids);
1422 else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1423 clear_bit(res->target, ioa_cfg->vset_ids);
1424 else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1425 list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1426 if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1427 return;
1428 clear_bit(res->target, ioa_cfg->target_ids);
1429
1430 } else if (res->bus == 0)
1431 clear_bit(res->target, ioa_cfg->target_ids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432}
1433
1434/**
1435 * ipr_handle_config_change - Handle a config change from the adapter
1436 * @ioa_cfg: ioa config struct
1437 * @hostrcb: hostrcb
1438 *
1439 * Return value:
1440 * none
1441 **/
1442static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001443 struct ipr_hostrcb *hostrcb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444{
1445 struct ipr_resource_entry *res = NULL;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001446 struct ipr_config_table_entry_wrapper cfgtew;
1447 __be32 cc_res_handle;
1448
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 u32 is_ndn = 1;
1450
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001451 if (ioa_cfg->sis64) {
1452 cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1453 cc_res_handle = cfgtew.u.cfgte64->res_handle;
1454 } else {
1455 cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1456 cc_res_handle = cfgtew.u.cfgte->res_handle;
1457 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458
1459 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001460 if (res->res_handle == cc_res_handle) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 is_ndn = 0;
1462 break;
1463 }
1464 }
1465
1466 if (is_ndn) {
1467 if (list_empty(&ioa_cfg->free_res_q)) {
1468 ipr_send_hcam(ioa_cfg,
1469 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1470 hostrcb);
1471 return;
1472 }
1473
1474 res = list_entry(ioa_cfg->free_res_q.next,
1475 struct ipr_resource_entry, queue);
1476
1477 list_del(&res->queue);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001478 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1480 }
1481
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001482 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
1484 if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1485 if (res->sdev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001487 res->res_handle = IPR_INVALID_RES_HANDLE;
Brian Kingf688f962014-12-02 12:47:37 -06001488 schedule_work(&ioa_cfg->work_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001489 } else {
1490 ipr_clear_res_target(res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08001492 }
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02001493 } else if (!res->sdev || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 res->add_to_ml = 1;
Brian Kingf688f962014-12-02 12:47:37 -06001495 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 }
1497
1498 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1499}
1500
1501/**
1502 * ipr_process_ccn - Op done function for a CCN.
1503 * @ipr_cmd: ipr command struct
1504 *
1505 * This function is the op done function for a configuration
1506 * change notification host controlled async from the adapter.
1507 *
1508 * Return value:
1509 * none
1510 **/
1511static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1512{
1513 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1514 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07001515 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001516
Brian Kingafc3f832016-08-24 12:56:51 -05001517 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06001518 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
1520 if (ioasc) {
Brian King4fdd7c72015-03-26 11:23:50 -05001521 if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
1522 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 dev_err(&ioa_cfg->pdev->dev,
1524 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
1525
1526 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1527 } else {
1528 ipr_handle_config_change(ioa_cfg, hostrcb);
1529 }
1530}
1531
1532/**
Brian King8cf093e2007-04-26 16:00:14 -05001533 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1534 * @i: index into buffer
1535 * @buf: string to modify
1536 *
1537 * This function will strip all trailing whitespace, pad the end
1538 * of the string with a single space, and NULL terminate the string.
1539 *
1540 * Return value:
1541 * new length of string
1542 **/
1543static int strip_and_pad_whitespace(int i, char *buf)
1544{
1545 while (i && buf[i] == ' ')
1546 i--;
1547 buf[i+1] = ' ';
1548 buf[i+2] = '\0';
1549 return i + 2;
1550}
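
/*
 * Worked example: for a vendor ID field of "IBM" followed by trailing
 * blanks, strip_and_pad_whitespace() collapses the padding to a single
 * blank, NUL terminates the buffer ("IBM "), and returns 4, the offset
 * at which ipr_log_vpd_compact() below appends the next field.
 */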
1551
1552/**
1553 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1554 * @prefix: string to print at start of printk
1555 * @hostrcb: hostrcb pointer
1556 * @vpd: vendor/product id/sn struct
1557 *
1558 * Return value:
1559 * none
1560 **/
1561static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1562 struct ipr_vpd *vpd)
1563{
1564 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1565 int i = 0;
1566
1567 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1568 i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1569
1570 memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1571 i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1572
1573 memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1574 buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1575
1576 ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1577}
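
/*
 * The result is a single log line of the form
 * "<prefix> VPID/SN: <vendor> <product> <serial>", with the trailing
 * blanks of the vendor and product fields collapsed to one space each.
 */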
1578
1579/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001580 * ipr_log_vpd - Log the passed VPD to the error log.
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001581 * @vpd: vendor/product id/sn struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582 *
1583 * Return value:
1584 * none
1585 **/
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001586static void ipr_log_vpd(struct ipr_vpd *vpd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587{
1588 char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1589 + IPR_SERIAL_NUM_LEN];
1590
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001591 memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1592 memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001593 IPR_PROD_ID_LEN);
1594 buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1595 ipr_err("Vendor/Product ID: %s\n", buffer);
1596
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001597 memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001598 buffer[IPR_SERIAL_NUM_LEN] = '\0';
1599 ipr_err(" Serial Number: %s\n", buffer);
1600}
1601
1602/**
Brian King8cf093e2007-04-26 16:00:14 -05001603 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1604 * @prefix: string to print at start of printk
1605 * @hostrcb: hostrcb pointer
1606 * @vpd: vendor/product id/sn/wwn struct
1607 *
1608 * Return value:
1609 * none
1610 **/
1611static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1612 struct ipr_ext_vpd *vpd)
1613{
1614 ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1615 ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1616 be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1617}
1618
1619/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001620 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1621 * @vpd: vendor/product id/sn/wwn struct
1622 *
1623 * Return value:
1624 * none
1625 **/
1626static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1627{
1628 ipr_log_vpd(&vpd->vpd);
1629 ipr_err(" WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1630 be32_to_cpu(vpd->wwid[1]));
1631}
1632
1633/**
1634 * ipr_log_enhanced_cache_error - Log a cache error.
1635 * @ioa_cfg: ioa config struct
1636 * @hostrcb: hostrcb struct
1637 *
1638 * Return value:
1639 * none
1640 **/
1641static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1642 struct ipr_hostrcb *hostrcb)
1643{
Wayne Boyer4565e372010-02-19 13:24:07 -08001644 struct ipr_hostrcb_type_12_error *error;
1645
1646 if (ioa_cfg->sis64)
1647 error = &hostrcb->hcam.u.error64.u.type_12_error;
1648 else
1649 error = &hostrcb->hcam.u.error.u.type_12_error;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001650
1651 ipr_err("-----Current Configuration-----\n");
1652 ipr_err("Cache Directory Card Information:\n");
1653 ipr_log_ext_vpd(&error->ioa_vpd);
1654 ipr_err("Adapter Card Information:\n");
1655 ipr_log_ext_vpd(&error->cfc_vpd);
1656
1657 ipr_err("-----Expected Configuration-----\n");
1658 ipr_err("Cache Directory Card Information:\n");
1659 ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1660 ipr_err("Adapter Card Information:\n");
1661 ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1662
1663 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1664 be32_to_cpu(error->ioa_data[0]),
1665 be32_to_cpu(error->ioa_data[1]),
1666 be32_to_cpu(error->ioa_data[2]));
1667}
1668
1669/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670 * ipr_log_cache_error - Log a cache error.
1671 * @ioa_cfg: ioa config struct
1672 * @hostrcb: hostrcb struct
1673 *
1674 * Return value:
1675 * none
1676 **/
1677static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1678 struct ipr_hostrcb *hostrcb)
1679{
1680 struct ipr_hostrcb_type_02_error *error =
1681 &hostrcb->hcam.u.error.u.type_02_error;
1682
1683 ipr_err("-----Current Configuration-----\n");
1684 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001685 ipr_log_vpd(&error->ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001687 ipr_log_vpd(&error->cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688
1689 ipr_err("-----Expected Configuration-----\n");
1690 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001691 ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001693 ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
1695 ipr_err("Additional IOA Data: %08X %08X %08X\n",
1696 be32_to_cpu(error->ioa_data[0]),
1697 be32_to_cpu(error->ioa_data[1]),
1698 be32_to_cpu(error->ioa_data[2]));
1699}
1700
1701/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001702 * ipr_log_enhanced_config_error - Log a configuration error.
1703 * @ioa_cfg: ioa config struct
1704 * @hostrcb: hostrcb struct
1705 *
1706 * Return value:
1707 * none
1708 **/
1709static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1710 struct ipr_hostrcb *hostrcb)
1711{
1712 int errors_logged, i;
1713 struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1714 struct ipr_hostrcb_type_13_error *error;
1715
1716 error = &hostrcb->hcam.u.error.u.type_13_error;
1717 errors_logged = be32_to_cpu(error->errors_logged);
1718
1719 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1720 be32_to_cpu(error->errors_detected), errors_logged);
1721
1722 dev_entry = error->dev;
1723
1724 for (i = 0; i < errors_logged; i++, dev_entry++) {
1725 ipr_err_separator;
1726
1727 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1728 ipr_log_ext_vpd(&dev_entry->vpd);
1729
1730 ipr_err("-----New Device Information-----\n");
1731 ipr_log_ext_vpd(&dev_entry->new_vpd);
1732
1733 ipr_err("Cache Directory Card Information:\n");
1734 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1735
1736 ipr_err("Adapter Card Information:\n");
1737 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1738 }
1739}
1740
1741/**
Wayne Boyer4565e372010-02-19 13:24:07 -08001742 * ipr_log_sis64_config_error - Log a sis64 configuration error.
1743 * @ioa_cfg: ioa config struct
1744 * @hostrcb: hostrcb struct
1745 *
1746 * Return value:
1747 * none
1748 **/
1749static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1750 struct ipr_hostrcb *hostrcb)
1751{
1752 int errors_logged, i;
1753 struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1754 struct ipr_hostrcb_type_23_error *error;
1755 char buffer[IPR_MAX_RES_PATH_LENGTH];
1756
1757 error = &hostrcb->hcam.u.error64.u.type_23_error;
1758 errors_logged = be32_to_cpu(error->errors_logged);
1759
1760 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1761 be32_to_cpu(error->errors_detected), errors_logged);
1762
1763 dev_entry = error->dev;
1764
1765 for (i = 0; i < errors_logged; i++, dev_entry++) {
1766 ipr_err_separator;
1767
1768 ipr_err("Device %d : %s", i + 1,
Brian Kingb3b3b402013-01-11 17:43:49 -06001769 __ipr_format_res_path(dev_entry->res_path,
1770 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08001771 ipr_log_ext_vpd(&dev_entry->vpd);
1772
1773 ipr_err("-----New Device Information-----\n");
1774 ipr_log_ext_vpd(&dev_entry->new_vpd);
1775
1776 ipr_err("Cache Directory Card Information:\n");
1777 ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1778
1779 ipr_err("Adapter Card Information:\n");
1780 ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1781 }
1782}
1783
1784/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785 * ipr_log_config_error - Log a configuration error.
1786 * @ioa_cfg: ioa config struct
1787 * @hostrcb: hostrcb struct
1788 *
1789 * Return value:
1790 * none
1791 **/
1792static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1793 struct ipr_hostrcb *hostrcb)
1794{
1795 int errors_logged, i;
1796 struct ipr_hostrcb_device_data_entry *dev_entry;
1797 struct ipr_hostrcb_type_03_error *error;
1798
1799 error = &hostrcb->hcam.u.error.u.type_03_error;
1800 errors_logged = be32_to_cpu(error->errors_logged);
1801
1802 ipr_err("Device Errors Detected/Logged: %d/%d\n",
1803 be32_to_cpu(error->errors_detected), errors_logged);
1804
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001805 dev_entry = error->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
1807 for (i = 0; i < errors_logged; i++, dev_entry++) {
1808 ipr_err_separator;
1809
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001810 ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001811 ipr_log_vpd(&dev_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812
1813 ipr_err("-----New Device Information-----\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001814 ipr_log_vpd(&dev_entry->new_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001815
1816 ipr_err("Cache Directory Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001817 ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
1819 ipr_err("Adapter Card Information:\n");
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001820 ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821
1822 ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1823 be32_to_cpu(dev_entry->ioa_data[0]),
1824 be32_to_cpu(dev_entry->ioa_data[1]),
1825 be32_to_cpu(dev_entry->ioa_data[2]),
1826 be32_to_cpu(dev_entry->ioa_data[3]),
1827 be32_to_cpu(dev_entry->ioa_data[4]));
1828 }
1829}
1830
1831/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001832 * ipr_log_enhanced_array_error - Log an array configuration error.
1833 * @ioa_cfg: ioa config struct
1834 * @hostrcb: hostrcb struct
1835 *
1836 * Return value:
1837 * none
1838 **/
1839static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1840 struct ipr_hostrcb *hostrcb)
1841{
1842 int i, num_entries;
1843 struct ipr_hostrcb_type_14_error *error;
1844 struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1845 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1846
1847 error = &hostrcb->hcam.u.error.u.type_14_error;
1848
1849 ipr_err_separator;
1850
1851 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1852 error->protection_level,
1853 ioa_cfg->host->host_no,
1854 error->last_func_vset_res_addr.bus,
1855 error->last_func_vset_res_addr.target,
1856 error->last_func_vset_res_addr.lun);
1857
1858 ipr_err_separator;
1859
1860 array_entry = error->array_member;
1861 num_entries = min_t(u32, be32_to_cpu(error->num_entries),
Wayne Boyer72620262010-09-27 10:45:28 -07001862 ARRAY_SIZE(error->array_member));
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001863
1864 for (i = 0; i < num_entries; i++, array_entry++) {
1865 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1866 continue;
1867
1868 if (be32_to_cpu(error->exposed_mode_adn) == i)
1869 ipr_err("Exposed Array Member %d:\n", i);
1870 else
1871 ipr_err("Array Member %d:\n", i);
1872
1873 ipr_log_ext_vpd(&array_entry->vpd);
1874 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1875 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1876 "Expected Location");
1877
1878 ipr_err_separator;
1879 }
1880}
1881
1882/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 * ipr_log_array_error - Log an array configuration error.
1884 * @ioa_cfg: ioa config struct
1885 * @hostrcb: hostrcb struct
1886 *
1887 * Return value:
1888 * none
1889 **/
1890static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1891 struct ipr_hostrcb *hostrcb)
1892{
1893 int i;
1894 struct ipr_hostrcb_type_04_error *error;
1895 struct ipr_hostrcb_array_data_entry *array_entry;
1896 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1897
1898 error = &hostrcb->hcam.u.error.u.type_04_error;
1899
1900 ipr_err_separator;
1901
1902 ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1903 error->protection_level,
1904 ioa_cfg->host->host_no,
1905 error->last_func_vset_res_addr.bus,
1906 error->last_func_vset_res_addr.target,
1907 error->last_func_vset_res_addr.lun);
1908
1909 ipr_err_separator;
1910
1911 array_entry = error->array_member;
1912
1913 for (i = 0; i < 18; i++) {
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001914 if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 continue;
1916
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001917 if (be32_to_cpu(error->exposed_mode_adn) == i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918 ipr_err("Exposed Array Member %d:\n", i);
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001919 else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 ipr_err("Array Member %d:\n", i);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921
brking@us.ibm.comcfc32132005-11-01 17:00:18 -06001922 ipr_log_vpd(&array_entry->vpd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
brking@us.ibm.comfa15b1f2005-11-01 17:00:27 -06001924 ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1925 ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1926 "Expected Location");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
1928 ipr_err_separator;
1929
1930 if (i == 9)
1931 array_entry = error->array_member2;
1932 else
1933 array_entry++;
1934 }
1935}
1936
1937/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001938 * ipr_log_hex_data - Log additional hex IOA error data.
Brian Kingac719ab2006-11-21 10:28:42 -06001939 * @ioa_cfg: ioa config struct
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001940 * @data: IOA error data
1941 * @len: data length
1942 *
1943 * Return value:
1944 * none
1945 **/
Brian King359d96e2015-06-11 20:45:20 -05001946static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, __be32 *data, int len)
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001947{
1948 int i;
1949
1950 if (len == 0)
1951 return;
1952
Brian Kingac719ab2006-11-21 10:28:42 -06001953 if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1954 len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1955
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001956 for (i = 0; i < len / 4; i += 4) {
1957 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1958 be32_to_cpu(data[i]),
1959 be32_to_cpu(data[i+1]),
1960 be32_to_cpu(data[i+2]),
1961 be32_to_cpu(data[i+3]));
1962 }
1963}
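
/*
 * ipr_log_hex_data() dumps the buffer 16 bytes per line as four big-endian
 * 32-bit words, each line prefixed with its byte offset (00000000,
 * 00000010, 00000020, ...).  At the default log level or below the dump is
 * capped at IPR_DEFAULT_MAX_ERROR_DUMP bytes.
 */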
1964
1965/**
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001966 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1967 * @ioa_cfg: ioa config struct
1968 * @hostrcb: hostrcb struct
1969 *
1970 * Return value:
1971 * none
1972 **/
1973static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1974 struct ipr_hostrcb *hostrcb)
1975{
1976 struct ipr_hostrcb_type_17_error *error;
1977
Wayne Boyer4565e372010-02-19 13:24:07 -08001978 if (ioa_cfg->sis64)
1979 error = &hostrcb->hcam.u.error64.u.type_17_error;
1980 else
1981 error = &hostrcb->hcam.u.error.u.type_17_error;
1982
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001983 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08001984 strim(error->failure_reason);
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001985
Brian King8cf093e2007-04-26 16:00:14 -05001986 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1987 be32_to_cpu(hostrcb->hcam.u.error.prc));
1988 ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06001989 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06001990 be32_to_cpu(hostrcb->hcam.length) -
1991 (offsetof(struct ipr_hostrcb_error, u) +
1992 offsetof(struct ipr_hostrcb_type_17_error, data)));
1993}
1994
1995/**
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06001996 * ipr_log_dual_ioa_error - Log a dual adapter error.
1997 * @ioa_cfg: ioa config struct
1998 * @hostrcb: hostrcb struct
1999 *
2000 * Return value:
2001 * none
2002 **/
2003static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
2004 struct ipr_hostrcb *hostrcb)
2005{
2006 struct ipr_hostrcb_type_07_error *error;
2007
2008 error = &hostrcb->hcam.u.error.u.type_07_error;
2009 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
KOSAKI Motohiroca54cb82009-12-14 18:01:15 -08002010 strim(error->failure_reason);
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002011
Brian King8cf093e2007-04-26 16:00:14 -05002012 ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
2013 be32_to_cpu(hostrcb->hcam.u.error.prc));
2014 ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
Brian Kingac719ab2006-11-21 10:28:42 -06002015 ipr_log_hex_data(ioa_cfg, error->data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002016 be32_to_cpu(hostrcb->hcam.length) -
2017 (offsetof(struct ipr_hostrcb_error, u) +
2018 offsetof(struct ipr_hostrcb_type_07_error, data)));
2019}
2020
Brian King49dc6a12006-11-21 10:28:35 -06002021static const struct {
2022 u8 active;
2023 char *desc;
2024} path_active_desc[] = {
2025 { IPR_PATH_NO_INFO, "Path" },
2026 { IPR_PATH_ACTIVE, "Active path" },
2027 { IPR_PATH_NOT_ACTIVE, "Inactive path" }
2028};
2029
2030static const struct {
2031 u8 state;
2032 char *desc;
2033} path_state_desc[] = {
2034 { IPR_PATH_STATE_NO_INFO, "has no path state information available" },
2035 { IPR_PATH_HEALTHY, "is healthy" },
2036 { IPR_PATH_DEGRADED, "is degraded" },
2037 { IPR_PATH_FAILED, "is failed" }
2038};
2039
2040/**
2041 * ipr_log_fabric_path - Log a fabric path error
2042 * @hostrcb: hostrcb struct
2043 * @fabric: fabric descriptor
2044 *
2045 * Return value:
2046 * none
2047 **/
2048static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
2049 struct ipr_hostrcb_fabric_desc *fabric)
2050{
2051 int i, j;
2052 u8 path_state = fabric->path_state;
2053 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2054 u8 state = path_state & IPR_PATH_STATE_MASK;
2055
2056 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2057 if (path_active_desc[i].active != active)
2058 continue;
2059
2060 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2061 if (path_state_desc[j].state != state)
2062 continue;
2063
2064 if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
2065 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
2066 path_active_desc[i].desc, path_state_desc[j].desc,
2067 fabric->ioa_port);
2068 } else if (fabric->cascaded_expander == 0xff) {
2069 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
2070 path_active_desc[i].desc, path_state_desc[j].desc,
2071 fabric->ioa_port, fabric->phy);
2072 } else if (fabric->phy == 0xff) {
2073 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
2074 path_active_desc[i].desc, path_state_desc[j].desc,
2075 fabric->ioa_port, fabric->cascaded_expander);
2076 } else {
2077 ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
2078 path_active_desc[i].desc, path_state_desc[j].desc,
2079 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2080 }
2081 return;
2082 }
2083 }
2084
2085 ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
2086 fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
2087}
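
/*
 * A cascaded_expander or phy value of 0xff appears to mean the field was
 * not reported, which is why the message format above varies.  A typical
 * line looks like "Active path is healthy: IOA Port=0, Phy=3".
 */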
2088
Wayne Boyer4565e372010-02-19 13:24:07 -08002089/**
2090 * ipr_log64_fabric_path - Log a fabric path error
2091 * @hostrcb: hostrcb struct
2092 * @fabric: fabric descriptor
2093 *
2094 * Return value:
2095 * none
2096 **/
2097static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
2098 struct ipr_hostrcb64_fabric_desc *fabric)
2099{
2100 int i, j;
2101 u8 path_state = fabric->path_state;
2102 u8 active = path_state & IPR_PATH_ACTIVE_MASK;
2103 u8 state = path_state & IPR_PATH_STATE_MASK;
2104 char buffer[IPR_MAX_RES_PATH_LENGTH];
2105
2106 for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
2107 if (path_active_desc[i].active != active)
2108 continue;
2109
2110 for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
2111 if (path_state_desc[j].state != state)
2112 continue;
2113
2114 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
2115 path_active_desc[i].desc, path_state_desc[j].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002116 ipr_format_res_path(hostrcb->ioa_cfg,
2117 fabric->res_path,
2118 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002119 return;
2120 }
2121 }
2122
2123 ipr_err("Path state=%02X Resource Path=%s\n", path_state,
Brian Kingb3b3b402013-01-11 17:43:49 -06002124 ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
2125 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002126}
2127
Brian King49dc6a12006-11-21 10:28:35 -06002128static const struct {
2129 u8 type;
2130 char *desc;
2131} path_type_desc[] = {
2132 { IPR_PATH_CFG_IOA_PORT, "IOA port" },
2133 { IPR_PATH_CFG_EXP_PORT, "Expander port" },
2134 { IPR_PATH_CFG_DEVICE_PORT, "Device port" },
2135 { IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
2136};
2137
2138static const struct {
2139 u8 status;
2140 char *desc;
2141} path_status_desc[] = {
2142 { IPR_PATH_CFG_NO_PROB, "Functional" },
2143 { IPR_PATH_CFG_DEGRADED, "Degraded" },
2144 { IPR_PATH_CFG_FAILED, "Failed" },
2145 { IPR_PATH_CFG_SUSPECT, "Suspect" },
2146 { IPR_PATH_NOT_DETECTED, "Missing" },
2147 { IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
2148};
2149
2150static const char *link_rate[] = {
2151 "unknown",
2152 "disabled",
2153 "phy reset problem",
2154 "spinup hold",
2155 "port selector",
2156 "unknown",
2157 "unknown",
2158 "unknown",
2159 "1.5Gbps",
2160 "3.0Gbps",
2161 "unknown",
2162 "unknown",
2163 "unknown",
2164 "unknown",
2165 "unknown",
2166 "unknown"
2167};
2168
2169/**
2170 * ipr_log_path_elem - Log a fabric path element.
2171 * @hostrcb: hostrcb struct
2172 * @cfg: fabric path element struct
2173 *
2174 * Return value:
2175 * none
2176 **/
2177static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
2178 struct ipr_hostrcb_config_element *cfg)
2179{
2180 int i, j;
2181 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2182 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2183
2184 if (type == IPR_PATH_CFG_NOT_EXIST)
2185 return;
2186
2187 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2188 if (path_type_desc[i].type != type)
2189 continue;
2190
2191 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2192 if (path_status_desc[j].status != status)
2193 continue;
2194
2195 if (type == IPR_PATH_CFG_IOA_PORT) {
2196 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2197 path_status_desc[j].desc, path_type_desc[i].desc,
2198 cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2199 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2200 } else {
2201 if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2202 ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2203 path_status_desc[j].desc, path_type_desc[i].desc,
2204 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2205 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2206 } else if (cfg->cascaded_expander == 0xff) {
2207 ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2208 "WWN=%08X%08X\n", path_status_desc[j].desc,
2209 path_type_desc[i].desc, cfg->phy,
2210 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2211 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2212 } else if (cfg->phy == 0xff) {
2213 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2214 "WWN=%08X%08X\n", path_status_desc[j].desc,
2215 path_type_desc[i].desc, cfg->cascaded_expander,
2216 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2217 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2218 } else {
2219 ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2220 "WWN=%08X%08X\n", path_status_desc[j].desc,
2221 path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2222 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2223 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2224 }
2225 }
2226 return;
2227 }
2228 }
2229
2230 ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2231 "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2232 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2233 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2234}
2235
2236/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002237 * ipr_log64_path_elem - Log a fabric path element.
2238 * @hostrcb: hostrcb struct
2239 * @cfg: fabric path element struct
2240 *
2241 * Return value:
2242 * none
2243 **/
2244static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2245 struct ipr_hostrcb64_config_element *cfg)
2246{
2247 int i, j;
2248 u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2249 u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2250 u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2251 char buffer[IPR_MAX_RES_PATH_LENGTH];
2252
2253 if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2254 return;
2255
2256 for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2257 if (path_type_desc[i].type != type)
2258 continue;
2259
2260 for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2261 if (path_status_desc[j].status != status)
2262 continue;
2263
2264 ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2265 path_status_desc[j].desc, path_type_desc[i].desc,
Brian Kingb3b3b402013-01-11 17:43:49 -06002266 ipr_format_res_path(hostrcb->ioa_cfg,
2267 cfg->res_path, buffer, sizeof(buffer)),
2268 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2269 be32_to_cpu(cfg->wwid[0]),
2270 be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002271 return;
2272 }
2273 }
2274 ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2275 "WWN=%08X%08X\n", cfg->type_status,
Brian Kingb3b3b402013-01-11 17:43:49 -06002276 ipr_format_res_path(hostrcb->ioa_cfg,
2277 cfg->res_path, buffer, sizeof(buffer)),
2278 link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2279 be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
Wayne Boyer4565e372010-02-19 13:24:07 -08002280}
2281
2282/**
Brian King49dc6a12006-11-21 10:28:35 -06002283 * ipr_log_fabric_error - Log a fabric error.
2284 * @ioa_cfg: ioa config struct
2285 * @hostrcb: hostrcb struct
2286 *
2287 * Return value:
2288 * none
2289 **/
2290static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2291 struct ipr_hostrcb *hostrcb)
2292{
2293 struct ipr_hostrcb_type_20_error *error;
2294 struct ipr_hostrcb_fabric_desc *fabric;
2295 struct ipr_hostrcb_config_element *cfg;
2296 int i, add_len;
2297
2298 error = &hostrcb->hcam.u.error.u.type_20_error;
2299 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2300 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2301
2302 add_len = be32_to_cpu(hostrcb->hcam.length) -
2303 (offsetof(struct ipr_hostrcb_error, u) +
2304 offsetof(struct ipr_hostrcb_type_20_error, desc));
2305
2306 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2307 ipr_log_fabric_path(hostrcb, fabric);
2308 for_each_fabric_cfg(fabric, cfg)
2309 ipr_log_path_elem(hostrcb, cfg);
2310
2311 add_len -= be16_to_cpu(fabric->length);
2312 fabric = (struct ipr_hostrcb_fabric_desc *)
2313 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2314 }
2315
Brian King359d96e2015-06-11 20:45:20 -05002316 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Brian King49dc6a12006-11-21 10:28:35 -06002317}
2318
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002319/**
Wayne Boyer4565e372010-02-19 13:24:07 -08002320 * ipr_log_sis64_array_error - Log a sis64 array error.
2321 * @ioa_cfg: ioa config struct
2322 * @hostrcb: hostrcb struct
2323 *
2324 * Return value:
2325 * none
2326 **/
2327static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2328 struct ipr_hostrcb *hostrcb)
2329{
2330 int i, num_entries;
2331 struct ipr_hostrcb_type_24_error *error;
2332 struct ipr_hostrcb64_array_data_entry *array_entry;
2333 char buffer[IPR_MAX_RES_PATH_LENGTH];
2334 const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2335
2336 error = &hostrcb->hcam.u.error64.u.type_24_error;
2337
2338 ipr_err_separator;
2339
2340 ipr_err("RAID %s Array Configuration: %s\n",
2341 error->protection_level,
Brian Kingb3b3b402013-01-11 17:43:49 -06002342 ipr_format_res_path(ioa_cfg, error->last_res_path,
2343 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002344
2345 ipr_err_separator;
2346
2347 array_entry = error->array_member;
Wayne Boyer72620262010-09-27 10:45:28 -07002348 num_entries = min_t(u32, error->num_entries,
2349 ARRAY_SIZE(error->array_member));
Wayne Boyer4565e372010-02-19 13:24:07 -08002350
2351 for (i = 0; i < num_entries; i++, array_entry++) {
2352
2353 if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2354 continue;
2355
2356 if (error->exposed_mode_adn == i)
2357 ipr_err("Exposed Array Member %d:\n", i);
2358 else
2359 ipr_err("Array Member %d:\n", i);
2360
2361 ipr_err("Array Member %d:\n", i);
2362 ipr_log_ext_vpd(&array_entry->vpd);
Wayne Boyer72620262010-09-27 10:45:28 -07002363 ipr_err("Current Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002364 ipr_format_res_path(ioa_cfg, array_entry->res_path,
2365 buffer, sizeof(buffer)));
Wayne Boyer72620262010-09-27 10:45:28 -07002366 ipr_err("Expected Location: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06002367 ipr_format_res_path(ioa_cfg,
2368 array_entry->expected_res_path,
2369 buffer, sizeof(buffer)));
Wayne Boyer4565e372010-02-19 13:24:07 -08002370
2371 ipr_err_separator;
2372 }
2373}
2374
2375/**
2376 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2377 * @ioa_cfg: ioa config struct
2378 * @hostrcb: hostrcb struct
2379 *
2380 * Return value:
2381 * none
2382 **/
2383static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2384 struct ipr_hostrcb *hostrcb)
2385{
2386 struct ipr_hostrcb_type_30_error *error;
2387 struct ipr_hostrcb64_fabric_desc *fabric;
2388 struct ipr_hostrcb64_config_element *cfg;
2389 int i, add_len;
2390
2391 error = &hostrcb->hcam.u.error64.u.type_30_error;
2392
2393 error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2394 ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2395
2396 add_len = be32_to_cpu(hostrcb->hcam.length) -
2397 (offsetof(struct ipr_hostrcb64_error, u) +
2398 offsetof(struct ipr_hostrcb_type_30_error, desc));
2399
2400 for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2401 ipr_log64_fabric_path(hostrcb, fabric);
2402 for_each_fabric_cfg(fabric, cfg)
2403 ipr_log64_path_elem(hostrcb, cfg);
2404
2405 add_len -= be16_to_cpu(fabric->length);
2406 fabric = (struct ipr_hostrcb64_fabric_desc *)
2407 ((unsigned long)fabric + be16_to_cpu(fabric->length));
2408 }
2409
Brian King359d96e2015-06-11 20:45:20 -05002410 ipr_log_hex_data(ioa_cfg, (__be32 *)fabric, add_len);
Wayne Boyer4565e372010-02-19 13:24:07 -08002411}
2412
2413/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 * ipr_log_generic_error - Log an adapter error.
2415 * @ioa_cfg: ioa config struct
2416 * @hostrcb: hostrcb struct
2417 *
2418 * Return value:
2419 * none
2420 **/
2421static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2422 struct ipr_hostrcb *hostrcb)
2423{
Brian Kingac719ab2006-11-21 10:28:42 -06002424 ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002425 be32_to_cpu(hostrcb->hcam.length));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426}
2427
2428/**
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002429 * ipr_log_sis64_device_error - Log a sis64 device error.
2430 * @ioa_cfg: ioa config struct
2431 * @hostrcb: hostrcb struct
2432 *
2433 * Return value:
2434 * none
2435 **/
2436static void ipr_log_sis64_device_error(struct ipr_ioa_cfg *ioa_cfg,
2437 struct ipr_hostrcb *hostrcb)
2438{
2439 struct ipr_hostrcb_type_21_error *error;
2440 char buffer[IPR_MAX_RES_PATH_LENGTH];
2441
2442 error = &hostrcb->hcam.u.error64.u.type_21_error;
2443
2444 ipr_err("-----Failing Device Information-----\n");
2445 ipr_err("World Wide Unique ID: %08X%08X%08X%08X\n",
2446 be32_to_cpu(error->wwn[0]), be32_to_cpu(error->wwn[1]),
2447 be32_to_cpu(error->wwn[2]), be32_to_cpu(error->wwn[3]));
2448 ipr_err("Device Resource Path: %s\n",
2449 __ipr_format_res_path(error->res_path,
2450 buffer, sizeof(buffer)));
2451 error->primary_problem_desc[sizeof(error->primary_problem_desc) - 1] = '\0';
2452 error->second_problem_desc[sizeof(error->second_problem_desc) - 1] = '\0';
2453 ipr_err("Primary Problem Description: %s\n", error->primary_problem_desc);
2454 ipr_err("Secondary Problem Description: %s\n", error->second_problem_desc);
2455 ipr_err("SCSI Sense Data:\n");
2456 ipr_log_hex_data(ioa_cfg, error->sense_data, sizeof(error->sense_data));
2457 ipr_err("SCSI Command Descriptor Block: \n");
2458 ipr_log_hex_data(ioa_cfg, error->cdb, sizeof(error->cdb));
2459
2460 ipr_err("Additional IOA Data:\n");
2461 ipr_log_hex_data(ioa_cfg, error->ioa_data, be32_to_cpu(error->length_of_error));
2462}
2463
2464/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2466 * @ioasc: IOASC
2467 *
2468 * This function will return the index into the ipr_error_table
2469 * for the specified IOASC. If the IOASC is not in the table,
2470 * 0 will be returned, which points to the entry used for unknown errors.
2471 *
2472 * Return value:
2473 * index into the ipr_error_table
2474 **/
2475static u32 ipr_get_error(u32 ioasc)
2476{
2477 int i;
2478
2479 for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
Brian King35a39692006-09-25 12:39:20 -05002480 if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481 return i;
2482
2483 return 0;
2484}
2485
2486/**
2487 * ipr_handle_log_data - Log an adapter error.
2488 * @ioa_cfg: ioa config struct
2489 * @hostrcb: hostrcb struct
2490 *
2491 * This function logs an adapter error to the system.
2492 *
2493 * Return value:
2494 * none
2495 **/
2496static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2497 struct ipr_hostrcb *hostrcb)
2498{
2499 u32 ioasc;
2500 int error_index;
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002501 struct ipr_hostrcb_type_21_error *error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502
2503 if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2504 return;
2505
2506 if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2507 dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2508
Wayne Boyer4565e372010-02-19 13:24:07 -08002509 if (ioa_cfg->sis64)
2510 ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2511 else
2512 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513
Wayne Boyer4565e372010-02-19 13:24:07 -08002514 if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2515 ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 /* Tell the midlayer we had a bus reset so it will handle the UA properly */
2517 scsi_report_bus_reset(ioa_cfg->host,
Wayne Boyer4565e372010-02-19 13:24:07 -08002518 hostrcb->hcam.u.error.fd_res_addr.bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 }
2520
2521 error_index = ipr_get_error(ioasc);
2522
2523 if (!ipr_error_table[error_index].log_hcam)
2524 return;
2525
wenxiong@linux.vnet.ibm.com3185ea62014-09-24 16:25:47 -05002526 if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
2527 hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
2528 error = &hostrcb->hcam.u.error64.u.type_21_error;
2529
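		/*
		 * The sense key is byte 2 of the fixed-format sense data
		 * (bits 8-15 of the first big-endian word).  Skip logging
		 * ILLEGAL REQUEST failures unless the log level has been
		 * raised above the default.
		 */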
2530 if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
2531 ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
2532 return;
2533 }
2534
Brian King49dc6a12006-11-21 10:28:35 -06002535 ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536
2537 /* Set indication we have logged an error */
2538 ioa_cfg->errors_logged++;
2539
Brian King933916f2007-03-29 12:43:30 -05002540 if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 return;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002542 if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2543 hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544
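	/*
	 * The overlay ID identifies the format of the error data and
	 * selects the matching decode/log routine below.
	 */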
2545 switch (hostrcb->hcam.overlay_id) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 case IPR_HOST_RCB_OVERLAY_ID_2:
2547 ipr_log_cache_error(ioa_cfg, hostrcb);
2548 break;
2549 case IPR_HOST_RCB_OVERLAY_ID_3:
2550 ipr_log_config_error(ioa_cfg, hostrcb);
2551 break;
2552 case IPR_HOST_RCB_OVERLAY_ID_4:
2553 case IPR_HOST_RCB_OVERLAY_ID_6:
2554 ipr_log_array_error(ioa_cfg, hostrcb);
2555 break;
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06002556 case IPR_HOST_RCB_OVERLAY_ID_7:
2557 ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2558 break;
brking@us.ibm.comee0f05b2005-11-01 17:02:15 -06002559 case IPR_HOST_RCB_OVERLAY_ID_12:
2560 ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2561 break;
2562 case IPR_HOST_RCB_OVERLAY_ID_13:
2563 ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2564 break;
2565 case IPR_HOST_RCB_OVERLAY_ID_14:
2566 case IPR_HOST_RCB_OVERLAY_ID_16:
2567 ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2568 break;
2569 case IPR_HOST_RCB_OVERLAY_ID_17:
2570 ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2571 break;
Brian King49dc6a12006-11-21 10:28:35 -06002572 case IPR_HOST_RCB_OVERLAY_ID_20:
2573 ipr_log_fabric_error(ioa_cfg, hostrcb);
2574 break;
Wendy Xiong169b9ec2014-03-12 16:08:51 -05002575 case IPR_HOST_RCB_OVERLAY_ID_21:
2576 ipr_log_sis64_device_error(ioa_cfg, hostrcb);
2577 break;
Wayne Boyer4565e372010-02-19 13:24:07 -08002578 case IPR_HOST_RCB_OVERLAY_ID_23:
2579 ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2580 break;
2581 case IPR_HOST_RCB_OVERLAY_ID_24:
2582 case IPR_HOST_RCB_OVERLAY_ID_26:
2583 ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2584 break;
2585 case IPR_HOST_RCB_OVERLAY_ID_30:
2586 ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2587 break;
brking@us.ibm.comcf852032005-11-01 17:00:47 -06002588 case IPR_HOST_RCB_OVERLAY_ID_1:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 default:
brking@us.ibm.coma9cfca92005-11-01 17:00:41 -06002591 ipr_log_generic_error(ioa_cfg, hostrcb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 break;
2593 }
2594}
2595
Brian Kingafc3f832016-08-24 12:56:51 -05002596static struct ipr_hostrcb *ipr_get_free_hostrcb(struct ipr_ioa_cfg *ioa)
2597{
2598 struct ipr_hostrcb *hostrcb;
2599
2600 hostrcb = list_first_entry_or_null(&ioa->hostrcb_free_q,
2601 struct ipr_hostrcb, queue);
2602
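	/*
	 * If no free buffers remain, reclaim the oldest hostrcb still
	 * queued for async error reporting.
	 */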
2603 if (unlikely(!hostrcb)) {
 2604		dev_info(&ioa->pdev->dev, "Reclaiming async error buffers.\n");
2605 hostrcb = list_first_entry_or_null(&ioa->hostrcb_report_q,
2606 struct ipr_hostrcb, queue);
2607 }
2608
2609 list_del_init(&hostrcb->queue);
2610 return hostrcb;
2611}
2612
Linus Torvalds1da177e2005-04-16 15:20:36 -07002613/**
2614 * ipr_process_error - Op done function for an adapter error log.
2615 * @ipr_cmd: ipr command struct
2616 *
2617 * This function is the op done function for an error log host
2618 * controlled async from the adapter. It will log the error and
2619 * send the HCAM back to the adapter.
2620 *
2621 * Return value:
2622 * none
2623 **/
2624static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2625{
2626 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2627 struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07002628 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Wayne Boyer4565e372010-02-19 13:24:07 -08002629 u32 fd_ioasc;
2630
2631 if (ioa_cfg->sis64)
2632 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2633 else
2634 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635
Brian Kingafc3f832016-08-24 12:56:51 -05002636 list_del_init(&hostrcb->queue);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06002637 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638
2639 if (!ioasc) {
2640 ipr_handle_log_data(ioa_cfg, hostrcb);
Brian King65f56472007-04-26 16:00:12 -05002641 if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2642 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Brian King4fdd7c72015-03-26 11:23:50 -05002643 } else if (ioasc != IPR_IOASC_IOA_WAS_RESET &&
2644 ioasc != IPR_IOASC_ABORTED_CMD_TERM_BY_HOST) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645 dev_err(&ioa_cfg->pdev->dev,
2646 "Host RCB failed with IOASC: 0x%08X\n", ioasc);
2647 }
2648
Brian Kingafc3f832016-08-24 12:56:51 -05002649 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_report_q);
Brian King8a4236a2016-10-13 14:45:24 -05002650 schedule_work(&ioa_cfg->work_q);
Brian Kingafc3f832016-08-24 12:56:51 -05002651 hostrcb = ipr_get_free_hostrcb(ioa_cfg);
Brian Kingafc3f832016-08-24 12:56:51 -05002652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2654}
2655
2656/**
2657 * ipr_timeout - An internally generated op has timed out.
2658 * @ipr_cmd: ipr command struct
2659 *
2660 * This function blocks host requests and initiates an
2661 * adapter reset.
2662 *
2663 * Return value:
2664 * none
2665 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002666static void ipr_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667{
Kees Cook738c6ec2017-08-18 16:53:24 -07002668 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 unsigned long lock_flags = 0;
2670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2671
2672 ENTER;
2673 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2674
2675 ioa_cfg->errors_logged++;
2676 dev_err(&ioa_cfg->pdev->dev,
2677 "Adapter being reset due to command timeout.\n");
2678
2679 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2680 ioa_cfg->sdt_state = GET_DUMP;
2681
2682 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2683 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2684
2685 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2686 LEAVE;
2687}
2688
2689/**
2690 * ipr_oper_timeout - Adapter timed out transitioning to operational
2691 * @ipr_cmd: ipr command struct
2692 *
2693 * This function blocks host requests and initiates an
2694 * adapter reset.
2695 *
2696 * Return value:
2697 * none
2698 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07002699static void ipr_oper_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700{
Kees Cook738c6ec2017-08-18 16:53:24 -07002701 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 unsigned long lock_flags = 0;
2703 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2704
2705 ENTER;
2706 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2707
2708 ioa_cfg->errors_logged++;
2709 dev_err(&ioa_cfg->pdev->dev,
2710 "Adapter timed out transitioning to operational.\n");
2711
2712 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2713 ioa_cfg->sdt_state = GET_DUMP;
2714
2715 if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2716 if (ipr_fastfail)
2717 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2718 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2719 }
2720
2721 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2722 LEAVE;
2723}
2724
2725/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 * ipr_find_ses_entry - Find matching SES in SES table
2727 * @res: resource entry struct of SES
2728 *
2729 * Return value:
2730 * pointer to SES table entry / NULL on failure
2731 **/
2732static const struct ipr_ses_table_entry *
2733ipr_find_ses_entry(struct ipr_resource_entry *res)
2734{
2735 int i, j, matches;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002736 struct ipr_std_inq_vpids *vpids;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737 const struct ipr_ses_table_entry *ste = ipr_ses_table;
2738
2739 for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2740 for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2741 if (ste->compare_product_id_byte[j] == 'X') {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002742 vpids = &res->std_inq_data.vpids;
2743 if (vpids->product_id[j] == ste->product_id[j])
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744 matches++;
2745 else
2746 break;
2747 } else
2748 matches++;
2749 }
2750
2751 if (matches == IPR_PROD_ID_LEN)
2752 return ste;
2753 }
2754
2755 return NULL;
2756}
2757
2758/**
2759 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2760 * @ioa_cfg: ioa config struct
2761 * @bus: SCSI bus
2762 * @bus_width: bus width
2763 *
2764 * Return value:
2765 * SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 2766 * For a 2-byte (wide) SCSI bus, the maximum transfer rate in MB/sec
 2767 * is twice the bus speed in MHz (e.g. a wide-enabled bus running at
 2768 * 160 MHz can transfer up to 320 MB/sec).
2769 **/
2770static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2771{
2772 struct ipr_resource_entry *res;
2773 const struct ipr_ses_table_entry *ste;
2774 u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2775
2776 /* Loop through each config table entry in the config table buffer */
2777 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002778 if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 continue;
2780
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08002781 if (bus != res->bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 continue;
2783
2784 if (!(ste = ipr_find_ses_entry(res)))
2785 continue;
2786
2787 max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2788 }
2789
2790 return max_xfer_rate;
2791}
2792
2793/**
2794 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2795 * @ioa_cfg: ioa config struct
2796 * @max_delay: max delay in micro-seconds to wait
2797 *
2798 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2799 *
2800 * Return value:
2801 * 0 on success / other on failure
2802 **/
2803static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2804{
2805 volatile u32 pcii_reg;
2806 int delay = 1;
2807
2808 /* Read interrupt reg until IOA signals IO Debug Acknowledge */
2809 while (delay < max_delay) {
2810 pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2811
2812 if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2813 return 0;
2814
2815 /* udelay cannot be used if delay is more than a few milliseconds */
2816 if ((delay / 1000) > MAX_UDELAY_MS)
2817 mdelay(delay / 1000);
2818 else
2819 udelay(delay);
2820
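		/* Back off exponentially: double the wait each iteration */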
2821 delay += delay;
2822 }
2823 return -EIO;
2824}
2825
2826/**
Wayne Boyerdcbad002010-02-19 13:24:14 -08002827 * ipr_get_sis64_dump_data_section - Dump IOA memory
2828 * @ioa_cfg: ioa config struct
2829 * @start_addr: adapter address to dump
2830 * @dest: destination kernel buffer
2831 * @length_in_words: length to dump in 4 byte words
2832 *
2833 * Return value:
2834 * 0 on success
2835 **/
2836static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2837 u32 start_addr,
2838 __be32 *dest, u32 length_in_words)
2839{
2840 int i;
2841
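	/*
	 * SIS64 indirect dump access: write the adapter address to the
	 * dump address register, then read the word back through the
	 * dump data register and byte swap it into the destination buffer.
	 */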
2842 for (i = 0; i < length_in_words; i++) {
2843 writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2844 *dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2845 dest++;
2846 }
2847
2848 return 0;
2849}
2850
2851/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 * ipr_get_ldump_data_section - Dump IOA memory
2853 * @ioa_cfg: ioa config struct
2854 * @start_addr: adapter address to dump
2855 * @dest: destination kernel buffer
2856 * @length_in_words: length to dump in 4 byte words
2857 *
2858 * Return value:
2859 * 0 on success / -EIO on failure
2860 **/
2861static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2862 u32 start_addr,
2863 __be32 *dest, u32 length_in_words)
2864{
2865 volatile u32 temp_pcii_reg;
2866 int i, delay = 0;
2867
Wayne Boyerdcbad002010-02-19 13:24:14 -08002868 if (ioa_cfg->sis64)
2869 return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2870 dest, length_in_words);
2871
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 /* Write IOA interrupt reg starting LDUMP state */
2873 writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
Wayne Boyer214777b2010-02-19 13:24:26 -08002874 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875
2876 /* Wait for IO debug acknowledge */
2877 if (ipr_wait_iodbg_ack(ioa_cfg,
2878 IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2879 dev_err(&ioa_cfg->pdev->dev,
2880 "IOA dump long data transfer timeout\n");
2881 return -EIO;
2882 }
2883
2884 /* Signal LDUMP interlocked - clear IO debug ack */
2885 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2886 ioa_cfg->regs.clr_interrupt_reg);
2887
2888 /* Write Mailbox with starting address */
2889 writel(start_addr, ioa_cfg->ioa_mailbox);
2890
2891 /* Signal address valid - clear IOA Reset alert */
2892 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002893 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894
2895 for (i = 0; i < length_in_words; i++) {
2896 /* Wait for IO debug acknowledge */
2897 if (ipr_wait_iodbg_ack(ioa_cfg,
2898 IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2899 dev_err(&ioa_cfg->pdev->dev,
2900 "IOA dump short data transfer timeout\n");
2901 return -EIO;
2902 }
2903
2904 /* Read data from mailbox and increment destination pointer */
2905 *dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2906 dest++;
2907
2908 /* For all but the last word of data, signal data received */
2909 if (i < (length_in_words - 1)) {
2910 /* Signal dump data received - Clear IO debug Ack */
2911 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2912 ioa_cfg->regs.clr_interrupt_reg);
2913 }
2914 }
2915
2916 /* Signal end of block transfer. Set reset alert then clear IO debug ack */
2917 writel(IPR_UPROCI_RESET_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002918 ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
2920 writel(IPR_UPROCI_IO_DEBUG_ALERT,
Wayne Boyer214777b2010-02-19 13:24:26 -08002921 ioa_cfg->regs.clr_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922
2923 /* Signal dump data received - Clear IO debug Ack */
2924 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2925 ioa_cfg->regs.clr_interrupt_reg);
2926
2927 /* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2928 while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2929 temp_pcii_reg =
Wayne Boyer214777b2010-02-19 13:24:26 -08002930 readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
2932 if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2933 return 0;
2934
2935 udelay(10);
2936 delay += 10;
2937 }
2938
2939 return 0;
2940}
2941
2942#ifdef CONFIG_SCSI_IPR_DUMP
2943/**
2944 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2945 * @ioa_cfg: ioa config struct
2946 * @pci_address: adapter address
2947 * @length: length of data to copy
2948 *
2949 * Copy data from PCI adapter to kernel buffer.
2950 * Note: length MUST be a 4 byte multiple
2951 * Return value:
2952 * 0 on success / other on failure
2953 **/
2954static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2955 unsigned long pci_address, u32 length)
2956{
2957 int bytes_copied = 0;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002958 int cur_len, rc, rem_len, rem_page_len, max_dump_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 __be32 *page;
2960 unsigned long lock_flags = 0;
2961 struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2962
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002963 if (ioa_cfg->sis64)
2964 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
2965 else
2966 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
2967
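	/*
	 * Copy the dump in page-sized chunks, grabbing a fresh page
	 * whenever the current one fills, and call schedule() between
	 * chunks so the CPU is not monopolized.
	 */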
Linus Torvalds1da177e2005-04-16 15:20:36 -07002968 while (bytes_copied < length &&
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03002969 (ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 if (ioa_dump->page_offset >= PAGE_SIZE ||
2971 ioa_dump->page_offset == 0) {
2972 page = (__be32 *)__get_free_page(GFP_ATOMIC);
2973
2974 if (!page) {
2975 ipr_trace;
2976 return bytes_copied;
2977 }
2978
2979 ioa_dump->page_offset = 0;
2980 ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2981 ioa_dump->next_page_index++;
2982 } else
2983 page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2984
2985 rem_len = length - bytes_copied;
2986 rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2987 cur_len = min(rem_len, rem_page_len);
2988
2989 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2990 if (ioa_cfg->sdt_state == ABORT_DUMP) {
2991 rc = -EIO;
2992 } else {
2993 rc = ipr_get_ldump_data_section(ioa_cfg,
2994 pci_address + bytes_copied,
2995 &page[ioa_dump->page_offset / 4],
2996 (cur_len / sizeof(u32)));
2997 }
2998 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2999
3000 if (!rc) {
3001 ioa_dump->page_offset += cur_len;
3002 bytes_copied += cur_len;
3003 } else {
3004 ipr_trace;
3005 break;
3006 }
3007 schedule();
3008 }
3009
3010 return bytes_copied;
3011}
3012
3013/**
3014 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
3015 * @hdr: dump entry header struct
3016 *
3017 * Return value:
3018 * nothing
3019 **/
3020static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
3021{
3022 hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
3023 hdr->num_elems = 1;
3024 hdr->offset = sizeof(*hdr);
3025 hdr->status = IPR_DUMP_STATUS_SUCCESS;
3026}
3027
3028/**
3029 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
3030 * @ioa_cfg: ioa config struct
3031 * @driver_dump: driver dump struct
3032 *
3033 * Return value:
3034 * nothing
3035 **/
3036static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
3037 struct ipr_driver_dump *driver_dump)
3038{
3039 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3040
3041 ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
3042 driver_dump->ioa_type_entry.hdr.len =
3043 sizeof(struct ipr_dump_ioa_type_entry) -
3044 sizeof(struct ipr_dump_entry_header);
3045 driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3046 driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
3047 driver_dump->ioa_type_entry.type = ioa_cfg->type;
3048 driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
3049 (ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
3050 ucode_vpd->minor_release[1];
3051 driver_dump->hdr.num_entries++;
3052}
3053
3054/**
3055 * ipr_dump_version_data - Fill in the driver version in the dump.
3056 * @ioa_cfg: ioa config struct
3057 * @driver_dump: driver dump struct
3058 *
3059 * Return value:
3060 * nothing
3061 **/
3062static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
3063 struct ipr_driver_dump *driver_dump)
3064{
3065 ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
3066 driver_dump->version_entry.hdr.len =
3067 sizeof(struct ipr_dump_version_entry) -
3068 sizeof(struct ipr_dump_entry_header);
3069 driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3070 driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
3071 strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
3072 driver_dump->hdr.num_entries++;
3073}
3074
3075/**
3076 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
3077 * @ioa_cfg: ioa config struct
3078 * @driver_dump: driver dump struct
3079 *
3080 * Return value:
3081 * nothing
3082 **/
3083static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
3084 struct ipr_driver_dump *driver_dump)
3085{
3086 ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
3087 driver_dump->trace_entry.hdr.len =
3088 sizeof(struct ipr_dump_trace_entry) -
3089 sizeof(struct ipr_dump_entry_header);
3090 driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3091 driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
3092 memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
3093 driver_dump->hdr.num_entries++;
3094}
3095
3096/**
3097 * ipr_dump_location_data - Fill in the IOA location in the dump.
3098 * @ioa_cfg: ioa config struct
3099 * @driver_dump: driver dump struct
3100 *
3101 * Return value:
3102 * nothing
3103 **/
3104static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
3105 struct ipr_driver_dump *driver_dump)
3106{
3107 ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
3108 driver_dump->location_entry.hdr.len =
3109 sizeof(struct ipr_dump_location_entry) -
3110 sizeof(struct ipr_dump_entry_header);
3111 driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
3112 driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
Kay Sievers71610f52008-12-03 22:41:36 +01003113 strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003114 driver_dump->hdr.num_entries++;
3115}
3116
3117/**
3118 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
3119 * @ioa_cfg: ioa config struct
3120 * @dump: dump struct
3121 *
3122 * Return value:
3123 * nothing
3124 **/
3125static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
3126{
3127 unsigned long start_addr, sdt_word;
3128 unsigned long lock_flags = 0;
3129 struct ipr_driver_dump *driver_dump = &dump->driver_dump;
3130 struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003131 u32 num_entries, max_num_entries, start_off, end_off;
3132 u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003133 struct ipr_sdt *sdt;
Wayne Boyerdcbad002010-02-19 13:24:14 -08003134 int valid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003135 int i;
3136
3137 ENTER;
3138
3139 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3140
Brian King41e9a692011-09-21 08:51:11 -05003141 if (ioa_cfg->sdt_state != READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003142 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3143 return;
3144 }
3145
Wayne Boyer110def82010-11-04 09:36:16 -07003146 if (ioa_cfg->sis64) {
3147 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148 ssleep(IPR_DUMP_DELAY_SECONDS);
3149 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3150 }
3151
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 start_addr = readl(ioa_cfg->ioa_mailbox);
3153
Wayne Boyerdcbad002010-02-19 13:24:14 -08003154 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 dev_err(&ioa_cfg->pdev->dev,
3156 "Invalid dump table format: %lx\n", start_addr);
3157 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3158 return;
3159 }
3160
3161 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
3162
3163 driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
3164
3165 /* Initialize the overall dump header */
3166 driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
3167 driver_dump->hdr.num_entries = 1;
3168 driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
3169 driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
3170 driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
3171 driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
3172
3173 ipr_dump_version_data(ioa_cfg, driver_dump);
3174 ipr_dump_location_data(ioa_cfg, driver_dump);
3175 ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
3176 ipr_dump_trace_data(ioa_cfg, driver_dump);
3177
3178 /* Update dump_header */
3179 driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
3180
3181 /* IOA Dump entry */
3182 ipr_init_dump_entry_hdr(&ioa_dump->hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 ioa_dump->hdr.len = 0;
3184 ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
3185 ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
3186
3187 /* First entries in sdt are actually a list of dump addresses and
3188 lengths to gather the real dump data. sdt represents the pointer
3189 to the ioa generated dump table. Dump data will be extracted based
3190 on entries in this table */
3191 sdt = &ioa_dump->sdt;
3192
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003193 if (ioa_cfg->sis64) {
3194 max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
3195 max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
3196 } else {
3197 max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
3198 max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
3199 }
3200
3201 bytes_to_copy = offsetof(struct ipr_sdt, entry) +
3202 (max_num_entries * sizeof(struct ipr_sdt_entry));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003204 bytes_to_copy / sizeof(__be32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003205
3206 /* Smart Dump table is ready to use and the first entry is valid */
Wayne Boyerdcbad002010-02-19 13:24:14 -08003207 if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
3208 (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003209 dev_err(&ioa_cfg->pdev->dev,
3210 "Dump of IOA failed. Dump table not valid: %d, %X.\n",
3211 rc, be32_to_cpu(sdt->hdr.state));
3212 driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
3213 ioa_cfg->sdt_state = DUMP_OBTAINED;
3214 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3215 return;
3216 }
3217
3218 num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
3219
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003220 if (num_entries > max_num_entries)
3221 num_entries = max_num_entries;
3222
3223 /* Update dump length to the actual data to be copied */
3224 dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
3225 if (ioa_cfg->sis64)
3226 dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
3227 else
3228 dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229
3230 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3231
3232 for (i = 0; i < num_entries; i++) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003233 if (ioa_dump->hdr.len > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003234 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3235 break;
3236 }
3237
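		/*
		 * SIS64 entries carry the byte count directly in end_token.
		 * Format 2 entries give start/end offsets instead, so compute
		 * the length and skip entries whose start address is not a
		 * valid format 2 address.
		 */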
3238 if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
Wayne Boyerdcbad002010-02-19 13:24:14 -08003239 sdt_word = be32_to_cpu(sdt->entry[i].start_token);
3240 if (ioa_cfg->sis64)
3241 bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
3242 else {
3243 start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
3244 end_off = be32_to_cpu(sdt->entry[i].end_token);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003245
Wayne Boyerdcbad002010-02-19 13:24:14 -08003246 if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
3247 bytes_to_copy = end_off - start_off;
3248 else
3249 valid = 0;
3250 }
3251 if (valid) {
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003252 if (bytes_to_copy > max_dump_size) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003253 sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
3254 continue;
3255 }
3256
3257 /* Copy data from adapter to driver buffers */
3258 bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
3259 bytes_to_copy);
3260
3261 ioa_dump->hdr.len += bytes_copied;
3262
3263 if (bytes_copied != bytes_to_copy) {
3264 driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3265 break;
3266 }
3267 }
3268 }
3269 }
3270
3271 dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3272
3273 /* Update dump_header */
3274 driver_dump->hdr.len += ioa_dump->hdr.len;
3275 wmb();
3276 ioa_cfg->sdt_state = DUMP_OBTAINED;
3277 LEAVE;
3278}
3279
3280#else
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003281#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003282#endif
3283
3284/**
3285 * ipr_release_dump - Free adapter dump memory
3286 * @kref: kref struct
3287 *
3288 * Return value:
3289 * nothing
3290 **/
3291static void ipr_release_dump(struct kref *kref)
3292{
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003293 struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294 struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3295 unsigned long lock_flags = 0;
3296 int i;
3297
3298 ENTER;
3299 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3300 ioa_cfg->dump = NULL;
3301 ioa_cfg->sdt_state = INACTIVE;
3302 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303
3304 for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3305 free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3306
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03003307 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308 kfree(dump);
3309 LEAVE;
3310}
3311
3312/**
3313 * ipr_worker_thread - Worker thread
David Howellsc4028952006-11-22 14:57:56 +00003314 * @work: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003315 *
3316 * Called at task level from a work thread. This function takes care
3317 * of adding and removing device from the mid-layer as configuration
3318 * changes are detected by the adapter.
3319 *
3320 * Return value:
3321 * nothing
3322 **/
David Howellsc4028952006-11-22 14:57:56 +00003323static void ipr_worker_thread(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003324{
3325 unsigned long lock_flags;
3326 struct ipr_resource_entry *res;
3327 struct scsi_device *sdev;
3328 struct ipr_dump *dump;
David Howellsc4028952006-11-22 14:57:56 +00003329 struct ipr_ioa_cfg *ioa_cfg =
3330 container_of(work, struct ipr_ioa_cfg, work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003331 u8 bus, target, lun;
3332 int did_work;
3333
3334 ENTER;
3335 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3336
Brian King41e9a692011-09-21 08:51:11 -05003337 if (ioa_cfg->sdt_state == READ_DUMP) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003338 dump = ioa_cfg->dump;
3339 if (!dump) {
3340 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3341 return;
3342 }
3343 kref_get(&dump->kref);
3344 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3345 ipr_get_ioa_dump(ioa_cfg, dump);
3346 kref_put(&dump->kref, ipr_release_dump);
3347
3348 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Brian King4c647e92011-10-15 09:08:56 -05003349 if (ioa_cfg->sdt_state == DUMP_OBTAINED && !ioa_cfg->dump_timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3351 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3352 return;
3353 }
3354
Brian Kingb0e17a92017-08-01 10:21:30 -05003355 if (ioa_cfg->scsi_unblock) {
3356 ioa_cfg->scsi_unblock = 0;
3357 ioa_cfg->scsi_blocked = 0;
3358 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3359 scsi_unblock_requests(ioa_cfg->host);
3360 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3361 if (ioa_cfg->scsi_blocked)
3362 scsi_block_requests(ioa_cfg->host);
3363 }
3364
Brian Kingb195d5e2016-07-15 14:48:03 -05003365 if (!ioa_cfg->scan_enabled) {
3366 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3367 return;
3368 }
3369
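	/*
	 * Adding or removing a device requires dropping the host lock,
	 * which invalidates our place in used_res_q, so the list walk is
	 * restarted after every change.
	 */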
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370restart:
3371 do {
3372 did_work = 0;
Brian Kingf688f962014-12-02 12:47:37 -06003373 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003374 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3375 return;
3376 }
3377
3378 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3379 if (res->del_from_ml && res->sdev) {
3380 did_work = 1;
3381 sdev = res->sdev;
3382 if (!scsi_device_get(sdev)) {
Kleber Sacilotto de Souza5767a1c2011-02-14 20:19:31 -02003383 if (!res->add_to_ml)
3384 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3385 else
3386 res->del_from_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003387 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3388 scsi_remove_device(sdev);
3389 scsi_device_put(sdev);
3390 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3391 }
3392 break;
3393 }
3394 }
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003395 } while (did_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003396
3397 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3398 if (res->add_to_ml) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08003399 bus = res->bus;
3400 target = res->target;
3401 lun = res->lun;
Brian King1121b792006-03-29 09:37:16 -06003402 res->add_to_ml = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003403 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3404 scsi_add_device(ioa_cfg->host, bus, target, lun);
3405 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3406 goto restart;
3407 }
3408 }
3409
Brian Kingf688f962014-12-02 12:47:37 -06003410 ioa_cfg->scan_done = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Tony Jonesee959b02008-02-22 00:13:36 +01003412 kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413 LEAVE;
3414}
3415
3416#ifdef CONFIG_SCSI_IPR_TRACE
3417/**
3418 * ipr_read_trace - Dump the adapter trace
Chris Wright2c3c8be2010-05-12 18:28:57 -07003419 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07003420 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08003421 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07003422 * @buf: buffer
3423 * @off: offset
3424 * @count: buffer size
3425 *
3426 * Return value:
3427 * number of bytes printed to buffer
3428 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07003429static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08003430 struct bin_attribute *bin_attr,
3431 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003432{
Tony Jonesee959b02008-02-22 00:13:36 +01003433 struct device *dev = container_of(kobj, struct device, kobj);
3434 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003435 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3436 unsigned long lock_flags = 0;
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003437 ssize_t ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003438
3439 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003440 ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3441 IPR_TRACE_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Akinobu Mitad777aaf2008-09-22 14:56:47 -07003443
3444 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445}
3446
3447static struct bin_attribute ipr_trace_attr = {
3448 .attr = {
3449 .name = "trace",
3450 .mode = S_IRUGO,
3451 },
3452 .size = 0,
3453 .read = ipr_read_trace,
3454};
3455#endif
3456
3457/**
3458 * ipr_show_fw_version - Show the firmware version
Tony Jonesee959b02008-02-22 00:13:36 +01003459 * @dev: class device struct
3460 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003461 *
3462 * Return value:
3463 * number of bytes printed to buffer
3464 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003465static ssize_t ipr_show_fw_version(struct device *dev,
3466 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003467{
Tony Jonesee959b02008-02-22 00:13:36 +01003468 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003469 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3470 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3471 unsigned long lock_flags = 0;
3472 int len;
3473
3474 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3475 len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3476 ucode_vpd->major_release, ucode_vpd->card_type,
3477 ucode_vpd->minor_release[0],
3478 ucode_vpd->minor_release[1]);
3479 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3480 return len;
3481}
3482
Tony Jonesee959b02008-02-22 00:13:36 +01003483static struct device_attribute ipr_fw_version_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484 .attr = {
3485 .name = "fw_version",
3486 .mode = S_IRUGO,
3487 },
3488 .show = ipr_show_fw_version,
3489};
3490
3491/**
3492 * ipr_show_log_level - Show the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003493 * @dev: class device struct
3494 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003495 *
3496 * Return value:
3497 * number of bytes printed to buffer
3498 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003499static ssize_t ipr_show_log_level(struct device *dev,
3500 struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003501{
Tony Jonesee959b02008-02-22 00:13:36 +01003502 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003503 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3504 unsigned long lock_flags = 0;
3505 int len;
3506
3507 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3508 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3509 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3510 return len;
3511}
3512
3513/**
3514 * ipr_store_log_level - Change the adapter's error logging level
Tony Jonesee959b02008-02-22 00:13:36 +01003515 * @dev: class device struct
3516 * @buf: buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 *
3518 * Return value:
 3519 * 	number of bytes consumed from the buffer
3520 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003521static ssize_t ipr_store_log_level(struct device *dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003522 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003523 const char *buf, size_t count)
3524{
Tony Jonesee959b02008-02-22 00:13:36 +01003525 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003526 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3527 unsigned long lock_flags = 0;
3528
3529 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3530 ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3531 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3532 return strlen(buf);
3533}
3534
Tony Jonesee959b02008-02-22 00:13:36 +01003535static struct device_attribute ipr_log_level_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003536 .attr = {
3537 .name = "log_level",
3538 .mode = S_IRUGO | S_IWUSR,
3539 },
3540 .show = ipr_show_log_level,
3541 .store = ipr_store_log_level
3542};
3543
3544/**
3545 * ipr_store_diagnostics - IOA Diagnostics interface
Tony Jonesee959b02008-02-22 00:13:36 +01003546 * @dev: device struct
3547 * @buf: buffer
3548 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003549 *
3550 * This function will reset the adapter and wait a reasonable
3551 * amount of time for any errors that the adapter might log.
3552 *
3553 * Return value:
3554 * count on success / other on failure
3555 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003556static ssize_t ipr_store_diagnostics(struct device *dev,
3557 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003558 const char *buf, size_t count)
3559{
Tony Jonesee959b02008-02-22 00:13:36 +01003560 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003561 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3562 unsigned long lock_flags = 0;
3563 int rc = count;
3564
3565 if (!capable(CAP_SYS_ADMIN))
3566 return -EACCES;
3567
Linus Torvalds1da177e2005-04-16 15:20:36 -07003568 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003569 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003570 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3571 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3572 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3573 }
3574
Linus Torvalds1da177e2005-04-16 15:20:36 -07003575 ioa_cfg->errors_logged = 0;
3576 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3577
3578 if (ioa_cfg->in_reset_reload) {
3579 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3580 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3581
3582 /* Wait for a second for any errors to be logged */
3583 msleep(1000);
3584 } else {
3585 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3586 return -EIO;
3587 }
3588
3589 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3590 if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3591 rc = -EIO;
3592 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3593
3594 return rc;
3595}
3596
Tony Jonesee959b02008-02-22 00:13:36 +01003597static struct device_attribute ipr_diagnostics_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 .attr = {
3599 .name = "run_diagnostics",
3600 .mode = S_IWUSR,
3601 },
3602 .store = ipr_store_diagnostics
3603};
3604
3605/**
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003606 * ipr_show_adapter_state - Show the adapter's state
Tony Jonesee959b02008-02-22 00:13:36 +01003607 * @dev: device struct
3608 * @buf: buffer
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003609 *
3610 * Return value:
3611 * number of bytes printed to buffer
3612 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003613static ssize_t ipr_show_adapter_state(struct device *dev,
3614 struct device_attribute *attr, char *buf)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003615{
Tony Jonesee959b02008-02-22 00:13:36 +01003616 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003617 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3618 unsigned long lock_flags = 0;
3619 int len;
3620
3621 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003622 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003623 len = snprintf(buf, PAGE_SIZE, "offline\n");
3624 else
3625 len = snprintf(buf, PAGE_SIZE, "online\n");
3626 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3627 return len;
3628}
3629
3630/**
3631 * ipr_store_adapter_state - Change adapter state
Tony Jonesee959b02008-02-22 00:13:36 +01003632 * @dev: device struct
3633 * @buf: buffer
3634 * @count: buffer size
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003635 *
3636 * This function will change the adapter's state.
3637 *
3638 * Return value:
3639 * count on success / other on failure
3640 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003641static ssize_t ipr_store_adapter_state(struct device *dev,
3642 struct device_attribute *attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003643 const char *buf, size_t count)
3644{
Tony Jonesee959b02008-02-22 00:13:36 +01003645 struct Scsi_Host *shost = class_to_shost(dev);
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003646 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3647 unsigned long lock_flags;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003648 int result = count, i;
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003649
3650 if (!capable(CAP_SYS_ADMIN))
3651 return -EACCES;
3652
3653 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06003654 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
3655 !strncmp(buf, "online", 6)) {
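		/*
		 * Mark each HRRQ alive under its own lock and make the
		 * stores visible before kicking off the bring-up reset.
		 */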
3656 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
3657 spin_lock(&ioa_cfg->hrrq[i]._lock);
3658 ioa_cfg->hrrq[i].ioa_is_dead = 0;
3659 spin_unlock(&ioa_cfg->hrrq[i]._lock);
3660 }
3661 wmb();
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003662 ioa_cfg->reset_retries = 0;
3663 ioa_cfg->in_ioa_bringdown = 0;
3664 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3665 }
3666 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3668
3669 return result;
3670}
3671
Tony Jonesee959b02008-02-22 00:13:36 +01003672static struct device_attribute ipr_ioa_state_attr = {
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003673 .attr = {
Brian King49dd0962008-04-28 17:36:20 -05003674 .name = "online_state",
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06003675 .mode = S_IRUGO | S_IWUSR,
3676 },
3677 .show = ipr_show_adapter_state,
3678 .store = ipr_store_adapter_state
3679};
3680
3681/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682 * ipr_store_reset_adapter - Reset the adapter
Tony Jonesee959b02008-02-22 00:13:36 +01003683 * @dev: device struct
3684 * @buf: buffer
3685 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686 *
3687 * This function will reset the adapter.
3688 *
3689 * Return value:
3690 * count on success / other on failure
3691 **/
Tony Jonesee959b02008-02-22 00:13:36 +01003692static ssize_t ipr_store_reset_adapter(struct device *dev,
3693 struct device_attribute *attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07003694 const char *buf, size_t count)
3695{
Tony Jonesee959b02008-02-22 00:13:36 +01003696 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003697 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3698 unsigned long lock_flags;
3699 int result = count;
3700
3701 if (!capable(CAP_SYS_ADMIN))
3702 return -EACCES;
3703
3704 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3705 if (!ioa_cfg->in_reset_reload)
3706 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3707 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3709
3710 return result;
3711}
3712
Tony Jonesee959b02008-02-22 00:13:36 +01003713static struct device_attribute ipr_ioa_reset_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003714 .attr = {
3715 .name = "reset_host",
3716 .mode = S_IWUSR,
3717 },
3718 .store = ipr_store_reset_adapter
3719};
3720
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003721static int ipr_iopoll(struct irq_poll *iop, int budget);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003722 /**
3723 * ipr_show_iopoll_weight - Show ipr polling mode
3724 * @dev: class device struct
3725 * @buf: buffer
3726 *
3727 * Return value:
3728 * number of bytes printed to buffer
3729 **/
3730static ssize_t ipr_show_iopoll_weight(struct device *dev,
3731 struct device_attribute *attr, char *buf)
3732{
3733 struct Scsi_Host *shost = class_to_shost(dev);
3734 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3735 unsigned long lock_flags = 0;
3736 int len;
3737
3738 spin_lock_irqsave(shost->host_lock, lock_flags);
3739 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
3740 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3741
3742 return len;
3743}
3744
3745/**
3746 * ipr_store_iopoll_weight - Change the adapter's polling mode
3747 * @dev: class device struct
3748 * @buf: buffer
3749 *
3750 * Return value:
 3751 * 	number of bytes consumed from the buffer on success / -EINVAL on failure
3752 **/
3753static ssize_t ipr_store_iopoll_weight(struct device *dev,
3754 struct device_attribute *attr,
3755 const char *buf, size_t count)
3756{
3757 struct Scsi_Host *shost = class_to_shost(dev);
3758 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3759 unsigned long user_iopoll_weight;
3760 unsigned long lock_flags = 0;
3761 int i;
3762
3763 if (!ioa_cfg->sis64) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003764 dev_info(&ioa_cfg->pdev->dev, "irq_poll not supported on this adapter\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003765 return -EINVAL;
3766 }
3767 if (kstrtoul(buf, 10, &user_iopoll_weight))
3768 return -EINVAL;
3769
3770 if (user_iopoll_weight > 256) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003771		dev_info(&ioa_cfg->pdev->dev, "Invalid irq_poll weight. It must not exceed 256\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003772 return -EINVAL;
3773 }
3774
3775 if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003776		dev_info(&ioa_cfg->pdev->dev, "Current irq_poll weight is already set to this value\n");
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003777 return strlen(buf);
3778 }
3779
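	/*
	 * Only the I/O HRRQs (index 1 and up) use irq_poll.  When polling
	 * is active, disable it on each of them before changing the weight,
	 * then re-initialize them below with the new value.
	 */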
Jens Axboe89f8b332014-03-13 09:38:42 -06003780 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003781 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003782 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003783 }
3784
3785 spin_lock_irqsave(shost->host_lock, lock_flags);
3786 ioa_cfg->iopoll_weight = user_iopoll_weight;
Jens Axboe89f8b332014-03-13 09:38:42 -06003787 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003788 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +01003789 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003790 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06003791 }
3792 }
3793 spin_unlock_irqrestore(shost->host_lock, lock_flags);
3794
3795 return strlen(buf);
3796}
3797
3798static struct device_attribute ipr_iopoll_weight_attr = {
3799 .attr = {
3800 .name = "iopoll_weight",
3801 .mode = S_IRUGO | S_IWUSR,
3802 },
3803 .show = ipr_show_iopoll_weight,
3804 .store = ipr_store_iopoll_weight
3805};
3806
Linus Torvalds1da177e2005-04-16 15:20:36 -07003807/**
3808 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3809 * @buf_len: buffer length
3810 *
3811 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3812 * list to use for microcode download
3813 *
3814 * Return value:
3815 * pointer to sglist / NULL on failure
3816 **/
3817static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3818{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003819 int sg_size, order;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821
3822 /* Get the minimum size per scatter/gather element */
3823 sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3824
3825 /* Get the actual size per element */
3826 order = get_order(sg_size);
3827
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 /* Allocate a scatter/gather list for the DMA */
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003829 sglist = kzalloc(sizeof(struct ipr_sglist), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830 if (sglist == NULL) {
3831 ipr_trace;
3832 return NULL;
3833 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834 sglist->order = order;
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003835 sglist->scatterlist = sgl_alloc_order(buf_len, order, false, GFP_KERNEL,
3836 &sglist->num_sg);
3837 if (!sglist->scatterlist) {
3838 kfree(sglist);
3839 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003840 }
3841
3842 return sglist;
3843}
3844
3845/**
3846 * ipr_free_ucode_buffer - Frees a microcode download buffer
 3847 * @sglist: scatter/gather list pointer
3848 *
3849 * Free a DMA'able ucode download buffer previously allocated with
3850 * ipr_alloc_ucode_buffer
3851 *
3852 * Return value:
3853 * nothing
3854 **/
3855static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3856{
Bart Van Asschef95dc1b2018-02-12 08:58:17 -08003857 sgl_free_order(sglist->scatterlist, sglist->order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 kfree(sglist);
3859}
3860
3861/**
3862 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3863 * @sglist: scatter/gather list pointer
3864 * @buffer: buffer pointer
3865 * @len: buffer length
3866 *
3867 * Copy a microcode image from a user buffer into a buffer allocated by
3868 * ipr_alloc_ucode_buffer
3869 *
3870 * Return value:
3871 * 0 on success / other on failure
3872 **/
3873static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3874 u8 *buffer, u32 len)
3875{
3876 int bsize_elem, i, result = 0;
3877 struct scatterlist *scatterlist;
3878 void *kaddr;
3879
3880 /* Determine the actual number of bytes per element */
3881 bsize_elem = PAGE_SIZE * (1 << sglist->order);
3882
3883 scatterlist = sglist->scatterlist;
3884
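	/*
	 * Copy full bsize_elem sized chunks first; any remainder goes
	 * into one final, shorter scatterlist element.
	 */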
3885 for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003886 struct page *page = sg_page(&scatterlist[i]);
3887
3888 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 memcpy(kaddr, buffer, bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003890 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891
3892 scatterlist[i].length = bsize_elem;
3893
3894 if (result != 0) {
3895 ipr_trace;
3896 return result;
3897 }
3898 }
3899
3900 if (len % bsize_elem) {
Jens Axboe45711f12007-10-22 21:19:53 +02003901 struct page *page = sg_page(&scatterlist[i]);
3902
3903 kaddr = kmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003904 memcpy(kaddr, buffer, len % bsize_elem);
Jens Axboe45711f12007-10-22 21:19:53 +02003905 kunmap(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906
3907 scatterlist[i].length = len % bsize_elem;
3908 }
3909
3910 sglist->buffer_len = len;
3911 return result;
3912}
3913
3914/**
Wayne Boyera32c0552010-02-19 13:23:36 -08003915 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3916 * @ipr_cmd: ipr command struct
3917 * @sglist: scatter/gather list
3918 *
3919 * Builds a microcode download IOA data list (IOADL).
3920 *
3921 **/
3922static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3923 struct ipr_sglist *sglist)
3924{
3925 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3926 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3927 struct scatterlist *scatterlist = sglist->scatterlist;
3928 int i;
3929
3930 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3931 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3932 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3933
3934 ioarcb->ioadl_len =
3935 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3936 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3937 ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3938 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3939 ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3940 }
3941
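	/* Flag the final descriptor so the adapter knows where the IOADL ends */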
3942 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3943}
3944
3945/**
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003946 * ipr_build_ucode_ioadl - Build a microcode download IOADL
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 * @ipr_cmd: ipr command struct
3948 * @sglist: scatter/gather list
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 *
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003950 * Builds a microcode download IOA data list (IOADL).
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952 **/
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003953static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3954 struct ipr_sglist *sglist)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003955{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08003957 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958 struct scatterlist *scatterlist = sglist->scatterlist;
3959 int i;
3960
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003961 ipr_cmd->dma_use_sg = sglist->num_dma_sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08003963 ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3964
3965 ioarcb->ioadl_len =
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3967
3968 for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3969 ioadl[i].flags_and_data_len =
3970 cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3971 ioadl[i].address =
3972 cpu_to_be32(sg_dma_address(&scatterlist[i]));
3973 }
3974
brking@us.ibm.com12baa422005-11-01 17:01:27 -06003975 ioadl[i-1].flags_and_data_len |=
3976 cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3977}
3978
3979/**
3980 * ipr_update_ioa_ucode - Update IOA's microcode
3981 * @ioa_cfg: ioa config struct
3982 * @sglist: scatter/gather list
3983 *
3984 * Initiate an adapter reset to update the IOA's microcode
3985 *
3986 * Return value:
3987 * 0 on success / -EIO on failure
3988 **/
3989static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3990 struct ipr_sglist *sglist)
3991{
3992 unsigned long lock_flags;
3993
3994 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03003995 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -05003996 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3997 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3998 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3999 }
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004000
4001 if (ioa_cfg->ucode_sglist) {
4002 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4003 dev_err(&ioa_cfg->pdev->dev,
4004 "Microcode download already in progress\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005 return -EIO;
4006 }
4007
Anton Blanchardd73341b2014-10-30 17:27:08 -05004008 sglist->num_dma_sg = dma_map_sg(&ioa_cfg->pdev->dev,
4009 sglist->scatterlist, sglist->num_sg,
4010 DMA_TO_DEVICE);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004011
4012 if (!sglist->num_dma_sg) {
4013 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4014 dev_err(&ioa_cfg->pdev->dev,
4015 "Failed to map microcode download buffer!\n");
4016 return -EIO;
4017 }
4018
4019 ioa_cfg->ucode_sglist = sglist;
4020 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
4021 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4022 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4023
4024 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4025 ioa_cfg->ucode_sglist = NULL;
4026 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027 return 0;
4028}
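/*
 * Flow sketch (summary, partly relying on code outside this excerpt):
 * the caller's sglist is DMA-mapped and published through
 * ioa_cfg->ucode_sglist, a normal shutdown/reset of the adapter is
 * initiated, and the thread sleeps on reset_wait_q. The actual
 * transfer of the image to the IOA is performed by the adapter reset
 * job elsewhere in this driver while ucode_sglist is set; once the
 * reset completes the pointer is cleared and 0 is returned.
 */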
4029
4030/**
4031 * ipr_store_update_fw - Update the firmware on the adapter
 * @dev: device struct
 * @attr: device attribute structure
4033 * @buf: buffer
4034 * @count: buffer size
Linus Torvalds1da177e2005-04-16 15:20:36 -07004035 *
4036 * This function will update the firmware on the adapter.
4037 *
4038 * Return value:
4039 * count on success / other on failure
4040 **/
Tony Jonesee959b02008-02-22 00:13:36 +01004041static ssize_t ipr_store_update_fw(struct device *dev,
4042 struct device_attribute *attr,
4043 const char *buf, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004044{
Tony Jonesee959b02008-02-22 00:13:36 +01004045 struct Scsi_Host *shost = class_to_shost(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4047 struct ipr_ucode_image_header *image_hdr;
4048 const struct firmware *fw_entry;
4049 struct ipr_sglist *sglist;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050 char fname[100];
4051 char *src;
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004052 char *endline;
Insu Yund63c7dd2016-01-06 12:44:01 -05004053 int result, dnld_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054
4055 if (!capable(CAP_SYS_ADMIN))
4056 return -EACCES;
4057
Insu Yund63c7dd2016-01-06 12:44:01 -05004058 snprintf(fname, sizeof(fname), "%s", buf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059
Gabriel Krisman Bertazi21b81712016-02-25 13:54:20 -03004060 endline = strchr(fname, '\n');
4061 if (endline)
4062 *endline = '\0';
4063
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03004064 if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004065 dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
4066 return -EIO;
4067 }
4068
4069 image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
4070
Linus Torvalds1da177e2005-04-16 15:20:36 -07004071 src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
4072 dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
4073 sglist = ipr_alloc_ucode_buffer(dnld_size);
4074
4075 if (!sglist) {
4076 dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
4077 release_firmware(fw_entry);
4078 return -ENOMEM;
4079 }
4080
4081 result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
4082
4083 if (result) {
4084 dev_err(&ioa_cfg->pdev->dev,
4085 "Microcode buffer copy to DMA buffer failed\n");
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004086 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 }
4088
Wayne Boyer14ed9cc2011-10-03 20:54:37 -07004089 ipr_info("Updating microcode, please be patient. This may take up to 30 minutes.\n");
4090
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004091 result = ipr_update_ioa_ucode(ioa_cfg, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004093 if (!result)
4094 result = count;
4095out:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 ipr_free_ucode_buffer(sglist);
4097 release_firmware(fw_entry);
brking@us.ibm.com12baa422005-11-01 17:01:27 -06004098 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099}
4100
Tony Jonesee959b02008-02-22 00:13:36 +01004101static struct device_attribute ipr_update_fw_attr = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102 .attr = {
4103 .name = "update_fw",
4104 .mode = S_IWUSR,
4105 },
4106 .store = ipr_store_update_fw
4107};
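/*
 * Usage sketch (assumed typical path, not taken from this source):
 * since update_fw is created on the Scsi_Host class device, an
 * administrator would normally trigger a download with something like
 *
 *	echo <ucode-image-file> > /sys/class/scsi_host/host0/update_fw
 *
 * where the file name is resolved by request_firmware() against the
 * firmware search path (e.g. /lib/firmware) and any trailing newline
 * from echo is stripped by ipr_store_update_fw() above.
 */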
4108
Wayne Boyer75576bb2010-07-14 10:50:14 -07004109/**
4110 * ipr_show_fw_type - Show the adapter's firmware type.
 * @dev: class device struct
 * @attr: device attribute structure
 * @buf: buffer
4113 *
4114 * Return value:
4115 * number of bytes printed to buffer
4116 **/
4117static ssize_t ipr_show_fw_type(struct device *dev,
4118 struct device_attribute *attr, char *buf)
4119{
4120 struct Scsi_Host *shost = class_to_shost(dev);
4121 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4122 unsigned long lock_flags = 0;
4123 int len;
4124
4125 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4126 len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
4127 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4128 return len;
4129}
4130
4131static struct device_attribute ipr_ioa_fw_type_attr = {
4132 .attr = {
4133 .name = "fw_type",
4134 .mode = S_IRUGO,
4135 },
4136 .show = ipr_show_fw_type
4137};
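/*
 * Note (descriptive): reading fw_type simply reports ioa_cfg->sis64
 * with "%d", so it returns "1" when the adapter uses the 64-bit SIS
 * interface and "0" otherwise.
 */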
4138
Brian Kingafc3f832016-08-24 12:56:51 -05004139static ssize_t ipr_read_async_err_log(struct file *filep, struct kobject *kobj,
4140 struct bin_attribute *bin_attr, char *buf,
4141 loff_t off, size_t count)
4142{
4143 struct device *cdev = container_of(kobj, struct device, kobj);
4144 struct Scsi_Host *shost = class_to_shost(cdev);
4145 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4146 struct ipr_hostrcb *hostrcb;
4147 unsigned long lock_flags = 0;
4148 int ret;
4149
4150 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4151 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4152 struct ipr_hostrcb, queue);
4153 if (!hostrcb) {
4154 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4155 return 0;
4156 }
4157 ret = memory_read_from_buffer(buf, count, &off, &hostrcb->hcam,
4158 sizeof(hostrcb->hcam));
4159 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4160 return ret;
4161}
4162
4163static ssize_t ipr_next_async_err_log(struct file *filep, struct kobject *kobj,
4164 struct bin_attribute *bin_attr, char *buf,
4165 loff_t off, size_t count)
4166{
4167 struct device *cdev = container_of(kobj, struct device, kobj);
4168 struct Scsi_Host *shost = class_to_shost(cdev);
4169 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4170 struct ipr_hostrcb *hostrcb;
4171 unsigned long lock_flags = 0;
4172
4173 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4174 hostrcb = list_first_entry_or_null(&ioa_cfg->hostrcb_report_q,
4175 struct ipr_hostrcb, queue);
4176 if (!hostrcb) {
4177 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4178 return count;
4179 }
4180
4181 /* Reclaim hostrcb before exit */
4182 list_move_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
4183 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4184 return count;
4185}
4186
4187static struct bin_attribute ipr_ioa_async_err_log = {
4188 .attr = {
4189 .name = "async_err_log",
4190 .mode = S_IRUGO | S_IWUSR,
4191 },
4192 .size = 0,
4193 .read = ipr_read_async_err_log,
4194 .write = ipr_next_async_err_log
4195};
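/*
 * Usage sketch (assumption): async_err_log exposes the oldest HCAM on
 * hostrcb_report_q. A read returns that record's raw hcam data (or 0
 * bytes if the queue is empty); a write of any data pops the record
 * back onto hostrcb_free_q so that the next read returns the following
 * entry. A userspace consumer would typically alternate read/write on
 * this attribute, normally found under /sys/class/scsi_host/hostN/.
 */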
4196
Tony Jonesee959b02008-02-22 00:13:36 +01004197static struct device_attribute *ipr_ioa_attrs[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198 &ipr_fw_version_attr,
4199 &ipr_log_level_attr,
4200 &ipr_diagnostics_attr,
brking@us.ibm.comf37eb542005-11-01 17:01:40 -06004201 &ipr_ioa_state_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004202 &ipr_ioa_reset_attr,
4203 &ipr_update_fw_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004204 &ipr_ioa_fw_type_attr,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06004205 &ipr_iopoll_weight_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004206 NULL,
4207};
4208
4209#ifdef CONFIG_SCSI_IPR_DUMP
4210/**
4211 * ipr_read_dump - Dump the adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004212 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004214 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 * @buf: buffer
4216 * @off: offset
4217 * @count: buffer size
4218 *
4219 * Return value:
4220 * number of bytes printed to buffer
4221 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004222static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004223 struct bin_attribute *bin_attr,
4224 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004225{
Tony Jonesee959b02008-02-22 00:13:36 +01004226 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227 struct Scsi_Host *shost = class_to_shost(cdev);
4228 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4229 struct ipr_dump *dump;
4230 unsigned long lock_flags = 0;
4231 char *src;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004232 int len, sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004233 size_t rc = count;
4234
4235 if (!capable(CAP_SYS_ADMIN))
4236 return -EACCES;
4237
4238 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4239 dump = ioa_cfg->dump;
4240
4241 if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
4242 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4243 return 0;
4244 }
4245 kref_get(&dump->kref);
4246 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4247
4248 if (off > dump->driver_dump.hdr.len) {
4249 kref_put(&dump->kref, ipr_release_dump);
4250 return 0;
4251 }
4252
4253 if (off + count > dump->driver_dump.hdr.len) {
4254 count = dump->driver_dump.hdr.len - off;
4255 rc = count;
4256 }
4257
4258 if (count && off < sizeof(dump->driver_dump)) {
4259 if (off + count > sizeof(dump->driver_dump))
4260 len = sizeof(dump->driver_dump) - off;
4261 else
4262 len = count;
4263 src = (u8 *)&dump->driver_dump + off;
4264 memcpy(buf, src, len);
4265 buf += len;
4266 off += len;
4267 count -= len;
4268 }
4269
4270 off -= sizeof(dump->driver_dump);
4271
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004272 if (ioa_cfg->sis64)
4273 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4274 (be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
4275 sizeof(struct ipr_sdt_entry));
4276 else
4277 sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
4278 (IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
4279
4280 if (count && off < sdt_end) {
4281 if (off + count > sdt_end)
4282 len = sdt_end - off;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 else
4284 len = count;
4285 src = (u8 *)&dump->ioa_dump + off;
4286 memcpy(buf, src, len);
4287 buf += len;
4288 off += len;
4289 count -= len;
4290 }
4291
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004292 off -= sdt_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
4294 while (count) {
4295 if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
4296 len = PAGE_ALIGN(off) - off;
4297 else
4298 len = count;
4299 src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
4300 src += off & ~PAGE_MASK;
4301 memcpy(buf, src, len);
4302 buf += len;
4303 off += len;
4304 count -= len;
4305 }
4306
4307 kref_put(&dump->kref, ipr_release_dump);
4308 return rc;
4309}
4310
4311/**
4312 * ipr_alloc_dump - Prepare for adapter dump
4313 * @ioa_cfg: ioa config struct
4314 *
4315 * Return value:
4316 * 0 on success / other on failure
4317 **/
4318static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
4319{
4320 struct ipr_dump *dump;
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004321 __be32 **ioa_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004322 unsigned long lock_flags = 0;
4323
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06004324 dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004325
4326 if (!dump) {
4327 ipr_err("Dump memory allocation failed\n");
4328 return -ENOMEM;
4329 }
4330
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004331 if (ioa_cfg->sis64)
4332 ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4333 else
4334 ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
4335
4336 if (!ioa_data) {
4337 ipr_err("Dump memory allocation failed\n");
4338 kfree(dump);
4339 return -ENOMEM;
4340 }
4341
4342 dump->ioa_dump.ioa_data = ioa_data;
4343
Linus Torvalds1da177e2005-04-16 15:20:36 -07004344 kref_init(&dump->kref);
4345 dump->ioa_cfg = ioa_cfg;
4346
4347 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4348
4349 if (INACTIVE != ioa_cfg->sdt_state) {
4350 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza4d4dd702011-04-26 19:23:29 -03004351 vfree(dump->ioa_dump.ioa_data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352 kfree(dump);
4353 return 0;
4354 }
4355
4356 ioa_cfg->dump = dump;
4357 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06004358 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359 ioa_cfg->dump_taken = 1;
4360 schedule_work(&ioa_cfg->work_q);
4361 }
4362 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4363
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364 return 0;
4365}
4366
4367/**
4368 * ipr_free_dump - Free adapter dump memory
4369 * @ioa_cfg: ioa config struct
4370 *
4371 * Return value:
4372 * 0 on success / other on failure
4373 **/
4374static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
4375{
4376 struct ipr_dump *dump;
4377 unsigned long lock_flags = 0;
4378
4379 ENTER;
4380
4381 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4382 dump = ioa_cfg->dump;
4383 if (!dump) {
4384 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4385 return 0;
4386 }
4387
4388 ioa_cfg->dump = NULL;
4389 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4390
4391 kref_put(&dump->kref, ipr_release_dump);
4392
4393 LEAVE;
4394 return 0;
4395}
4396
4397/**
4398 * ipr_write_dump - Setup dump state of adapter
Chris Wright2c3c8be2010-05-12 18:28:57 -07004399 * @filp: open sysfs file
Linus Torvalds1da177e2005-04-16 15:20:36 -07004400 * @kobj: kobject struct
Zhang Rui91a69022007-06-09 13:57:22 +08004401 * @bin_attr: bin_attribute struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07004402 * @buf: buffer
4403 * @off: offset
4404 * @count: buffer size
4405 *
4406 * Return value:
 * 	count on success / other on failure
4408 **/
Chris Wright2c3c8be2010-05-12 18:28:57 -07004409static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
Zhang Rui91a69022007-06-09 13:57:22 +08004410 struct bin_attribute *bin_attr,
4411 char *buf, loff_t off, size_t count)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004412{
Tony Jonesee959b02008-02-22 00:13:36 +01004413 struct device *cdev = container_of(kobj, struct device, kobj);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004414 struct Scsi_Host *shost = class_to_shost(cdev);
4415 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
4416 int rc;
4417
4418 if (!capable(CAP_SYS_ADMIN))
4419 return -EACCES;
4420
4421 if (buf[0] == '1')
4422 rc = ipr_alloc_dump(ioa_cfg);
4423 else if (buf[0] == '0')
4424 rc = ipr_free_dump(ioa_cfg);
4425 else
4426 return -EINVAL;
4427
4428 if (rc)
4429 return rc;
4430 else
4431 return count;
4432}
4433
4434static struct bin_attribute ipr_dump_attr = {
4435 .attr = {
4436 .name = "dump",
4437 .mode = S_IRUSR | S_IWUSR,
4438 },
4439 .size = 0,
4440 .read = ipr_read_dump,
4441 .write = ipr_write_dump
4442};
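/*
 * Usage sketch (assumption): writing '1' to the dump attribute
 * allocates dump buffers and arms the dump (ipr_alloc_dump), writing
 * '0' releases them (ipr_free_dump), and once sdt_state reaches
 * DUMP_OBTAINED a read streams, in order, the driver dump header, the
 * SDT entries, and the captured IOA data pages.
 */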
4443#else
4444static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; };
4445#endif
4446
4447/**
4448 * ipr_change_queue_depth - Change the device's queue depth
4449 * @sdev: scsi device struct
4450 * @qdepth: depth to set
Linus Torvalds1da177e2005-04-16 15:20:36 -07004452 *
4453 * Return value:
4454 * actual depth set
4455 **/
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004456static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457{
Brian King35a39692006-09-25 12:39:20 -05004458 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4459 struct ipr_resource_entry *res;
4460 unsigned long lock_flags = 0;
4461
4462 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4463 res = (struct ipr_resource_entry *)sdev->hostdata;
4464
4465 if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4466 qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4467 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4468
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004469 scsi_change_queue_depth(sdev, qdepth);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004470 return sdev->queue_depth;
4471}
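/*
 * Worked example (illustrative): a request to set a depth of 64 on a
 * SATA (GATA) resource is first capped to IPR_MAX_CMD_PER_ATA_LUN,
 * then handed to scsi_change_queue_depth(); the value actually in
 * effect is read back from sdev->queue_depth and returned to the
 * midlayer.
 */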
4472
4473/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004474 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4475 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004476 * @attr: device attribute structure
Linus Torvalds1da177e2005-04-16 15:20:36 -07004477 * @buf: buffer
4478 *
4479 * Return value:
4480 * number of bytes printed to buffer
4481 **/
Yani Ioannou10523b32005-05-17 06:43:37 -04004482static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004483{
4484 struct scsi_device *sdev = to_scsi_device(dev);
4485 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4486 struct ipr_resource_entry *res;
4487 unsigned long lock_flags = 0;
4488 ssize_t len = -ENXIO;
4489
4490 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4491 res = (struct ipr_resource_entry *)sdev->hostdata;
4492 if (res)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004493 len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4495 return len;
4496}
4497
4498static struct device_attribute ipr_adapter_handle_attr = {
4499 .attr = {
4500 .name = "adapter_handle",
4501 .mode = S_IRUSR,
4502 },
4503 .show = ipr_show_adapter_handle
4504};
4505
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004506/**
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004507 * ipr_show_resource_path - Show the resource path or the resource address for
4508 * this device.
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004509 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004510 * @attr: device attribute structure
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004511 * @buf: buffer
4512 *
4513 * Return value:
4514 * number of bytes printed to buffer
4515 **/
4516static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4517{
4518 struct scsi_device *sdev = to_scsi_device(dev);
4519 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4520 struct ipr_resource_entry *res;
4521 unsigned long lock_flags = 0;
4522 ssize_t len = -ENXIO;
4523 char buffer[IPR_MAX_RES_PATH_LENGTH];
4524
4525 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4526 res = (struct ipr_resource_entry *)sdev->hostdata;
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004527 if (res && ioa_cfg->sis64)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004528 len = snprintf(buf, PAGE_SIZE, "%s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004529 __ipr_format_res_path(res->res_path, buffer,
4530 sizeof(buffer)));
Wayne Boyer5adcbeb2010-06-03 16:02:21 -07004531 else if (res)
4532 len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4533 res->bus, res->target, res->lun);
4534
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004535 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4536 return len;
4537}
4538
4539static struct device_attribute ipr_resource_path_attr = {
4540 .attr = {
4541 .name = "resource_path",
Wayne Boyer75576bb2010-07-14 10:50:14 -07004542 .mode = S_IRUGO,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004543 },
4544 .show = ipr_show_resource_path
4545};
4546
Wayne Boyer75576bb2010-07-14 10:50:14 -07004547/**
Wayne Boyer46d74562010-08-11 07:15:17 -07004548 * ipr_show_device_id - Show the device_id for this device.
4549 * @dev: device struct
4550 * @attr: device attribute structure
4551 * @buf: buffer
4552 *
4553 * Return value:
4554 * number of bytes printed to buffer
4555 **/
4556static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4557{
4558 struct scsi_device *sdev = to_scsi_device(dev);
4559 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4560 struct ipr_resource_entry *res;
4561 unsigned long lock_flags = 0;
4562 ssize_t len = -ENXIO;
4563
4564 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4565 res = (struct ipr_resource_entry *)sdev->hostdata;
4566 if (res && ioa_cfg->sis64)
Wen Xiongbb8647e2015-06-11 20:45:18 -05004567 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", be64_to_cpu(res->dev_id));
Wayne Boyer46d74562010-08-11 07:15:17 -07004568 else if (res)
4569 len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4570
4571 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4572 return len;
4573}
4574
4575static struct device_attribute ipr_device_id_attr = {
4576 .attr = {
4577 .name = "device_id",
4578 .mode = S_IRUGO,
4579 },
4580 .show = ipr_show_device_id
4581};
4582
4583/**
Wayne Boyer75576bb2010-07-14 10:50:14 -07004584 * ipr_show_resource_type - Show the resource type for this device.
4585 * @dev: device struct
Wayne Boyer46d74562010-08-11 07:15:17 -07004586 * @attr: device attribute structure
Wayne Boyer75576bb2010-07-14 10:50:14 -07004587 * @buf: buffer
4588 *
4589 * Return value:
4590 * number of bytes printed to buffer
4591 **/
4592static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4593{
4594 struct scsi_device *sdev = to_scsi_device(dev);
4595 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4596 struct ipr_resource_entry *res;
4597 unsigned long lock_flags = 0;
4598 ssize_t len = -ENXIO;
4599
4600 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4601 res = (struct ipr_resource_entry *)sdev->hostdata;
4602
4603 if (res)
4604 len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4605
4606 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4607 return len;
4608}
4609
4610static struct device_attribute ipr_resource_type_attr = {
4611 .attr = {
4612 .name = "resource_type",
4613 .mode = S_IRUGO,
4614 },
4615 .show = ipr_show_resource_type
4616};
4617
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004618/**
 * ipr_show_raw_mode - Show the scsi device's raw mode setting
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
4622 *
4623 * Return value:
4624 * number of bytes printed to buffer
4625 **/
4626static ssize_t ipr_show_raw_mode(struct device *dev,
4627 struct device_attribute *attr, char *buf)
4628{
4629 struct scsi_device *sdev = to_scsi_device(dev);
4630 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4631 struct ipr_resource_entry *res;
4632 unsigned long lock_flags = 0;
4633 ssize_t len;
4634
4635 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4636 res = (struct ipr_resource_entry *)sdev->hostdata;
4637 if (res)
4638 len = snprintf(buf, PAGE_SIZE, "%d\n", res->raw_mode);
4639 else
4640 len = -ENXIO;
4641 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4642 return len;
4643}
4644
4645/**
 * ipr_store_raw_mode - Change the scsi device's raw mode setting
 * @dev: device struct
 * @attr: device attribute structure
 * @buf: buffer
 * @count: buffer size
 *
 * Return value:
 * 	number of bytes consumed on success / other on failure
4652 **/
4653static ssize_t ipr_store_raw_mode(struct device *dev,
4654 struct device_attribute *attr,
4655 const char *buf, size_t count)
4656{
4657 struct scsi_device *sdev = to_scsi_device(dev);
4658 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4659 struct ipr_resource_entry *res;
4660 unsigned long lock_flags = 0;
4661 ssize_t len;
4662
4663 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4664 res = (struct ipr_resource_entry *)sdev->hostdata;
4665 if (res) {
Gabriel Krisman Bertazie35d7f272015-08-19 11:47:06 -03004666 if (ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004667 res->raw_mode = simple_strtoul(buf, NULL, 10);
4668 len = strlen(buf);
4669 if (res->sdev)
4670 sdev_printk(KERN_INFO, res->sdev, "raw mode is %s\n",
4671 res->raw_mode ? "enabled" : "disabled");
4672 } else
4673 len = -EINVAL;
4674 } else
4675 len = -ENXIO;
4676 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4677 return len;
4678}
4679
4680static struct device_attribute ipr_raw_mode_attr = {
4681 .attr = {
4682 .name = "raw_mode",
4683 .mode = S_IRUGO | S_IWUSR,
4684 },
4685 .show = ipr_show_raw_mode,
4686 .store = ipr_store_raw_mode
4687};
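/*
 * Usage sketch (assumption): raw_mode is a per-device attribute, so it
 * would typically be toggled from the scsi device's sysfs directory,
 * e.g.
 *
 *	echo 1 > /sys/bus/scsi/devices/2:0:3:0/raw_mode
 *
 * This only succeeds for AF DASD resources; any other device type gets
 * -EINVAL from ipr_store_raw_mode() above.
 */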
4688
Linus Torvalds1da177e2005-04-16 15:20:36 -07004689static struct device_attribute *ipr_dev_attrs[] = {
4690 &ipr_adapter_handle_attr,
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004691 &ipr_resource_path_attr,
Wayne Boyer46d74562010-08-11 07:15:17 -07004692 &ipr_device_id_attr,
Wayne Boyer75576bb2010-07-14 10:50:14 -07004693 &ipr_resource_type_attr,
Wen Xiongf8ee25d2015-03-26 11:23:58 -05004694 &ipr_raw_mode_attr,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004695 NULL,
4696};
4697
4698/**
4699 * ipr_biosparam - Return the HSC mapping
4700 * @sdev: scsi device struct
4701 * @block_device: block device pointer
4702 * @capacity: capacity of the device
4703 * @parm: Array containing returned HSC values.
4704 *
4705 * This function generates the HSC parms that fdisk uses.
4706 * We want to make sure we return something that places partitions
4707 * on 4k boundaries for best performance with the IOA.
4708 *
4709 * Return value:
4710 * 0 on success
4711 **/
4712static int ipr_biosparam(struct scsi_device *sdev,
4713 struct block_device *block_device,
4714 sector_t capacity, int *parm)
4715{
4716 int heads, sectors;
4717 sector_t cylinders;
4718
4719 heads = 128;
4720 sectors = 32;
4721
4722 cylinders = capacity;
4723 sector_div(cylinders, (128 * 32));
4724
4725 /* return result */
4726 parm[0] = heads;
4727 parm[1] = sectors;
4728 parm[2] = cylinders;
4729
4730 return 0;
4731}
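/*
 * Worked example (illustrative): with heads = 128 and sectors = 32,
 * each cylinder covers 128 * 32 = 4096 sectors (2 MiB with 512-byte
 * sectors), so cylinder-aligned partitions created by fdisk are always
 * 4 KiB aligned. A disk of 143374000 sectors, for instance, reports
 * 143374000 / 4096 = 35003 cylinders.
 */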
4732
4733/**
Brian King35a39692006-09-25 12:39:20 -05004734 * ipr_find_starget - Find target based on bus/target.
4735 * @starget: scsi target struct
4736 *
4737 * Return value:
4738 * resource entry pointer if found / NULL if not found
4739 **/
4740static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4741{
4742 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4743 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4744 struct ipr_resource_entry *res;
4745
4746 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004747 if ((res->bus == starget->channel) &&
Brian King0ee1d712012-03-14 21:20:06 -05004748 (res->target == starget->id)) {
Brian King35a39692006-09-25 12:39:20 -05004749 return res;
4750 }
4751 }
4752
4753 return NULL;
4754}
4755
4756static struct ata_port_info sata_port_info;
4757
4758/**
4759 * ipr_target_alloc - Prepare for commands to a SCSI target
4760 * @starget: scsi target struct
4761 *
4762 * If the device is a SATA device, this function allocates an
4763 * ATA port with libata, else it does nothing.
4764 *
4765 * Return value:
4766 * 0 on success / non-0 on failure
4767 **/
4768static int ipr_target_alloc(struct scsi_target *starget)
4769{
4770 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4771 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4772 struct ipr_sata_port *sata_port;
4773 struct ata_port *ap;
4774 struct ipr_resource_entry *res;
4775 unsigned long lock_flags;
4776
4777 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4778 res = ipr_find_starget(starget);
4779 starget->hostdata = NULL;
4780
4781 if (res && ipr_is_gata(res)) {
4782 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4783 sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4784 if (!sata_port)
4785 return -ENOMEM;
4786
4787 ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4788 if (ap) {
4789 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4790 sata_port->ioa_cfg = ioa_cfg;
4791 sata_port->ap = ap;
4792 sata_port->res = res;
4793
4794 res->sata_port = sata_port;
4795 ap->private_data = sata_port;
4796 starget->hostdata = sata_port;
4797 } else {
4798 kfree(sata_port);
4799 return -ENOMEM;
4800 }
4801 }
4802 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4803
4804 return 0;
4805}
4806
4807/**
4808 * ipr_target_destroy - Destroy a SCSI target
4809 * @starget: scsi target struct
4810 *
4811 * If the device was a SATA device, this function frees the libata
4812 * ATA port, else it does nothing.
4813 *
4814 **/
4815static void ipr_target_destroy(struct scsi_target *starget)
4816{
4817 struct ipr_sata_port *sata_port = starget->hostdata;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004818 struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4819 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4820
4821 if (ioa_cfg->sis64) {
Brian King0ee1d712012-03-14 21:20:06 -05004822 if (!ipr_find_starget(starget)) {
4823 if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4824 clear_bit(starget->id, ioa_cfg->array_ids);
4825 else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4826 clear_bit(starget->id, ioa_cfg->vset_ids);
4827 else if (starget->channel == 0)
4828 clear_bit(starget->id, ioa_cfg->target_ids);
4829 }
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004830 }
Brian King35a39692006-09-25 12:39:20 -05004831
4832 if (sata_port) {
4833 starget->hostdata = NULL;
4834 ata_sas_port_destroy(sata_port->ap);
4835 kfree(sata_port);
4836 }
4837}
4838
4839/**
4840 * ipr_find_sdev - Find device based on bus/target/lun.
4841 * @sdev: scsi device struct
4842 *
4843 * Return value:
4844 * resource entry pointer if found / NULL if not found
4845 **/
4846static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4847{
4848 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4849 struct ipr_resource_entry *res;
4850
4851 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004852 if ((res->bus == sdev->channel) &&
4853 (res->target == sdev->id) &&
4854 (res->lun == sdev->lun))
Brian King35a39692006-09-25 12:39:20 -05004855 return res;
4856 }
4857
4858 return NULL;
4859}
4860
4861/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004862 * ipr_slave_destroy - Unconfigure a SCSI device
4863 * @sdev: scsi device struct
4864 *
4865 * Return value:
4866 * nothing
4867 **/
4868static void ipr_slave_destroy(struct scsi_device *sdev)
4869{
4870 struct ipr_resource_entry *res;
4871 struct ipr_ioa_cfg *ioa_cfg;
4872 unsigned long lock_flags = 0;
4873
4874 ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4875
4876 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4877 res = (struct ipr_resource_entry *) sdev->hostdata;
4878 if (res) {
Brian King35a39692006-09-25 12:39:20 -05004879 if (res->sata_port)
Tejun Heo3e4ec342010-05-10 21:41:30 +02004880 res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004881 sdev->hostdata = NULL;
4882 res->sdev = NULL;
Brian King35a39692006-09-25 12:39:20 -05004883 res->sata_port = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884 }
4885 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4886}
4887
4888/**
4889 * ipr_slave_configure - Configure a SCSI device
4890 * @sdev: scsi device struct
4891 *
4892 * This function configures the specified scsi device.
4893 *
4894 * Return value:
4895 * 0 on success
4896 **/
4897static int ipr_slave_configure(struct scsi_device *sdev)
4898{
4899 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4900 struct ipr_resource_entry *res;
Brian Kingdd406ef2009-04-22 08:58:02 -05004901 struct ata_port *ap = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902 unsigned long lock_flags = 0;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004903 char buffer[IPR_MAX_RES_PATH_LENGTH];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004904
4905 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4906 res = sdev->hostdata;
4907 if (res) {
4908 if (ipr_is_af_dasd_device(res))
4909 sdev->type = TYPE_RAID;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004910 if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004911 sdev->scsi_level = 4;
brking@us.ibm.com0726ce22005-11-01 17:01:01 -06004912 sdev->no_uld_attach = 1;
4913 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004914 if (ipr_is_vset_device(res)) {
Brian King60654e22014-12-02 12:47:46 -06004915 sdev->scsi_level = SCSI_SPC_3;
Brian King723cd772017-08-18 16:17:32 -05004916 sdev->no_report_opcodes = 1;
Jens Axboe242f9dc2008-09-14 05:55:09 -07004917 blk_queue_rq_timeout(sdev->request_queue,
4918 IPR_VSET_RW_TIMEOUT);
Martin K. Petersen086fa5f2010-02-26 00:20:38 -05004919 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004920 }
Brian Kingdd406ef2009-04-22 08:58:02 -05004921 if (ipr_is_gata(res) && res->sata_port)
4922 ap = res->sata_port->ap;
4923 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4924
4925 if (ap) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01004926 scsi_change_queue_depth(sdev, IPR_MAX_CMD_PER_ATA_LUN);
Brian Kingdd406ef2009-04-22 08:58:02 -05004927 ata_sas_slave_configure(sdev, ap);
Christoph Hellwigc8b09f62014-11-03 20:15:14 +01004928 }
4929
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08004930 if (ioa_cfg->sis64)
4931 sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
Brian Kingb3b3b402013-01-11 17:43:49 -06004932 ipr_format_res_path(ioa_cfg,
4933 res->res_path, buffer, sizeof(buffer)));
Brian Kingdd406ef2009-04-22 08:58:02 -05004934 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004935 }
4936 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4937 return 0;
4938}
4939
4940/**
Brian King35a39692006-09-25 12:39:20 -05004941 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4942 * @sdev: scsi device struct
4943 *
4944 * This function initializes an ATA port so that future commands
4945 * sent through queuecommand will work.
4946 *
4947 * Return value:
4948 * 0 on success
4949 **/
4950static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4951{
4952 struct ipr_sata_port *sata_port = NULL;
4953 int rc = -ENXIO;
4954
4955 ENTER;
4956 if (sdev->sdev_target)
4957 sata_port = sdev->sdev_target->hostdata;
Dan Williamsb2024452012-03-21 21:09:07 -07004958 if (sata_port) {
Brian King35a39692006-09-25 12:39:20 -05004959 rc = ata_sas_port_init(sata_port->ap);
Dan Williamsb2024452012-03-21 21:09:07 -07004960 if (rc == 0)
4961 rc = ata_sas_sync_probe(sata_port->ap);
4962 }
4963
Brian King35a39692006-09-25 12:39:20 -05004964 if (rc)
4965 ipr_slave_destroy(sdev);
4966
4967 LEAVE;
4968 return rc;
4969}
4970
4971/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07004972 * ipr_slave_alloc - Prepare for commands to a device.
4973 * @sdev: scsi device struct
4974 *
4975 * This function saves a pointer to the resource entry
4976 * in the scsi device struct if the device exists. We
4977 * can then use this pointer in ipr_queuecommand when
4978 * handling new commands.
4979 *
4980 * Return value:
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004981 * 0 on success / -ENXIO if device does not exist
Linus Torvalds1da177e2005-04-16 15:20:36 -07004982 **/
4983static int ipr_slave_alloc(struct scsi_device *sdev)
4984{
4985 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4986 struct ipr_resource_entry *res;
4987 unsigned long lock_flags;
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06004988 int rc = -ENXIO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004989
4990 sdev->hostdata = NULL;
4991
4992 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4993
Brian King35a39692006-09-25 12:39:20 -05004994 res = ipr_find_sdev(sdev);
4995 if (res) {
4996 res->sdev = sdev;
4997 res->add_to_ml = 0;
4998 res->in_erp = 0;
4999 sdev->hostdata = res;
5000 if (!ipr_is_naca_model(res))
5001 res->needs_sync_complete = 1;
5002 rc = 0;
5003 if (ipr_is_gata(res)) {
5004 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5005 return ipr_ata_slave_alloc(sdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005006 }
5007 }
5008
5009 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5010
brking@us.ibm.com692aebf2005-11-01 17:01:07 -06005011 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005012}
5013
Brian King6cdb0812014-10-30 17:27:10 -05005014/**
5015 * ipr_match_lun - Match function for specified LUN
5016 * @ipr_cmd: ipr command struct
5017 * @device: device to match (sdev)
5018 *
5019 * Returns:
5020 * 1 if command matches sdev / 0 if command does not match sdev
5021 **/
5022static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
5023{
5024 if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
5025 return 1;
5026 return 0;
5027}
5028
5029/**
Brian King439ae282017-03-15 16:58:39 -05005030 * ipr_cmnd_is_free - Check if a command is free or not
 * @ipr_cmd:	ipr command struct
5032 *
5033 * Returns:
5034 * true / false
5035 **/
5036static bool ipr_cmnd_is_free(struct ipr_cmnd *ipr_cmd)
5037{
5038 struct ipr_cmnd *loop_cmd;
5039
5040 list_for_each_entry(loop_cmd, &ipr_cmd->hrrq->hrrq_free_q, queue) {
5041 if (loop_cmd == ipr_cmd)
5042 return true;
5043 }
5044
5045 return false;
5046}
5047
5048/**
Brian Kingef97d8a2017-03-15 16:58:41 -05005049 * ipr_match_res - Match function for specified resource entry
5050 * @ipr_cmd: ipr command struct
5051 * @resource: resource entry to match
5052 *
5053 * Returns:
 *	1 if command matches the resource entry / 0 if it does not
5055 **/
5056static int ipr_match_res(struct ipr_cmnd *ipr_cmd, void *resource)
5057{
5058 struct ipr_resource_entry *res = resource;
5059
5060 if (res && ipr_cmd->ioarcb.res_handle == res->res_handle)
5061 return 1;
5062 return 0;
5063}
5064
5065/**
Brian King6cdb0812014-10-30 17:27:10 -05005066 * ipr_wait_for_ops - Wait for matching commands to complete
 * @ioa_cfg:	ioa config struct
5068 * @device: device to match (sdev)
5069 * @match: match function to use
5070 *
5071 * Returns:
5072 * SUCCESS / FAILED
5073 **/
5074static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
5075 int (*match)(struct ipr_cmnd *, void *))
5076{
5077 struct ipr_cmnd *ipr_cmd;
Brian King439ae282017-03-15 16:58:39 -05005078 int wait, i;
Brian King6cdb0812014-10-30 17:27:10 -05005079 unsigned long flags;
5080 struct ipr_hrr_queue *hrrq;
5081 signed long timeout = IPR_ABORT_TASK_TIMEOUT;
5082 DECLARE_COMPLETION_ONSTACK(comp);
5083
5084 ENTER;
5085 do {
5086 wait = 0;
5087
5088 for_each_hrrq(hrrq, ioa_cfg) {
5089 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005090 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5091 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5092 if (!ipr_cmnd_is_free(ipr_cmd)) {
5093 if (match(ipr_cmd, device)) {
5094 ipr_cmd->eh_comp = &comp;
5095 wait++;
5096 }
Brian King6cdb0812014-10-30 17:27:10 -05005097 }
5098 }
5099 spin_unlock_irqrestore(hrrq->lock, flags);
5100 }
5101
5102 if (wait) {
5103 timeout = wait_for_completion_timeout(&comp, timeout);
5104
5105 if (!timeout) {
5106 wait = 0;
5107
5108 for_each_hrrq(hrrq, ioa_cfg) {
5109 spin_lock_irqsave(hrrq->lock, flags);
Brian King439ae282017-03-15 16:58:39 -05005110 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5111 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5112 if (!ipr_cmnd_is_free(ipr_cmd)) {
5113 if (match(ipr_cmd, device)) {
5114 ipr_cmd->eh_comp = NULL;
5115 wait++;
5116 }
Brian King6cdb0812014-10-30 17:27:10 -05005117 }
5118 }
5119 spin_unlock_irqrestore(hrrq->lock, flags);
5120 }
5121
5122 if (wait)
5123 dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
5124 LEAVE;
5125 return wait ? FAILED : SUCCESS;
5126 }
5127 }
5128 } while (wait);
5129
5130 LEAVE;
5131 return SUCCESS;
5132}
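/*
 * Usage note (descriptive): callers pair @device with a matching
 * @match function, e.g. ipr_wait_for_ops(ioa_cfg, cmd->device,
 * ipr_match_lun) to drain commands for one LUN, or
 * ipr_wait_for_ops(ioa_cfg, res, ipr_match_res) to drain commands for
 * one resource entry, as done by the reset handlers below.
 */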
5133
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005134static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005135{
5136 struct ipr_ioa_cfg *ioa_cfg;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005137 unsigned long lock_flags = 0;
5138 int rc = SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005139
5140 ENTER;
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005141 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
5142 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005143
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05005144 if (!ioa_cfg->in_reset_reload && !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005145 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005146 dev_err(&ioa_cfg->pdev->dev,
5147 "Adapter being reset as a result of error recovery.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005148
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005149 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5150 ioa_cfg->sdt_state = GET_DUMP;
5151 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005152
wenxiong@linux.vnet.ibm.com70233ac2013-01-11 17:43:54 -06005153 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5154 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5155 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005156
	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and that reset failed, fail this
	 host reset as well. */
5159 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
5160 ipr_trace;
5161 rc = FAILED;
5162 }
5163
5164 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005165 LEAVE;
5166 return rc;
5167}
5168
5169/**
Brian Kingc6513092006-03-29 09:37:43 -06005170 * ipr_device_reset - Reset the device
5171 * @ioa_cfg: ioa config struct
5172 * @res: resource entry struct
5173 *
5174 * This function issues a device reset to the affected device.
5175 * If the device is a SCSI device, a LUN reset will be sent
5176 * to the device first. If that does not work, a target reset
Brian King35a39692006-09-25 12:39:20 -05005177 * will be sent. If the device is a SATA device, a PHY reset will
5178 * be sent.
Brian Kingc6513092006-03-29 09:37:43 -06005179 *
5180 * Return value:
5181 * 0 on success / non-zero on failure
5182 **/
5183static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
5184 struct ipr_resource_entry *res)
5185{
5186 struct ipr_cmnd *ipr_cmd;
5187 struct ipr_ioarcb *ioarcb;
5188 struct ipr_cmd_pkt *cmd_pkt;
Brian King35a39692006-09-25 12:39:20 -05005189 struct ipr_ioarcb_ata_regs *regs;
Brian Kingc6513092006-03-29 09:37:43 -06005190 u32 ioasc;
5191
5192 ENTER;
5193 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5194 ioarcb = &ipr_cmd->ioarcb;
5195 cmd_pkt = &ioarcb->cmd_pkt;
Wayne Boyera32c0552010-02-19 13:23:36 -08005196
5197 if (ipr_cmd->ioa_cfg->sis64) {
5198 regs = &ipr_cmd->i.ata_ioadl.regs;
5199 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5200 } else
5201 regs = &ioarcb->u.add_data.u.regs;
Brian Kingc6513092006-03-29 09:37:43 -06005202
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005203 ioarcb->res_handle = res->res_handle;
Brian Kingc6513092006-03-29 09:37:43 -06005204 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5205 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
Brian King35a39692006-09-25 12:39:20 -05005206 if (ipr_is_gata(res)) {
5207 cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
Wayne Boyera32c0552010-02-19 13:23:36 -08005208 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
Brian King35a39692006-09-25 12:39:20 -05005209 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
5210 }
Brian Kingc6513092006-03-29 09:37:43 -06005211
5212 ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005213 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005214 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005215 if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
5216 if (ipr_cmd->ioa_cfg->sis64)
5217 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5218 sizeof(struct ipr_ioasa_gata));
5219 else
5220 memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5221 sizeof(struct ipr_ioasa_gata));
5222 }
Brian Kingc6513092006-03-29 09:37:43 -06005223
5224 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005225 return IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0;
Brian Kingc6513092006-03-29 09:37:43 -06005226}
5227
5228/**
Brian King35a39692006-09-25 12:39:20 -05005229 * ipr_sata_reset - Reset the SATA port
Tejun Heocc0680a2007-08-06 18:36:23 +09005230 * @link: SATA link to reset
Brian King35a39692006-09-25 12:39:20 -05005231 * @classes: class of the attached device
5232 *
Tejun Heocc0680a2007-08-06 18:36:23 +09005233 * This function issues a SATA phy reset to the affected ATA link.
Brian King35a39692006-09-25 12:39:20 -05005234 *
5235 * Return value:
5236 * 0 on success / non-zero on failure
5237 **/
Tejun Heocc0680a2007-08-06 18:36:23 +09005238static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
Andrew Morton120bda32007-03-26 02:17:43 -07005239 unsigned long deadline)
Brian King35a39692006-09-25 12:39:20 -05005240{
Tejun Heocc0680a2007-08-06 18:36:23 +09005241 struct ipr_sata_port *sata_port = link->ap->private_data;
Brian King35a39692006-09-25 12:39:20 -05005242 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5243 struct ipr_resource_entry *res;
5244 unsigned long lock_flags = 0;
Brian Kingef97d8a2017-03-15 16:58:41 -05005245 int rc = -ENXIO, ret;
Brian King35a39692006-09-25 12:39:20 -05005246
5247 ENTER;
5248 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005249 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06005250 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5251 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5252 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5253 }
5254
Brian King35a39692006-09-25 12:39:20 -05005255 res = sata_port->res;
5256 if (res) {
5257 rc = ipr_device_reset(ioa_cfg, res);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005258 *classes = res->ata_class;
Brian Kingef97d8a2017-03-15 16:58:41 -05005259 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King35a39692006-09-25 12:39:20 -05005260
Brian Kingef97d8a2017-03-15 16:58:41 -05005261 ret = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5262 if (ret != SUCCESS) {
5263 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5264 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
5265 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5266
5267 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5268 }
5269 } else
5270 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5271
Brian King35a39692006-09-25 12:39:20 -05005272 LEAVE;
5273 return rc;
5274}
5275
5276/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005277 * ipr_eh_dev_reset - Reset the device
5278 * @scsi_cmd: scsi command struct
5279 *
5280 * This function issues a device reset to the affected device.
5281 * A LUN reset will be sent to the device first. If that does
5282 * not work, a target reset will be sent.
5283 *
5284 * Return value:
5285 * SUCCESS / FAILED
5286 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005287static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005288{
5289 struct ipr_cmnd *ipr_cmd;
5290 struct ipr_ioa_cfg *ioa_cfg;
5291 struct ipr_resource_entry *res;
Brian King35a39692006-09-25 12:39:20 -05005292 struct ata_port *ap;
Brian King439ae282017-03-15 16:58:39 -05005293 int rc = 0, i;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005294 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295
5296 ENTER;
5297 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5298 res = scsi_cmd->device->hostdata;
5299
Linus Torvalds1da177e2005-04-16 15:20:36 -07005300 /*
5301 * If we are currently going through reset/reload, return failed. This will force the
5302 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
5303 * reset to complete
5304 */
5305 if (ioa_cfg->in_reset_reload)
5306 return FAILED;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005307 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005308 return FAILED;
5309
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005310 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005311 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005312 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5313 ipr_cmd = ioa_cfg->ipr_cmnd_list[i];
5314
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005315 if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
Brian King960e9642017-03-15 16:58:37 -05005316 if (!ipr_cmd->qc)
5317 continue;
Brian King439ae282017-03-15 16:58:39 -05005318 if (ipr_cmnd_is_free(ipr_cmd))
5319 continue;
Brian King960e9642017-03-15 16:58:37 -05005320
5321 ipr_cmd->done = ipr_sata_eh_done;
5322 if (!(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005323 ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
5324 ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
5325 }
Brian King7402ece2006-11-21 10:28:23 -06005326 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005327 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005328 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005329 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005330 res->resetting_device = 1;
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005331 scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
Brian King35a39692006-09-25 12:39:20 -05005332
5333 if (ipr_is_gata(res) && res->sata_port) {
5334 ap = res->sata_port->ap;
5335 spin_unlock_irq(scsi_cmd->device->host->host_lock);
Tejun Heoa1efdab2008-03-25 12:22:50 +09005336 ata_std_error_handler(ap);
Brian King35a39692006-09-25 12:39:20 -05005337 spin_lock_irq(scsi_cmd->device->host->host_lock);
5338 } else
5339 rc = ipr_device_reset(ioa_cfg, res);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005340 res->resetting_device = 0;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06005341 res->reset_occurred = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005342
Linus Torvalds1da177e2005-04-16 15:20:36 -07005343 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005344 return rc ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005345}
5346
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005347static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005348{
5349 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005350 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingef97d8a2017-03-15 16:58:41 -05005351 struct ipr_resource_entry *res;
Brian King6cdb0812014-10-30 17:27:10 -05005352
5353 ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
Brian Kingef97d8a2017-03-15 16:58:41 -05005354 res = cmd->device->hostdata;
5355
5356 if (!res)
5357 return FAILED;
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005358
5359 spin_lock_irq(cmd->device->host->host_lock);
5360 rc = __ipr_eh_dev_reset(cmd);
5361 spin_unlock_irq(cmd->device->host->host_lock);
5362
Brian Kingef97d8a2017-03-15 16:58:41 -05005363 if (rc == SUCCESS) {
5364 if (ipr_is_gata(res) && res->sata_port)
5365 rc = ipr_wait_for_ops(ioa_cfg, res, ipr_match_res);
5366 else
5367 rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
5368 }
Brian King6cdb0812014-10-30 17:27:10 -05005369
Jeff Garzik 94d0e7b82005-05-28 07:55:48 -04005370 return rc;
5371}
5372
Linus Torvalds1da177e2005-04-16 15:20:36 -07005373/**
5374 * ipr_bus_reset_done - Op done function for bus reset.
5375 * @ipr_cmd: ipr command struct
5376 *
5377 * This function is the op done function for a bus reset
5378 *
5379 * Return value:
5380 * none
5381 **/
5382static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
5383{
5384 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5385 struct ipr_resource_entry *res;
5386
5387 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005388 if (!ioa_cfg->sis64)
5389 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
5390 if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
5391 scsi_report_bus_reset(ioa_cfg->host, res->bus);
5392 break;
5393 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005394 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005395
5396 /*
5397 * If abort has not completed, indicate the reset has, else call the
5398 * abort's done function to wake the sleeping eh thread
5399 */
5400 if (ipr_cmd->sibling->sibling)
5401 ipr_cmd->sibling->sibling = NULL;
5402 else
5403 ipr_cmd->sibling->done(ipr_cmd->sibling);
5404
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005405 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005406 LEAVE;
5407}
5408
5409/**
5410 * ipr_abort_timeout - An abort task has timed out
 * @t: Timer context used to fetch ipr command struct
5412 *
5413 * This function handles when an abort task times out. If this
5414 * happens we issue a bus reset since we have resources tied
5415 * up that must be freed before returning to the midlayer.
5416 *
5417 * Return value:
5418 * none
5419 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07005420static void ipr_abort_timeout(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005421{
Kees Cook738c6ec2017-08-18 16:53:24 -07005422 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005423 struct ipr_cmnd *reset_cmd;
5424 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5425 struct ipr_cmd_pkt *cmd_pkt;
5426 unsigned long lock_flags = 0;
5427
5428 ENTER;
5429 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5430 if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
5431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5432 return;
5433 }
5434
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005435 sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436 reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5437 ipr_cmd->sibling = reset_cmd;
5438 reset_cmd->sibling = ipr_cmd;
5439 reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
5440 cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
5441 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5442 cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
5443 cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
5444
5445 ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
5446 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5447 LEAVE;
5448}
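/*
 * The timed-out abort and the bus reset it spawns are cross-linked via
 * their ->sibling pointers.  ipr_bus_reset_done() later inspects that
 * link: if the abort is still outstanding it simply records that the
 * reset finished, otherwise it calls the abort's done routine to wake
 * the sleeping eh thread.
 */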
5449
5450/**
5451 * ipr_cancel_op - Cancel specified op
5452 * @scsi_cmd: scsi command struct
5453 *
 5454 * This function cancels the specified op.
5455 *
5456 * Return value:
5457 * SUCCESS / FAILED
5458 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005459static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005460{
5461 struct ipr_cmnd *ipr_cmd;
5462 struct ipr_ioa_cfg *ioa_cfg;
5463 struct ipr_resource_entry *res;
5464 struct ipr_cmd_pkt *cmd_pkt;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005465 u32 ioasc, int_reg;
Brian King439ae282017-03-15 16:58:39 -05005466 int i, op_found = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005467 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468
5469 ENTER;
5470 ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5471 res = scsi_cmd->device->hostdata;
5472
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005473 /* If we are currently going through reset/reload, return failed.
5474 * This will force the mid-layer to call ipr_eh_host_reset,
5475 * which will then go to sleep and wait for the reset to complete
5476 */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005477 if (ioa_cfg->in_reset_reload ||
5478 ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005479 return FAILED;
Kleber Sacilotto de Souzaa92fa252012-01-16 19:30:25 -02005480 if (!res)
5481 return FAILED;
5482
5483 /*
 5484 * If we are aborting a timed-out op, chances are that the timeout was caused
 5485 * by a still-undetected EEH error. In such cases, reading a register will
5486 * trigger the EEH recovery infrastructure.
5487 */
5488 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5489
5490 if (!ipr_is_gscsi(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07005491 return FAILED;
5492
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005493 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005494 spin_lock(&hrrq->_lock);
Brian King439ae282017-03-15 16:58:39 -05005495 for (i = hrrq->min_cmd_id; i <= hrrq->max_cmd_id; i++) {
5496 if (ioa_cfg->ipr_cmnd_list[i]->scsi_cmd == scsi_cmd) {
5497 if (!ipr_cmnd_is_free(ioa_cfg->ipr_cmnd_list[i])) {
5498 op_found = 1;
5499 break;
5500 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005501 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005502 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005503 spin_unlock(&hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005504 }
5505
5506 if (!op_found)
5507 return SUCCESS;
5508
5509 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08005510 ipr_cmd->ioarcb.res_handle = res->res_handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005511 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5512 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5513 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5514 ipr_cmd->u.sdev = scsi_cmd->device;
5515
Brian Kingfb3ed3c2006-03-29 09:37:37 -06005516 scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
5517 scsi_cmd->cmnd[0]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005518 ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
Wayne Boyer96d21f02010-05-10 09:13:27 -07005519 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005520
5521 /*
5522 * If the abort task timed out and we sent a bus reset, we will get
 5523 * one of the following responses to the abort
5524 */
5525 if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
5526 ioasc = 0;
5527 ipr_trace;
5528 }
5529
Kleber Sacilotto de Souzac4ee22a2013-03-14 13:52:23 -05005530 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06005531 if (!ipr_is_naca_model(res))
5532 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005533
5534 LEAVE;
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005535 return IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005536}
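/*
 * ipr_cancel_op() walks every hrr queue under its _lock looking for the
 * command to cancel; if it is no longer outstanding, SUCCESS is returned
 * without sending anything.  Otherwise a Cancel All Requests is issued as
 * a blocking command, and a Bus Was Reset / Sync Required IOASC on the
 * abort is treated as success because the abort-timeout path has already
 * reset the bus.
 */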
5537
5538/**
 5539 * ipr_scan_finished - Determine whether the device scan is complete
 5540 * @shost:	scsi host struct
 * @elapsed_time:	elapsed scan time in jiffies
5541 *
5542 * Return value:
Brian Kingf688f962014-12-02 12:47:37 -06005543 * 0 if scan in progress / 1 if scan is complete
5544 **/
5545static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
5546{
5547 unsigned long lock_flags;
5548 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
5549 int rc = 0;
5550
5551 spin_lock_irqsave(shost->host_lock, lock_flags);
5552 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
5553 rc = 1;
5554 if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
5555 rc = 1;
5556 spin_unlock_irqrestore(shost->host_lock, lock_flags);
5557 return rc;
5558}
5559
5560/**
 5561 * ipr_eh_abort - Abort a single op
5562 * @scsi_cmd: scsi command struct
5563 *
5564 * Return value:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005565 * SUCCESS / FAILED
5566 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03005567static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005568{
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005569 unsigned long flags;
5570 int rc;
Brian King6cdb0812014-10-30 17:27:10 -05005571 struct ipr_ioa_cfg *ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005572
5573 ENTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005574
Brian King6cdb0812014-10-30 17:27:10 -05005575 ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
5576
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005577 spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
5578 rc = ipr_cancel_op(scsi_cmd);
5579 spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005580
Brian King6cdb0812014-10-30 17:27:10 -05005581 if (rc == SUCCESS)
5582 rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005583 LEAVE;
Jeff Garzik 8fa728a2005-05-28 07:54:40 -04005584 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005585}
5586
5587/**
5588 * ipr_handle_other_interrupt - Handle "other" interrupts
5589 * @ioa_cfg: ioa config struct
Wayne Boyer634651f2010-08-27 14:45:07 -07005590 * @int_reg: interrupt register
Linus Torvalds1da177e2005-04-16 15:20:36 -07005591 *
5592 * Return value:
5593 * IRQ_NONE / IRQ_HANDLED
5594 **/
Wayne Boyer634651f2010-08-27 14:45:07 -07005595static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
Wayne Boyer630ad8312011-04-07 12:12:30 -07005596 u32 int_reg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005597{
5598 irqreturn_t rc = IRQ_HANDLED;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005599 u32 int_mask_reg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005600
Wayne Boyer7dacb642011-04-12 10:29:02 -07005601 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
5602 int_reg &= ~int_mask_reg;
5603
5604 /* If an interrupt on the adapter did not occur, ignore it.
5605 * Or in the case of SIS 64, check for a stage change interrupt.
5606 */
5607 if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
5608 if (ioa_cfg->sis64) {
5609 int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5610 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5611 if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
5612
5613 /* clear stage change */
5614 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
5615 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
5616 list_del(&ioa_cfg->reset_cmd->queue);
5617 del_timer(&ioa_cfg->reset_cmd->timer);
5618 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
5619 return IRQ_HANDLED;
5620 }
5621 }
5622
5623 return IRQ_NONE;
5624 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005625
5626 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5627 /* Mask the interrupt */
5628 writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005629 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5630
5631 list_del(&ioa_cfg->reset_cmd->queue);
5632 del_timer(&ioa_cfg->reset_cmd->timer);
5633 ipr_reset_ioa_job(ioa_cfg->reset_cmd);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005634 } else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
Brian King7dd21302012-03-14 21:20:08 -05005635 if (ioa_cfg->clear_isr) {
5636 if (ipr_debug && printk_ratelimit())
5637 dev_err(&ioa_cfg->pdev->dev,
5638 "Spurious interrupt detected. 0x%08X\n", int_reg);
5639 writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5640 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5641 return IRQ_NONE;
5642 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005643 } else {
5644 if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5645 ioa_cfg->ioa_unit_checked = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005646 else if (int_reg & IPR_PCII_NO_HOST_RRQ)
5647 dev_err(&ioa_cfg->pdev->dev,
5648 "No Host RRQ. 0x%08X\n", int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005649 else
5650 dev_err(&ioa_cfg->pdev->dev,
5651 "Permanent IOA failure. 0x%08X\n", int_reg);
5652
5653 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5654 ioa_cfg->sdt_state = GET_DUMP;
5655
5656 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5657 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5658 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005659
Linus Torvalds1da177e2005-04-16 15:20:36 -07005660 return rc;
5661}
5662
5663/**
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005664 * ipr_isr_eh - Interrupt service routine error handler
5665 * @ioa_cfg: ioa config struct
 5666 * @msg: message to log
 * @number:	number logged along with the message (e.g. a command index)
5667 *
5668 * Return value:
5669 * none
5670 **/
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005671static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005672{
5673 ioa_cfg->errors_logged++;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005674 dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005675
5676 if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5677 ioa_cfg->sdt_state = GET_DUMP;
5678
5679 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5680}
5681
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005682static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005683 struct list_head *doneq)
5684{
5685 u32 ioasc;
5686 u16 cmd_index;
5687 struct ipr_cmnd *ipr_cmd;
5688 struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
5689 int num_hrrq = 0;
5690
5691 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005692 if (!hrr_queue->allow_interrupts)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005693 return 0;
5694
5695 while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5696 hrr_queue->toggle_bit) {
5697
5698 cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
5699 IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
5700 IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5701
5702 if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
5703 cmd_index < hrr_queue->min_cmd_id)) {
5704 ipr_isr_eh(ioa_cfg,
5705 "Invalid response handle from IOA: ",
5706 cmd_index);
5707 break;
5708 }
5709
5710 ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5711 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5712
5713 ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5714
5715 list_move_tail(&ipr_cmd->queue, doneq);
5716
5717 if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
5718 hrr_queue->hrrq_curr++;
5719 } else {
5720 hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
5721 hrr_queue->toggle_bit ^= 1u;
5722 }
5723 num_hrrq++;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005724 if (budget > 0 && num_hrrq >= budget)
5725 break;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005726 }
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005727
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005728 return num_hrrq;
5729}
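/*
 * The host RRQ is a circular buffer of response handles written by the
 * IOA.  The toggle bit flips each time the driver wraps hrrq_curr back to
 * hrrq_start, which is how new entries are distinguished from stale ones.
 * A non-positive budget (the hard-irq path) drains everything that is
 * ready; a positive budget (the irq_poll path) caps the work done per
 * poll.
 */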
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005730
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005731static int ipr_iopoll(struct irq_poll *iop, int budget)
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005732{
5733 struct ipr_ioa_cfg *ioa_cfg;
5734 struct ipr_hrr_queue *hrrq;
5735 struct ipr_cmnd *ipr_cmd, *temp;
5736 unsigned long hrrq_flags;
5737 int completed_ops;
5738 LIST_HEAD(doneq);
5739
5740 hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
5741 ioa_cfg = hrrq->ioa_cfg;
5742
5743 spin_lock_irqsave(hrrq->lock, hrrq_flags);
5744 completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
5745
5746 if (completed_ops < budget)
Christoph Hellwig511cbce2015-11-10 14:56:14 +01005747 irq_poll_complete(iop);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005748 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5749
5750 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5751 list_del(&ipr_cmd->queue);
5752 del_timer(&ipr_cmd->timer);
5753 ipr_cmd->fast_done(ipr_cmd);
5754 }
5755
5756 return completed_ops;
5757}
5758
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005759/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07005760 * ipr_isr - Interrupt service routine
5761 * @irq: irq number
 5762 * @devp: pointer to the hrr queue for this interrupt
Linus Torvalds1da177e2005-04-16 15:20:36 -07005763 *
5764 * Return value:
5765 * IRQ_NONE / IRQ_HANDLED
5766 **/
David Howells7d12e782006-10-05 14:55:46 +01005767static irqreturn_t ipr_isr(int irq, void *devp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005768{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005769 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
5770 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005771 unsigned long hrrq_flags = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005772 u32 int_reg = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005773 int num_hrrq = 0;
Wayne Boyer7dacb642011-04-12 10:29:02 -07005774 int irq_none = 0;
Brian King172cd6e2012-07-17 08:14:40 -05005775 struct ipr_cmnd *ipr_cmd, *temp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005776 irqreturn_t rc = IRQ_NONE;
Brian King172cd6e2012-07-17 08:14:40 -05005777 LIST_HEAD(doneq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005778
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005779 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005780 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005781 if (!hrrq->allow_interrupts) {
5782 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005783 return IRQ_NONE;
5784 }
5785
Linus Torvalds1da177e2005-04-16 15:20:36 -07005786 while (1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005787 if (ipr_process_hrrq(hrrq, -1, &doneq)) {
5788 rc = IRQ_HANDLED;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005789
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005790 if (!ioa_cfg->clear_isr)
5791 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005792
Linus Torvalds1da177e2005-04-16 15:20:36 -07005793 /* Clear the PCI interrupt */
Wayne Boyera5442ba2011-05-17 09:18:53 -07005794 num_hrrq = 0;
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005795 do {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005796 writel(IPR_PCII_HRRQ_UPDATED,
5797 ioa_cfg->regs.clr_interrupt_reg32);
Wayne Boyer7dacb642011-04-12 10:29:02 -07005798 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005799 } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005800 num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
Wayne Boyer3feeb89d2009-10-20 11:09:00 -07005801
Wayne Boyer7dacb642011-04-12 10:29:02 -07005802 } else if (rc == IRQ_NONE && irq_none == 0) {
5803 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5804 irq_none++;
Wayne Boyera5442ba2011-05-17 09:18:53 -07005805 } else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
5806 int_reg & IPR_PCII_HRRQ_UPDATED) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005807 ipr_isr_eh(ioa_cfg,
5808 "Error clearing HRRQ: ", num_hrrq);
Brian King172cd6e2012-07-17 08:14:40 -05005809 rc = IRQ_HANDLED;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005810 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005811 } else
5812 break;
5813 }
5814
5815 if (unlikely(rc == IRQ_NONE))
Wayne Boyer634651f2010-08-27 14:45:07 -07005816 rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005817
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005818 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King172cd6e2012-07-17 08:14:40 -05005819 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5820 list_del(&ipr_cmd->queue);
5821 del_timer(&ipr_cmd->timer);
5822 ipr_cmd->fast_done(ipr_cmd);
5823 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005824 return rc;
5825}
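/*
 * Completed commands are collected on a local doneq and their fast_done
 * handlers are invoked only after the hrrq lock has been dropped, so the
 * completion callbacks never run with that lock held.
 */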
Brian King172cd6e2012-07-17 08:14:40 -05005826
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005827/**
5828 * ipr_isr_mhrrq - Interrupt service routine
5829 * @irq: irq number
 5830 * @devp: pointer to the hrr queue for this interrupt
5831 *
5832 * Return value:
5833 * IRQ_NONE / IRQ_HANDLED
5834 **/
5835static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
5836{
5837 struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005838 struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005839 unsigned long hrrq_flags = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005840 struct ipr_cmnd *ipr_cmd, *temp;
5841 irqreturn_t rc = IRQ_NONE;
5842 LIST_HEAD(doneq);
5843
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005844 spin_lock_irqsave(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005845
5846 /* If interrupts are disabled, ignore the interrupt */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005847 if (!hrrq->allow_interrupts) {
5848 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005849 return IRQ_NONE;
5850 }
5851
Jens Axboe89f8b332014-03-13 09:38:42 -06005852 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005853 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5854 hrrq->toggle_bit) {
Christoph Hellwigea511902015-12-07 06:41:11 -08005855 irq_poll_sched(&hrrq->iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005856 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
5857 return IRQ_HANDLED;
5858 }
5859 } else {
5860 if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5861 hrrq->toggle_bit)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005862
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -06005863 if (ipr_process_hrrq(hrrq, -1, &doneq))
5864 rc = IRQ_HANDLED;
5865 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005866
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06005867 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06005868
5869 list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
5870 list_del(&ipr_cmd->queue);
5871 del_timer(&ipr_cmd->timer);
5872 ipr_cmd->fast_done(ipr_cmd);
5873 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005874 return rc;
5875}
5876
5877/**
Wayne Boyera32c0552010-02-19 13:23:36 -08005878 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
Linus Torvalds1da177e2005-04-16 15:20:36 -07005879 * @ioa_cfg: ioa config struct
5880 * @ipr_cmd: ipr command struct
5881 *
5882 * Return value:
5883 * 0 on success / -1 on failure
5884 **/
Wayne Boyera32c0552010-02-19 13:23:36 -08005885static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5886 struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005887{
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005888 int i, nseg;
5889 struct scatterlist *sg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005890 u32 length;
5891 u32 ioadl_flags = 0;
5892 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5893 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08005894 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005895
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005896 length = scsi_bufflen(scsi_cmd);
5897 if (!length)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005898 return 0;
5899
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005900 nseg = scsi_dma_map(scsi_cmd);
5901 if (nseg < 0) {
Anton Blanchard51f52a42011-05-09 10:07:40 +10005902 if (printk_ratelimit())
Anton Blanchardd73341b2014-10-30 17:27:08 -05005903 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005904 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005905 }
5906
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005907 ipr_cmd->dma_use_sg = nseg;
5908
Wayne Boyer438b0332010-05-10 09:13:00 -07005909 ioarcb->data_transfer_length = cpu_to_be32(length);
Wayne Boyerb8803b12010-05-14 08:55:13 -07005910 ioarcb->ioadl_len =
5911 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
Wayne Boyer438b0332010-05-10 09:13:00 -07005912
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005913 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5914 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5915 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08005916 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5917 ioadl_flags = IPR_IOADL_FLAGS_READ;
5918
5919 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5920 ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5921 ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5922 ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5923 }
5924
5925 ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5926 return 0;
5927}
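/*
 * Each IOADL64 descriptor built above carries the transfer flags, the
 * element length, and a 64-bit DMA address for one scatter/gather
 * segment; the final descriptor is tagged with IPR_IOADL_FLAGS_LAST to
 * mark the end of the list.
 */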
5928
5929/**
5930 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5931 * @ioa_cfg: ioa config struct
5932 * @ipr_cmd: ipr command struct
5933 *
5934 * Return value:
5935 * 0 on success / -1 on failure
5936 **/
5937static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5938 struct ipr_cmnd *ipr_cmd)
5939{
5940 int i, nseg;
5941 struct scatterlist *sg;
5942 u32 length;
5943 u32 ioadl_flags = 0;
5944 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5945 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5946 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5947
5948 length = scsi_bufflen(scsi_cmd);
5949 if (!length)
5950 return 0;
5951
5952 nseg = scsi_dma_map(scsi_cmd);
5953 if (nseg < 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05005954 dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
Wayne Boyera32c0552010-02-19 13:23:36 -08005955 return -1;
5956 }
5957
5958 ipr_cmd->dma_use_sg = nseg;
5959
5960 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5961 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5962 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5963 ioarcb->data_transfer_length = cpu_to_be32(length);
5964 ioarcb->ioadl_len =
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005965 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5966 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5967 ioadl_flags = IPR_IOADL_FLAGS_READ;
5968 ioarcb->read_data_transfer_length = cpu_to_be32(length);
5969 ioarcb->read_ioadl_len =
5970 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5971 }
5972
Wayne Boyera32c0552010-02-19 13:23:36 -08005973 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5974 ioadl = ioarcb->u.add_data.u.ioadl;
5975 ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5976 offsetof(struct ipr_ioarcb, u.add_data));
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09005977 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5978 }
5979
5980 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5981 ioadl[i].flags_and_data_len =
5982 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5983 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5984 }
5985
5986 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5987 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005988}
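/*
 * 32-bit IOADL variant: when the scatter/gather list is short enough to
 * fit in the IOARCB's add_data area it is embedded there directly, which
 * presumably spares the adapter a separate DMA fetch of the descriptor
 * list.
 */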
5989
5990/**
Brian Kingf646f322017-03-15 16:58:39 -05005991 * __ipr_erp_done - Process completion of ERP for a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07005992 * @ipr_cmd: ipr command struct
5993 *
5994 * This function copies the sense buffer into the scsi_cmd
5995 * struct and pushes the scsi_done function.
5996 *
5997 * Return value:
5998 * nothing
5999 **/
Brian Kingf646f322017-03-15 16:58:39 -05006000static void __ipr_erp_done(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006001{
6002 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6003 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006004 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006005
6006 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
6007 scsi_cmd->result |= (DID_ERROR << 16);
Brian Kingfb3ed3c2006-03-29 09:37:37 -06006008 scmd_printk(KERN_ERR, scsi_cmd,
6009 "Request Sense failed with IOASC: 0x%08X\n", ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006010 } else {
6011 memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
6012 SCSI_SENSE_BUFFERSIZE);
6013 }
6014
6015 if (res) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006016 if (!ipr_is_naca_model(res))
6017 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006018 res->in_erp = 0;
6019 }
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006020 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006021 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006022 if (ipr_cmd->eh_comp)
6023 complete(ipr_cmd->eh_comp);
6024 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006025}
6026
6027/**
Brian Kingf646f322017-03-15 16:58:39 -05006028 * ipr_erp_done - Process completion of ERP for a device
6029 * @ipr_cmd: ipr command struct
6030 *
6031 * This function copies the sense buffer into the scsi_cmd
6032 * struct and pushes the scsi_done function.
6033 *
6034 * Return value:
6035 * nothing
6036 **/
6037static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
6038{
6039 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6040 unsigned long hrrq_flags;
6041
6042 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6043 __ipr_erp_done(ipr_cmd);
6044 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006045}
6046
6047/**
6048 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
6049 * @ipr_cmd: ipr command struct
6050 *
6051 * Return value:
6052 * none
6053 **/
6054static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
6055{
Brian King51b1c7e2007-03-29 12:43:50 -05006056 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006057 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Wayne Boyera32c0552010-02-19 13:23:36 -08006058 dma_addr_t dma_addr = ipr_cmd->dma_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006059
6060 memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
Wayne Boyera32c0552010-02-19 13:23:36 -08006061 ioarcb->data_transfer_length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006062 ioarcb->read_data_transfer_length = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006063 ioarcb->ioadl_len = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006064 ioarcb->read_ioadl_len = 0;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006065 ioasa->hdr.ioasc = 0;
6066 ioasa->hdr.residual_data_len = 0;
Wayne Boyera32c0552010-02-19 13:23:36 -08006067
6068 if (ipr_cmd->ioa_cfg->sis64)
6069 ioarcb->u.sis64_addr_data.data_ioadl_addr =
6070 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
6071 else {
6072 ioarcb->write_ioadl_addr =
6073 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
6074 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6075 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006076}
6077
6078/**
Brian Kingf646f322017-03-15 16:58:39 -05006079 * __ipr_erp_request_sense - Send request sense to a device
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080 * @ipr_cmd: ipr command struct
6081 *
6082 * This function sends a request sense to a device as a result
6083 * of a check condition.
6084 *
6085 * Return value:
6086 * nothing
6087 **/
Brian Kingf646f322017-03-15 16:58:39 -05006088static void __ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006089{
6090 struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006091 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006092
6093 if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
Brian Kingf646f322017-03-15 16:58:39 -05006094 __ipr_erp_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006095 return;
6096 }
6097
6098 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6099
6100 cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
6101 cmd_pkt->cdb[0] = REQUEST_SENSE;
6102 cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
6103 cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
6104 cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6105 cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
6106
Wayne Boyera32c0552010-02-19 13:23:36 -08006107 ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
6108 SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006109
6110 ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
6111 IPR_REQUEST_SENSE_TIMEOUT * 2);
6112}
6113
6114/**
Brian Kingf646f322017-03-15 16:58:39 -05006115 * ipr_erp_request_sense - Send request sense to a device
6116 * @ipr_cmd: ipr command struct
6117 *
6118 * This function sends a request sense to a device as a result
6119 * of a check condition.
6120 *
6121 * Return value:
6122 * nothing
6123 **/
6124static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
6125{
6126 struct ipr_hrr_queue *hrrq = ipr_cmd->hrrq;
6127 unsigned long hrrq_flags;
6128
6129 spin_lock_irqsave(&hrrq->_lock, hrrq_flags);
6130 __ipr_erp_request_sense(ipr_cmd);
6131 spin_unlock_irqrestore(&hrrq->_lock, hrrq_flags);
6132}
6133
6134/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135 * ipr_erp_cancel_all - Send cancel all to a device
6136 * @ipr_cmd: ipr command struct
6137 *
6138 * This function sends a cancel all to a device to clear the
6139 * queue. If we are running TCQ on the device, QERR is set to 1,
6140 * which means all outstanding ops have been dropped on the floor.
6141 * Cancel all will return them to us.
6142 *
6143 * Return value:
6144 * nothing
6145 **/
6146static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
6147{
6148 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6149 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
6150 struct ipr_cmd_pkt *cmd_pkt;
6151
6152 res->in_erp = 1;
6153
6154 ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
6155
Christoph Hellwig17ea0122014-11-24 15:36:20 +01006156 if (!scsi_cmd->device->simple_tags) {
Brian Kingf646f322017-03-15 16:58:39 -05006157 __ipr_erp_request_sense(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006158 return;
6159 }
6160
6161 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
6162 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
6163 cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
6164
6165 ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
6166 IPR_CANCEL_ALL_TIMEOUT);
6167}
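/*
 * The device ERP sequence chains through these routines: cancel all
 * requests (only when tagged queuing is active), then request sense,
 * then ipr_erp_done(), which copies the sense data into the scsi_cmnd
 * and completes it back to the midlayer.
 */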
6168
6169/**
6170 * ipr_dump_ioasa - Dump contents of IOASA
6171 * @ioa_cfg: ioa config struct
6172 * @ipr_cmd: ipr command struct
Brian Kingfe964d02006-03-29 09:37:29 -06006173 * @res: resource entry struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006174 *
6175 * This function is invoked by the interrupt handler when ops
6176 * fail. It will log the IOASA if appropriate. Only called
6177 * for GPDD ops.
6178 *
6179 * Return value:
6180 * none
6181 **/
6182static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
Brian Kingfe964d02006-03-29 09:37:29 -06006183 struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006184{
6185 int i;
6186 u16 data_len;
Brian Kingb0692dd2007-03-29 12:43:09 -05006187 u32 ioasc, fd_ioasc;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006188 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006189 __be32 *ioasa_data = (__be32 *)ioasa;
6190 int error_index;
6191
Wayne Boyer96d21f02010-05-10 09:13:27 -07006192 ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
6193 fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006194
6195 if (0 == ioasc)
6196 return;
6197
6198 if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
6199 return;
6200
Brian Kingb0692dd2007-03-29 12:43:09 -05006201 if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
6202 error_index = ipr_get_error(fd_ioasc);
6203 else
6204 error_index = ipr_get_error(ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006205
6206 if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
6207 /* Don't log an error if the IOA already logged one */
Wayne Boyer96d21f02010-05-10 09:13:27 -07006208 if (ioasa->hdr.ilid != 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006209 return;
6210
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006211 if (!ipr_is_gscsi(res))
6212 return;
6213
Linus Torvalds1da177e2005-04-16 15:20:36 -07006214 if (ipr_error_table[error_index].log_ioasa == 0)
6215 return;
6216 }
6217
Brian Kingfe964d02006-03-29 09:37:29 -06006218 ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006219
Wayne Boyer96d21f02010-05-10 09:13:27 -07006220 data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
6221 if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
6222 data_len = sizeof(struct ipr_ioasa64);
6223 else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006224 data_len = sizeof(struct ipr_ioasa);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006225
6226 ipr_err("IOASA Dump:\n");
6227
6228 for (i = 0; i < data_len / 4; i += 4) {
6229 ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
6230 be32_to_cpu(ioasa_data[i]),
6231 be32_to_cpu(ioasa_data[i+1]),
6232 be32_to_cpu(ioasa_data[i+2]),
6233 be32_to_cpu(ioasa_data[i+3]));
6234 }
6235}
6236
6237/**
6238 * ipr_gen_sense - Generate SCSI sense data from an IOASA
 6239 * @ipr_cmd:	ipr command struct
6241 *
6242 * Return value:
6243 * none
6244 **/
6245static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
6246{
6247 u32 failing_lba;
6248 u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
6249 struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006250 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6251 u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006252
6253 memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
6254
6255 if (ioasc >= IPR_FIRST_DRIVER_IOASC)
6256 return;
6257
6258 ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
6259
6260 if (ipr_is_vset_device(res) &&
6261 ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
6262 ioasa->u.vset.failing_lba_hi != 0) {
6263 sense_buf[0] = 0x72;
6264 sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
6265 sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
6266 sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
6267
6268 sense_buf[7] = 12;
6269 sense_buf[8] = 0;
6270 sense_buf[9] = 0x0A;
6271 sense_buf[10] = 0x80;
6272
6273 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
6274
6275 sense_buf[12] = (failing_lba & 0xff000000) >> 24;
6276 sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
6277 sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
6278 sense_buf[15] = failing_lba & 0x000000ff;
6279
6280 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6281
6282 sense_buf[16] = (failing_lba & 0xff000000) >> 24;
6283 sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
6284 sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
6285 sense_buf[19] = failing_lba & 0x000000ff;
6286 } else {
6287 sense_buf[0] = 0x70;
6288 sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
6289 sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
6290 sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
6291
6292 /* Illegal request */
6293 if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
Wayne Boyer96d21f02010-05-10 09:13:27 -07006294 (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006295 sense_buf[7] = 10; /* additional length */
6296
6297 /* IOARCB was in error */
6298 if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
6299 sense_buf[15] = 0xC0;
6300 else /* Parameter data was invalid */
6301 sense_buf[15] = 0x80;
6302
6303 sense_buf[16] =
6304 ((IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006305 be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006306 sense_buf[17] =
6307 (IPR_FIELD_POINTER_MASK &
Wayne Boyer96d21f02010-05-10 09:13:27 -07006308 be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006309 } else {
6310 if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
6311 if (ipr_is_vset_device(res))
6312 failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
6313 else
6314 failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
6315
6316 sense_buf[0] |= 0x80; /* Or in the Valid bit */
6317 sense_buf[3] = (failing_lba & 0xff000000) >> 24;
6318 sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
6319 sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
6320 sense_buf[6] = failing_lba & 0x000000ff;
6321 }
6322
6323 sense_buf[7] = 6; /* additional length */
6324 }
6325 }
6326}
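/*
 * Descriptor-format sense (response code 0x72) is generated only for a
 * medium "do not reallocate" error on a vset device whose failing LBA
 * does not fit in 32 bits; everything else gets fixed-format sense
 * (0x70) with the failing LBA, when available, placed in the
 * information field.
 */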
6327
6328/**
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006329 * ipr_get_autosense - Copy autosense data to sense buffer
6330 * @ipr_cmd: ipr command struct
6331 *
6332 * This function copies the autosense buffer to the buffer
6333 * in the scsi_cmd, if there is autosense available.
6334 *
6335 * Return value:
6336 * 1 if autosense was available / 0 if not
6337 **/
6338static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
6339{
Wayne Boyer96d21f02010-05-10 09:13:27 -07006340 struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
6341 struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006342
Wayne Boyer96d21f02010-05-10 09:13:27 -07006343 if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006344 return 0;
6345
Wayne Boyer96d21f02010-05-10 09:13:27 -07006346 if (ipr_cmd->ioa_cfg->sis64)
6347 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
6348 min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
6349 SCSI_SENSE_BUFFERSIZE));
6350 else
6351 memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
6352 min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
6353 SCSI_SENSE_BUFFERSIZE));
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006354 return 1;
6355}
6356
6357/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006358 * ipr_erp_start - Process an error response for a SCSI op
6359 * @ioa_cfg: ioa config struct
6360 * @ipr_cmd: ipr command struct
6361 *
6362 * This function determines whether or not to initiate ERP
6363 * on the affected device.
6364 *
6365 * Return value:
6366 * nothing
6367 **/
6368static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
6369 struct ipr_cmnd *ipr_cmd)
6370{
6371 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6372 struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006373 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King8a048992007-04-26 16:00:10 -05006374 u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006375
6376 if (!res) {
Brian Kingf646f322017-03-15 16:58:39 -05006377 __ipr_scsi_eh_done(ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006378 return;
6379 }
6380
Brian King8a048992007-04-26 16:00:10 -05006381 if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006382 ipr_gen_sense(ipr_cmd);
6383
Brian Kingcc9bd5d2007-03-29 12:43:01 -05006384 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6385
Brian King8a048992007-04-26 16:00:10 -05006386 switch (masked_ioasc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006387 case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006388 if (ipr_is_naca_model(res))
6389 scsi_cmd->result |= (DID_ABORT << 16);
6390 else
6391 scsi_cmd->result |= (DID_IMM_RETRY << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006392 break;
6393 case IPR_IOASC_IR_RESOURCE_HANDLE:
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006394 case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006395 scsi_cmd->result |= (DID_NO_CONNECT << 16);
6396 break;
6397 case IPR_IOASC_HW_SEL_TIMEOUT:
6398 scsi_cmd->result |= (DID_NO_CONNECT << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006399 if (!ipr_is_naca_model(res))
6400 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006401 break;
6402 case IPR_IOASC_SYNC_REQUIRED:
6403 if (!res->in_erp)
6404 res->needs_sync_complete = 1;
6405 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6406 break;
6407 case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
brking@us.ibm.comb0df54b2005-11-01 17:01:47 -06006408 case IPR_IOASA_IR_DUAL_IOA_DISABLED:
Mauricio Faria de Oliveira785a4702017-04-11 11:46:04 -03006409 /*
6410 * exception: do not set DID_PASSTHROUGH on CHECK CONDITION
6411 * so SCSI mid-layer and upper layers handle it accordingly.
6412 */
6413 if (scsi_cmd->result != SAM_STAT_CHECK_CONDITION)
6414 scsi_cmd->result |= (DID_PASSTHROUGH << 16);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006415 break;
6416 case IPR_IOASC_BUS_WAS_RESET:
6417 case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
6418 /*
6419 * Report the bus reset and ask for a retry. The device
6420 * will give CC/UA the next command.
6421 */
6422 if (!res->resetting_device)
6423 scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
6424 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006425 if (!ipr_is_naca_model(res))
6426 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006427 break;
6428 case IPR_IOASC_HW_DEV_BUS_STATUS:
6429 scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
6430 if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006431 if (!ipr_get_autosense(ipr_cmd)) {
6432 if (!ipr_is_naca_model(res)) {
6433 ipr_erp_cancel_all(ipr_cmd);
6434 return;
6435 }
6436 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006437 }
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006438 if (!ipr_is_naca_model(res))
6439 res->needs_sync_complete = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006440 break;
6441 case IPR_IOASC_NR_INIT_CMD_REQUIRED:
6442 break;
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006443 case IPR_IOASC_IR_NON_OPTIMIZED:
6444 if (res->raw_mode) {
6445 res->raw_mode = 0;
6446 scsi_cmd->result |= (DID_IMM_RETRY << 16);
6447 } else
6448 scsi_cmd->result |= (DID_ERROR << 16);
6449 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006450 default:
Brian King5b7304f2006-08-02 14:57:51 -05006451 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6452 scsi_cmd->result |= (DID_ERROR << 16);
brking@us.ibm.comee0a90f2005-11-01 17:02:22 -06006453 if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07006454 res->needs_sync_complete = 1;
6455 break;
6456 }
6457
FUJITA Tomonori63015bc2007-05-26 00:26:59 +09006458 scsi_dma_unmap(ipr_cmd->scsi_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006459 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006460 if (ipr_cmd->eh_comp)
6461 complete(ipr_cmd->eh_comp);
6462 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006463}
6464
6465/**
6466 * ipr_scsi_done - mid-layer done function
6467 * @ipr_cmd: ipr command struct
6468 *
6469 * This function is invoked by the interrupt handler for
6470 * ops generated by the SCSI mid-layer
6471 *
6472 * Return value:
6473 * none
6474 **/
6475static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6476{
6477 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6478 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006479 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King36b8e182015-07-14 11:41:29 -05006480 unsigned long lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006481
Wayne Boyer96d21f02010-05-10 09:13:27 -07006482 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
Linus Torvalds1da177e2005-04-16 15:20:36 -07006483
6484 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
Brian King172cd6e2012-07-17 08:14:40 -05006485 scsi_dma_unmap(scsi_cmd);
6486
Brian King36b8e182015-07-14 11:41:29 -05006487 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006488 scsi_cmd->scsi_done(scsi_cmd);
Brian King66a0d592017-03-15 16:58:36 -05006489 if (ipr_cmd->eh_comp)
6490 complete(ipr_cmd->eh_comp);
6491 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Brian King36b8e182015-07-14 11:41:29 -05006492 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006493 } else {
Brian King36b8e182015-07-14 11:41:29 -05006494 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6495 spin_lock(&ipr_cmd->hrrq->_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006496 ipr_erp_start(ioa_cfg, ipr_cmd);
Brian King36b8e182015-07-14 11:41:29 -05006497 spin_unlock(&ipr_cmd->hrrq->_lock);
6498 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Brian King172cd6e2012-07-17 08:14:40 -05006499 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006500}
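/*
 * Fast path vs. error path: a clean completion only takes the per-queue
 * hrrq lock, while any non-zero sense key falls through to
 * ipr_erp_start() under both the host lock and the hrrq _lock, since ERP
 * may issue new adapter commands and touch resource state.
 */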
6501
6502/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07006503 * ipr_queuecommand - Queue a mid-layer request
Brian King00bfef22012-07-17 08:13:52 -05006504 * @shost: scsi host struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006505 * @scsi_cmd: scsi command struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07006506 *
6507 * This function queues a request generated by the mid-layer.
6508 *
6509 * Return value:
6510 * 0 on success
6511 * SCSI_MLQUEUE_DEVICE_BUSY if device is busy
6512 * SCSI_MLQUEUE_HOST_BUSY if host is busy
6513 **/
Brian King00bfef22012-07-17 08:13:52 -05006514static int ipr_queuecommand(struct Scsi_Host *shost,
6515 struct scsi_cmnd *scsi_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006516{
6517 struct ipr_ioa_cfg *ioa_cfg;
6518 struct ipr_resource_entry *res;
6519 struct ipr_ioarcb *ioarcb;
6520 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006521 unsigned long hrrq_flags, lock_flags;
Dan Carpenterd12f1572012-07-30 11:18:22 +03006522 int rc;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006523 struct ipr_hrr_queue *hrrq;
6524 int hrrq_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006525
Brian King00bfef22012-07-17 08:13:52 -05006526 ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
6527
Linus Torvalds1da177e2005-04-16 15:20:36 -07006528 scsi_cmd->result = (DID_OK << 16);
Brian King00bfef22012-07-17 08:13:52 -05006529 res = scsi_cmd->device->hostdata;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006530
6531 if (ipr_is_gata(res) && res->sata_port) {
6532 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6533 rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
6534 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6535 return rc;
6536 }
6537
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006538 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6539 hrrq = &ioa_cfg->hrrq[hrrq_id];
Linus Torvalds1da177e2005-04-16 15:20:36 -07006540
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006541 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006542 /*
6543 * We are currently blocking all devices due to a host reset
6544 * We have told the host to stop giving us new requests, but
6545 * ERP ops don't count. FIXME
6546 */
Brian Kingbfae7822013-01-30 23:45:08 -06006547 if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006548 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006549 return SCSI_MLQUEUE_HOST_BUSY;
Brian King00bfef22012-07-17 08:13:52 -05006550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006551
6552 /*
6553 * FIXME - Create scsi_set_host_offline interface
6554 * and the ioa_is_dead check can be removed
6555 */
Brian Kingbfae7822013-01-30 23:45:08 -06006556 if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006557 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006558 goto err_nodev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006559 }
6560
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006561 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6562 if (ipr_cmd == NULL) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006563 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006564 return SCSI_MLQUEUE_HOST_BUSY;
6565 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006566 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006567
Brian King172cd6e2012-07-17 08:14:40 -05006568 ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006569 ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006570
6571 memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
6572 ipr_cmd->scsi_cmd = scsi_cmd;
Brian King172cd6e2012-07-17 08:14:40 -05006573 ipr_cmd->done = ipr_scsi_eh_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006574
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006575 if (ipr_is_gscsi(res)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006576 if (scsi_cmd->underflow == 0)
6577 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6578
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006579 if (res->reset_occurred) {
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006580 res->reset_occurred = 0;
Wayne Boyerab6c10b2011-03-31 09:56:10 -07006581 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
Wendy Xiong0b1f8d42014-01-21 12:16:39 -06006582 }
Gabriel Krisman Bertazi4f92d012015-11-03 16:26:07 -02006583 }
6584
6585 if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
6586 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6587
Linus Torvalds1da177e2005-04-16 15:20:36 -07006588 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
Christoph Hellwig50668632014-10-30 14:30:06 +01006589 if (scsi_cmd->flags & SCMD_TAGGED)
6590 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_SIMPLE_TASK;
6591 else
6592 ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_UNTAGGED_TASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006593 }
6594
6595 if (scsi_cmd->cmnd[0] >= 0xC0 &&
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006596 (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006597 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006598 }
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006599 if (res->raw_mode && ipr_is_af_dasd_device(res)) {
Wen Xiongf8ee25d2015-03-26 11:23:58 -05006600 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_PIPE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006601
Gabriel Krisman Bertazi3cb4fc12015-08-19 11:47:05 -03006602 if (scsi_cmd->underflow == 0)
6603 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6604 }
6605
Dan Carpenterd12f1572012-07-30 11:18:22 +03006606 if (ioa_cfg->sis64)
6607 rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
6608 else
6609 rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006610
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006611 spin_lock_irqsave(hrrq->lock, hrrq_flags);
6612 if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006613 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006614 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006615 if (!rc)
6616 scsi_dma_unmap(scsi_cmd);
Brian Kinga5fb4072012-03-14 21:20:09 -05006617 return SCSI_MLQUEUE_HOST_BUSY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006618 }
6619
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006620 if (unlikely(hrrq->ioa_is_dead)) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006621 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006622 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006623 scsi_dma_unmap(scsi_cmd);
6624 goto err_nodev;
6625 }
6626
6627 ioarcb->res_handle = res->res_handle;
6628 if (res->needs_sync_complete) {
6629 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
6630 res->needs_sync_complete = 0;
6631 }
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006632 list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
Brian King00bfef22012-07-17 08:13:52 -05006633 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian Kinga5fb4072012-03-14 21:20:09 -05006634 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006635 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006636 return 0;
6637
6638err_nodev:
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006639 spin_lock_irqsave(hrrq->lock, hrrq_flags);
Brian King00bfef22012-07-17 08:13:52 -05006640 memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
6641 scsi_cmd->result = (DID_NO_CONNECT << 16);
6642 scsi_cmd->scsi_done(scsi_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006643 spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006644 return 0;
6645}
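/*
 * Queueing path summary: SATA commands are handed to libata; everything
 * else picks an hrr queue, grabs a free command block under hrrq->lock,
 * builds the IOARCB and IOADL outside the lock, and then re-takes the
 * lock to post the command.  Any failure after the DMA mapping point
 * unmaps the buffer before returning SCSI_MLQUEUE_HOST_BUSY or
 * completing the command with DID_NO_CONNECT.
 */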
6646
6647/**
Brian King35a39692006-09-25 12:39:20 -05006648 * ipr_ioctl - IOCTL handler
6649 * @sdev: scsi device struct
6650 * @cmd: IOCTL cmd
6651 * @arg: IOCTL arg
6652 *
6653 * Return value:
6654 * 0 on success / other on failure
6655 **/
Adrian Bunkbd705f22006-11-21 10:28:48 -06006656static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
Brian King35a39692006-09-25 12:39:20 -05006657{
6658 struct ipr_resource_entry *res;
6659
6660 res = (struct ipr_resource_entry *)sdev->hostdata;
Brian King0ce3a7e2008-07-11 13:37:50 -05006661 if (res && ipr_is_gata(res)) {
6662 if (cmd == HDIO_GET_IDENTITY)
6663 return -ENOTTY;
Jeff Garzik94be9a52009-01-16 10:17:09 -05006664 return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
Brian King0ce3a7e2008-07-11 13:37:50 -05006665 }
Brian King35a39692006-09-25 12:39:20 -05006666
6667 return -EINVAL;
6668}
6669
6670/**
 * ipr_ioa_info - Get information about the card/driver
 * @host:	scsi host struct
6673 *
6674 * Return value:
6675 * pointer to buffer with description string
6676 **/
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006677static const char *ipr_ioa_info(struct Scsi_Host *host)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006678{
6679 static char buffer[512];
6680 struct ipr_ioa_cfg *ioa_cfg;
6681 unsigned long lock_flags = 0;
6682
6683 ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
6684
6685 spin_lock_irqsave(host->host_lock, lock_flags);
6686 sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
6687 spin_unlock_irqrestore(host->host_lock, lock_flags);
6688
6689 return buffer;
6690}
6691
6692static struct scsi_host_template driver_template = {
6693 .module = THIS_MODULE,
6694 .name = "IPR",
6695 .info = ipr_ioa_info,
Brian King35a39692006-09-25 12:39:20 -05006696 .ioctl = ipr_ioctl,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006697 .queuecommand = ipr_queuecommand,
6698 .eh_abort_handler = ipr_eh_abort,
6699 .eh_device_reset_handler = ipr_eh_dev_reset,
6700 .eh_host_reset_handler = ipr_eh_host_reset,
6701 .slave_alloc = ipr_slave_alloc,
6702 .slave_configure = ipr_slave_configure,
6703 .slave_destroy = ipr_slave_destroy,
Brian Kingf688f962014-12-02 12:47:37 -06006704 .scan_finished = ipr_scan_finished,
Brian King35a39692006-09-25 12:39:20 -05006705 .target_alloc = ipr_target_alloc,
6706 .target_destroy = ipr_target_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006707 .change_queue_depth = ipr_change_queue_depth,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006708 .bios_param = ipr_biosparam,
6709 .can_queue = IPR_MAX_COMMANDS,
6710 .this_id = -1,
6711 .sg_tablesize = IPR_MAX_SGLIST,
6712 .max_sectors = IPR_IOA_MAX_SECTORS,
6713 .cmd_per_lun = IPR_MAX_CMD_PER_LUN,
6714 .use_clustering = ENABLE_CLUSTERING,
6715 .shost_attrs = ipr_ioa_attrs,
6716 .sdev_attrs = ipr_dev_attrs,
Martin K. Petersen54b2b502013-10-23 06:25:40 -04006717 .proc_name = IPR_NAME,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718};
6719
Brian King35a39692006-09-25 12:39:20 -05006720/**
6721 * ipr_ata_phy_reset - libata phy_reset handler
6722 * @ap: ata port to reset
6723 *
6724 **/
6725static void ipr_ata_phy_reset(struct ata_port *ap)
6726{
6727 unsigned long flags;
6728 struct ipr_sata_port *sata_port = ap->private_data;
6729 struct ipr_resource_entry *res = sata_port->res;
6730 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6731 int rc;
6732
6733 ENTER;
6734 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006735 while (ioa_cfg->in_reset_reload) {
Brian King35a39692006-09-25 12:39:20 -05006736 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6737 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6738 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6739 }
6740
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006741 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
Brian King35a39692006-09-25 12:39:20 -05006742 goto out_unlock;
6743
6744 rc = ipr_device_reset(ioa_cfg, res);
6745
6746 if (rc) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02006747 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006748 goto out_unlock;
6749 }
6750
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006751 ap->link.device[0].class = res->ata_class;
6752 if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
Tejun Heo3e4ec342010-05-10 21:41:30 +02006753 ap->link.device[0].class = ATA_DEV_NONE;
Brian King35a39692006-09-25 12:39:20 -05006754
6755out_unlock:
6756 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6757 LEAVE;
6758}
6759
6760/**
6761 * ipr_ata_post_internal - Cleanup after an internal command
6762 * @qc: ATA queued command
6763 *
6764 * Return value:
6765 * none
6766 **/
6767static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
6768{
6769 struct ipr_sata_port *sata_port = qc->ap->private_data;
6770 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6771 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006772 struct ipr_hrr_queue *hrrq;
Brian King35a39692006-09-25 12:39:20 -05006773 unsigned long flags;
6774
6775 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03006776 while (ioa_cfg->in_reset_reload) {
Brian King73d98ff2006-11-21 10:27:58 -06006777 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6778 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6779 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
6780 }
6781
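	/*
	 * Look for the internal command on each HRRQ pending queue; if it
	 * is still outstanding, reset the device to clean it up.
	 */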
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006782 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006783 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006784 list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
6785 if (ipr_cmd->qc == qc) {
6786 ipr_device_reset(ioa_cfg, sata_port->res);
6787 break;
6788 }
Brian King35a39692006-09-25 12:39:20 -05006789 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006790 spin_unlock(&hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006791 }
6792 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
6793}
6794
6795/**
Brian King35a39692006-09-25 12:39:20 -05006796 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
6797 * @regs: destination
6798 * @tf: source ATA taskfile
6799 *
6800 * Return value:
6801 * none
6802 **/
6803static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
6804 struct ata_taskfile *tf)
6805{
6806 regs->feature = tf->feature;
6807 regs->nsect = tf->nsect;
6808 regs->lbal = tf->lbal;
6809 regs->lbam = tf->lbam;
6810 regs->lbah = tf->lbah;
6811 regs->device = tf->device;
6812 regs->command = tf->command;
6813 regs->hob_feature = tf->hob_feature;
6814 regs->hob_nsect = tf->hob_nsect;
6815 regs->hob_lbal = tf->hob_lbal;
6816 regs->hob_lbam = tf->hob_lbam;
6817 regs->hob_lbah = tf->hob_lbah;
6818 regs->ctl = tf->ctl;
6819}
6820
6821/**
6822 * ipr_sata_done - done function for SATA commands
6823 * @ipr_cmd: ipr command struct
6824 *
6825 * This function is invoked by the interrupt handler for
6826 * ops generated by the SCSI mid-layer to SATA devices
6827 *
6828 * Return value:
6829 * none
6830 **/
6831static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
6832{
6833 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6834 struct ata_queued_cmd *qc = ipr_cmd->qc;
6835 struct ipr_sata_port *sata_port = qc->ap->private_data;
6836 struct ipr_resource_entry *res = sata_port->res;
Wayne Boyer96d21f02010-05-10 09:13:27 -07006837 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian King35a39692006-09-25 12:39:20 -05006838
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006839 spin_lock(&ipr_cmd->hrrq->_lock);
Wayne Boyer96d21f02010-05-10 09:13:27 -07006840 if (ipr_cmd->ioa_cfg->sis64)
6841 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6842 sizeof(struct ipr_ioasa_gata));
6843 else
6844 memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6845 sizeof(struct ipr_ioasa_gata));
Brian King35a39692006-09-25 12:39:20 -05006846 ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6847
Wayne Boyer96d21f02010-05-10 09:13:27 -07006848 if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08006849 scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
Brian King35a39692006-09-25 12:39:20 -05006850
6851 if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
Wayne Boyer96d21f02010-05-10 09:13:27 -07006852 qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
Brian King35a39692006-09-25 12:39:20 -05006853 else
Wayne Boyer96d21f02010-05-10 09:13:27 -07006854 qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06006855 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006856 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King35a39692006-09-25 12:39:20 -05006857 ata_qc_complete(qc);
6858}
6859
6860/**
Wayne Boyera32c0552010-02-19 13:23:36 -08006861 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6862 * @ipr_cmd: ipr command struct
6863 * @qc: ATA queued command
6864 *
6865 **/
6866static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6867 struct ata_queued_cmd *qc)
6868{
6869 u32 ioadl_flags = 0;
6870 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006871 struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ata_ioadl.ioadl64;
Wayne Boyera32c0552010-02-19 13:23:36 -08006872 struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6873 int len = qc->nbytes;
6874 struct scatterlist *sg;
6875 unsigned int si;
6876 dma_addr_t dma_addr = ipr_cmd->dma_addr;
6877
6878 if (len == 0)
6879 return;
6880
6881 if (qc->dma_dir == DMA_TO_DEVICE) {
6882 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6883 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6884 } else if (qc->dma_dir == DMA_FROM_DEVICE)
6885 ioadl_flags = IPR_IOADL_FLAGS_READ;
6886
6887 ioarcb->data_transfer_length = cpu_to_be32(len);
6888 ioarcb->ioadl_len =
6889 cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6890 ioarcb->u.sis64_addr_data.data_ioadl_addr =
wenxiong@linux.vnet.ibm.com1ac7c262013-04-18 21:32:48 -05006891 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl.ioadl64));
Wayne Boyera32c0552010-02-19 13:23:36 -08006892
6893 for_each_sg(qc->sg, sg, qc->n_elem, si) {
6894 ioadl64->flags = cpu_to_be32(ioadl_flags);
6895 ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6896 ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6897
6898 last_ioadl64 = ioadl64;
6899 ioadl64++;
6900 }
6901
6902 if (likely(last_ioadl64))
6903 last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6904}
6905
6906/**
Brian King35a39692006-09-25 12:39:20 -05006907 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6908 * @ipr_cmd: ipr command struct
6909 * @qc: ATA queued command
6910 *
6911 **/
6912static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6913 struct ata_queued_cmd *qc)
6914{
6915 u32 ioadl_flags = 0;
6916 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08006917 struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006918 struct ipr_ioadl_desc *last_ioadl = NULL;
James Bottomleydde20202008-02-19 11:36:56 +01006919 int len = qc->nbytes;
Brian King35a39692006-09-25 12:39:20 -05006920 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09006921 unsigned int si;
Brian King35a39692006-09-25 12:39:20 -05006922
6923 if (len == 0)
6924 return;
6925
6926 if (qc->dma_dir == DMA_TO_DEVICE) {
6927 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6928 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
Wayne Boyera32c0552010-02-19 13:23:36 -08006929 ioarcb->data_transfer_length = cpu_to_be32(len);
6930 ioarcb->ioadl_len =
Brian King35a39692006-09-25 12:39:20 -05006931 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6932 } else if (qc->dma_dir == DMA_FROM_DEVICE) {
6933 ioadl_flags = IPR_IOADL_FLAGS_READ;
6934 ioarcb->read_data_transfer_length = cpu_to_be32(len);
6935 ioarcb->read_ioadl_len =
6936 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6937 }
6938
Tejun Heoff2aeb12007-12-05 16:43:11 +09006939 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Brian King35a39692006-09-25 12:39:20 -05006940 ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6941 ioadl->address = cpu_to_be32(sg_dma_address(sg));
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006942
6943 last_ioadl = ioadl;
6944 ioadl++;
Brian King35a39692006-09-25 12:39:20 -05006945 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04006946
6947 if (likely(last_ioadl))
6948 last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
Brian King35a39692006-09-25 12:39:20 -05006949}
6950
6951/**
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06006952 * ipr_qc_defer - Get a free ipr_cmd
6953 * @qc: queued command
6954 *
6955 * Return value:
 *	0 on success / ATA_DEFER_LINK if the command must be deferred
6957 **/
6958static int ipr_qc_defer(struct ata_queued_cmd *qc)
6959{
6960 struct ata_port *ap = qc->ap;
6961 struct ipr_sata_port *sata_port = ap->private_data;
6962 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6963 struct ipr_cmnd *ipr_cmd;
6964 struct ipr_hrr_queue *hrrq;
6965 int hrrq_id;
6966
6967 hrrq_id = ipr_get_hrrq_index(ioa_cfg);
6968 hrrq = &ioa_cfg->hrrq[hrrq_id];
6969
6970 qc->lldd_task = NULL;
6971 spin_lock(&hrrq->_lock);
6972 if (unlikely(hrrq->ioa_is_dead)) {
6973 spin_unlock(&hrrq->_lock);
6974 return 0;
6975 }
6976
6977 if (unlikely(!hrrq->allow_cmds)) {
6978 spin_unlock(&hrrq->_lock);
6979 return ATA_DEFER_LINK;
6980 }
6981
6982 ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
6983 if (ipr_cmd == NULL) {
6984 spin_unlock(&hrrq->_lock);
6985 return ATA_DEFER_LINK;
6986 }
6987
6988 qc->lldd_task = ipr_cmd;
6989 spin_unlock(&hrrq->_lock);
6990 return 0;
6991}
6992
6993/**
Brian King35a39692006-09-25 12:39:20 -05006994 * ipr_qc_issue - Issue a SATA qc to a device
6995 * @qc: queued command
6996 *
6997 * Return value:
 *	0 on success / AC_ERR_* status on failure
6999 **/
7000static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
7001{
7002 struct ata_port *ap = qc->ap;
7003 struct ipr_sata_port *sata_port = ap->private_data;
7004 struct ipr_resource_entry *res = sata_port->res;
7005 struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
7006 struct ipr_cmnd *ipr_cmd;
7007 struct ipr_ioarcb *ioarcb;
7008 struct ipr_ioarcb_ata_regs *regs;
7009
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007010 if (qc->lldd_task == NULL)
7011 ipr_qc_defer(qc);
7012
7013 ipr_cmd = qc->lldd_task;
7014 if (ipr_cmd == NULL)
Brian King0feeed82007-03-29 12:43:43 -05007015 return AC_ERR_SYSTEM;
Brian King35a39692006-09-25 12:39:20 -05007016
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007017 qc->lldd_task = NULL;
7018 spin_lock(&ipr_cmd->hrrq->_lock);
7019 if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
7020 ipr_cmd->hrrq->ioa_is_dead)) {
7021 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
7022 spin_unlock(&ipr_cmd->hrrq->_lock);
7023 return AC_ERR_SYSTEM;
7024 }
7025
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007026 ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
Brian King35a39692006-09-25 12:39:20 -05007027 ioarcb = &ipr_cmd->ioarcb;
Brian King35a39692006-09-25 12:39:20 -05007028
Wayne Boyera32c0552010-02-19 13:23:36 -08007029 if (ioa_cfg->sis64) {
7030 regs = &ipr_cmd->i.ata_ioadl.regs;
7031 ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
7032 } else
7033 regs = &ioarcb->u.add_data.u.regs;
7034
7035 memset(regs, 0, sizeof(*regs));
7036 ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
Brian King35a39692006-09-25 12:39:20 -05007037
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007038 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Brian King35a39692006-09-25 12:39:20 -05007039 ipr_cmd->qc = qc;
7040 ipr_cmd->done = ipr_sata_done;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007041 ipr_cmd->ioarcb.res_handle = res->res_handle;
Brian King35a39692006-09-25 12:39:20 -05007042 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
7043 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
7044 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
James Bottomleydde20202008-02-19 11:36:56 +01007045 ipr_cmd->dma_use_sg = qc->n_elem;
Brian King35a39692006-09-25 12:39:20 -05007046
Wayne Boyera32c0552010-02-19 13:23:36 -08007047 if (ioa_cfg->sis64)
7048 ipr_build_ata_ioadl64(ipr_cmd, qc);
7049 else
7050 ipr_build_ata_ioadl(ipr_cmd, qc);
7051
Brian King35a39692006-09-25 12:39:20 -05007052 regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
7053 ipr_copy_sata_tf(regs, &qc->tf);
7054 memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007055 ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
Brian King35a39692006-09-25 12:39:20 -05007056
7057 switch (qc->tf.protocol) {
7058 case ATA_PROT_NODATA:
7059 case ATA_PROT_PIO:
7060 break;
7061
7062 case ATA_PROT_DMA:
7063 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7064 break;
7065
Tejun Heo0dc36882007-12-18 16:34:43 -05007066 case ATAPI_PROT_PIO:
7067 case ATAPI_PROT_NODATA:
Brian King35a39692006-09-25 12:39:20 -05007068 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7069 break;
7070
Tejun Heo0dc36882007-12-18 16:34:43 -05007071 case ATAPI_PROT_DMA:
Brian King35a39692006-09-25 12:39:20 -05007072 regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
7073 regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
7074 break;
7075
7076 default:
7077 WARN_ON(1);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007078 spin_unlock(&ipr_cmd->hrrq->_lock);
Brian King0feeed82007-03-29 12:43:43 -05007079 return AC_ERR_INVALID;
Brian King35a39692006-09-25 12:39:20 -05007080 }
7081
Wayne Boyera32c0552010-02-19 13:23:36 -08007082 ipr_send_command(ipr_cmd);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007083 spin_unlock(&ipr_cmd->hrrq->_lock);
Wayne Boyera32c0552010-02-19 13:23:36 -08007084
Brian King35a39692006-09-25 12:39:20 -05007085 return 0;
7086}
7087
7088/**
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007089 * ipr_qc_fill_rtf - Read result TF
7090 * @qc: ATA queued command
7091 *
7092 * Return value:
7093 * true
7094 **/
7095static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
7096{
7097 struct ipr_sata_port *sata_port = qc->ap->private_data;
7098 struct ipr_ioasa_gata *g = &sata_port->ioasa;
7099 struct ata_taskfile *tf = &qc->result_tf;
7100
7101 tf->feature = g->error;
7102 tf->nsect = g->nsect;
7103 tf->lbal = g->lbal;
7104 tf->lbam = g->lbam;
7105 tf->lbah = g->lbah;
7106 tf->device = g->device;
7107 tf->command = g->status;
7108 tf->hob_nsect = g->hob_nsect;
7109 tf->hob_lbal = g->hob_lbal;
7110 tf->hob_lbam = g->hob_lbam;
7111 tf->hob_lbah = g->hob_lbah;
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007112
7113 return true;
7114}
7115
Brian King35a39692006-09-25 12:39:20 -05007116static struct ata_port_operations ipr_sata_ops = {
Brian King35a39692006-09-25 12:39:20 -05007117 .phy_reset = ipr_ata_phy_reset,
Tejun Heoa1efdab2008-03-25 12:22:50 +09007118 .hardreset = ipr_sata_reset,
Brian King35a39692006-09-25 12:39:20 -05007119 .post_internal_cmd = ipr_ata_post_internal,
Brian King35a39692006-09-25 12:39:20 -05007120 .qc_prep = ata_noop_qc_prep,
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007121 .qc_defer = ipr_qc_defer,
Brian King35a39692006-09-25 12:39:20 -05007122 .qc_issue = ipr_qc_issue,
Tejun Heo4c9bf4e2008-04-07 22:47:20 +09007123 .qc_fill_rtf = ipr_qc_fill_rtf,
Brian King35a39692006-09-25 12:39:20 -05007124 .port_start = ata_sas_port_start,
7125 .port_stop = ata_sas_port_stop
7126};
7127
7128static struct ata_port_info sata_port_info = {
Shaohua Li5067c042015-03-12 10:32:18 -07007129 .flags = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
7130 ATA_FLAG_SAS_HOST,
Sergei Shtylyov0f2e0332011-01-21 20:32:01 +03007131 .pio_mask = ATA_PIO4_ONLY,
7132 .mwdma_mask = ATA_MWDMA2,
7133 .udma_mask = ATA_UDMA6,
Brian King35a39692006-09-25 12:39:20 -05007134 .port_ops = &ipr_sata_ops
7135};
7136
Linus Torvalds1da177e2005-04-16 15:20:36 -07007137#ifdef CONFIG_PPC_PSERIES
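/*
 * PVR values of the pSeries processors on which early revisions of
 * these adapters are not supported; consulted by ipr_invalid_adapter()
 * below.
 */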
7138static const u16 ipr_blocked_processors[] = {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007139 PVR_NORTHSTAR,
7140 PVR_PULSAR,
7141 PVR_POWER4,
7142 PVR_ICESTAR,
7143 PVR_SSTAR,
7144 PVR_POWER4p,
7145 PVR_630,
7146 PVR_630p
Linus Torvalds1da177e2005-04-16 15:20:36 -07007147};
7148
7149/**
7150 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
7151 * @ioa_cfg: ioa cfg struct
7152 *
7153 * Adapters that use Gemstone revision < 3.1 do not work reliably on
7154 * certain pSeries hardware. This function determines if the given
 * adapter is in one of these configurations or not.
7156 *
7157 * Return value:
7158 * 1 if adapter is not supported / 0 if adapter is supported
7159 **/
7160static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
7161{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007162 int i;
7163
Auke Kok44c10132007-06-08 15:46:36 -07007164 if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007165 for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
Michael Ellermand3dbeef2012-08-19 21:44:01 +00007166 if (pvr_version_is(ipr_blocked_processors[i]))
Auke Kok44c10132007-06-08 15:46:36 -07007167 return 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007168 }
7169 }
7170 return 0;
7171}
7172#else
7173#define ipr_invalid_adapter(ioa_cfg) 0
7174#endif
7175
7176/**
7177 * ipr_ioa_bringdown_done - IOA bring down completion.
7178 * @ipr_cmd: ipr command struct
7179 *
7180 * This function processes the completion of an adapter bring down.
7181 * It wakes any reset sleepers.
7182 *
7183 * Return value:
7184 * IPR_RC_JOB_RETURN
7185 **/
7186static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
7187{
7188 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007189 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190
7191 ENTER;
Brian Kingbfae7822013-01-30 23:45:08 -06007192 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
7193 ipr_trace;
Brian Kingb0e17a92017-08-01 10:21:30 -05007194 ioa_cfg->scsi_unblock = 1;
7195 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06007196 }
7197
Linus Torvalds1da177e2005-04-16 15:20:36 -07007198 ioa_cfg->in_reset_reload = 0;
7199 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com96b04db2013-04-17 09:34:06 -05007200 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
7201 spin_lock(&ioa_cfg->hrrq[i]._lock);
7202 ioa_cfg->hrrq[i].ioa_is_dead = 1;
7203 spin_unlock(&ioa_cfg->hrrq[i]._lock);
7204 }
7205 wmb();
7206
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007207 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007208 wake_up_all(&ioa_cfg->reset_wait_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007209 LEAVE;
7210
7211 return IPR_RC_JOB_RETURN;
7212}
7213
7214/**
7215 * ipr_ioa_reset_done - IOA reset completion.
7216 * @ipr_cmd: ipr command struct
7217 *
7218 * This function processes the completion of an adapter reset.
7219 * It schedules any necessary mid-layer add/removes and
7220 * wakes any reset sleepers.
7221 *
7222 * Return value:
7223 * IPR_RC_JOB_RETURN
7224 **/
7225static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
7226{
7227 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7228 struct ipr_resource_entry *res;
Brian Kingafc3f832016-08-24 12:56:51 -05007229 int j;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007230
7231 ENTER;
7232 ioa_cfg->in_reset_reload = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06007233 for (j = 0; j < ioa_cfg->hrrq_num; j++) {
7234 spin_lock(&ioa_cfg->hrrq[j]._lock);
7235 ioa_cfg->hrrq[j].allow_cmds = 1;
7236 spin_unlock(&ioa_cfg->hrrq[j]._lock);
7237 }
7238 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007239 ioa_cfg->reset_cmd = NULL;
brking@us.ibm.com3d1d0da2005-11-01 17:01:54 -06007240 ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007241
7242 list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
Brian Kingf688f962014-12-02 12:47:37 -06007243 if (res->add_to_ml || res->del_from_ml) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007244 ipr_trace;
7245 break;
7246 }
7247 }
7248 schedule_work(&ioa_cfg->work_q);
7249
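	/*
	 * Re-post all host controlled async messages (HCAMs) to the
	 * adapter: the first IPR_NUM_LOG_HCAMS host RCBs are used for
	 * error log data, the remainder for configuration change
	 * notifications.
	 */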
Brian Kingafc3f832016-08-24 12:56:51 -05007250 for (j = 0; j < IPR_NUM_HCAMS; j++) {
7251 list_del_init(&ioa_cfg->hostrcb[j]->queue);
7252 if (j < IPR_NUM_LOG_HCAMS)
7253 ipr_send_hcam(ioa_cfg,
7254 IPR_HCAM_CDB_OP_CODE_LOG_DATA,
7255 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007256 else
Brian Kingafc3f832016-08-24 12:56:51 -05007257 ipr_send_hcam(ioa_cfg,
7258 IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
7259 ioa_cfg->hostrcb[j]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007260 }
7261
Brian King6bb04172007-04-26 16:00:08 -05007262 scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007263 dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
7264
7265 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007266 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007267 wake_up_all(&ioa_cfg->reset_wait_q);
7268
Brian Kingb0e17a92017-08-01 10:21:30 -05007269 ioa_cfg->scsi_unblock = 1;
Brian Kingf688f962014-12-02 12:47:37 -06007270 schedule_work(&ioa_cfg->work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007271 LEAVE;
7272 return IPR_RC_JOB_RETURN;
7273}
7274
7275/**
7276 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
7277 * @supported_dev: supported device struct
7278 * @vpids: vendor product id struct
7279 *
7280 * Return value:
7281 * none
7282 **/
7283static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
7284 struct ipr_std_inq_vpids *vpids)
7285{
7286 memset(supported_dev, 0, sizeof(struct ipr_supported_device));
7287 memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
7288 supported_dev->num_records = 1;
7289 supported_dev->data_length =
7290 cpu_to_be16(sizeof(struct ipr_supported_device));
7291 supported_dev->reserved = 0;
7292}
7293
7294/**
7295 * ipr_set_supported_devs - Send Set Supported Devices for a device
7296 * @ipr_cmd: ipr command struct
7297 *
 * This function sends a Set Supported Devices command to the adapter
Linus Torvalds1da177e2005-04-16 15:20:36 -07007299 *
7300 * Return value:
7301 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7302 **/
7303static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
7304{
7305 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7306 struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007307 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7308 struct ipr_resource_entry *res = ipr_cmd->u.res;
7309
7310 ipr_cmd->job_step = ipr_ioa_reset_done;
7311
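	/*
	 * On older (non-SIS64) adapters this job step is re-entered once
	 * per SCSI disk; list_for_each_entry_continue() resumes the walk
	 * from the resource saved in ipr_cmd->u.res by the previous pass.
	 */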
7312 list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
Brian Kinge4fbf442006-03-29 09:37:22 -06007313 if (!ipr_is_scsi_disk(res))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007314 continue;
7315
7316 ipr_cmd->u.res = res;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007317 ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007318
7319 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7320 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7321 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7322
7323 ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007324 ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007325 ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
7326 ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
7327
Wayne Boyera32c0552010-02-19 13:23:36 -08007328 ipr_init_ioadl(ipr_cmd,
7329 ioa_cfg->vpd_cbs_dma +
7330 offsetof(struct ipr_misc_cbs, supp_dev),
7331 sizeof(struct ipr_supported_device),
7332 IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007333
7334 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7335 IPR_SET_SUP_DEVICE_TIMEOUT);
7336
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007337 if (!ioa_cfg->sis64)
7338 ipr_cmd->job_step = ipr_set_supported_devs;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007339 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007340 return IPR_RC_JOB_RETURN;
7341 }
7342
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007343 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007344 return IPR_RC_JOB_CONTINUE;
7345}
7346
7347/**
7348 * ipr_get_mode_page - Locate specified mode page
7349 * @mode_pages: mode page buffer
7350 * @page_code: page code to find
7351 * @len: minimum required length for mode page
7352 *
7353 * Return value:
7354 * pointer to mode page / NULL on failure
7355 **/
7356static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
7357 u32 page_code, u32 len)
7358{
7359 struct ipr_mode_page_hdr *mode_hdr;
7360 u32 page_length;
7361 u32 length;
7362
7363 if (!mode_pages || (mode_pages->hdr.length == 0))
7364 return NULL;
7365
7366 length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
7367 mode_hdr = (struct ipr_mode_page_hdr *)
7368 (mode_pages->data + mode_pages->hdr.block_desc_len);
7369
7370 while (length) {
7371 if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
7372 if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
7373 return mode_hdr;
7374 break;
7375 } else {
7376 page_length = (sizeof(struct ipr_mode_page_hdr) +
7377 mode_hdr->page_length);
7378 length -= page_length;
7379 mode_hdr = (struct ipr_mode_page_hdr *)
7380 ((unsigned long)mode_hdr + page_length);
7381 }
7382 }
7383 return NULL;
7384}
7385
7386/**
7387 * ipr_check_term_power - Check for term power errors
7388 * @ioa_cfg: ioa config struct
7389 * @mode_pages: IOAFP mode pages buffer
7390 *
7391 * Check the IOAFP's mode page 28 for term power errors
7392 *
7393 * Return value:
7394 * nothing
7395 **/
7396static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
7397 struct ipr_mode_pages *mode_pages)
7398{
7399 int i;
7400 int entry_length;
7401 struct ipr_dev_bus_entry *bus;
7402 struct ipr_mode_page28 *mode_page;
7403
7404 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7405 sizeof(struct ipr_mode_page28));
7406
7407 entry_length = mode_page->entry_length;
7408
7409 bus = mode_page->bus;
7410
7411 for (i = 0; i < mode_page->num_entries; i++) {
7412 if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
7413 dev_err(&ioa_cfg->pdev->dev,
7414 "Term power is absent on scsi bus %d\n",
7415 bus->res_addr.bus);
7416 }
7417
7418 bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
7419 }
7420}
7421
7422/**
7423 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
7424 * @ioa_cfg: ioa config struct
7425 *
7426 * Looks through the config table checking for SES devices. If
7427 * the SES device is in the SES table indicating a maximum SCSI
7428 * bus speed, the speed is limited for the bus.
7429 *
7430 * Return value:
7431 * none
7432 **/
7433static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
7434{
7435 u32 max_xfer_rate;
7436 int i;
7437
7438 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
7439 max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
7440 ioa_cfg->bus_attr[i].bus_width);
7441
7442 if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
7443 ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
7444 }
7445}
7446
7447/**
7448 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
7449 * @ioa_cfg: ioa config struct
7450 * @mode_pages: mode page 28 buffer
7451 *
7452 * Updates mode page 28 based on driver configuration
7453 *
7454 * Return value:
7455 * none
7456 **/
7457static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03007458 struct ipr_mode_pages *mode_pages)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459{
7460 int i, entry_length;
7461 struct ipr_dev_bus_entry *bus;
7462 struct ipr_bus_attributes *bus_attr;
7463 struct ipr_mode_page28 *mode_page;
7464
7465 mode_page = ipr_get_mode_page(mode_pages, 0x28,
7466 sizeof(struct ipr_mode_page28));
7467
7468 entry_length = mode_page->entry_length;
7469
7470 /* Loop for each device bus entry */
7471 for (i = 0, bus = mode_page->bus;
7472 i < mode_page->num_entries;
7473 i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
7474 if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
7475 dev_err(&ioa_cfg->pdev->dev,
7476 "Invalid resource address reported: 0x%08X\n",
7477 IPR_GET_PHYS_LOC(bus->res_addr));
7478 continue;
7479 }
7480
7481 bus_attr = &ioa_cfg->bus_attr[i];
7482 bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
7483 bus->bus_width = bus_attr->bus_width;
7484 bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
7485 bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
7486 if (bus_attr->qas_enabled)
7487 bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
7488 else
7489 bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
7490 }
7491}
7492
7493/**
7494 * ipr_build_mode_select - Build a mode select command
7495 * @ipr_cmd: ipr command struct
7496 * @res_handle: resource handle to send command to
 * @parm:		Byte 2 of the Mode Select command
7498 * @dma_addr: DMA buffer address
7499 * @xfer_len: data transfer length
7500 *
7501 * Return value:
7502 * none
7503 **/
7504static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
Wayne Boyera32c0552010-02-19 13:23:36 -08007505 __be32 res_handle, u8 parm,
7506 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007507{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007508 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7509
7510 ioarcb->res_handle = res_handle;
7511 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7512 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
7513 ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
7514 ioarcb->cmd_pkt.cdb[1] = parm;
7515 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7516
Wayne Boyera32c0552010-02-19 13:23:36 -08007517 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007518}
7519
7520/**
7521 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
7522 * @ipr_cmd: ipr command struct
7523 *
7524 * This function sets up the SCSI bus attributes and sends
7525 * a Mode Select for Page 28 to activate them.
7526 *
7527 * Return value:
7528 * IPR_RC_JOB_RETURN
7529 **/
7530static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
7531{
7532 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7533 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7534 int length;
7535
7536 ENTER;
Brian King47338042006-02-08 20:57:42 -06007537 ipr_scsi_bus_speed_limit(ioa_cfg);
7538 ipr_check_term_power(ioa_cfg, mode_pages);
7539 ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
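	/*
	 * The reported mode data length excludes the length byte itself,
	 * so the full transfer is length + 1.  The field is cleared before
	 * the buffer is sent back, as it is reserved for Mode Select.
	 */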
7540 length = mode_pages->hdr.length + 1;
7541 mode_pages->hdr.length = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542
7543 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7544 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7545 length);
7546
Wayne Boyerf72919e2010-02-19 13:24:21 -08007547 ipr_cmd->job_step = ipr_set_supported_devs;
7548 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7549 struct ipr_resource_entry, queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007550 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7551
7552 LEAVE;
7553 return IPR_RC_JOB_RETURN;
7554}
7555
7556/**
7557 * ipr_build_mode_sense - Builds a mode sense command
7558 * @ipr_cmd: ipr command struct
 * @res_handle:	resource handle to send command to
7560 * @parm: Byte 2 of mode sense command
7561 * @dma_addr: DMA address of mode sense buffer
7562 * @xfer_len: Size of DMA buffer
7563 *
7564 * Return value:
7565 * none
7566 **/
7567static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
7568 __be32 res_handle,
Wayne Boyera32c0552010-02-19 13:23:36 -08007569 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007570{
Linus Torvalds1da177e2005-04-16 15:20:36 -07007571 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7572
7573 ioarcb->res_handle = res_handle;
7574 ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
7575 ioarcb->cmd_pkt.cdb[2] = parm;
7576 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7577 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7578
Wayne Boyera32c0552010-02-19 13:23:36 -08007579 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007580}
7581
7582/**
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007583 * ipr_reset_cmd_failed - Handle failure of IOA reset command
7584 * @ipr_cmd: ipr command struct
7585 *
7586 * This function handles the failure of an IOA bringup command.
7587 *
7588 * Return value:
7589 * IPR_RC_JOB_RETURN
7590 **/
7591static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
7592{
7593 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007594 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007595
7596 dev_err(&ioa_cfg->pdev->dev,
7597 "0x%02X failed with IOASC: 0x%08X\n",
7598 ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
7599
7600 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06007601 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007602 return IPR_RC_JOB_RETURN;
7603}
7604
7605/**
7606 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
7607 * @ipr_cmd: ipr command struct
7608 *
7609 * This function handles the failure of a Mode Sense to the IOAFP.
7610 * Some adapters do not handle all mode pages.
7611 *
7612 * Return value:
7613 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7614 **/
7615static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
7616{
Wayne Boyerf72919e2010-02-19 13:24:21 -08007617 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer96d21f02010-05-10 09:13:27 -07007618 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007619
7620 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
Wayne Boyerf72919e2010-02-19 13:24:21 -08007621 ipr_cmd->job_step = ipr_set_supported_devs;
7622 ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
7623 struct ipr_resource_entry, queue);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007624 return IPR_RC_JOB_CONTINUE;
7625 }
7626
7627 return ipr_reset_cmd_failed(ipr_cmd);
7628}
7629
7630/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007631 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
7632 * @ipr_cmd: ipr command struct
7633 *
 * This function sends a Page 28 mode sense to the IOA to
7635 * retrieve SCSI bus attributes.
7636 *
7637 * Return value:
7638 * IPR_RC_JOB_RETURN
7639 **/
7640static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
7641{
7642 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7643
7644 ENTER;
7645 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7646 0x28, ioa_cfg->vpd_cbs_dma +
7647 offsetof(struct ipr_misc_cbs, mode_pages),
7648 sizeof(struct ipr_mode_pages));
7649
7650 ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06007651 ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007652
7653 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7654
7655 LEAVE;
7656 return IPR_RC_JOB_RETURN;
7657}
7658
7659/**
Brian Kingac09c342007-04-26 16:00:16 -05007660 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
7661 * @ipr_cmd: ipr command struct
7662 *
7663 * This function enables dual IOA RAID support if possible.
7664 *
7665 * Return value:
7666 * IPR_RC_JOB_RETURN
7667 **/
7668static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
7669{
7670 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7671 struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
7672 struct ipr_mode_page24 *mode_page;
7673 int length;
7674
7675 ENTER;
7676 mode_page = ipr_get_mode_page(mode_pages, 0x24,
7677 sizeof(struct ipr_mode_page24));
7678
7679 if (mode_page)
7680 mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
7681
7682 length = mode_pages->hdr.length + 1;
7683 mode_pages->hdr.length = 0;
7684
7685 ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
7686 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
7687 length);
7688
7689 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7690 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7691
7692 LEAVE;
7693 return IPR_RC_JOB_RETURN;
7694}
7695
7696/**
7697 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
7698 * @ipr_cmd: ipr command struct
7699 *
7700 * This function handles the failure of a Mode Sense to the IOAFP.
7701 * Some adapters do not handle all mode pages.
7702 *
7703 * Return value:
7704 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7705 **/
7706static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
7707{
Wayne Boyer96d21f02010-05-10 09:13:27 -07007708 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Brian Kingac09c342007-04-26 16:00:16 -05007709
7710 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
7711 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
7712 return IPR_RC_JOB_CONTINUE;
7713 }
7714
7715 return ipr_reset_cmd_failed(ipr_cmd);
7716}
7717
7718/**
7719 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
7720 * @ipr_cmd: ipr command struct
7721 *
 * This function sends a mode sense to the IOA to retrieve
7723 * the IOA Advanced Function Control mode page.
7724 *
7725 * Return value:
7726 * IPR_RC_JOB_RETURN
7727 **/
7728static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
7729{
7730 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7731
7732 ENTER;
7733 ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
7734 0x24, ioa_cfg->vpd_cbs_dma +
7735 offsetof(struct ipr_misc_cbs, mode_pages),
7736 sizeof(struct ipr_mode_pages));
7737
7738 ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
7739 ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
7740
7741 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7742
7743 LEAVE;
7744 return IPR_RC_JOB_RETURN;
7745}
7746
7747/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07007748 * ipr_init_res_table - Initialize the resource table
7749 * @ipr_cmd: ipr command struct
7750 *
7751 * This function looks through the existing resource table, comparing
7752 * it with the config table. This function will take care of old/new
7753 * devices and schedule adding/removing them from the mid-layer
7754 * as appropriate.
7755 *
7756 * Return value:
7757 * IPR_RC_JOB_CONTINUE
7758 **/
7759static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
7760{
7761 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7762 struct ipr_resource_entry *res, *temp;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007763 struct ipr_config_table_entry_wrapper cfgtew;
7764 int entries, found, flag, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007765 LIST_HEAD(old_res);
7766
7767 ENTER;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007768 if (ioa_cfg->sis64)
7769 flag = ioa_cfg->u.cfg_table64->hdr64.flags;
7770 else
7771 flag = ioa_cfg->u.cfg_table->hdr.flags;
7772
7773 if (flag & IPR_UCODE_DOWNLOAD_REQ)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007774 dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
7775
7776 list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
7777 list_move_tail(&res->queue, &old_res);
7778
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007779 if (ioa_cfg->sis64)
Wayne Boyer438b0332010-05-10 09:13:00 -07007780 entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007781 else
7782 entries = ioa_cfg->u.cfg_table->hdr.num_entries;
7783
7784 for (i = 0; i < entries; i++) {
7785 if (ioa_cfg->sis64)
7786 cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
7787 else
7788 cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07007789 found = 0;
7790
7791 list_for_each_entry_safe(res, temp, &old_res, queue) {
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007792 if (ipr_is_same_device(res, &cfgtew)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007793 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
7794 found = 1;
7795 break;
7796 }
7797 }
7798
7799 if (!found) {
7800 if (list_empty(&ioa_cfg->free_res_q)) {
7801 dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
7802 break;
7803 }
7804
7805 found = 1;
7806 res = list_entry(ioa_cfg->free_res_q.next,
7807 struct ipr_resource_entry, queue);
7808 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007809 ipr_init_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007810 res->add_to_ml = 1;
Wayne Boyer56115592010-06-10 14:46:34 -07007811 } else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
7812 res->sdev->allow_restart = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007813
7814 if (found)
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007815 ipr_update_res_entry(res, &cfgtew);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007816 }
7817
7818 list_for_each_entry_safe(res, temp, &old_res, queue) {
7819 if (res->sdev) {
7820 res->del_from_ml = 1;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007821 res->res_handle = IPR_INVALID_RES_HANDLE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007822 list_move_tail(&res->queue, &ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007823 }
7824 }
7825
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007826 list_for_each_entry_safe(res, temp, &old_res, queue) {
7827 ipr_clear_res_target(res);
7828 list_move_tail(&res->queue, &ioa_cfg->free_res_q);
7829 }
7830
Brian Kingac09c342007-04-26 16:00:16 -05007831 if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7832 ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
7833 else
7834 ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007835
7836 LEAVE;
7837 return IPR_RC_JOB_CONTINUE;
7838}
7839
7840/**
7841 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
7842 * @ipr_cmd: ipr command struct
7843 *
7844 * This function sends a Query IOA Configuration command
7845 * to the adapter to retrieve the IOA configuration table.
7846 *
7847 * Return value:
7848 * IPR_RC_JOB_RETURN
7849 **/
7850static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
7851{
7852 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7853 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007854 struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
Brian Kingac09c342007-04-26 16:00:16 -05007855 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007856
7857 ENTER;
Brian Kingac09c342007-04-26 16:00:16 -05007858 if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
7859 ioa_cfg->dual_raid = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007860 dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
7861 ucode_vpd->major_release, ucode_vpd->card_type,
7862 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
7863 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7864 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7865
7866 ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
Wayne Boyer438b0332010-05-10 09:13:00 -07007867 ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007868 ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
7869 ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007870
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08007871 ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
Wayne Boyera32c0552010-02-19 13:23:36 -08007872 IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007873
7874 ipr_cmd->job_step = ipr_init_res_table;
7875
7876 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7877
7878 LEAVE;
7879 return IPR_RC_JOB_RETURN;
7880}
7881
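/**
 * ipr_ioa_service_action_failed - Handle failure of an IOA Service Action
 * @ipr_cmd:	ipr command struct
 *
 * An invalid request type IOASC simply means the adapter does not support
 * the requested service action, so the reset job continues; any other
 * failure is treated as a failed reset command.
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
 **/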
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02007882static int ipr_ioa_service_action_failed(struct ipr_cmnd *ipr_cmd)
7883{
7884 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7885
7886 if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT)
7887 return IPR_RC_JOB_CONTINUE;
7888
7889 return ipr_reset_cmd_failed(ipr_cmd);
7890}
7891
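/**
 * ipr_build_ioa_service_action - Build an IOA Service Action command
 * @ipr_cmd:	ipr command struct
 * @res_handle:	resource handle to send the command to
 * @sa_code:	service action code
 *
 * Return value:
 *	none
 **/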
7892static void ipr_build_ioa_service_action(struct ipr_cmnd *ipr_cmd,
7893 __be32 res_handle, u8 sa_code)
7894{
7895 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7896
7897 ioarcb->res_handle = res_handle;
7898 ioarcb->cmd_pkt.cdb[0] = IPR_IOA_SERVICE_ACTION;
7899 ioarcb->cmd_pkt.cdb[1] = sa_code;
7900 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7901}
7902
7903/**
 * ipr_ioafp_set_caching_parameters - Issue Set Cache Parameters service
 * action to the adapter
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 *	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7909 **/
7910static int ipr_ioafp_set_caching_parameters(struct ipr_cmnd *ipr_cmd)
7911{
7912 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7913 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7914 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
7915
7916 ENTER;
7917
7918 ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7919
7920 if (pageC4->cache_cap[0] & IPR_CAP_SYNC_CACHE) {
7921 ipr_build_ioa_service_action(ipr_cmd,
7922 cpu_to_be32(IPR_IOA_RES_HANDLE),
7923 IPR_IOA_SA_CHANGE_CACHE_PARAMS);
7924
7925 ioarcb->cmd_pkt.cdb[2] = 0x40;
7926
7927 ipr_cmd->job_step_failed = ipr_ioa_service_action_failed;
7928 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7929 IPR_SET_SUP_DEVICE_TIMEOUT);
7930
7931 LEAVE;
7932 return IPR_RC_JOB_RETURN;
7933 }
7934
7935 LEAVE;
7936 return IPR_RC_JOB_CONTINUE;
7937}
7938
Linus Torvalds1da177e2005-04-16 15:20:36 -07007939/**
7940 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
 * @ipr_cmd:	ipr command struct
 * @flags:	byte 1 of the INQUIRY CDB (EVPD flag)
 * @page:	page code of the inquiry page to retrieve
 * @dma_addr:	DMA address of the response buffer
 * @xfer_len:	size of the response buffer
7942 *
7943 * This utility function sends an inquiry to the adapter.
7944 *
7945 * Return value:
7946 * none
7947 **/
7948static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
Wayne Boyera32c0552010-02-19 13:23:36 -08007949 dma_addr_t dma_addr, u8 xfer_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007950{
7951 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007952
7953 ENTER;
7954 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7955 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7956
7957 ioarcb->cmd_pkt.cdb[0] = INQUIRY;
7958 ioarcb->cmd_pkt.cdb[1] = flags;
7959 ioarcb->cmd_pkt.cdb[2] = page;
7960 ioarcb->cmd_pkt.cdb[4] = xfer_len;
7961
Wayne Boyera32c0552010-02-19 13:23:36 -08007962 ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007963
7964 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7965 LEAVE;
7966}
7967
7968/**
brking@us.ibm.com62275042005-11-01 17:01:14 -06007969 * ipr_inquiry_page_supported - Is the given inquiry page supported
7970 * @page0: inquiry page 0 buffer
7971 * @page: page code.
7972 *
7973 * This function determines if the specified inquiry page is supported.
7974 *
7975 * Return value:
7976 * 1 if page is supported / 0 if not
7977 **/
7978static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7979{
7980 int i;
7981
7982 for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7983 if (page0->page[i] == page)
7984 return 1;
7985
7986 return 0;
7987}
7988
7989/**
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02007990 * ipr_ioafp_pageC4_inquiry - Send a Page 0xC4 Inquiry to the adapter.
7991 * @ipr_cmd: ipr command struct
7992 *
7993 * This function sends a Page 0xC4 inquiry to the adapter
7994 * to retrieve software VPD information.
7995 *
7996 * Return value:
7997 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7998 **/
7999static int ipr_ioafp_pageC4_inquiry(struct ipr_cmnd *ipr_cmd)
8000{
8001 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8002 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8003 struct ipr_inquiry_pageC4 *pageC4 = &ioa_cfg->vpd_cbs->pageC4_data;
8004
8005 ENTER;
Gabriel Krisman Bertazi1a47af22015-11-03 16:26:09 -02008006 ipr_cmd->job_step = ipr_ioafp_set_caching_parameters;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008007 memset(pageC4, 0, sizeof(*pageC4));
8008
8009 if (ipr_inquiry_page_supported(page0, 0xC4)) {
8010 ipr_ioafp_inquiry(ipr_cmd, 1, 0xC4,
8011 (ioa_cfg->vpd_cbs_dma
8012 + offsetof(struct ipr_misc_cbs,
8013 pageC4_data)),
8014 sizeof(struct ipr_inquiry_pageC4));
8015 return IPR_RC_JOB_RETURN;
8016 }
8017
8018 LEAVE;
8019 return IPR_RC_JOB_CONTINUE;
8020}
8021
8022/**
Brian Kingac09c342007-04-26 16:00:16 -05008023 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
8024 * @ipr_cmd: ipr command struct
8025 *
8026 * This function sends a Page 0xD0 inquiry to the adapter
8027 * to retrieve adapter capabilities.
8028 *
8029 * Return value:
8030 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8031 **/
8032static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
8033{
8034 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8035 struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
8036 struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
8037
8038 ENTER;
Gabriel Krisman Bertazi1021b3f2015-11-03 16:26:08 -02008039 ipr_cmd->job_step = ipr_ioafp_pageC4_inquiry;
Brian Kingac09c342007-04-26 16:00:16 -05008040 memset(cap, 0, sizeof(*cap));
8041
8042 if (ipr_inquiry_page_supported(page0, 0xD0)) {
8043 ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
8044 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
8045 sizeof(struct ipr_inquiry_cap));
8046 return IPR_RC_JOB_RETURN;
8047 }
8048
8049 LEAVE;
8050 return IPR_RC_JOB_CONTINUE;
8051}
8052
8053/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008054 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
8055 * @ipr_cmd: ipr command struct
8056 *
8057 * This function sends a Page 3 inquiry to the adapter
8058 * to retrieve software VPD information.
8059 *
8060 * Return value:
8061 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8062 **/
8063static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
8064{
8065 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008066
8067 ENTER;
8068
Brian Kingac09c342007-04-26 16:00:16 -05008069 ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008070
8071 ipr_ioafp_inquiry(ipr_cmd, 1, 3,
8072 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
8073 sizeof(struct ipr_inquiry_page3));
8074
8075 LEAVE;
8076 return IPR_RC_JOB_RETURN;
8077}
8078
8079/**
8080 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
8081 * @ipr_cmd: ipr command struct
8082 *
8083 * This function sends a Page 0 inquiry to the adapter
8084 * to retrieve supported inquiry pages.
8085 *
8086 * Return value:
8087 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8088 **/
8089static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
8090{
8091 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008092 char type[5];
8093
8094 ENTER;
8095
8096 /* Grab the type out of the VPD and store it away */
8097 memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
8098 type[4] = '\0';
8099 ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
8100
Brian Kingf688f962014-12-02 12:47:37 -06008101 if (ipr_invalid_adapter(ioa_cfg)) {
8102 dev_err(&ioa_cfg->pdev->dev,
8103 "Adapter not supported in this hardware configuration.\n");
8104
8105 if (!ipr_testmode) {
8106 ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
8107 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8108 list_add_tail(&ipr_cmd->queue,
8109 &ioa_cfg->hrrq->hrrq_free_q);
8110 return IPR_RC_JOB_RETURN;
8111 }
8112 }
8113
brking@us.ibm.com62275042005-11-01 17:01:14 -06008114 ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008115
brking@us.ibm.com62275042005-11-01 17:01:14 -06008116 ipr_ioafp_inquiry(ipr_cmd, 1, 0,
8117 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
8118 sizeof(struct ipr_inquiry_page0));
Linus Torvalds1da177e2005-04-16 15:20:36 -07008119
8120 LEAVE;
8121 return IPR_RC_JOB_RETURN;
8122}
8123
8124/**
8125 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
8126 * @ipr_cmd: ipr command struct
8127 *
8128 * This function sends a standard inquiry to the adapter.
8129 *
8130 * Return value:
8131 * IPR_RC_JOB_RETURN
8132 **/
8133static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
8134{
8135 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8136
8137 ENTER;
brking@us.ibm.com62275042005-11-01 17:01:14 -06008138 ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008139
8140 ipr_ioafp_inquiry(ipr_cmd, 0, 0,
8141 ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
8142 sizeof(struct ipr_ioa_vpd));
8143
8144 LEAVE;
8145 return IPR_RC_JOB_RETURN;
8146}
8147
8148/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008149 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008150 * @ipr_cmd: ipr command struct
8151 *
 8152 * This function sends an Identify Host Request Response Queue
8153 * command to establish the HRRQ with the adapter.
8154 *
8155 * Return value:
8156 * IPR_RC_JOB_RETURN
8157 **/
Wayne Boyer214777b2010-02-19 13:24:26 -08008158static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008159{
8160 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8161 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008162 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008163
8164 ENTER;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008165 ipr_cmd->job_step = ipr_ioafp_std_inquiry;
Brian King87adbe02016-09-16 16:51:37 -05008166 if (ioa_cfg->identify_hrrq_index == 0)
8167 dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07008168
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008169 if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
8170 hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
Linus Torvalds1da177e2005-04-16 15:20:36 -07008171
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008172 ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
8173 ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008174
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008175 ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8176 if (ioa_cfg->sis64)
8177 ioarcb->cmd_pkt.cdb[1] = 0x1;
8178
8179 if (ioa_cfg->nvectors == 1)
8180 ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
8181 else
8182 ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
8183
8184 ioarcb->cmd_pkt.cdb[2] =
8185 ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
8186 ioarcb->cmd_pkt.cdb[3] =
8187 ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
8188 ioarcb->cmd_pkt.cdb[4] =
8189 ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
8190 ioarcb->cmd_pkt.cdb[5] =
8191 ((u64) hrrq->host_rrq_dma) & 0xff;
8192 ioarcb->cmd_pkt.cdb[7] =
8193 ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
8194 ioarcb->cmd_pkt.cdb[8] =
8195 (sizeof(u32) * hrrq->size) & 0xff;
8196
8197 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008198 ioarcb->cmd_pkt.cdb[9] =
8199 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008200
8201 if (ioa_cfg->sis64) {
8202 ioarcb->cmd_pkt.cdb[10] =
8203 ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
8204 ioarcb->cmd_pkt.cdb[11] =
8205 ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
8206 ioarcb->cmd_pkt.cdb[12] =
8207 ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
8208 ioarcb->cmd_pkt.cdb[13] =
8209 ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
8210 }
8211
8212 if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008213 ioarcb->cmd_pkt.cdb[14] =
8214 ioa_cfg->identify_hrrq_index;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008215
8216 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
8217 IPR_INTERNAL_TIMEOUT);
8218
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008219 if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
8220 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008221
8222 LEAVE;
8223 return IPR_RC_JOB_RETURN;
Wayne Boyer214777b2010-02-19 13:24:26 -08008224 }
8225
Linus Torvalds1da177e2005-04-16 15:20:36 -07008226 LEAVE;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008227 return IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008228}
8229
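/*
 * Illustrative sketch, not part of the driver: the Identify Host RRQ CDB
 * built above carries the queue's DMA address one byte at a time, most
 * significant byte first, with the low 32 bits in bytes 2-5, the queue
 * length in bytes (sizeof(u32) * entries) in bytes 7-8, and the upper 32
 * address bits in bytes 10-13 on SIS-64 adapters.  A hypothetical helper
 * doing the same packing (opcode, HRRQ-select flags and queue index are
 * assumed to be filled in separately, as in the function above):
 */
static void example_pack_id_hrrq_cdb(u8 *cdb, u64 rrq_dma, u32 entries)
{
	cdb[2] = (rrq_dma >> 24) & 0xff;	/* address bits 31..24 */
	cdb[3] = (rrq_dma >> 16) & 0xff;
	cdb[4] = (rrq_dma >> 8) & 0xff;
	cdb[5] = rrq_dma & 0xff;		/* address bits 7..0 */
	cdb[7] = ((sizeof(u32) * entries) >> 8) & 0xff;	/* RRQ length, MSB */
	cdb[8] = (sizeof(u32) * entries) & 0xff;	/* RRQ length, LSB */
	cdb[10] = (rrq_dma >> 56) & 0xff;	/* address bits 63..56 (SIS-64) */
	cdb[11] = (rrq_dma >> 48) & 0xff;
	cdb[12] = (rrq_dma >> 40) & 0xff;
	cdb[13] = (rrq_dma >> 32) & 0xff;
}
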
8230/**
8231 * ipr_reset_timer_done - Adapter reset timer function
8232 * @ipr_cmd: ipr command struct
8233 *
8234 * Description: This function is used in adapter reset processing
8235 * for timing events. If the reset_cmd pointer in the IOA
 8236 * config struct is not this adapter's, we are doing nested
8237 * resets and fail_all_ops will take care of freeing the
8238 * command block.
8239 *
8240 * Return value:
8241 * none
8242 **/
Kees Cook738c6ec2017-08-18 16:53:24 -07008243static void ipr_reset_timer_done(struct timer_list *t)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008244{
Kees Cook738c6ec2017-08-18 16:53:24 -07008245 struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008246 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8247 unsigned long lock_flags = 0;
8248
8249 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8250
8251 if (ioa_cfg->reset_cmd == ipr_cmd) {
8252 list_del(&ipr_cmd->queue);
8253 ipr_cmd->done(ipr_cmd);
8254 }
8255
8256 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8257}
8258
8259/**
8260 * ipr_reset_start_timer - Start a timer for adapter reset job
8261 * @ipr_cmd: ipr command struct
8262 * @timeout: timeout value
8263 *
8264 * Description: This function is used in adapter reset processing
8265 * for timing events. If the reset_cmd pointer in the IOA
 8266 * config struct is not this adapter's, we are doing nested
8267 * resets and fail_all_ops will take care of freeing the
8268 * command block.
8269 *
8270 * Return value:
8271 * none
8272 **/
8273static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
8274 unsigned long timeout)
8275{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008276
8277 ENTER;
8278 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008279 ipr_cmd->done = ipr_reset_ioa_job;
8280
Linus Torvalds1da177e2005-04-16 15:20:36 -07008281 ipr_cmd->timer.expires = jiffies + timeout;
Kees Cook841b86f2017-10-23 09:40:42 +02008282 ipr_cmd->timer.function = ipr_reset_timer_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008283 add_timer(&ipr_cmd->timer);
8284}
8285
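/*
 * Illustrative sketch, not part of the driver: the reset timers above use
 * the standard timer_list idiom, in which the callback recovers its
 * containing structure with from_timer() and the timer is armed by setting
 * .expires and calling add_timer().  A minimal, hypothetical example of the
 * same pattern, assuming the usual <linux/timer.h> helpers:
 */
struct example_cmd {
	struct timer_list timer;
};

static void example_timeout(struct timer_list *t)
{
	struct example_cmd *cmd = from_timer(cmd, t, timer);

	/* handle the expired command here */
	(void) cmd;
}

static void example_start_timer(struct example_cmd *cmd, unsigned long timeout)
{
	timer_setup(&cmd->timer, example_timeout, 0);
	cmd->timer.expires = jiffies + timeout;
	add_timer(&cmd->timer);
}
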
8286/**
8287 * ipr_init_ioa_mem - Initialize ioa_cfg control block
8288 * @ioa_cfg: ioa cfg struct
8289 *
8290 * Return value:
8291 * nothing
8292 **/
8293static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
8294{
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008295 struct ipr_hrr_queue *hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008296
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008297 for_each_hrrq(hrrq, ioa_cfg) {
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008298 spin_lock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008299 memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
8300
8301 /* Initialize Host RRQ pointers */
8302 hrrq->hrrq_start = hrrq->host_rrq;
8303 hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
8304 hrrq->hrrq_curr = hrrq->hrrq_start;
8305 hrrq->toggle_bit = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008306 spin_unlock(&hrrq->_lock);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008307 }
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008308 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008309
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008310 ioa_cfg->identify_hrrq_index = 0;
8311 if (ioa_cfg->hrrq_num == 1)
8312 atomic_set(&ioa_cfg->hrrq_index, 0);
8313 else
8314 atomic_set(&ioa_cfg->hrrq_index, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008315
8316 /* Zero out config table */
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08008317 memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008318}
8319
8320/**
Wayne Boyer214777b2010-02-19 13:24:26 -08008321 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
8322 * @ipr_cmd: ipr command struct
8323 *
8324 * Return value:
8325 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8326 **/
8327static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
8328{
8329 unsigned long stage, stage_time;
8330 u32 feedback;
8331 volatile u32 int_reg;
8332 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8333 u64 maskval = 0;
8334
8335 feedback = readl(ioa_cfg->regs.init_feedback_reg);
8336 stage = feedback & IPR_IPL_INIT_STAGE_MASK;
8337 stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
8338
8339 ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
8340
8341 /* sanity check the stage_time value */
Wayne Boyer438b0332010-05-10 09:13:00 -07008342 if (stage_time == 0)
8343 stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
8344 else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
Wayne Boyer214777b2010-02-19 13:24:26 -08008345 stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
8346 else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
8347 stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
8348
8349 if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
8350 writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
8351 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8352 stage_time = ioa_cfg->transop_timeout;
8353 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8354 } else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
Wayne Boyer1df79ca2010-07-14 10:49:43 -07008355 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
8356 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8357 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
8358 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8359 maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
8360 writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
8361 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8362 return IPR_RC_JOB_CONTINUE;
8363 }
Wayne Boyer214777b2010-02-19 13:24:26 -08008364 }
8365
Wayne Boyer214777b2010-02-19 13:24:26 -08008366 ipr_cmd->timer.expires = jiffies + stage_time * HZ;
Kees Cook841b86f2017-10-23 09:40:42 +02008367 ipr_cmd->timer.function = ipr_oper_timeout;
Wayne Boyer214777b2010-02-19 13:24:26 -08008368 ipr_cmd->done = ipr_reset_ioa_job;
8369 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008370
8371 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Wayne Boyer214777b2010-02-19 13:24:26 -08008372
8373 return IPR_RC_JOB_RETURN;
8374}
8375
8376/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008377 * ipr_reset_enable_ioa - Enable the IOA following a reset.
8378 * @ipr_cmd: ipr command struct
8379 *
8380 * This function reinitializes some control blocks and
8381 * enables destructive diagnostics on the adapter.
8382 *
8383 * Return value:
8384 * IPR_RC_JOB_RETURN
8385 **/
8386static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
8387{
8388 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8389 volatile u32 int_reg;
Wayne Boyer7be96902010-05-10 09:14:07 -07008390 volatile u64 maskval;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008391 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008392
8393 ENTER;
Wayne Boyer214777b2010-02-19 13:24:26 -08008394 ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008395 ipr_init_ioa_mem(ioa_cfg);
8396
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06008397 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
8398 spin_lock(&ioa_cfg->hrrq[i]._lock);
8399 ioa_cfg->hrrq[i].allow_interrupts = 1;
8400 spin_unlock(&ioa_cfg->hrrq[i]._lock);
8401 }
8402 wmb();
Wayne Boyer8701f182010-06-04 10:26:50 -07008403 if (ioa_cfg->sis64) {
8404 /* Set the adapter to the correct endian mode. */
8405 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8406 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8407 }
8408
Wayne Boyer7be96902010-05-10 09:14:07 -07008409 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008410
8411 if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
8412 writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
Wayne Boyer214777b2010-02-19 13:24:26 -08008413 ioa_cfg->regs.clr_interrupt_mask_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008414 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8415 return IPR_RC_JOB_CONTINUE;
8416 }
8417
8418 /* Enable destructive diagnostics on IOA */
Wayne Boyer214777b2010-02-19 13:24:26 -08008419 writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008420
Wayne Boyer7be96902010-05-10 09:14:07 -07008421 if (ioa_cfg->sis64) {
8422 maskval = IPR_PCII_IPL_STAGE_CHANGE;
8423 maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
8424 writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
8425 } else
8426 writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer214777b2010-02-19 13:24:26 -08008427
Linus Torvalds1da177e2005-04-16 15:20:36 -07008428 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8429
8430 dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
8431
Wayne Boyer214777b2010-02-19 13:24:26 -08008432 if (ioa_cfg->sis64) {
8433 ipr_cmd->job_step = ipr_reset_next_stage;
8434 return IPR_RC_JOB_CONTINUE;
8435 }
8436
Brian King5469cb52007-03-29 12:42:40 -05008437 ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
Kees Cook841b86f2017-10-23 09:40:42 +02008438 ipr_cmd->timer.function = ipr_oper_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008439 ipr_cmd->done = ipr_reset_ioa_job;
8440 add_timer(&ipr_cmd->timer);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06008441 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008442
8443 LEAVE;
8444 return IPR_RC_JOB_RETURN;
8445}
8446
8447/**
8448 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
8449 * @ipr_cmd: ipr command struct
8450 *
8451 * This function is invoked when an adapter dump has run out
8452 * of processing time.
8453 *
8454 * Return value:
8455 * IPR_RC_JOB_CONTINUE
8456 **/
8457static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
8458{
8459 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8460
8461 if (ioa_cfg->sdt_state == GET_DUMP)
Brian King41e9a692011-09-21 08:51:11 -05008462 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8463 else if (ioa_cfg->sdt_state == READ_DUMP)
Linus Torvalds1da177e2005-04-16 15:20:36 -07008464 ioa_cfg->sdt_state = ABORT_DUMP;
8465
Brian King4c647e92011-10-15 09:08:56 -05008466 ioa_cfg->dump_timeout = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008467 ipr_cmd->job_step = ipr_reset_alert;
8468
8469 return IPR_RC_JOB_CONTINUE;
8470}
8471
8472/**
8473 * ipr_unit_check_no_data - Log a unit check/no data error log
8474 * @ioa_cfg: ioa config struct
8475 *
8476 * Logs an error indicating the adapter unit checked, but for some
8477 * reason, we were unable to fetch the unit check buffer.
8478 *
8479 * Return value:
8480 * nothing
8481 **/
8482static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
8483{
8484 ioa_cfg->errors_logged++;
8485 dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
8486}
8487
8488/**
8489 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
8490 * @ioa_cfg: ioa config struct
8491 *
8492 * Fetches the unit check buffer from the adapter by clocking the data
8493 * through the mailbox register.
8494 *
8495 * Return value:
8496 * nothing
8497 **/
8498static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
8499{
8500 unsigned long mailbox;
8501 struct ipr_hostrcb *hostrcb;
8502 struct ipr_uc_sdt sdt;
8503 int rc, length;
Brian King65f56472007-04-26 16:00:12 -05008504 u32 ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008505
8506 mailbox = readl(ioa_cfg->ioa_mailbox);
8507
Wayne Boyerdcbad002010-02-19 13:24:14 -08008508 if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008509 ipr_unit_check_no_data(ioa_cfg);
8510 return;
8511 }
8512
8513 memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
8514 rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
8515 (sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
8516
Wayne Boyerdcbad002010-02-19 13:24:14 -08008517 if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
8518 ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
8519 (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008520 ipr_unit_check_no_data(ioa_cfg);
8521 return;
8522 }
8523
8524 /* Find length of the first sdt entry (UC buffer) */
Wayne Boyerdcbad002010-02-19 13:24:14 -08008525 if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
8526 length = be32_to_cpu(sdt.entry[0].end_token);
8527 else
8528 length = (be32_to_cpu(sdt.entry[0].end_token) -
8529 be32_to_cpu(sdt.entry[0].start_token)) &
8530 IPR_FMT2_MBX_ADDR_MASK;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008531
8532 hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
8533 struct ipr_hostrcb, queue);
Brian Kingafc3f832016-08-24 12:56:51 -05008534 list_del_init(&hostrcb->queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008535 memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
8536
8537 rc = ipr_get_ldump_data_section(ioa_cfg,
Wayne Boyerdcbad002010-02-19 13:24:14 -08008538 be32_to_cpu(sdt.entry[0].start_token),
Linus Torvalds1da177e2005-04-16 15:20:36 -07008539 (__be32 *)&hostrcb->hcam,
8540 min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
8541
Brian King65f56472007-04-26 16:00:12 -05008542 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07008543 ipr_handle_log_data(ioa_cfg, hostrcb);
Wayne Boyer4565e372010-02-19 13:24:07 -08008544 ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
Brian King65f56472007-04-26 16:00:12 -05008545 if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
8546 ioa_cfg->sdt_state == GET_DUMP)
8547 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8548 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07008549 ipr_unit_check_no_data(ioa_cfg);
8550
8551 list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
8552}
8553
8554/**
Wayne Boyer110def82010-11-04 09:36:16 -07008555 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
8556 * @ipr_cmd: ipr command struct
8557 *
 8558 * Description: This function fetches the unit check buffer from the adapter.
8559 *
8560 * Return value:
8561 * IPR_RC_JOB_RETURN
8562 **/
8563static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
8564{
8565 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8566
8567 ENTER;
8568 ioa_cfg->ioa_unit_checked = 0;
8569 ipr_get_unit_check_buffer(ioa_cfg);
8570 ipr_cmd->job_step = ipr_reset_alert;
8571 ipr_reset_start_timer(ipr_cmd, 0);
8572
8573 LEAVE;
8574 return IPR_RC_JOB_RETURN;
8575}
8576
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008577static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
8578{
8579 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8580
8581 ENTER;
8582
8583 if (ioa_cfg->sdt_state != GET_DUMP)
8584 return IPR_RC_JOB_RETURN;
8585
8586 if (!ioa_cfg->sis64 || !ipr_cmd->u.time_left ||
8587 (readl(ioa_cfg->regs.sense_interrupt_reg) &
8588 IPR_PCII_MAILBOX_STABLE)) {
8589
8590 if (!ipr_cmd->u.time_left)
8591 dev_err(&ioa_cfg->pdev->dev,
8592 "Timed out waiting for Mailbox register.\n");
8593
8594 ioa_cfg->sdt_state = READ_DUMP;
8595 ioa_cfg->dump_timeout = 0;
8596 if (ioa_cfg->sis64)
8597 ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
8598 else
8599 ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
8600 ipr_cmd->job_step = ipr_reset_wait_for_dump;
8601 schedule_work(&ioa_cfg->work_q);
8602
8603 } else {
8604 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8605 ipr_reset_start_timer(ipr_cmd,
8606 IPR_CHECK_FOR_RESET_TIMEOUT);
8607 }
8608
8609 LEAVE;
8610 return IPR_RC_JOB_RETURN;
8611}
8612
Wayne Boyer110def82010-11-04 09:36:16 -07008613/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008614 * ipr_reset_restore_cfg_space - Restore PCI config space.
8615 * @ipr_cmd: ipr command struct
8616 *
8617 * Description: This function restores the saved PCI config space of
8618 * the adapter, fails all outstanding ops back to the callers, and
8619 * fetches the dump/unit check if applicable to this reset.
8620 *
8621 * Return value:
8622 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8623 **/
8624static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
8625{
8626 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyer630ad8312011-04-07 12:12:30 -07008627 u32 int_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008628
8629 ENTER;
Kleber Sacilotto de Souza99c965d2009-11-25 20:13:43 -02008630 ioa_cfg->pdev->state_saved = true;
Jon Mason1d3c16a2010-11-30 17:43:26 -06008631 pci_restore_state(ioa_cfg->pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008632
8633 if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
Wayne Boyer96d21f02010-05-10 09:13:27 -07008634 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008635 return IPR_RC_JOB_CONTINUE;
8636 }
8637
8638 ipr_fail_all_ops(ioa_cfg);
8639
Wayne Boyer8701f182010-06-04 10:26:50 -07008640 if (ioa_cfg->sis64) {
8641 /* Set the adapter to the correct endian mode. */
8642 writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
8643 int_reg = readl(ioa_cfg->regs.endian_swap_reg);
8644 }
8645
Linus Torvalds1da177e2005-04-16 15:20:36 -07008646 if (ioa_cfg->ioa_unit_checked) {
Wayne Boyer110def82010-11-04 09:36:16 -07008647 if (ioa_cfg->sis64) {
8648 ipr_cmd->job_step = ipr_reset_get_unit_check_job;
8649 ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
8650 return IPR_RC_JOB_RETURN;
8651 } else {
8652 ioa_cfg->ioa_unit_checked = 0;
8653 ipr_get_unit_check_buffer(ioa_cfg);
8654 ipr_cmd->job_step = ipr_reset_alert;
8655 ipr_reset_start_timer(ipr_cmd, 0);
8656 return IPR_RC_JOB_RETURN;
8657 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07008658 }
8659
8660 if (ioa_cfg->in_ioa_bringdown) {
8661 ipr_cmd->job_step = ipr_ioa_bringdown_done;
Gabriel Krisman Bertazif41f1d92015-11-03 16:26:06 -02008662 } else if (ioa_cfg->sdt_state == GET_DUMP) {
8663 ipr_cmd->job_step = ipr_dump_mailbox_wait;
8664 ipr_cmd->u.time_left = IPR_WAIT_FOR_MAILBOX;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008665 } else {
8666 ipr_cmd->job_step = ipr_reset_enable_ioa;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008667 }
8668
Wayne Boyer438b0332010-05-10 09:13:00 -07008669 LEAVE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008670 return IPR_RC_JOB_CONTINUE;
8671}
8672
8673/**
Brian Kinge619e1a2007-01-23 11:25:37 -06008674 * ipr_reset_bist_done - BIST has completed on the adapter.
8675 * @ipr_cmd: ipr command struct
8676 *
8677 * Description: Unblock config space and resume the reset process.
8678 *
8679 * Return value:
8680 * IPR_RC_JOB_CONTINUE
8681 **/
8682static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
8683{
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008684 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8685
Brian Kinge619e1a2007-01-23 11:25:37 -06008686 ENTER;
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008687 if (ioa_cfg->cfg_locked)
8688 pci_cfg_access_unlock(ioa_cfg->pdev);
8689 ioa_cfg->cfg_locked = 0;
Brian Kinge619e1a2007-01-23 11:25:37 -06008690 ipr_cmd->job_step = ipr_reset_restore_cfg_space;
8691 LEAVE;
8692 return IPR_RC_JOB_CONTINUE;
8693}
8694
8695/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008696 * ipr_reset_start_bist - Run BIST on the adapter.
8697 * @ipr_cmd: ipr command struct
8698 *
8699 * Description: This function runs BIST on the adapter, then delays 2 seconds.
8700 *
8701 * Return value:
8702 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8703 **/
8704static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
8705{
8706 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008707 int rc = PCIBIOS_SUCCESSFUL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008708
8709 ENTER;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008710 if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
8711 writel(IPR_UPROCI_SIS64_START_BIST,
8712 ioa_cfg->regs.set_uproc_interrupt_reg32);
8713 else
8714 rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
8715
8716 if (rc == PCIBIOS_SUCCESSFUL) {
Brian Kinge619e1a2007-01-23 11:25:37 -06008717 ipr_cmd->job_step = ipr_reset_bist_done;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008718 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8719 rc = IPR_RC_JOB_RETURN;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008720 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008721 if (ioa_cfg->cfg_locked)
8722 pci_cfg_access_unlock(ipr_cmd->ioa_cfg->pdev);
8723 ioa_cfg->cfg_locked = 0;
Wayne Boyercb237ef2010-06-17 11:51:40 -07008724 ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
8725 rc = IPR_RC_JOB_CONTINUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008726 }
8727
8728 LEAVE;
8729 return rc;
8730}
8731
8732/**
Brian King463fc692007-05-07 17:09:05 -05008733 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
8734 * @ipr_cmd: ipr command struct
8735 *
8736 * Description: This clears PCI reset to the adapter and delays two seconds.
8737 *
8738 * Return value:
8739 * IPR_RC_JOB_RETURN
8740 **/
8741static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
8742{
8743 ENTER;
Brian King463fc692007-05-07 17:09:05 -05008744 ipr_cmd->job_step = ipr_reset_bist_done;
8745 ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
8746 LEAVE;
8747 return IPR_RC_JOB_RETURN;
8748}
8749
8750/**
Brian King2796ca52015-03-26 11:23:52 -05008751 * ipr_reset_reset_work - Pulse a PCIe fundamental reset
8752 * @work: work struct
8753 *
8754 * Description: This pulses warm reset to a slot.
8755 *
8756 **/
8757static void ipr_reset_reset_work(struct work_struct *work)
8758{
8759 struct ipr_cmnd *ipr_cmd = container_of(work, struct ipr_cmnd, work);
8760 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8761 struct pci_dev *pdev = ioa_cfg->pdev;
8762 unsigned long lock_flags = 0;
8763
8764 ENTER;
8765 pci_set_pcie_reset_state(pdev, pcie_warm_reset);
8766 msleep(jiffies_to_msecs(IPR_PCI_RESET_TIMEOUT));
8767 pci_set_pcie_reset_state(pdev, pcie_deassert_reset);
8768
8769 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8770 if (ioa_cfg->reset_cmd == ipr_cmd)
8771 ipr_reset_ioa_job(ipr_cmd);
8772 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8773 LEAVE;
8774}
8775
8776/**
Brian King463fc692007-05-07 17:09:05 -05008777 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
8778 * @ipr_cmd: ipr command struct
8779 *
8780 * Description: This asserts PCI reset to the adapter.
8781 *
8782 * Return value:
8783 * IPR_RC_JOB_RETURN
8784 **/
8785static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
8786{
8787 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
Brian King463fc692007-05-07 17:09:05 -05008788
8789 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05008790 INIT_WORK(&ipr_cmd->work, ipr_reset_reset_work);
8791 queue_work(ioa_cfg->reset_work_q, &ipr_cmd->work);
Brian King463fc692007-05-07 17:09:05 -05008792 ipr_cmd->job_step = ipr_reset_slot_reset_done;
Brian King463fc692007-05-07 17:09:05 -05008793 LEAVE;
8794 return IPR_RC_JOB_RETURN;
8795}
8796
8797/**
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008798 * ipr_reset_block_config_access_wait - Wait for permission to block config access
8799 * @ipr_cmd: ipr command struct
8800 *
8801 * Description: This attempts to block config access to the IOA.
8802 *
8803 * Return value:
8804 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8805 **/
8806static int ipr_reset_block_config_access_wait(struct ipr_cmnd *ipr_cmd)
8807{
8808 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8809 int rc = IPR_RC_JOB_CONTINUE;
8810
8811 if (pci_cfg_access_trylock(ioa_cfg->pdev)) {
8812 ioa_cfg->cfg_locked = 1;
8813 ipr_cmd->job_step = ioa_cfg->reset;
8814 } else {
8815 if (ipr_cmd->u.time_left) {
8816 rc = IPR_RC_JOB_RETURN;
8817 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8818 ipr_reset_start_timer(ipr_cmd,
8819 IPR_CHECK_FOR_RESET_TIMEOUT);
8820 } else {
8821 ipr_cmd->job_step = ioa_cfg->reset;
8822 dev_err(&ioa_cfg->pdev->dev,
8823 "Timed out waiting to lock config access. Resetting anyway.\n");
8824 }
8825 }
8826
8827 return rc;
8828}
8829
8830/**
8831 * ipr_reset_block_config_access - Block config access to the IOA
8832 * @ipr_cmd: ipr command struct
8833 *
8834 * Description: This attempts to block config access to the IOA
8835 *
8836 * Return value:
8837 * IPR_RC_JOB_CONTINUE
8838 **/
8839static int ipr_reset_block_config_access(struct ipr_cmnd *ipr_cmd)
8840{
8841 ipr_cmd->ioa_cfg->cfg_locked = 0;
8842 ipr_cmd->job_step = ipr_reset_block_config_access_wait;
8843 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8844 return IPR_RC_JOB_CONTINUE;
8845}
8846
8847/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07008848 * ipr_reset_allowed - Query whether or not IOA can be reset
8849 * @ioa_cfg: ioa config struct
8850 *
8851 * Return value:
8852 * 0 if reset not allowed / non-zero if reset is allowed
8853 **/
8854static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
8855{
8856 volatile u32 temp_reg;
8857
8858 temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8859 return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
8860}
8861
8862/**
8863 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
8864 * @ipr_cmd: ipr command struct
8865 *
8866 * Description: This function waits for adapter permission to run BIST,
8867 * then runs BIST. If the adapter does not give permission after a
8868 * reasonable time, we will reset the adapter anyway. The impact of
8869 * resetting the adapter without warning the adapter is the risk of
8870 * losing the persistent error log on the adapter. If the adapter is
8871 * reset while it is writing to the flash on the adapter, the flash
8872 * segment will have bad ECC and be zeroed.
8873 *
8874 * Return value:
8875 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8876 **/
8877static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
8878{
8879 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8880 int rc = IPR_RC_JOB_RETURN;
8881
8882 if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
8883 ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
8884 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8885 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008886 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008887 rc = IPR_RC_JOB_CONTINUE;
8888 }
8889
8890 return rc;
8891}
8892
8893/**
Wayne Boyer8701f182010-06-04 10:26:50 -07008894 * ipr_reset_alert - Alert the adapter of a pending reset
Linus Torvalds1da177e2005-04-16 15:20:36 -07008895 * @ipr_cmd: ipr command struct
8896 *
8897 * Description: This function alerts the adapter that it will be reset.
8898 * If memory space is not currently enabled, proceed directly
8899 * to running BIST on the adapter. The timer must always be started
8900 * so we guarantee we do not run BIST from ipr_isr.
8901 *
8902 * Return value:
8903 * IPR_RC_JOB_RETURN
8904 **/
8905static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
8906{
8907 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8908 u16 cmd_reg;
8909 int rc;
8910
8911 ENTER;
8912 rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
8913
8914 if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
8915 ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
Wayne Boyer214777b2010-02-19 13:24:26 -08008916 writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
Linus Torvalds1da177e2005-04-16 15:20:36 -07008917 ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
8918 } else {
Jan Kiszkafb51ccb2011-11-04 09:45:59 +01008919 ipr_cmd->job_step = ipr_reset_block_config_access;
Linus Torvalds1da177e2005-04-16 15:20:36 -07008920 }
8921
8922 ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
8923 ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
8924
8925 LEAVE;
8926 return IPR_RC_JOB_RETURN;
8927}
8928
8929/**
Brian King4fdd7c72015-03-26 11:23:50 -05008930 * ipr_reset_quiesce_done - Complete IOA disconnect
8931 * @ipr_cmd: ipr command struct
8932 *
8933 * Description: Freeze the adapter to complete quiesce processing
8934 *
8935 * Return value:
8936 * IPR_RC_JOB_CONTINUE
8937 **/
8938static int ipr_reset_quiesce_done(struct ipr_cmnd *ipr_cmd)
8939{
8940 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8941
8942 ENTER;
8943 ipr_cmd->job_step = ipr_ioa_bringdown_done;
8944 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8945 LEAVE;
8946 return IPR_RC_JOB_CONTINUE;
8947}
8948
8949/**
8950 * ipr_reset_cancel_hcam_done - Check for outstanding commands
8951 * @ipr_cmd: ipr command struct
8952 *
8953 * Description: Ensure nothing is outstanding to the IOA and
8954 * proceed with IOA disconnect. Otherwise reset the IOA.
8955 *
8956 * Return value:
8957 * IPR_RC_JOB_RETURN / IPR_RC_JOB_CONTINUE
8958 **/
8959static int ipr_reset_cancel_hcam_done(struct ipr_cmnd *ipr_cmd)
8960{
8961 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8962 struct ipr_cmnd *loop_cmd;
8963 struct ipr_hrr_queue *hrrq;
8964 int rc = IPR_RC_JOB_CONTINUE;
8965 int count = 0;
8966
8967 ENTER;
8968 ipr_cmd->job_step = ipr_reset_quiesce_done;
8969
8970 for_each_hrrq(hrrq, ioa_cfg) {
8971 spin_lock(&hrrq->_lock);
8972 list_for_each_entry(loop_cmd, &hrrq->hrrq_pending_q, queue) {
8973 count++;
8974 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8975 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
8976 rc = IPR_RC_JOB_RETURN;
8977 break;
8978 }
8979 spin_unlock(&hrrq->_lock);
8980
8981 if (count)
8982 break;
8983 }
8984
8985 LEAVE;
8986 return rc;
8987}
8988
8989/**
8990 * ipr_reset_cancel_hcam - Cancel outstanding HCAMs
8991 * @ipr_cmd: ipr command struct
8992 *
 8993 * Description: Cancel any outstanding HCAMs to the IOA.
8994 *
8995 * Return value:
8996 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
8997 **/
8998static int ipr_reset_cancel_hcam(struct ipr_cmnd *ipr_cmd)
8999{
9000 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9001 int rc = IPR_RC_JOB_CONTINUE;
9002 struct ipr_cmd_pkt *cmd_pkt;
9003 struct ipr_cmnd *hcam_cmd;
9004 struct ipr_hrr_queue *hrrq = &ioa_cfg->hrrq[IPR_INIT_HRRQ];
9005
9006 ENTER;
9007 ipr_cmd->job_step = ipr_reset_cancel_hcam_done;
9008
9009 if (!hrrq->ioa_is_dead) {
9010 if (!list_empty(&ioa_cfg->hostrcb_pending_q)) {
9011 list_for_each_entry(hcam_cmd, &hrrq->hrrq_pending_q, queue) {
9012 if (hcam_cmd->ioarcb.cmd_pkt.cdb[0] != IPR_HOST_CONTROLLED_ASYNC)
9013 continue;
9014
9015 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9016 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9017 cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
9018 cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
9019 cmd_pkt->cdb[0] = IPR_CANCEL_REQUEST;
9020 cmd_pkt->cdb[1] = IPR_CANCEL_64BIT_IOARCB;
9021 cmd_pkt->cdb[10] = ((u64) hcam_cmd->dma_addr >> 56) & 0xff;
9022 cmd_pkt->cdb[11] = ((u64) hcam_cmd->dma_addr >> 48) & 0xff;
9023 cmd_pkt->cdb[12] = ((u64) hcam_cmd->dma_addr >> 40) & 0xff;
9024 cmd_pkt->cdb[13] = ((u64) hcam_cmd->dma_addr >> 32) & 0xff;
9025 cmd_pkt->cdb[2] = ((u64) hcam_cmd->dma_addr >> 24) & 0xff;
9026 cmd_pkt->cdb[3] = ((u64) hcam_cmd->dma_addr >> 16) & 0xff;
9027 cmd_pkt->cdb[4] = ((u64) hcam_cmd->dma_addr >> 8) & 0xff;
9028 cmd_pkt->cdb[5] = ((u64) hcam_cmd->dma_addr) & 0xff;
9029
9030 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9031 IPR_CANCEL_TIMEOUT);
9032
9033 rc = IPR_RC_JOB_RETURN;
9034 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9035 break;
9036 }
9037 }
9038 } else
9039 ipr_cmd->job_step = ipr_reset_alert;
9040
9041 LEAVE;
9042 return rc;
9043}
9044
9045/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009046 * ipr_reset_ucode_download_done - Microcode download completion
9047 * @ipr_cmd: ipr command struct
9048 *
9049 * Description: This function unmaps the microcode download buffer.
9050 *
9051 * Return value:
9052 * IPR_RC_JOB_CONTINUE
9053 **/
9054static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
9055{
9056 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9057 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9058
Anton Blanchardd73341b2014-10-30 17:27:08 -05009059 dma_unmap_sg(&ioa_cfg->pdev->dev, sglist->scatterlist,
Linus Torvalds1da177e2005-04-16 15:20:36 -07009060 sglist->num_sg, DMA_TO_DEVICE);
9061
9062 ipr_cmd->job_step = ipr_reset_alert;
9063 return IPR_RC_JOB_CONTINUE;
9064}
9065
9066/**
9067 * ipr_reset_ucode_download - Download microcode to the adapter
9068 * @ipr_cmd: ipr command struct
9069 *
 9070 * Description: This function checks to see if there is microcode
9071 * to download to the adapter. If there is, a download is performed.
9072 *
9073 * Return value:
9074 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9075 **/
9076static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
9077{
9078 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9079 struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
9080
9081 ENTER;
9082 ipr_cmd->job_step = ipr_reset_alert;
9083
9084 if (!sglist)
9085 return IPR_RC_JOB_CONTINUE;
9086
9087 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9088 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
9089 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
9090 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
9091 ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
9092 ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
9093 ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
9094
Wayne Boyera32c0552010-02-19 13:23:36 -08009095 if (ioa_cfg->sis64)
9096 ipr_build_ucode_ioadl64(ipr_cmd, sglist);
9097 else
9098 ipr_build_ucode_ioadl(ipr_cmd, sglist);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009099 ipr_cmd->job_step = ipr_reset_ucode_download_done;
9100
9101 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
9102 IPR_WRITE_BUFFER_TIMEOUT);
9103
9104 LEAVE;
9105 return IPR_RC_JOB_RETURN;
9106}
9107
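/*
 * Illustrative sketch, not part of the driver: the WRITE BUFFER CDB built
 * above encodes the microcode image length as a 24-bit, big-endian byte
 * count in CDB bytes 6-8.  A hypothetical helper producing the same
 * encoding:
 */
static void example_pack_write_buffer_len(u8 *cdb, u32 buffer_len)
{
	cdb[6] = (buffer_len & 0xff0000) >> 16;	/* length bits 23..16 */
	cdb[7] = (buffer_len & 0x00ff00) >> 8;	/* length bits 15..8 */
	cdb[8] = buffer_len & 0x0000ff;		/* length bits 7..0 */
}
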
9108/**
9109 * ipr_reset_shutdown_ioa - Shutdown the adapter
9110 * @ipr_cmd: ipr command struct
9111 *
9112 * Description: This function issues an adapter shutdown of the
9113 * specified type to the specified adapter as part of the
9114 * adapter reset job.
9115 *
9116 * Return value:
9117 * IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
9118 **/
9119static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
9120{
9121 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9122 enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
9123 unsigned long timeout;
9124 int rc = IPR_RC_JOB_CONTINUE;
9125
9126 ENTER;
Brian King4fdd7c72015-03-26 11:23:50 -05009127 if (shutdown_type == IPR_SHUTDOWN_QUIESCE)
9128 ipr_cmd->job_step = ipr_reset_cancel_hcam;
9129 else if (shutdown_type != IPR_SHUTDOWN_NONE &&
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009130 !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009131 ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9132 ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9133 ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9134 ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
9135
Brian Kingac09c342007-04-26 16:00:16 -05009136 if (shutdown_type == IPR_SHUTDOWN_NORMAL)
9137 timeout = IPR_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009138 else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
9139 timeout = IPR_INTERNAL_TIMEOUT;
Brian Kingac09c342007-04-26 16:00:16 -05009140 else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
9141 timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009142 else
Brian Kingac09c342007-04-26 16:00:16 -05009143 timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009144
9145 ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
9146
9147 rc = IPR_RC_JOB_RETURN;
9148 ipr_cmd->job_step = ipr_reset_ucode_download;
9149 } else
9150 ipr_cmd->job_step = ipr_reset_alert;
9151
9152 LEAVE;
9153 return rc;
9154}
9155
9156/**
9157 * ipr_reset_ioa_job - Adapter reset job
9158 * @ipr_cmd: ipr command struct
9159 *
9160 * Description: This function is the job router for the adapter reset job.
9161 *
9162 * Return value:
9163 * none
9164 **/
9165static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
9166{
9167 u32 rc, ioasc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009168 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9169
9170 do {
Wayne Boyer96d21f02010-05-10 09:13:27 -07009171 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009172
9173 if (ioa_cfg->reset_cmd != ipr_cmd) {
9174 /*
9175 * We are doing nested adapter resets and this is
9176 * not the current reset job.
9177 */
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009178 list_add_tail(&ipr_cmd->queue,
9179 &ipr_cmd->hrrq->hrrq_free_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009180 return;
9181 }
9182
9183 if (IPR_IOASC_SENSE_KEY(ioasc)) {
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009184 rc = ipr_cmd->job_step_failed(ipr_cmd);
9185 if (rc == IPR_RC_JOB_RETURN)
9186 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009187 }
9188
9189 ipr_reinit_ipr_cmnd(ipr_cmd);
brking@us.ibm.comdfed8232005-11-01 17:02:55 -06009190 ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009191 rc = ipr_cmd->job_step(ipr_cmd);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009192 } while (rc == IPR_RC_JOB_CONTINUE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009193}
9194
9195/**
9196 * _ipr_initiate_ioa_reset - Initiate an adapter reset
9197 * @ioa_cfg: ioa config struct
9198 * @job_step: first job step of reset job
9199 * @shutdown_type: shutdown type
9200 *
9201 * Description: This function will initiate the reset of the given adapter
9202 * starting at the selected job step.
9203 * If the caller needs to wait on the completion of the reset,
9204 * the caller must sleep on the reset_wait_q.
9205 *
9206 * Return value:
9207 * none
9208 **/
9209static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9210 int (*job_step) (struct ipr_cmnd *),
9211 enum ipr_shutdown_type shutdown_type)
9212{
9213 struct ipr_cmnd *ipr_cmd;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009214 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009215
9216 ioa_cfg->in_reset_reload = 1;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009217 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9218 spin_lock(&ioa_cfg->hrrq[i]._lock);
9219 ioa_cfg->hrrq[i].allow_cmds = 0;
9220 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9221 }
9222 wmb();
Brian Kingb0e17a92017-08-01 10:21:30 -05009223 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
9224 ioa_cfg->scsi_unblock = 0;
9225 ioa_cfg->scsi_blocked = 1;
Brian Kingbfae7822013-01-30 23:45:08 -06009226 scsi_block_requests(ioa_cfg->host);
Brian Kingb0e17a92017-08-01 10:21:30 -05009227 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009228
9229 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9230 ioa_cfg->reset_cmd = ipr_cmd;
9231 ipr_cmd->job_step = job_step;
9232 ipr_cmd->u.shutdown_type = shutdown_type;
9233
9234 ipr_reset_ioa_job(ipr_cmd);
9235}
9236
9237/**
9238 * ipr_initiate_ioa_reset - Initiate an adapter reset
9239 * @ioa_cfg: ioa config struct
9240 * @shutdown_type: shutdown type
9241 *
9242 * Description: This function will initiate the reset of the given adapter.
9243 * If the caller needs to wait on the completion of the reset,
9244 * the caller must sleep on the reset_wait_q.
9245 *
9246 * Return value:
9247 * none
9248 **/
9249static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
9250 enum ipr_shutdown_type shutdown_type)
9251{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009252 int i;
9253
9254 if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009255 return;
9256
Brian King41e9a692011-09-21 08:51:11 -05009257 if (ioa_cfg->in_reset_reload) {
9258 if (ioa_cfg->sdt_state == GET_DUMP)
9259 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
9260 else if (ioa_cfg->sdt_state == READ_DUMP)
9261 ioa_cfg->sdt_state = ABORT_DUMP;
9262 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009263
9264 if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
9265 dev_err(&ioa_cfg->pdev->dev,
9266 "IOA taken offline - error recovery failed\n");
9267
9268 ioa_cfg->reset_retries = 0;
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009269 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9270 spin_lock(&ioa_cfg->hrrq[i]._lock);
9271 ioa_cfg->hrrq[i].ioa_is_dead = 1;
9272 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9273 }
9274 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -07009275
9276 if (ioa_cfg->in_ioa_bringdown) {
9277 ioa_cfg->reset_cmd = NULL;
9278 ioa_cfg->in_reset_reload = 0;
9279 ipr_fail_all_ops(ioa_cfg);
9280 wake_up_all(&ioa_cfg->reset_wait_q);
9281
Brian Kingbfae7822013-01-30 23:45:08 -06009282 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
Brian Kingb0e17a92017-08-01 10:21:30 -05009283 ioa_cfg->scsi_unblock = 1;
9284 schedule_work(&ioa_cfg->work_q);
Brian Kingbfae7822013-01-30 23:45:08 -06009285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009286 return;
9287 } else {
9288 ioa_cfg->in_ioa_bringdown = 1;
9289 shutdown_type = IPR_SHUTDOWN_NONE;
9290 }
9291 }
9292
9293 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
9294 shutdown_type);
9295}
9296
9297/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009298 * ipr_reset_freeze - Hold off all I/O activity
9299 * @ipr_cmd: ipr command struct
9300 *
9301 * Description: If the PCI slot is frozen, hold off all I/O
9302 * activity; then, as soon as the slot is available again,
9303 * initiate an adapter reset.
9304 */
9305static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
9306{
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009307 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9308 int i;
9309
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009310 /* Disallow new interrupts, avoid loop */
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009311 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9312 spin_lock(&ioa_cfg->hrrq[i]._lock);
9313 ioa_cfg->hrrq[i].allow_interrupts = 0;
9314 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9315 }
9316 wmb();
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009317 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009318 ipr_cmd->done = ipr_reset_ioa_job;
9319 return IPR_RC_JOB_RETURN;
9320}
9321
9322/**
Brian King6270e592014-01-21 12:16:41 -06009323 * ipr_pci_mmio_enabled - Called when MMIO has been re-enabled
9324 * @pdev: PCI device struct
9325 *
9326 * Description: This routine is called to tell us that the MMIO
9327 * access to the IOA has been restored
9328 */
9329static pci_ers_result_t ipr_pci_mmio_enabled(struct pci_dev *pdev)
9330{
9331 unsigned long flags = 0;
9332 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9333
9334 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9335 if (!ioa_cfg->probe_done)
9336 pci_save_state(pdev);
9337 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9338 return PCI_ERS_RESULT_NEED_RESET;
9339}
9340
9341/**
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009342 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
9343 * @pdev: PCI device struct
9344 *
9345 * Description: This routine is called to tell us that the PCI bus
9346 * is down. Can't do anything here, except put the device driver
9347 * into a holding pattern, waiting for the PCI bus to come back.
9348 */
9349static void ipr_pci_frozen(struct pci_dev *pdev)
9350{
9351 unsigned long flags = 0;
9352 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9353
9354 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009355 if (ioa_cfg->probe_done)
9356 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009357 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9358}
9359
9360/**
9361 * ipr_pci_slot_reset - Called when PCI slot has been reset.
9362 * @pdev: PCI device struct
9363 *
9364 * Description: This routine is called by the pci error recovery
9365 * code after the PCI slot has been reset, just before we
9366 * should resume normal operations.
9367 */
9368static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
9369{
9370 unsigned long flags = 0;
9371 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
9372
9373 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009374 if (ioa_cfg->probe_done) {
9375 if (ioa_cfg->needs_warm_reset)
9376 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9377 else
9378 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
9379 IPR_SHUTDOWN_NONE);
9380 } else
9381 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009382 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9383 return PCI_ERS_RESULT_RECOVERED;
9384}
9385
9386/**
9387 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
9388 * @pdev: PCI device struct
9389 *
9390 * Description: This routine is called when the PCI bus has
9391 * permanently failed.
9392 */
9393static void ipr_pci_perm_failure(struct pci_dev *pdev)
9394{
9395 unsigned long flags = 0;
9396 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -06009397 int i;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009398
9399 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
Brian King6270e592014-01-21 12:16:41 -06009400 if (ioa_cfg->probe_done) {
9401 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
9402 ioa_cfg->sdt_state = ABORT_DUMP;
9403 ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES - 1;
9404 ioa_cfg->in_ioa_bringdown = 1;
9405 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9406 spin_lock(&ioa_cfg->hrrq[i]._lock);
9407 ioa_cfg->hrrq[i].allow_cmds = 0;
9408 spin_unlock(&ioa_cfg->hrrq[i]._lock);
9409 }
9410 wmb();
9411 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9412 } else
9413 wake_up_all(&ioa_cfg->eeh_wait_q);
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009414 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9415}
9416
9417/**
9418 * ipr_pci_error_detected - Called when a PCI error is detected.
9419 * @pdev: PCI device struct
9420 * @state: PCI channel state
9421 *
9422 * Description: Called when a PCI error is detected.
9423 *
9424 * Return value:
9425 * PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
9426 */
9427static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
9428 pci_channel_state_t state)
9429{
9430 switch (state) {
9431 case pci_channel_io_frozen:
9432 ipr_pci_frozen(pdev);
Brian King6270e592014-01-21 12:16:41 -06009433 return PCI_ERS_RESULT_CAN_RECOVER;
Linas Vepstasf8a88b192006-02-03 16:52:42 -06009434 case pci_channel_io_perm_failure:
9435 ipr_pci_perm_failure(pdev);
9436 return PCI_ERS_RESULT_DISCONNECT;
9437 break;
9438 default:
9439 break;
9440 }
9441 return PCI_ERS_RESULT_NEED_RESET;
9442}
9443
9444/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009445 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
9446 * @ioa_cfg: ioa cfg struct
9447 *
Masahiro Yamada183b8022017-02-27 14:29:20 -08009448 * Description: This is the second phase of adapter initialization.
Linus Torvalds1da177e2005-04-16 15:20:36 -07009449 * This function takes care of initializing the adapter to the point
 9450 * where it can accept new commands.
 9451 *
9452 * Return value:
Joe Perchesb1c11812008-02-03 17:28:22 +02009453 * 0 on success / -EIO on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009454 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009455static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009456{
9457 int rc = 0;
9458 unsigned long host_lock_flags = 0;
9459
9460 ENTER;
9461 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
9462 dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
Brian King6270e592014-01-21 12:16:41 -06009463 ioa_cfg->probe_done = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -06009464 if (ioa_cfg->needs_hard_reset) {
9465 ioa_cfg->needs_hard_reset = 0;
9466 ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
9467 } else
9468 _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
9469 IPR_SHUTDOWN_NONE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009470 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009471
9472 LEAVE;
9473 return rc;
9474}
9475
9476/**
9477 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
9478 * @ioa_cfg: ioa config struct
9479 *
9480 * Return value:
9481 * none
9482 **/
9483static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
9484{
9485 int i;
9486
Brian Kinga65e8f12015-03-26 11:23:55 -05009487 if (ioa_cfg->ipr_cmnd_list) {
9488 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
9489 if (ioa_cfg->ipr_cmnd_list[i])
9490 dma_pool_free(ioa_cfg->ipr_cmd_pool,
9491 ioa_cfg->ipr_cmnd_list[i],
9492 ioa_cfg->ipr_cmnd_list_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009493
Brian Kinga65e8f12015-03-26 11:23:55 -05009494 ioa_cfg->ipr_cmnd_list[i] = NULL;
9495 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009496 }
9497
9498 if (ioa_cfg->ipr_cmd_pool)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009499 dma_pool_destroy(ioa_cfg->ipr_cmd_pool);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009500
Brian King89aad422012-03-14 21:20:10 -05009501 kfree(ioa_cfg->ipr_cmnd_list);
9502 kfree(ioa_cfg->ipr_cmnd_list_dma);
9503 ioa_cfg->ipr_cmnd_list = NULL;
9504 ioa_cfg->ipr_cmnd_list_dma = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009505 ioa_cfg->ipr_cmd_pool = NULL;
9506}
9507
9508/**
9509 * ipr_free_mem - Frees memory allocated for an adapter
9510 * @ioa_cfg: ioa cfg struct
9511 *
9512 * Return value:
9513 * nothing
9514 **/
9515static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
9516{
9517 int i;
9518
9519 kfree(ioa_cfg->res_entries);
Anton Blanchardd73341b2014-10-30 17:27:08 -05009520 dma_free_coherent(&ioa_cfg->pdev->dev, sizeof(struct ipr_misc_cbs),
9521 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009522 ipr_free_cmd_blks(ioa_cfg);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009523
9524 for (i = 0; i < ioa_cfg->hrrq_num; i++)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009525 dma_free_coherent(&ioa_cfg->pdev->dev,
9526 sizeof(u32) * ioa_cfg->hrrq[i].size,
9527 ioa_cfg->hrrq[i].host_rrq,
9528 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009529
Anton Blanchardd73341b2014-10-30 17:27:08 -05009530 dma_free_coherent(&ioa_cfg->pdev->dev, ioa_cfg->cfg_table_size,
9531 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009532
Brian Kingafc3f832016-08-24 12:56:51 -05009533 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009534 dma_free_coherent(&ioa_cfg->pdev->dev,
9535 sizeof(struct ipr_hostrcb),
9536 ioa_cfg->hostrcb[i],
9537 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009538 }
9539
9540 ipr_free_dump(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009541 kfree(ioa_cfg->trace);
9542}
9543
9544/**
Brian King2796ca52015-03-26 11:23:52 -05009545 * ipr_free_irqs - Free all allocated IRQs for the adapter.
9546 * @ioa_cfg: ipr cfg struct
9547 *
9548 * This function frees all allocated IRQs for the
9549 * specified adapter.
9550 *
9551 * Return value:
9552 * none
9553 **/
9554static void ipr_free_irqs(struct ipr_ioa_cfg *ioa_cfg)
9555{
9556 struct pci_dev *pdev = ioa_cfg->pdev;
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009557 int i;
Brian King2796ca52015-03-26 11:23:52 -05009558
Christoph Hellwiga299ee62016-09-11 15:31:24 +02009559 for (i = 0; i < ioa_cfg->nvectors; i++)
9560 free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]);
9561 pci_free_irq_vectors(pdev);
Brian King2796ca52015-03-26 11:23:52 -05009562}
9563
9564/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07009565 * ipr_free_all_resources - Free all allocated resources for an adapter.
 9566 * @ioa_cfg: ioa config struct
9567 *
9568 * This function frees all allocated resources for the
9569 * specified adapter.
9570 *
9571 * Return value:
9572 * none
9573 **/
9574static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
9575{
9576 struct pci_dev *pdev = ioa_cfg->pdev;
9577
9578 ENTER;
Brian King2796ca52015-03-26 11:23:52 -05009579 ipr_free_irqs(ioa_cfg);
9580 if (ioa_cfg->reset_work_q)
9581 destroy_workqueue(ioa_cfg->reset_work_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009582 iounmap(ioa_cfg->hdw_dma_regs);
9583 pci_release_regions(pdev);
9584 ipr_free_mem(ioa_cfg);
9585 scsi_host_put(ioa_cfg->host);
9586 pci_disable_device(pdev);
9587 LEAVE;
9588}
9589
9590/**
9591 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
9592 * @ioa_cfg: ioa config struct
9593 *
9594 * Return value:
9595 * 0 on success / -ENOMEM on allocation failure
9596 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009597static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009598{
9599 struct ipr_cmnd *ipr_cmd;
9600 struct ipr_ioarcb *ioarcb;
9601 dma_addr_t dma_addr;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009602 int i, entries_each_hrrq, hrrq_id = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009603
Anton Blanchardd73341b2014-10-30 17:27:08 -05009604 ioa_cfg->ipr_cmd_pool = dma_pool_create(IPR_NAME, &ioa_cfg->pdev->dev,
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -03009605 sizeof(struct ipr_cmnd), 512, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009606
9607 if (!ioa_cfg->ipr_cmd_pool)
9608 return -ENOMEM;
9609
Brian King89aad422012-03-14 21:20:10 -05009610 ioa_cfg->ipr_cmnd_list = kcalloc(IPR_NUM_CMD_BLKS, sizeof(struct ipr_cmnd *), GFP_KERNEL);
9611 ioa_cfg->ipr_cmnd_list_dma = kcalloc(IPR_NUM_CMD_BLKS, sizeof(dma_addr_t), GFP_KERNEL);
9612
9613 if (!ioa_cfg->ipr_cmnd_list || !ioa_cfg->ipr_cmnd_list_dma) {
9614 ipr_free_cmd_blks(ioa_cfg);
9615 return -ENOMEM;
9616 }
9617
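	/*
	 * Carve the command block index space up between the HRRQs.
	 * With more than one HRRQ, queue 0 is reserved for the driver's
	 * internal commands and the remaining queues split the base
	 * command blocks evenly between them.
	 */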
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009618 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
9619 if (ioa_cfg->hrrq_num > 1) {
9620 if (i == 0) {
9621 entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
9622 ioa_cfg->hrrq[i].min_cmd_id = 0;
Colin Ian Kingb82378e2017-12-01 13:33:27 +00009623 ioa_cfg->hrrq[i].max_cmd_id =
9624 (entries_each_hrrq - 1);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009625 } else {
9626 entries_each_hrrq =
9627 IPR_NUM_BASE_CMD_BLKS/
9628 (ioa_cfg->hrrq_num - 1);
9629 ioa_cfg->hrrq[i].min_cmd_id =
9630 IPR_NUM_INTERNAL_CMD_BLKS +
9631 (i - 1) * entries_each_hrrq;
9632 ioa_cfg->hrrq[i].max_cmd_id =
9633 (IPR_NUM_INTERNAL_CMD_BLKS +
9634 i * entries_each_hrrq - 1);
9635 }
9636 } else {
9637 entries_each_hrrq = IPR_NUM_CMD_BLKS;
9638 ioa_cfg->hrrq[i].min_cmd_id = 0;
9639 ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
9640 }
9641 ioa_cfg->hrrq[i].size = entries_each_hrrq;
9642 }
9643
9644 BUG_ON(ioa_cfg->hrrq_num == 0);
9645
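	/*
	 * The even split above can leave a few command blocks
	 * unassigned; fold them into the last HRRQ so the entire
	 * IPR_NUM_CMD_BLKS pool remains usable.
	 */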
9646 i = IPR_NUM_CMD_BLKS -
9647 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
9648 if (i > 0) {
9649 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
9650 ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
9651 }
9652
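	/*
	 * Allocate each command block from the DMA pool and pre-initialize
	 * its IOARCB with the bus addresses of the block's IOADL, IOASA and
	 * sense buffer (using the 64-bit layout on SIS-64 adapters), then
	 * park it on its HRRQ's free queue.
	 */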
Linus Torvalds1da177e2005-04-16 15:20:36 -07009653 for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
Souptick Joarder8b1bb6d2018-03-08 18:41:57 +05309654 ipr_cmd = dma_pool_zalloc(ioa_cfg->ipr_cmd_pool,
9655 GFP_KERNEL, &dma_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009656
9657 if (!ipr_cmd) {
9658 ipr_free_cmd_blks(ioa_cfg);
9659 return -ENOMEM;
9660 }
9661
Linus Torvalds1da177e2005-04-16 15:20:36 -07009662 ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
9663 ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
9664
9665 ioarcb = &ipr_cmd->ioarcb;
Wayne Boyera32c0552010-02-19 13:23:36 -08009666 ipr_cmd->dma_addr = dma_addr;
9667 if (ioa_cfg->sis64)
9668 ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
9669 else
9670 ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
9671
Linus Torvalds1da177e2005-04-16 15:20:36 -07009672 ioarcb->host_response_handle = cpu_to_be32(i << 2);
Wayne Boyera32c0552010-02-19 13:23:36 -08009673 if (ioa_cfg->sis64) {
9674 ioarcb->u.sis64_addr_data.data_ioadl_addr =
9675 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
9676 ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009677 cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
Wayne Boyera32c0552010-02-19 13:23:36 -08009678 } else {
9679 ioarcb->write_ioadl_addr =
9680 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
9681 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
9682 ioarcb->ioasa_host_pci_addr =
Wayne Boyer96d21f02010-05-10 09:13:27 -07009683 cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
Wayne Boyera32c0552010-02-19 13:23:36 -08009684 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009685 ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
9686 ipr_cmd->cmd_index = i;
9687 ipr_cmd->ioa_cfg = ioa_cfg;
9688 ipr_cmd->sense_buffer_dma = dma_addr +
9689 offsetof(struct ipr_cmnd, sense_buffer);
9690
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009691 ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
9692 ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
9693 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
9694 if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
9695 hrrq_id++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009696 }
9697
9698 return 0;
9699}
9700
9701/**
9702 * ipr_alloc_mem - Allocate memory for an adapter
9703 * @ioa_cfg: ioa config struct
9704 *
9705 * Return value:
9706 * 0 on success / non-zero for error
9707 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009708static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009709{
9710 struct pci_dev *pdev = ioa_cfg->pdev;
9711 int i, rc = -ENOMEM;
9712
9713 ENTER;
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009714 ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009715 ioa_cfg->max_devs_supported, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009716
9717 if (!ioa_cfg->res_entries)
9718 goto out;
9719
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009720 for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07009721 list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009722 ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
9723 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009724
Anton Blanchardd73341b2014-10-30 17:27:08 -05009725 ioa_cfg->vpd_cbs = dma_alloc_coherent(&pdev->dev,
9726 sizeof(struct ipr_misc_cbs),
9727 &ioa_cfg->vpd_cbs_dma,
9728 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009729
9730 if (!ioa_cfg->vpd_cbs)
9731 goto out_free_res_entries;
9732
9733 if (ipr_alloc_cmd_blks(ioa_cfg))
9734 goto out_free_vpd_cbs;
9735
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009736 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009737 ioa_cfg->hrrq[i].host_rrq = dma_alloc_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009738 sizeof(u32) * ioa_cfg->hrrq[i].size,
Anton Blanchardd73341b2014-10-30 17:27:08 -05009739 &ioa_cfg->hrrq[i].host_rrq_dma,
9740 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009741
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009742 if (!ioa_cfg->hrrq[i].host_rrq) {
9743 			while (--i >= 0)
Anton Blanchardd73341b2014-10-30 17:27:08 -05009744 dma_free_coherent(&pdev->dev,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009745 sizeof(u32) * ioa_cfg->hrrq[i].size,
9746 ioa_cfg->hrrq[i].host_rrq,
9747 ioa_cfg->hrrq[i].host_rrq_dma);
9748 goto out_ipr_free_cmd_blocks;
9749 }
9750 ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
9751 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009752
Anton Blanchardd73341b2014-10-30 17:27:08 -05009753 ioa_cfg->u.cfg_table = dma_alloc_coherent(&pdev->dev,
9754 ioa_cfg->cfg_table_size,
9755 &ioa_cfg->cfg_table_dma,
9756 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009757
Wayne Boyer3e7ebdf2010-02-19 13:23:59 -08009758 if (!ioa_cfg->u.cfg_table)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009759 goto out_free_host_rrq;
9760
Brian Kingafc3f832016-08-24 12:56:51 -05009761 for (i = 0; i < IPR_MAX_HCAMS; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009762 ioa_cfg->hostrcb[i] = dma_alloc_coherent(&pdev->dev,
9763 sizeof(struct ipr_hostrcb),
9764 &ioa_cfg->hostrcb_dma[i],
9765 GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009766
9767 if (!ioa_cfg->hostrcb[i])
9768 goto out_free_hostrcb_dma;
9769
9770 ioa_cfg->hostrcb[i]->hostrcb_dma =
9771 ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
Brian King49dc6a12006-11-21 10:28:35 -06009772 ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009773 list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
9774 }
9775
brking@us.ibm.com0bc42e32005-11-01 17:01:20 -06009776 ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
Linus Torvalds1da177e2005-04-16 15:20:36 -07009777 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
9778
9779 if (!ioa_cfg->trace)
9780 goto out_free_hostrcb_dma;
9781
Linus Torvalds1da177e2005-04-16 15:20:36 -07009782 rc = 0;
9783out:
9784 LEAVE;
9785 return rc;
9786
9787out_free_hostrcb_dma:
9788 while (i-- > 0) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009789 dma_free_coherent(&pdev->dev, sizeof(struct ipr_hostrcb),
9790 ioa_cfg->hostrcb[i],
9791 ioa_cfg->hostrcb_dma[i]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009792 }
Anton Blanchardd73341b2014-10-30 17:27:08 -05009793 dma_free_coherent(&pdev->dev, ioa_cfg->cfg_table_size,
9794 ioa_cfg->u.cfg_table, ioa_cfg->cfg_table_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009795out_free_host_rrq:
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009796 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
Anton Blanchardd73341b2014-10-30 17:27:08 -05009797 dma_free_coherent(&pdev->dev,
9798 sizeof(u32) * ioa_cfg->hrrq[i].size,
9799 ioa_cfg->hrrq[i].host_rrq,
9800 ioa_cfg->hrrq[i].host_rrq_dma);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009801 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009802out_ipr_free_cmd_blocks:
9803 ipr_free_cmd_blks(ioa_cfg);
9804out_free_vpd_cbs:
Anton Blanchardd73341b2014-10-30 17:27:08 -05009805 dma_free_coherent(&pdev->dev, sizeof(struct ipr_misc_cbs),
9806 ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
Linus Torvalds1da177e2005-04-16 15:20:36 -07009807out_free_res_entries:
9808 kfree(ioa_cfg->res_entries);
9809 goto out;
9810}
9811
9812/**
9813 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
9814 * @ioa_cfg: ioa config struct
9815 *
9816 * Return value:
9817 * none
9818 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009819static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009820{
9821 int i;
9822
9823 for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
9824 ioa_cfg->bus_attr[i].bus = i;
9825 ioa_cfg->bus_attr[i].qas_enabled = 0;
9826 ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
9827 if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
9828 ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
9829 else
9830 ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
9831 }
9832}
9833
9834/**
Brian King6270e592014-01-21 12:16:41 -06009835 * ipr_init_regs - Initialize IOA registers
Linus Torvalds1da177e2005-04-16 15:20:36 -07009836 * @ioa_cfg: ioa config struct
Linus Torvalds1da177e2005-04-16 15:20:36 -07009837 *
9838 * Return value:
Brian King6270e592014-01-21 12:16:41 -06009839 * none
Linus Torvalds1da177e2005-04-16 15:20:36 -07009840 **/
Brian King6270e592014-01-21 12:16:41 -06009841static void ipr_init_regs(struct ipr_ioa_cfg *ioa_cfg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009842{
9843 const struct ipr_interrupt_offsets *p;
9844 struct ipr_interrupts *t;
9845 void __iomem *base;
9846
Linus Torvalds1da177e2005-04-16 15:20:36 -07009847 p = &ioa_cfg->chip_cfg->regs;
9848 t = &ioa_cfg->regs;
9849 base = ioa_cfg->hdw_dma_regs;
9850
9851 t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
9852 t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009853 t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009854 t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009855 t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009856 t->clr_interrupt_reg = base + p->clr_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009857 t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009858 t->sense_interrupt_reg = base + p->sense_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009859 t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009860 t->ioarrin_reg = base + p->ioarrin_reg;
9861 t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009862 t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009863 t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009864 t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
Linus Torvalds1da177e2005-04-16 15:20:36 -07009865 t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
Wayne Boyer214777b2010-02-19 13:24:26 -08009866 t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009867
9868 if (ioa_cfg->sis64) {
Wayne Boyer214777b2010-02-19 13:24:26 -08009869 t->init_feedback_reg = base + p->init_feedback_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009870 t->dump_addr_reg = base + p->dump_addr_reg;
9871 t->dump_data_reg = base + p->dump_data_reg;
Wayne Boyer8701f182010-06-04 10:26:50 -07009872 t->endian_swap_reg = base + p->endian_swap_reg;
Wayne Boyerdcbad002010-02-19 13:24:14 -08009873 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07009874}
9875
9876/**
Brian King6270e592014-01-21 12:16:41 -06009877 * ipr_init_ioa_cfg - Initialize IOA config struct
9878 * @ioa_cfg: ioa config struct
9879 * @host: scsi host struct
9880 * @pdev: PCI dev struct
9881 *
9882 * Return value:
9883 * none
9884 **/
9885static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
9886 struct Scsi_Host *host, struct pci_dev *pdev)
9887{
9888 int i;
9889
9890 ioa_cfg->host = host;
9891 ioa_cfg->pdev = pdev;
9892 ioa_cfg->log_level = ipr_log_level;
9893 ioa_cfg->doorbell = IPR_DOORBELL;
9894 sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
9895 sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
9896 sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
9897 sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
9898 sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
9899 sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
9900
9901 INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
9902 INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
Brian Kingafc3f832016-08-24 12:56:51 -05009903 INIT_LIST_HEAD(&ioa_cfg->hostrcb_report_q);
Brian King6270e592014-01-21 12:16:41 -06009904 INIT_LIST_HEAD(&ioa_cfg->free_res_q);
9905 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
9906 INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
9907 init_waitqueue_head(&ioa_cfg->reset_wait_q);
9908 init_waitqueue_head(&ioa_cfg->msi_wait_q);
9909 init_waitqueue_head(&ioa_cfg->eeh_wait_q);
9910 ioa_cfg->sdt_state = INACTIVE;
9911
9912 ipr_initialize_bus_attr(ioa_cfg);
9913 ioa_cfg->max_devs_supported = ipr_max_devs;
9914
9915 if (ioa_cfg->sis64) {
9916 host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
9917 host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
9918 if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
9919 ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
9920 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
9921 + ((sizeof(struct ipr_config_table_entry64)
9922 * ioa_cfg->max_devs_supported)));
9923 } else {
9924 host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
9925 host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
9926 if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
9927 ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
9928 ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
9929 + ((sizeof(struct ipr_config_table_entry)
9930 * ioa_cfg->max_devs_supported)));
9931 }
9932
Brian Kingf688f962014-12-02 12:47:37 -06009933 host->max_channel = IPR_VSET_BUS;
Brian King6270e592014-01-21 12:16:41 -06009934 host->unique_id = host->host_no;
9935 host->max_cmd_len = IPR_MAX_CDB_LEN;
9936 host->can_queue = ioa_cfg->max_cmds;
9937 pci_set_drvdata(pdev, ioa_cfg);
9938
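	/* HRRQ 0 shares the SCSI host lock; any additional HRRQs get
	 * their own lock so they can be serviced independently. */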
9939 for (i = 0; i < ARRAY_SIZE(ioa_cfg->hrrq); i++) {
9940 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
9941 INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
9942 spin_lock_init(&ioa_cfg->hrrq[i]._lock);
9943 if (i == 0)
9944 ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
9945 else
9946 ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
9947 }
9948}
9949
9950/**
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009951 * ipr_get_chip_info - Find adapter chip information
Linus Torvalds1da177e2005-04-16 15:20:36 -07009952 * @dev_id: PCI device id struct
9953 *
9954 * Return value:
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009955 * ptr to chip information on success / NULL on failure
Linus Torvalds1da177e2005-04-16 15:20:36 -07009956 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -08009957static const struct ipr_chip_t *
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009958ipr_get_chip_info(const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07009959{
9960 int i;
9961
Linus Torvalds1da177e2005-04-16 15:20:36 -07009962 for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
9963 if (ipr_chip[i].vendor == dev_id->vendor &&
9964 ipr_chip[i].device == dev_id->device)
Wayne Boyer1be7bd82009-06-17 09:55:35 -07009965 return &ipr_chip[i];
Linus Torvalds1da177e2005-04-16 15:20:36 -07009966 return NULL;
9967}
9968
Brian King6270e592014-01-21 12:16:41 -06009969/**
9970 * ipr_wait_for_pci_err_recovery - Wait for any PCI error recovery to complete
9971 * during probe time
9972 * @ioa_cfg: ioa config struct
9973 *
9974 * Return value:
9975 * None
9976 **/
9977static void ipr_wait_for_pci_err_recovery(struct ipr_ioa_cfg *ioa_cfg)
9978{
9979 struct pci_dev *pdev = ioa_cfg->pdev;
9980
9981 if (pci_channel_offline(pdev)) {
9982 wait_event_timeout(ioa_cfg->eeh_wait_q,
9983 !pci_channel_offline(pdev),
9984 IPR_PCI_ERROR_RECOVERY_TIMEOUT);
9985 pci_restore_state(pdev);
9986 }
9987}
9988
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -06009989static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
9990{
9991 int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
9992
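	/* Build a "host<no>-<vector>" name for each vector; these strings
	 * are later passed to request_irq(). */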
9993 for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
9994 snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
9995 "host%d-%d", ioa_cfg->host->host_no, vec_idx);
9996 ioa_cfg->vectors_info[vec_idx].
9997 desc[strlen(ioa_cfg->vectors_info[vec_idx].desc)] = 0;
9998 }
9999}
10000
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010001static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
10002 struct pci_dev *pdev)
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010003{
10004 int i, rc;
10005
10006 for (i = 1; i < ioa_cfg->nvectors; i++) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010007 rc = request_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010008 ipr_isr_mhrrq,
10009 0,
10010 ioa_cfg->vectors_info[i].desc,
10011 &ioa_cfg->hrrq[i]);
10012 if (rc) {
10013 while (--i >= 0)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010014 free_irq(pci_irq_vector(pdev, i),
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010015 &ioa_cfg->hrrq[i]);
10016 return rc;
10017 }
10018 }
10019 return 0;
10020}
10021
Linus Torvalds1da177e2005-04-16 15:20:36 -070010022/**
Wayne Boyer95fecd92009-06-16 15:13:28 -070010023 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
10024 * @irq: interrupt number
 * @devp: pointer to the ioa config struct
10025 *
10026 * Description: Simply sets the msi_received flag to 1, indicating that
10027 * Message Signaled Interrupts are supported.
10028 *
10029 * Return value:
10030 * IRQ_HANDLED
10031 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010032static irqreturn_t ipr_test_intr(int irq, void *devp)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010033{
10034 struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
10035 unsigned long lock_flags = 0;
10036 irqreturn_t rc = IRQ_HANDLED;
10037
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010038 dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010039 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10040
10041 ioa_cfg->msi_received = 1;
10042 wake_up(&ioa_cfg->msi_wait_q);
10043
10044 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10045 return rc;
10046}
10047
10048/**
10049 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
10050 * @ioa_cfg: ioa config struct
 * @pdev: PCI device struct
10051 *
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010052 * Description: This routine sets up and initiates a test interrupt to determine
Wayne Boyer95fecd92009-06-16 15:13:28 -070010053 * if the interrupt is received via the ipr_test_intr() service routine.
10054 * If the test fails, the driver will fall back to LSI.
10055 *
10056 * Return value:
10057 * 0 on success / non-zero on failure
10058 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010059static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
Wayne Boyer95fecd92009-06-16 15:13:28 -070010060{
10061 int rc;
10062 volatile u32 int_reg;
10063 unsigned long lock_flags = 0;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010064 int irq = pci_irq_vector(pdev, 0);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010065
10066 ENTER;
10067
10068 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10069 init_waitqueue_head(&ioa_cfg->msi_wait_q);
10070 ioa_cfg->msi_received = 0;
10071 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
Wayne Boyer214777b2010-02-19 13:24:26 -080010072 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010073 int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
10074 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10075
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010076 rc = request_irq(irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010077 if (rc) {
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010078 dev_err(&pdev->dev, "Can not assign irq %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010079 return rc;
10080 } else if (ipr_debug)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010081 dev_info(&pdev->dev, "IRQ assigned: %d\n", irq);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010082
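	/* Generate the test interrupt; ipr_test_intr() sets msi_received
	 * and wakes this thread if the interrupt is delivered. */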
Wayne Boyer214777b2010-02-19 13:24:26 -080010083 writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010084 int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
10085 wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010086 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010087 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
10088
Wayne Boyer95fecd92009-06-16 15:13:28 -070010089 if (!ioa_cfg->msi_received) {
10090 /* MSI test failed */
10091 dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
10092 rc = -EOPNOTSUPP;
10093 } else if (ipr_debug)
10094 dev_info(&pdev->dev, "MSI test succeeded.\n");
10095
10096 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10097
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010098 free_irq(irq, ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010099
10100 LEAVE;
10101
10102 return rc;
10103}
10104
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010105/**
 * ipr_probe_ioa - Allocates memory and does first stage of initialization
Linus Torvalds1da177e2005-04-16 15:20:36 -070010106 * @pdev: PCI device struct
10107 * @dev_id: PCI device id struct
10108 *
10109 * Return value:
10110 * 0 on success / non-zero on failure
10111 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010112static int ipr_probe_ioa(struct pci_dev *pdev,
10113 const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010114{
10115 struct ipr_ioa_cfg *ioa_cfg;
10116 struct Scsi_Host *host;
10117 unsigned long ipr_regs_pci;
10118 void __iomem *ipr_regs;
Eric Sesterhenna2a65a32006-09-25 16:59:07 -070010119 int rc = PCIBIOS_SUCCESSFUL;
Brian King473b1e82007-05-02 10:44:11 -050010120 volatile u32 mask, uproc, interrupts;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010121 unsigned long lock_flags, driver_lock_flags;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010122 unsigned int irq_flag;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010123
10124 ENTER;
10125
Linus Torvalds1da177e2005-04-16 15:20:36 -070010126 dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010127 host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
10128
10129 if (!host) {
10130 dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
10131 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010132 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010133 }
10134
10135 ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
10136 memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
Dan Williams8d8e7d12012-07-09 21:06:08 -070010137 ata_host_init(&ioa_cfg->ata_host, &pdev->dev, &ipr_sata_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010138
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010139 ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010140
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010141 if (!ioa_cfg->ipr_chip) {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010142 dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
10143 dev_id->vendor, dev_id->device);
10144 goto out_scsi_host_put;
10145 }
10146
Wayne Boyera32c0552010-02-19 13:23:36 -080010147 /* set SIS 32 or SIS 64 */
10148 ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010149 ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
Brian King7dd21302012-03-14 21:20:08 -050010150 ioa_cfg->clear_isr = ioa_cfg->chip_cfg->clear_isr;
Brian King89aad422012-03-14 21:20:10 -050010151 ioa_cfg->max_cmds = ioa_cfg->chip_cfg->max_cmds;
Wayne Boyer1be7bd82009-06-17 09:55:35 -070010152
Brian King5469cb52007-03-29 12:42:40 -050010153 if (ipr_transop_timeout)
10154 ioa_cfg->transop_timeout = ipr_transop_timeout;
10155 else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
10156 ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
10157 else
10158 ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
10159
Auke Kok44c10132007-06-08 15:46:36 -070010160 ioa_cfg->revid = pdev->revision;
Brian King463fc692007-05-07 17:09:05 -050010161
Brian King6270e592014-01-21 12:16:41 -060010162 ipr_init_ioa_cfg(ioa_cfg, host, pdev);
10163
Linus Torvalds1da177e2005-04-16 15:20:36 -070010164 ipr_regs_pci = pci_resource_start(pdev, 0);
10165
10166 rc = pci_request_regions(pdev, IPR_NAME);
10167 if (rc < 0) {
10168 dev_err(&pdev->dev,
10169 "Couldn't register memory range of registers\n");
10170 goto out_scsi_host_put;
10171 }
10172
Brian King6270e592014-01-21 12:16:41 -060010173 rc = pci_enable_device(pdev);
10174
10175 if (rc || pci_channel_offline(pdev)) {
10176 if (pci_channel_offline(pdev)) {
10177 ipr_wait_for_pci_err_recovery(ioa_cfg);
10178 rc = pci_enable_device(pdev);
10179 }
10180
10181 if (rc) {
10182 dev_err(&pdev->dev, "Cannot enable adapter\n");
10183 ipr_wait_for_pci_err_recovery(ioa_cfg);
10184 goto out_release_regions;
10185 }
10186 }
10187
Arjan van de Ven25729a72008-09-28 16:18:02 -070010188 ipr_regs = pci_ioremap_bar(pdev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010189
10190 if (!ipr_regs) {
10191 dev_err(&pdev->dev,
10192 "Couldn't map memory range of registers\n");
10193 rc = -ENOMEM;
Brian King6270e592014-01-21 12:16:41 -060010194 goto out_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010195 }
10196
10197 ioa_cfg->hdw_dma_regs = ipr_regs;
10198 ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
10199 ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
10200
Brian King6270e592014-01-21 12:16:41 -060010201 ipr_init_regs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010202
Wayne Boyera32c0552010-02-19 13:23:36 -080010203 if (ioa_cfg->sis64) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010204 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
Wayne Boyera32c0552010-02-19 13:23:36 -080010205 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010206 dev_dbg(&pdev->dev, "Failed to set 64 bit DMA mask\n");
10207 rc = dma_set_mask_and_coherent(&pdev->dev,
10208 DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010209 }
Wayne Boyera32c0552010-02-19 13:23:36 -080010210 } else
Anton Blanchard869404c2014-10-30 17:27:09 -050010211 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Wayne Boyera32c0552010-02-19 13:23:36 -080010212
Linus Torvalds1da177e2005-04-16 15:20:36 -070010213 if (rc < 0) {
Anton Blanchard869404c2014-10-30 17:27:09 -050010214 dev_err(&pdev->dev, "Failed to set DMA mask\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -070010215 goto cleanup_nomem;
10216 }
10217
10218 rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
10219 ioa_cfg->chip_cfg->cache_line_size);
10220
10221 if (rc != PCIBIOS_SUCCESSFUL) {
10222 dev_err(&pdev->dev, "Write of cache line size failed\n");
Brian King6270e592014-01-21 12:16:41 -060010223 ipr_wait_for_pci_err_recovery(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010224 rc = -EIO;
10225 goto cleanup_nomem;
10226 }
10227
Brian King6270e592014-01-21 12:16:41 -060010228 /* Issue MMIO read to ensure card is not in EEH */
10229 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
10230 ipr_wait_for_pci_err_recovery(ioa_cfg);
10231
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010232 if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
10233 dev_err(&pdev->dev, "The max number of MSIX is %d\n",
10234 IPR_MAX_MSIX_VECTORS);
10235 ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
10236 }
10237
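	/* Request MSI-X or MSI vectors when the chip supports them, always
	 * allowing a fall back to a single legacy INTx vector. */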
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010238 irq_flag = PCI_IRQ_LEGACY;
10239 if (ioa_cfg->ipr_chip->has_msi)
10240 irq_flag |= PCI_IRQ_MSI | PCI_IRQ_MSIX;
10241 rc = pci_alloc_irq_vectors(pdev, 1, ipr_number_of_msix, irq_flag);
10242 if (rc < 0) {
10243 ipr_wait_for_pci_err_recovery(ioa_cfg);
10244 goto cleanup_nomem;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010245 }
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010246 ioa_cfg->nvectors = rc;
10247
10248 if (!pdev->msi_enabled && !pdev->msix_enabled)
10249 ioa_cfg->clear_isr = 1;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010250
Brian King6270e592014-01-21 12:16:41 -060010251 pci_set_master(pdev);
10252
10253 if (pci_channel_offline(pdev)) {
10254 ipr_wait_for_pci_err_recovery(ioa_cfg);
10255 pci_set_master(pdev);
10256 if (pci_channel_offline(pdev)) {
10257 rc = -EIO;
10258 goto out_msi_disable;
10259 }
10260 }
10261
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010262 if (pdev->msi_enabled || pdev->msix_enabled) {
Wayne Boyer95fecd92009-06-16 15:13:28 -070010263 rc = ipr_test_msi(ioa_cfg, pdev);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010264 switch (rc) {
10265 case 0:
10266 dev_info(&pdev->dev,
10267 "Request for %d MSI%ss succeeded.", ioa_cfg->nvectors,
10268 pdev->msix_enabled ? "-X" : "");
10269 break;
10270 case -EOPNOTSUPP:
Brian King6270e592014-01-21 12:16:41 -060010271 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010272 pci_free_irq_vectors(pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010273
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010274 ioa_cfg->nvectors = 1;
Benjamin Herrenschmidt9dadfb92016-11-30 15:28:55 -060010275 ioa_cfg->clear_isr = 1;
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010276 break;
10277 default:
Wayne Boyer95fecd92009-06-16 15:13:28 -070010278 goto out_msi_disable;
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010279 }
10280 }
10281
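	/* Use one HRRQ per allocated interrupt vector, capped by the
	 * number of online CPUs and the adapter's HRRQ limit. */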
10282 ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
10283 (unsigned int)num_online_cpus(),
10284 (unsigned int)IPR_MAX_HRRQ_NUM);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010285
Linus Torvalds1da177e2005-04-16 15:20:36 -070010286 if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010287 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010288
10289 if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
Julia Lawallf170c682011-07-11 14:08:25 -070010290 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010291
10292 rc = ipr_alloc_mem(ioa_cfg);
10293 if (rc < 0) {
10294 dev_err(&pdev->dev,
10295 "Couldn't allocate enough memory for device driver!\n");
Julia Lawallf170c682011-07-11 14:08:25 -070010296 goto out_msi_disable;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010297 }
10298
Brian King6270e592014-01-21 12:16:41 -060010299 /* Save away PCI config space for use following IOA reset */
10300 rc = pci_save_state(pdev);
10301
10302 if (rc != PCIBIOS_SUCCESSFUL) {
10303 dev_err(&pdev->dev, "Failed to save PCI config space\n");
10304 rc = -EIO;
10305 goto cleanup_nolog;
10306 }
10307
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010308 /*
10309 * If HRRQ updated interrupt is not masked, or reset alert is set,
10310 * the card is in an unknown state and needs a hard reset
10311 */
Wayne Boyer214777b2010-02-19 13:24:26 -080010312 mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
10313 interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
10314 uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010315 if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
10316 ioa_cfg->needs_hard_reset = 1;
Anton Blanchard5d7c20b2011-08-01 19:43:45 +100010317 if ((interrupts & IPR_PCII_ERROR_INTERRUPTS) || reset_devices)
Brian King473b1e82007-05-02 10:44:11 -050010318 ioa_cfg->needs_hard_reset = 1;
10319 if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
10320 ioa_cfg->ioa_unit_checked = 1;
brking@us.ibm.comce155cc2005-11-17 09:35:12 -060010321
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010322 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010323 ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
wenxiong@linux.vnet.ibm.com56d6aa32013-01-11 17:43:51 -060010324 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010325
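	/* Register ipr_isr on vector 0 and ipr_isr_mhrrq on any additional
	 * vectors; legacy interrupts use a single shared handler. */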
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010326 if (pdev->msi_enabled || pdev->msix_enabled) {
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010327 name_msi_vectors(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010328 rc = request_irq(pci_irq_vector(pdev, 0), ipr_isr, 0,
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010329 ioa_cfg->vectors_info[0].desc,
10330 &ioa_cfg->hrrq[0]);
10331 if (!rc)
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010332 rc = ipr_request_other_msi_irqs(ioa_cfg, pdev);
wenxiong@linux.vnet.ibm.com05a65382013-01-11 17:43:50 -060010333 } else {
10334 rc = request_irq(pdev->irq, ipr_isr,
10335 IRQF_SHARED,
10336 IPR_NAME, &ioa_cfg->hrrq[0]);
10337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010338 if (rc) {
10339 dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
10340 pdev->irq, rc);
10341 goto cleanup_nolog;
10342 }
10343
Brian King463fc692007-05-07 17:09:05 -050010344 if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
10345 (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
10346 ioa_cfg->needs_warm_reset = 1;
10347 ioa_cfg->reset = ipr_reset_slot_reset;
Brian King2796ca52015-03-26 11:23:52 -050010348
10349 ioa_cfg->reset_work_q = alloc_ordered_workqueue("ipr_reset_%d",
10350 WQ_MEM_RECLAIM, host->host_no);
10351
10352 if (!ioa_cfg->reset_work_q) {
10353 dev_err(&pdev->dev, "Couldn't register reset workqueue\n");
Wei Yongjunc8e18ac2016-07-29 16:00:45 +000010354 rc = -ENOMEM;
Brian King2796ca52015-03-26 11:23:52 -050010355 goto out_free_irq;
10356 }
Brian King463fc692007-05-07 17:09:05 -050010357 } else
10358 ioa_cfg->reset = ipr_reset_start_bist;
10359
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010360 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010361 list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010362 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010363
10364 LEAVE;
10365out:
10366 return rc;
10367
Brian King2796ca52015-03-26 11:23:52 -050010368out_free_irq:
10369 ipr_free_irqs(ioa_cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010370cleanup_nolog:
10371 ipr_free_mem(ioa_cfg);
Wayne Boyer95fecd92009-06-16 15:13:28 -070010372out_msi_disable:
Brian King6270e592014-01-21 12:16:41 -060010373 ipr_wait_for_pci_err_recovery(ioa_cfg);
Christoph Hellwiga299ee62016-09-11 15:31:24 +020010374 pci_free_irq_vectors(pdev);
Julia Lawallf170c682011-07-11 14:08:25 -070010375cleanup_nomem:
10376 iounmap(ipr_regs);
Brian King6270e592014-01-21 12:16:41 -060010377out_disable:
10378 pci_disable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010379out_release_regions:
10380 pci_release_regions(pdev);
10381out_scsi_host_put:
10382 scsi_host_put(host);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010383 goto out;
10384}
10385
10386/**
Linus Torvalds1da177e2005-04-16 15:20:36 -070010387 * ipr_initiate_ioa_bringdown - Bring down an adapter
10388 * @ioa_cfg: ioa config struct
10389 * @shutdown_type: shutdown type
10390 *
10391 * Description: This function will initiate bringing down the adapter.
10392 * This consists of issuing an IOA shutdown to the adapter
10393 * to flush the cache, and running BIST.
10394 * If the caller needs to wait on the completion of the reset,
10395 * the caller must sleep on the reset_wait_q.
10396 *
10397 * Return value:
10398 * none
10399 **/
10400static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
10401 enum ipr_shutdown_type shutdown_type)
10402{
10403 ENTER;
10404 if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
10405 ioa_cfg->sdt_state = ABORT_DUMP;
10406 ioa_cfg->reset_retries = 0;
10407 ioa_cfg->in_ioa_bringdown = 1;
10408 ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
10409 LEAVE;
10410}
10411
10412/**
10413 * __ipr_remove - Remove a single adapter
10414 * @pdev: pci device struct
10415 *
10416 * Adapter hot plug remove entry point.
10417 *
10418 * Return value:
10419 * none
10420 **/
10421static void __ipr_remove(struct pci_dev *pdev)
10422{
10423 unsigned long host_lock_flags = 0;
10424 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Brian Kingbfae7822013-01-30 23:45:08 -060010425 int i;
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010426 unsigned long driver_lock_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010427 ENTER;
10428
10429 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010430 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010431 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10432 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10433 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10434 }
10435
Brian Kingbfae7822013-01-30 23:45:08 -060010436 for (i = 0; i < ioa_cfg->hrrq_num; i++) {
10437 spin_lock(&ioa_cfg->hrrq[i]._lock);
10438 ioa_cfg->hrrq[i].removing_ioa = 1;
10439 spin_unlock(&ioa_cfg->hrrq[i]._lock);
10440 }
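	/* Make the removing_ioa flags visible to the interrupt and reset
	 * paths before the bringdown is initiated. */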
10441 wmb();
Linus Torvalds1da177e2005-04-16 15:20:36 -070010442 ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
10443
10444 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10445 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Tejun Heo43829732012-08-20 14:51:24 -070010446 flush_work(&ioa_cfg->work_q);
Brian King2796ca52015-03-26 11:23:52 -050010447 if (ioa_cfg->reset_work_q)
10448 flush_workqueue(ioa_cfg->reset_work_q);
wenxiong@linux.vnet.ibm.com9077a942013-03-14 13:52:24 -050010449 INIT_LIST_HEAD(&ioa_cfg->used_res_q);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010450 spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
10451
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010452 spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010453 list_del(&ioa_cfg->queue);
wenxiong@linux.vnet.ibm.comfeccada2013-05-24 09:59:13 -050010454 spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010455
10456 if (ioa_cfg->sdt_state == ABORT_DUMP)
10457 ioa_cfg->sdt_state = WAIT_FOR_DUMP;
10458 spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
10459
10460 ipr_free_all_resources(ioa_cfg);
10461
10462 LEAVE;
10463}
10464
10465/**
10466 * ipr_remove - IOA hot plug remove entry point
10467 * @pdev: pci device struct
10468 *
10469 * Adapter hot plug remove entry point.
10470 *
10471 * Return value:
10472 * none
10473 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010474static void ipr_remove(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010475{
10476 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
10477
10478 ENTER;
10479
Tony Jonesee959b02008-02-22 00:13:36 +010010480 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010481 &ipr_trace_attr);
Tony Jonesee959b02008-02-22 00:13:36 +010010482 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010483 &ipr_dump_attr);
Brian Kingafc3f832016-08-24 12:56:51 -050010484 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10485 &ipr_ioa_async_err_log);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010486 scsi_remove_host(ioa_cfg->host);
10487
10488 __ipr_remove(pdev);
10489
10490 LEAVE;
10491}
10492
10493/**
10494 * ipr_probe - Adapter hot plug add entry point
 * @pdev: PCI device struct
 * @dev_id: PCI device id struct
10495 *
10496 * Return value:
10497 * 0 on success / non-zero on failure
10498 **/
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010499static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010500{
10501 struct ipr_ioa_cfg *ioa_cfg;
Brian Kingb195d5e2016-07-15 14:48:03 -050010502 unsigned long flags;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010503 int rc, i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010504
10505 rc = ipr_probe_ioa(pdev, dev_id);
10506
10507 if (rc)
10508 return rc;
10509
10510 ioa_cfg = pci_get_drvdata(pdev);
10511 rc = ipr_probe_ioa_part2(ioa_cfg);
10512
10513 if (rc) {
10514 __ipr_remove(pdev);
10515 return rc;
10516 }
10517
10518 rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
10519
10520 if (rc) {
10521 __ipr_remove(pdev);
10522 return rc;
10523 }
10524
Tony Jonesee959b02008-02-22 00:13:36 +010010525 rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010526 &ipr_trace_attr);
10527
10528 if (rc) {
10529 scsi_remove_host(ioa_cfg->host);
10530 __ipr_remove(pdev);
10531 return rc;
10532 }
10533
Brian Kingafc3f832016-08-24 12:56:51 -050010534 rc = sysfs_create_bin_file(&ioa_cfg->host->shost_dev.kobj,
10535 &ipr_ioa_async_err_log);
10536
10537 if (rc) {
10538 ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
10539 &ipr_dump_attr);
10540 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
10541 &ipr_trace_attr);
10542 scsi_remove_host(ioa_cfg->host);
10543 __ipr_remove(pdev);
10544 return rc;
10545 }
10546
Tony Jonesee959b02008-02-22 00:13:36 +010010547 rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010548 &ipr_dump_attr);
10549
10550 if (rc) {
Brian Kingafc3f832016-08-24 12:56:51 -050010551 sysfs_remove_bin_file(&ioa_cfg->host->shost_dev.kobj,
10552 &ipr_ioa_async_err_log);
Tony Jonesee959b02008-02-22 00:13:36 +010010553 ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
Linus Torvalds1da177e2005-04-16 15:20:36 -070010554 &ipr_trace_attr);
10555 scsi_remove_host(ioa_cfg->host);
10556 __ipr_remove(pdev);
10557 return rc;
10558 }
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010559 spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
10560 ioa_cfg->scan_enabled = 1;
10561 schedule_work(&ioa_cfg->work_q);
10562 spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010563
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010564 ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
10565
Jens Axboe89f8b332014-03-13 09:38:42 -060010566 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010567 for (i = 1; i < ioa_cfg->hrrq_num; i++) {
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010568 irq_poll_init(&ioa_cfg->hrrq[i].iopoll,
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010569 ioa_cfg->iopoll_weight, ipr_iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010570 }
10571 }
10572
Brian Kinga3d1ddd2016-08-08 17:53:12 -050010573 scsi_scan_host(ioa_cfg->host);
10574
Linus Torvalds1da177e2005-04-16 15:20:36 -070010575 return 0;
10576}
10577
10578/**
10579 * ipr_shutdown - Shutdown handler.
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010580 * @pdev: pci device struct
Linus Torvalds1da177e2005-04-16 15:20:36 -070010581 *
10582 * This function is invoked upon system shutdown/reboot. It will issue
10583 * a shutdown to the adapter to flush the write cache.
10584 *
10585 * Return value:
10586 * none
10587 **/
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010588static void ipr_shutdown(struct pci_dev *pdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -070010589{
Greg Kroah-Hartmand18c3db2005-06-23 17:35:56 -070010590 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010591 unsigned long lock_flags = 0;
Brian King4fdd7c72015-03-26 11:23:50 -050010592 enum ipr_shutdown_type shutdown_type = IPR_SHUTDOWN_NORMAL;
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010593 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -070010594
10595 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
Jens Axboe89f8b332014-03-13 09:38:42 -060010596 if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010597 ioa_cfg->iopoll_weight = 0;
10598 for (i = 1; i < ioa_cfg->hrrq_num; i++)
Christoph Hellwig511cbce2015-11-10 14:56:14 +010010599 irq_poll_disable(&ioa_cfg->hrrq[i].iopoll);
wenxiong@linux.vnet.ibm.comb53d1242013-01-11 17:43:52 -060010600 }
10601
Kleber Sacilotto de Souza203fa3f2012-08-22 18:14:14 -030010602 while (ioa_cfg->in_reset_reload) {
Brian King970ea292007-04-26 16:00:06 -050010603 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10604 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
10605 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
10606 }
10607
Brian King4fdd7c72015-03-26 11:23:50 -050010608 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64)
10609 shutdown_type = IPR_SHUTDOWN_QUIESCE;
10610
10611 ipr_initiate_ioa_bringdown(ioa_cfg, shutdown_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -070010612 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
10613 wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
Brian King4fdd7c72015-03-26 11:23:50 -050010614 if (ipr_fast_reboot && system_state == SYSTEM_RESTART && ioa_cfg->sis64) {
Brian King2796ca52015-03-26 11:23:52 -050010615 ipr_free_irqs(ioa_cfg);
Brian King4fdd7c72015-03-26 11:23:50 -050010616 pci_disable_device(ioa_cfg->pdev);
10617 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070010618}
10619
Greg Kroah-Hartman6f039792012-12-21 13:08:55 -080010620static struct pci_device_id ipr_pci_table[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -070010621 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010622 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010623 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010624 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010625 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010626 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010627 { PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
Brian King6d84c942007-01-23 11:25:23 -060010628 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010629 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010630 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010631 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010632 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010633 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King6d84c942007-01-23 11:25:23 -060010634 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010635 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
Brian King5469cb52007-03-29 12:42:40 -050010636 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
10637 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010638 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010639 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010640 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010641 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10642 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010643 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010644 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10645 IPR_USE_LONG_TRANSOP_TIMEOUT },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010646 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King6d84c942007-01-23 11:25:23 -060010647 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010648 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King22d2e402007-04-26 16:00:13 -050010649 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
10650 IPR_USE_LONG_TRANSOP_TIMEOUT},
Brian King60e74862006-11-21 10:28:10 -060010651 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
Brian King5469cb52007-03-29 12:42:40 -050010652 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
10653 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010654 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King22d2e402007-04-26 16:00:13 -050010655 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
10656 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King185eb312007-03-29 12:42:53 -050010657 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King185eb312007-03-29 12:42:53 -050010658 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
10659 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Wayne Boyerb0f56d32010-06-24 13:34:14 -070010660 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
10661 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
Brian King5469cb52007-03-29 12:42:40 -050010662 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
Brian King463fc692007-05-07 17:09:05 -050010663 IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010664 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
Brian King6d84c942007-01-23 11:25:23 -060010665 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
Linus Torvalds1da177e2005-04-16 15:20:36 -070010666 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King6d84c942007-01-23 11:25:23 -060010667 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
brking@us.ibm.com86f51432005-11-01 17:02:42 -060010668 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010669 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
10670 IPR_USE_LONG_TRANSOP_TIMEOUT },
Brian King60e74862006-11-21 10:28:10 -060010671 { PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
Brian King5469cb52007-03-29 12:42:40 -050010672 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
10673 IPR_USE_LONG_TRANSOP_TIMEOUT },
Wayne Boyerd7b46272010-02-19 13:24:38 -080010674 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10675 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
10676 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10677 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
10678 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
10679 PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
Wayne Boyer32622bd2010-10-18 20:24:34 -070010680 { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D9, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57DA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EB, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EC, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57ED, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EE, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57EF, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57F0, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCA, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CD2, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2CCD, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580A, 0, 0, 0 },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_RATTLESNAKE,
		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_580B, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
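
/*
 * PCI error recovery callbacks: error_detected is called when the PCI core
 * reports an error on the adapter's slot, mmio_enabled once register access
 * has been restored, and slot_reset after the slot has been reset so the
 * adapter can be brought back up.
 */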
static const struct pci_error_handlers ipr_err_handler = {
	.error_detected = ipr_pci_error_detected,
	.mmio_enabled = ipr_pci_mmio_enabled,
	.slot_reset = ipr_pci_slot_reset,
};
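
/*
 * PCI driver registration: probe/remove bind and unbind adapters (including
 * PCI hot plug), shutdown quiesces the adapters at system shutdown, and
 * err_handler points at the recovery callbacks above.
 */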
static struct pci_driver ipr_driver = {
	.name = IPR_NAME,
	.id_table = ipr_pci_table,
	.probe = ipr_probe,
	.remove = ipr_remove,
	.shutdown = ipr_shutdown,
	.err_handler = &ipr_err_handler,
};

/**
 * ipr_halt_done - Shutdown prepare completion
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
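	/*
	 * Nothing to check on completion of shutdown prepare; just return
	 * the command block to this HRRQ's free queue.
	 */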
	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}

/**
 * ipr_halt - Issue shutdown prepare to all adapters
 * @nb:		notifier block
 * @event:	notifier event
 * @buf:	notifier data (unused)
 *
 * Return value:
 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
 **/
static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioa_cfg *ioa_cfg;
	unsigned long flags = 0, driver_lock_flags;

	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;

	spin_lock_irqsave(&ipr_driver_lock, driver_lock_flags);

	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
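		/*
		 * Skip adapters that are not currently accepting commands, and
		 * skip shutdown prepare on sis64 adapters when ipr_fast_reboot
		 * is set and this is a restart.
		 */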
		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
		    (ipr_fast_reboot && event == SYS_RESTART && ioa_cfg->sis64)) {
			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
			continue;
		}

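		/*
		 * Issue an IOA shutdown prepare command to the adapter;
		 * ipr_halt_done simply frees the command block on completion.
		 */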
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;

		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
	}
	spin_unlock_irqrestore(&ipr_driver_lock, driver_lock_flags);

	return NOTIFY_OK;
}

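/* Reboot notifier; the fields are notifier_call, next, and priority. */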
static struct notifier_block ipr_notifier = {
	ipr_halt, NULL, 0
};

/**
 * ipr_init - Module entry point
 *
 * Return value:
 * 	0 on success / negative value on failure
 **/
static int __init ipr_init(void)
{
	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);

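	/*
	 * Register for reboot notifications first so ipr_halt can send
	 * shutdown prepare to each adapter, then register the PCI driver,
	 * which probes all matching adapters.
	 */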
	register_reboot_notifier(&ipr_notifier);
	return pci_register_driver(&ipr_driver);
}

/**
 * ipr_exit - Module unload
 *
 * Module unload entry point.
 *
 * Return value:
 * 	none
 **/
static void __exit ipr_exit(void)
{
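	/* Stop receiving reboot notifications, then detach from all adapters. */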
	unregister_reboot_notifier(&ipr_notifier);
	pci_unregister_driver(&ipr_driver);
}

module_init(ipr_init);
module_exit(ipr_exit);