blob: 38d4d08283e2ead00b4a8e37258d66dcb05a25b8 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
23
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple of workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Add port multiplier support (intermediate)

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is quite often not
  worth the latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.

*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Brett Russ20f733e2005-09-01 18:26:17 -040072#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050073#include <linux/device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040074#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050075#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040076#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040078
79#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050080#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040081
82enum {
83 /* BAR's are enumerated in terms of pci_resource_start() terms */
84 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
85 MV_IO_BAR = 2, /* offset 0x18: IO space */
86 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
87
88 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
89 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
90
91 MV_PCI_REG_BASE = 0,
92 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040093 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
94 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
95 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
96 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
97 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
98
Brett Russ20f733e2005-09-01 18:26:17 -040099 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500100 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500101 MV_GPIO_PORT_CTL = 0x104f0,
102 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400103
104 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
105 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
106 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
107 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
108
Brett Russ31961942005-09-30 01:36:00 -0400109 MV_MAX_Q_DEPTH = 32,
110 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
111
112 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
113 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400114 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
115 */
116 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
117 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500118 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400119 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400120
Brett Russ20f733e2005-09-01 18:26:17 -0400121 MV_PORTS_PER_HC = 4,
122 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
123 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400124 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400125 MV_PORT_MASK = 3,
126
127 /* Host Flags */
128 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
129 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400130 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400131 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
132 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500133 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400134
Brett Russ31961942005-09-30 01:36:00 -0400135 CRQB_FLAG_READ = (1 << 0),
136 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400137 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
138 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400139 CRQB_CMD_ADDR_SHIFT = 8,
140 CRQB_CMD_CS = (0x2 << 11),
141 CRQB_CMD_LAST = (1 << 15),
142
143 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400144 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
145 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400146
147 EPRD_FLAG_END_OF_TBL = (1 << 31),
148
Brett Russ20f733e2005-09-01 18:26:17 -0400149 /* PCI interface registers */
150
Brett Russ31961942005-09-30 01:36:00 -0400151 PCI_COMMAND_OFS = 0xc00,
152
Brett Russ20f733e2005-09-01 18:26:17 -0400153 PCI_MAIN_CMD_STS_OFS = 0xd30,
154 STOP_PCI_MASTER = (1 << 2),
155 PCI_MASTER_EMPTY = (1 << 3),
156 GLOB_SFT_RST = (1 << 4),
157
Jeff Garzik522479f2005-11-12 22:14:02 -0500158 MV_PCI_MODE = 0xd00,
159 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
160 MV_PCI_DISC_TIMER = 0xd04,
161 MV_PCI_MSI_TRIGGER = 0xc38,
162 MV_PCI_SERR_MASK = 0xc28,
163 MV_PCI_XBAR_TMOUT = 0x1d04,
164 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
165 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
166 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
167 MV_PCI_ERR_COMMAND = 0x1d50,
168
Mark Lord02a121d2007-12-01 13:07:22 -0500169 PCI_IRQ_CAUSE_OFS = 0x1d58,
170 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400171 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
172
Mark Lord02a121d2007-12-01 13:07:22 -0500173 PCIE_IRQ_CAUSE_OFS = 0x1900,
174 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500175 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500176
Brett Russ20f733e2005-09-01 18:26:17 -0400177 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
178 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
179 PORT0_ERR = (1 << 0), /* shift by port # */
180 PORT0_DONE = (1 << 1), /* shift by port # */
181 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
182 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
183 PCI_ERR = (1 << 18),
184 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
185 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500186 PORTS_0_3_COAL_DONE = (1 << 8),
187 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400188 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
189 GPIO_INT = (1 << 22),
190 SELF_INT = (1 << 23),
191 TWSI_INT = (1 << 24),
192 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500193 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500194 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400195 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
196 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500197 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
198 HC_MAIN_RSVD_5),
Brett Russ20f733e2005-09-01 18:26:17 -0400199
200 /* SATAHC registers */
201 HC_CFG_OFS = 0,
202
203 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400204 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400205 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
206 DEV_IRQ = (1 << 8), /* shift by port # */
207
208 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400209 SHD_BLK_OFS = 0x100,
210 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400211
212 /* SATA registers */
213 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
214 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500215 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500216 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500217 PHY_MODE4 = 0x314,
218 PHY_MODE2 = 0x330,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500219 MV5_PHY_MODE = 0x74,
220 MV5_LT_MODE = 0x30,
221 MV5_PHY_CTL = 0x0C,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500222 SATA_INTERFACE_CTL = 0x050,
223
224 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400225
226 /* Port registers */
227 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500228 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
229 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
230 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
231 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
232 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Brett Russ20f733e2005-09-01 18:26:17 -0400233
234 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
235 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400236 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
237 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
238 EDMA_ERR_DEV = (1 << 2), /* device error */
239 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
240 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
241 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400242 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
243 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400244 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400245 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400246 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
247 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
248 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
249 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500250
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400251 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500252 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
253 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
254 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
255 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
256
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400257 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500258
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400259 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500260 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
261 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
262 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
263 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
264 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
265
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400266 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500267
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400268 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400269 EDMA_ERR_OVERRUN_5 = (1 << 5),
270 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500271
272 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
273 EDMA_ERR_LNK_CTRL_RX_1 |
274 EDMA_ERR_LNK_CTRL_RX_3 |
275 EDMA_ERR_LNK_CTRL_TX,
276
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400277 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
278 EDMA_ERR_PRD_PAR |
279 EDMA_ERR_DEV_DCON |
280 EDMA_ERR_DEV_CON |
281 EDMA_ERR_SERR |
282 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400283 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400284 EDMA_ERR_CRPB_PAR |
285 EDMA_ERR_INTRL_PAR |
286 EDMA_ERR_IORDY |
287 EDMA_ERR_LNK_CTRL_RX_2 |
288 EDMA_ERR_LNK_DATA_RX |
289 EDMA_ERR_LNK_DATA_TX |
290 EDMA_ERR_TRANS_PROTO,
291 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
292 EDMA_ERR_PRD_PAR |
293 EDMA_ERR_DEV_DCON |
294 EDMA_ERR_DEV_CON |
295 EDMA_ERR_OVERRUN_5 |
296 EDMA_ERR_UNDERRUN_5 |
297 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400298 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400299 EDMA_ERR_CRPB_PAR |
300 EDMA_ERR_INTRL_PAR |
301 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400302
Brett Russ31961942005-09-30 01:36:00 -0400303 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
304 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400305
306 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
307 EDMA_REQ_Q_PTR_SHIFT = 5,
308
309 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
310 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
311 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400312 EDMA_RSP_Q_PTR_SHIFT = 3,
313
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400314 EDMA_CMD_OFS = 0x28, /* EDMA command register */
315 EDMA_EN = (1 << 0), /* enable EDMA */
316 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
317 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400318
Jeff Garzikc9d39132005-11-13 17:47:51 -0500319 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500320 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500321
Brett Russ31961942005-09-30 01:36:00 -0400322 /* Host private flags (hp_flags) */
323 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500324 MV_HP_ERRATA_50XXB0 = (1 << 1),
325 MV_HP_ERRATA_50XXB2 = (1 << 2),
326 MV_HP_ERRATA_60X1B2 = (1 << 3),
327 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500328 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400329 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
330 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
331 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500332 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400333
Brett Russ31961942005-09-30 01:36:00 -0400334 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400335 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500336 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400337 MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */
Brett Russ31961942005-09-30 01:36:00 -0400338};
339
/* Chip-generation tests against hp_flags (MV_HP_GEN_* bits above) */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500343
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
358
/* Board indices into mv_port_info[] / mv_pci_tbl[] driver_data */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
};
368
Brett Russ31961942005-09-30 01:36:00 -0400369/* Command ReQuest Block: 32B */
370struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400371 __le32 sg_addr;
372 __le32 sg_addr_hi;
373 __le16 ctrl_flags;
374 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400375};
376
Jeff Garzike4e7b892006-01-31 12:18:41 -0500377struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400378 __le32 addr;
379 __le32 addr_hi;
380 __le32 flags;
381 __le32 len;
382 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500383};
384
Brett Russ31961942005-09-30 01:36:00 -0400385/* Command ResPonse Block: 8B */
386struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400387 __le16 id;
388 __le16 flags;
389 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400390};
391
392/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
393struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400394 __le32 addr;
395 __le32 flags_size;
396 __le32 addr_hi;
397 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400398};
399
400struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400401 struct mv_crqb *crqb;
402 dma_addr_t crqb_dma;
403 struct mv_crpb *crpb;
404 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500405 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
406 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400407
408 unsigned int req_idx;
409 unsigned int resp_idx;
410
Brett Russ31961942005-09-30 01:36:00 -0400411 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400412};
413
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500414struct mv_port_signal {
415 u32 amps;
416 u32 pre;
417};
418
Mark Lord02a121d2007-12-01 13:07:22 -0500419struct mv_host_priv {
420 u32 hp_flags;
421 struct mv_port_signal signal[8];
422 const struct mv_hw_ops *ops;
423 u32 irq_cause_ofs;
424 u32 irq_mask_ofs;
425 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500426 /*
427 * These consistent DMA memory pools give us guaranteed
428 * alignment for hardware-accessed data structures,
429 * and less memory waste in accomplishing the alignment.
430 */
431 struct dma_pool *crqb_pool;
432 struct dma_pool *crpb_pool;
433 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500434};
435
Jeff Garzik47c2b672005-11-12 21:13:17 -0500436struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500437 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
438 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500439 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
440 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
441 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500442 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
443 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500444 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
445 void (*reset_bus)(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500446};
447
Brett Russ20f733e2005-09-01 18:26:17 -0400448static void mv_irq_clear(struct ata_port *ap);
Tejun Heoda3dbb12007-07-16 14:29:40 +0900449static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
450static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
451static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
452static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400453static int mv_port_start(struct ata_port *ap);
454static void mv_port_stop(struct ata_port *ap);
455static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500456static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900457static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400458static void mv_error_handler(struct ata_port *ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400459static void mv_eh_freeze(struct ata_port *ap);
460static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500461static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400462static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
463
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500464static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
465 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500466static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
467static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
468 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500469static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
470 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500471static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
472static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500474static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
475 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500476static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
477static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
478 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500479static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
480 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500481static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
482static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500483static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int port_no);
Mark Lord72109162008-01-26 18:31:33 -0500485static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
486 void __iomem *port_mmio, int want_ncq);
487static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500488
Mark Lordeb73d552008-01-29 13:24:00 -0500489/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
490 * because we have to allow room for worst case splitting of
491 * PRDs for 64K boundaries in mv_fill_sg().
492 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400493static struct scsi_host_template mv5_sht = {
Brett Russ20f733e2005-09-01 18:26:17 -0400494 .module = THIS_MODULE,
495 .name = DRV_NAME,
496 .ioctl = ata_scsi_ioctl,
497 .queuecommand = ata_scsi_queuecmd,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400498 .can_queue = ATA_DEF_QUEUE,
499 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400500 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400501 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
502 .emulated = ATA_SHT_EMULATED,
503 .use_clustering = 1,
504 .proc_name = DRV_NAME,
505 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400506 .slave_configure = ata_scsi_slave_config,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400507 .slave_destroy = ata_scsi_slave_destroy,
508 .bios_param = ata_std_bios_param,
509};
510
511static struct scsi_host_template mv6_sht = {
512 .module = THIS_MODULE,
513 .name = DRV_NAME,
514 .ioctl = ata_scsi_ioctl,
515 .queuecommand = ata_scsi_queuecmd,
Mark Lord138bfdd2008-01-26 18:33:18 -0500516 .change_queue_depth = ata_scsi_change_queue_depth,
517 .can_queue = MV_MAX_Q_DEPTH - 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400518 .this_id = ATA_SHT_THIS_ID,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400519 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400520 .cmd_per_lun = ATA_SHT_CMD_PER_LUN,
521 .emulated = ATA_SHT_EMULATED,
Jeff Garzikd88184f2007-02-26 01:26:06 -0500522 .use_clustering = 1,
Brett Russ20f733e2005-09-01 18:26:17 -0400523 .proc_name = DRV_NAME,
524 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzik3be6cbd2007-10-18 16:21:18 -0400525 .slave_configure = ata_scsi_slave_config,
Tejun Heoccf68c32006-05-31 18:28:09 +0900526 .slave_destroy = ata_scsi_slave_destroy,
Brett Russ20f733e2005-09-01 18:26:17 -0400527 .bios_param = ata_std_bios_param,
Brett Russ20f733e2005-09-01 18:26:17 -0400528};
529
Jeff Garzikc9d39132005-11-13 17:47:51 -0500530static const struct ata_port_operations mv5_ops = {
Jeff Garzikc9d39132005-11-13 17:47:51 -0500531 .tf_load = ata_tf_load,
532 .tf_read = ata_tf_read,
533 .check_status = ata_check_status,
534 .exec_command = ata_exec_command,
535 .dev_select = ata_std_dev_select,
536
Jeff Garzikcffacd82007-03-09 09:46:47 -0500537 .cable_detect = ata_cable_sata,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500538
539 .qc_prep = mv_qc_prep,
540 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900541 .data_xfer = ata_data_xfer,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500542
Jeff Garzikc9d39132005-11-13 17:47:51 -0500543 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900544 .irq_on = ata_irq_on,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500545
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400546 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400547 .freeze = mv_eh_freeze,
548 .thaw = mv_eh_thaw,
549
Jeff Garzikc9d39132005-11-13 17:47:51 -0500550 .scr_read = mv5_scr_read,
551 .scr_write = mv5_scr_write,
552
553 .port_start = mv_port_start,
554 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500555};
556
557static const struct ata_port_operations mv6_ops = {
Mark Lordf2738272008-01-26 18:32:29 -0500558 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400559 .tf_load = ata_tf_load,
560 .tf_read = ata_tf_read,
561 .check_status = ata_check_status,
562 .exec_command = ata_exec_command,
563 .dev_select = ata_std_dev_select,
564
Jeff Garzikcffacd82007-03-09 09:46:47 -0500565 .cable_detect = ata_cable_sata,
Brett Russ20f733e2005-09-01 18:26:17 -0400566
Brett Russ31961942005-09-30 01:36:00 -0400567 .qc_prep = mv_qc_prep,
568 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900569 .data_xfer = ata_data_xfer,
Brett Russ20f733e2005-09-01 18:26:17 -0400570
Brett Russ20f733e2005-09-01 18:26:17 -0400571 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900572 .irq_on = ata_irq_on,
Brett Russ20f733e2005-09-01 18:26:17 -0400573
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400574 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400575 .freeze = mv_eh_freeze,
576 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500577 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400578
Brett Russ20f733e2005-09-01 18:26:17 -0400579 .scr_read = mv_scr_read,
580 .scr_write = mv_scr_write,
581
Brett Russ31961942005-09-30 01:36:00 -0400582 .port_start = mv_port_start,
583 .port_stop = mv_port_stop,
Brett Russ20f733e2005-09-01 18:26:17 -0400584};
585
Jeff Garzike4e7b892006-01-31 12:18:41 -0500586static const struct ata_port_operations mv_iie_ops = {
Jeff Garzike4e7b892006-01-31 12:18:41 -0500587 .tf_load = ata_tf_load,
588 .tf_read = ata_tf_read,
589 .check_status = ata_check_status,
590 .exec_command = ata_exec_command,
591 .dev_select = ata_std_dev_select,
592
Jeff Garzikcffacd82007-03-09 09:46:47 -0500593 .cable_detect = ata_cable_sata,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500594
595 .qc_prep = mv_qc_prep_iie,
596 .qc_issue = mv_qc_issue,
Tejun Heo0d5ff562007-02-01 15:06:36 +0900597 .data_xfer = ata_data_xfer,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500598
Jeff Garzike4e7b892006-01-31 12:18:41 -0500599 .irq_clear = mv_irq_clear,
Akira Iguchi246ce3b2007-01-26 16:27:58 +0900600 .irq_on = ata_irq_on,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500601
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400602 .error_handler = mv_error_handler,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400603 .freeze = mv_eh_freeze,
604 .thaw = mv_eh_thaw,
Mark Lord138bfdd2008-01-26 18:33:18 -0500605 .qc_defer = ata_std_qc_defer,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400606
Jeff Garzike4e7b892006-01-31 12:18:41 -0500607 .scr_read = mv_scr_read,
608 .scr_write = mv_scr_write,
609
610 .port_start = mv_port_start,
611 .port_stop = mv_port_stop,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500612};
613
/* Per-board capabilities, one entry per chip_* index (selected via the
 * driver_data field of mv_pci_tbl below).  Gen-I (50xx) parts use mv5_ops
 * and have no NCQ; Gen-II (60xx) add NCQ; 6042/7042 (Gen-IIE) use the
 * enhanced-IIE ops.  MV_FLAG_DUAL_HC marks 8-port chips with two host
 * controllers.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
662
/* PCI IDs claimed by this driver; driver_data carries the chip_* index
 * used to look up mv_port_info[] at probe time.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
692
/* PCI driver glue: probe via mv_init_one(); removal uses the generic
 * libata PCI teardown.
 */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_init_one,
	.remove			= ata_pci_remove_one,
};
699
/* Chip-family hardware quirk handlers for Gen-I (50xx) parts. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
708
/* Chip-family hardware quirk handlers for Gen-II/IIE (60xx/7042) parts;
 * note the PCI-bus reset is the generic mv_reset_pci_bus here.
 */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
717
/*
 * module options
 */
/* Use PCI MSI: zero (off, the default) or non-zero to enable. */
static int msi;
722
723
Jeff Garzikd88184f2007-02-26 01:26:06 -0500724/* move to PCI layer or libata core? */
725static int pci_go_64(struct pci_dev *pdev)
726{
727 int rc;
728
729 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
730 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
731 if (rc) {
732 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
733 if (rc) {
734 dev_printk(KERN_ERR, &pdev->dev,
735 "64-bit DMA enable failed\n");
736 return rc;
737 }
738 }
739 } else {
740 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
741 if (rc) {
742 dev_printk(KERN_ERR, &pdev->dev,
743 "32-bit DMA enable failed\n");
744 return rc;
745 }
746 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
747 if (rc) {
748 dev_printk(KERN_ERR, &pdev->dev,
749 "32-bit consistent DMA enable failed\n");
750 return rc;
751 }
752 }
753
754 return rc;
755}
756
Jeff Garzikddef9bb2006-02-02 16:17:06 -0500757/*
Brett Russ20f733e2005-09-01 18:26:17 -0400758 * Functions
759 */
760
/* "write long with flush": write @data, then read the register back so
 * any posted PCI write reaches the device before the caller proceeds.
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
766
Brett Russ20f733e2005-09-01 18:26:17 -0400767static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
768{
769 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
770}
771
/* Index of the host controller that owns global port number @port. */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
776
/* Port index *within* its host controller (low bits of global port no). */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
781
/* MMIO base of the host controller owning global port @port.
 * Note: callers must pass the *global* port number, not a hardport.
 */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
787
Brett Russ20f733e2005-09-01 18:26:17 -0400788static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
789{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500790 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500791 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500792 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400793}
794
/* Per-port register base for @ap, derived from the primary BAR mapping. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}
799
/* Number of host controllers on the chip: 2 for dual-HC boards, else 1. */
static inline int mv_get_hc_count(unsigned long port_flags)
{
	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
}
804
/* libata irq_clear hook: intentionally a no-op.  This driver clears its
 * interrupt-cause registers in its own paths (e.g. mv_start_dma());
 * NOTE(review): presumably there is nothing port-generic to ack here —
 * confirm against the interrupt handler elsewhere in this file.
 */
static void mv_irq_clear(struct ata_port *ap)
{
}
808
/**
 * mv_set_edma_ptrs - program EDMA request/response queue registers
 * @port_mmio: per-port register base
 * @hpriv: host private data (consulted for the XX42A0 errata flag)
 * @pp: port private data (queue DMA addresses and software indices)
 *
 * Loads the hardware's request-queue base and in/out pointers and the
 * response-queue base and in/out pointers from the driver's cached
 * state.  On chips with the XX42A0 errata the "out"/"in" pointer
 * registers must be written with the full queue base ORed in.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* CRQB ring must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* CRPB ring must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
848
Brett Russ05b308e2005-10-05 17:08:53 -0400849/**
850 * mv_start_dma - Enable eDMA engine
851 * @base: port base address
852 * @pp: port private data
853 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900854 * Verify the local cache of the eDMA state is accurate with a
855 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400856 *
857 * LOCKING:
858 * Inherited from caller.
859 */
Mark Lord0c589122008-01-26 18:31:16 -0500860static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500861 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400862{
Mark Lord72109162008-01-26 18:31:33 -0500863 int want_ncq = (protocol == ATA_PROT_NCQ);
864
865 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
866 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
867 if (want_ncq != using_ncq)
868 __mv_stop_dma(ap);
869 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400870 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500871 struct mv_host_priv *hpriv = ap->host->private_data;
872 int hard_port = mv_hardport_from_port(ap->port_no);
873 void __iomem *hc_mmio = mv_hc_base_from_port(
874 ap->host->iomap[MV_PRIMARY_BAR], hard_port);
875 u32 hc_irq_cause, ipending;
876
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400877 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500878 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400879
Mark Lord0c589122008-01-26 18:31:16 -0500880 /* clear EDMA interrupt indicator, if any */
881 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
882 ipending = (DEV_IRQ << hard_port) |
883 (CRPB_DMA_DONE << hard_port);
884 if (hc_irq_cause & ipending) {
885 writelfl(hc_irq_cause & ~ipending,
886 hc_mmio + HC_IRQ_CAUSE_OFS);
887 }
888
Mark Lord72109162008-01-26 18:31:33 -0500889 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500890
891 /* clear FIS IRQ Cause */
892 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
893
Mark Lordf630d562008-01-26 18:31:00 -0500894 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400895
Mark Lordf630d562008-01-26 18:31:00 -0500896 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400897 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
898 }
Mark Lordf630d562008-01-26 18:31:00 -0500899 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
Brett Russ31961942005-09-30 01:36:00 -0400900}
901
/**
 * __mv_stop_dma - Disable eDMA engine
 * @ap: ATA channel to manipulate
 *
 * Verify the local cache of the eDMA state is accurate with a
 * WARN_ON.  Caller must hold the host lock (see mv_stop_dma()).
 *
 * Returns 0 on success, -EIO if the engine refuses to stop.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int __mv_stop_dma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp	= ap->private_data;
	u32 reg;
	int i, err = 0;

	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		/* Disable EDMA if active.  The disable bit auto clears. */
		writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
		pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
	} else {
		/* software thinks it's off: hardware must agree */
		WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
	}

	/* now properly wait for the eDMA to stop: poll EDMA_EN for up to
	 * 1000 * 100us = 100ms
	 */
	for (i = 1000; i > 0; i--) {
		reg = readl(port_mmio + EDMA_CMD_OFS);
		if (!(reg & EDMA_EN))
			break;

		udelay(100);
	}

	if (reg & EDMA_EN) {
		ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
		err = -EIO;
	}

	return err;
}
944
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400945static int mv_stop_dma(struct ata_port *ap)
946{
947 unsigned long flags;
948 int rc;
949
950 spin_lock_irqsave(&ap->host->lock, flags);
951 rc = __mv_stop_dma(ap);
952 spin_unlock_irqrestore(&ap->host->lock, flags);
953
954 return rc;
955}
956
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space at @start, four 32-bit
 * words per output line.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; col < 4 && off < bytes; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
971
/* Debug helper: hex-dump the first @bytes of @pdev's PCI config space,
 * four dwords per line.  Compiles to an empty function without ATA_DEBUG.
 */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%02x: ", off);
		for (col = 0; col < 4 && off < bytes; col++) {
			u32 dw;

			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/* Debug helper: dump PCI config space, global PCI-side registers, and
 * the HC/EDMA/SATA register blocks for one port (@port >= 0) or for
 * every port on the chip (@port < 0).  No-op without ATA_DEBUG.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port: dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1032
Brett Russ20f733e2005-09-01 18:26:17 -04001033static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1034{
1035 unsigned int ofs;
1036
1037 switch (sc_reg_in) {
1038 case SCR_STATUS:
1039 case SCR_CONTROL:
1040 case SCR_ERROR:
1041 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1042 break;
1043 case SCR_ACTIVE:
1044 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1045 break;
1046 default:
1047 ofs = 0xffffffffU;
1048 break;
1049 }
1050 return ofs;
1051}
1052
Tejun Heoda3dbb12007-07-16 14:29:40 +09001053static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001054{
1055 unsigned int ofs = mv_scr_offset(sc_reg_in);
1056
Tejun Heoda3dbb12007-07-16 14:29:40 +09001057 if (ofs != 0xffffffffU) {
1058 *val = readl(mv_ap_base(ap) + ofs);
1059 return 0;
1060 } else
1061 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001062}
1063
Tejun Heoda3dbb12007-07-16 14:29:40 +09001064static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001065{
1066 unsigned int ofs = mv_scr_offset(sc_reg_in);
1067
Tejun Heoda3dbb12007-07-16 14:29:40 +09001068 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001069 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001070 return 0;
1071 } else
1072 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001073}
1074
Mark Lordf2738272008-01-26 18:32:29 -05001075static void mv6_dev_config(struct ata_device *adev)
1076{
1077 /*
1078 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1079 * See mv_qc_prep() for more info.
1080 */
1081 if (adev->flags & ATA_DFLAG_NCQ)
1082 if (adev->max_sectors > ATA_MAX_SECTORS)
1083 adev->max_sectors = ATA_MAX_SECTORS;
1084}
1085
/**
 * mv_edma_cfg - program a port's EDMA configuration register
 * @pp: port private data (NCQ-enabled state is cached here)
 * @hpriv: host private data (chip generation flags)
 * @port_mmio: per-port register base
 * @want_ncq: non-zero to configure the engine for NCQ operation
 *
 * Builds the chip-generation-specific EDMA_CFG word, ORs in the NCQ
 * bit when requested, mirrors the NCQ state in pp->pp_flags, and
 * writes the register with a posted-write flush.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1115
/**
 * mv_port_free_dma_mem - release a port's EDMA descriptor memory
 * @ap: ATA channel whose buffers to free
 *
 * Returns the CRQB/CRPB rings and the per-tag scatter/gather tables
 * to their dma_pools.  Safe on a partially-allocated port: every
 * pointer is checked before freeing and NULLed afterwards.
 */
static void mv_port_free_dma_mem(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	int tag;

	if (pp->crqb) {
		dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
		pp->crqb = NULL;
	}
	if (pp->crpb) {
		dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
		pp->crpb = NULL;
	}
	/*
	 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
	 * For later hardware, we have one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (pp->sg_tbl[tag]) {
			/* On GEN_I, tags > 0 alias sg_tbl[0]: only the
			 * tag-0 entry owns pool memory, so free just once.
			 */
			if (tag == 0 || !IS_GEN_I(hpriv))
				dma_pool_free(hpriv->sg_tbl_pool,
					      pp->sg_tbl[tag],
					      pp->sg_tbl_dma[tag]);
			pp->sg_tbl[tag] = NULL;
		}
	}
}
1144
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.  The CRQB/CRPB rings and per-tag SG tables come
 * from the host-wide dma_pools; on any allocation failure the
 * partial allocations are returned via mv_port_free_dma_mem().
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the
 * error from ata_pad_alloc().
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned long flags;
	int tag, rc;

	/* devm-managed: freed automatically on driver detach */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* GEN_I: every tag shares the tag-0 table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}

	/* program queue registers under the host lock */
	spin_lock_irqsave(&ap->host->lock, flags);

	mv_edma_cfg(pp, hpriv, port_mmio, 0);
	mv_set_edma_ptrs(port_mmio, hpriv, pp);

	spin_unlock_irqrestore(&ap->host->lock, flags);

	/* Don't turn on EDMA here...do it before DMA commands only.  Else
	 * we'll be unable to send non-data, PIO, etc due to restricted access
	 * to shadow regs.
	 */
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1216
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);		/* takes the host lock internally */
	mv_port_free_dma_mem(ap);	/* return descriptors to dma_pools */
}
1231
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.  Each DMA-mapped
 * segment is split so that no ePRD entry crosses a 64KB boundary
 * relative to its starting address.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* each command tag owns its own ePRD table */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip this entry at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			/* NOTE: len==0x10000 stores 0 in flags_size —
			 * presumably the hardware reads 0 as 64KB; confirm
			 * against the EDMA ePRD format in the datasheet.
			 */
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag the final entry so the engine knows where the table ends */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1275
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001276static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001277{
Mark Lord559eeda2006-05-19 16:40:15 -04001278 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001279 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001280 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001281}
1282
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ go through the EDMA request queue; everything
	 * else uses the normal shadow-register path untouched
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB at the tag's private SG (ePRD) table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining registers fit; command must be packed last */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1372
1373/**
1374 * mv_qc_prep_iie - Host specific command preparation.
1375 * @qc: queued command to prepare
1376 *
1377 * This routine simply redirects to the general purpose routine
1378 * if command is not DMA. Else, it handles prep of the CRQB
1379 * (command request block), does some sanity checking, and calls
1380 * the SG load routine.
1381 *
1382 * LOCKING:
1383 * Inherited from caller.
1384 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only EDMA (DMA/NCQ) protocols get a CRQB; anything else is
	 * issued via the normal shadow-register path in mv_qc_issue().
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* NOTE(review): the host-queue tag field is loaded with the same
	 * libata tag used for CRQB_TAG_SHIFT above -- confirm against the
	 * datasheet that both fields are meant to carry qc->tag.
	 */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Point the CRQB at this tag's scatter/gather table; the 64-bit
	 * DMA address is split into low/high words.  The double ">> 16"
	 * avoids an out-of-range shift when dma_addr_t is only 32 bits.
	 */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Gen IIE packs the whole taskfile into four little-endian words. */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
		(tf->command << 16) |
		(tf->feature << 24)
	);
	crqb->ata_cmd[1] = cpu_to_le32(
		(tf->lbal << 0) |
		(tf->lbam << 8) |
		(tf->lbah << 16) |
		(tf->device << 24)
	);
	crqb->ata_cmd[2] = cpu_to_le32(
		(tf->hob_lbal << 0) |
		(tf->hob_lbam << 8) |
		(tf->hob_lbah << 16) |
		(tf->hob_feature << 24)
	);
	crqb->ata_cmd[3] = cpu_to_le32(
		(tf->nsect << 0) |
		(tf->hob_nsect << 8)
	);

	/* No data phase mapped: nothing to scatter/gather. */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1441
Brett Russ05b308e2005-10-05 17:08:53 -04001442/**
1443 * mv_qc_issue - Initiate a command to the host
1444 * @qc: queued command to start
1445 *
1446 * This routine simply redirects to the general purpose routine
1447 * if command is not DMA. Else, it sanity checks our local
1448 * caches of the request producer/consumer indices then enables
1449 * DMA and bumps the request producer index.
1450 *
1451 * LOCKING:
1452 * Inherited from caller.
1453 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001454static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001455{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001456 struct ata_port *ap = qc->ap;
1457 void __iomem *port_mmio = mv_ap_base(ap);
1458 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001459 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001460
Mark Lord138bfdd2008-01-26 18:33:18 -05001461 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1462 (qc->tf.protocol != ATA_PROT_NCQ)) {
Brett Russ31961942005-09-30 01:36:00 -04001463 /* We're about to send a non-EDMA capable command to the
1464 * port. Turn off EDMA so there won't be problems accessing
1465 * shadow block, etc registers.
1466 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001467 __mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001468 return ata_qc_issue_prot(qc);
1469 }
1470
Mark Lord72109162008-01-26 18:31:33 -05001471 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001472
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001473 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001474
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001475 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001476
1477 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001478 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1479 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001480
1481 return 0;
1482}
1483
Brett Russ05b308e2005-10-05 17:08:53 -04001484/**
Brett Russ05b308e2005-10-05 17:08:53 -04001485 * mv_err_intr - Handle error interrupts on the port
1486 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001487 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001488 *
1489 * In most cases, just clear the interrupt and move on. However,
1490 * some cases require an eDMA reset, which is done right before
1491 * the COMRESET in mv_phy_reset(). The SERR case requires a
1492 * clear of pending errors in the SATA SERROR register. Finally,
1493 * if the port disabled DMA, update our cached copy to match.
1494 *
1495 * LOCKING:
1496 * Inherited from caller.
1497 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001498static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001499{
Brett Russ31961942005-09-30 01:36:00 -04001500 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001501 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1502 struct mv_port_priv *pp = ap->private_data;
1503 struct mv_host_priv *hpriv = ap->host->private_data;
1504 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1505 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001506 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001507
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001508 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001509
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001510 if (!edma_enabled) {
1511 /* just a guess: do we need to do this? should we
1512 * expand this, and do it in all cases?
1513 */
Tejun Heo936fd732007-08-06 18:36:23 +09001514 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1515 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001516 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001517
1518 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1519
1520 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1521
1522 /*
1523 * all generations share these EDMA error cause bits
1524 */
1525
1526 if (edma_err_cause & EDMA_ERR_DEV)
1527 err_mask |= AC_ERR_DEV;
1528 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001529 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001530 EDMA_ERR_INTRL_PAR)) {
1531 err_mask |= AC_ERR_ATA_BUS;
1532 action |= ATA_EH_HARDRESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001533 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001534 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001535 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1536 ata_ehi_hotplugged(ehi);
1537 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001538 "dev disconnect" : "dev connect");
Mark Lord3606a382008-01-26 18:28:23 -05001539 action |= ATA_EH_HARDRESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001540 }
1541
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001542 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001543 eh_freeze_mask = EDMA_EH_FREEZE_5;
1544
1545 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
1546 struct mv_port_priv *pp = ap->private_data;
1547 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001548 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001549 }
1550 } else {
1551 eh_freeze_mask = EDMA_EH_FREEZE;
1552
1553 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
1554 struct mv_port_priv *pp = ap->private_data;
1555 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001556 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001557 }
1558
1559 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001560 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1561 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001562 err_mask = AC_ERR_ATA_BUS;
1563 action |= ATA_EH_HARDRESET;
1564 }
1565 }
Brett Russ20f733e2005-09-01 18:26:17 -04001566
1567 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001568 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001569
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001570 if (!err_mask) {
1571 err_mask = AC_ERR_OTHER;
1572 action |= ATA_EH_HARDRESET;
1573 }
1574
1575 ehi->serror |= serr;
1576 ehi->action |= action;
1577
1578 if (qc)
1579 qc->err_mask |= err_mask;
1580 else
1581 ehi->err_mask |= err_mask;
1582
1583 if (edma_err_cause & eh_freeze_mask)
1584 ata_port_freeze(ap);
1585 else
1586 ata_port_abort(ap);
1587}
1588
1589static void mv_intr_pio(struct ata_port *ap)
1590{
1591 struct ata_queued_cmd *qc;
1592 u8 ata_status;
1593
1594 /* ignore spurious intr if drive still BUSY */
1595 ata_status = readb(ap->ioaddr.status_addr);
1596 if (unlikely(ata_status & ATA_BUSY))
1597 return;
1598
1599 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001600 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001601 if (unlikely(!qc)) /* no active tag */
1602 return;
1603 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1604 return;
1605
1606 /* and finally, complete the ATA command */
1607 qc->err_mask |= ac_err_mask(ata_status);
1608 ata_qc_complete(qc);
1609}
1610
/* Walk the CRPB response queue from the software consumer index up to
 * the hardware producer index, completing each finished command, then
 * write the consumer pointer back to hardware if anything was consumed.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* Error path: hand off to the error handler and
			 * stop processing; the out-pointer write below is
			 * intentionally skipped.
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Tell hardware how far we consumed, in a single write. */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1676
Brett Russ05b308e2005-10-05 17:08:53 -04001677/**
1678 * mv_host_intr - Handle all interrupts on the given host controller
Jeff Garzikcca39742006-08-24 03:19:22 -04001679 * @host: host specific structure
Brett Russ05b308e2005-10-05 17:08:53 -04001680 * @relevant: port error bits relevant to this host controller
1681 * @hc: which host controller we're to look at
1682 *
1683 * Read then write clear the HC interrupt status then walk each
1684 * port connected to the HC and see if it needs servicing. Port
1685 * success ints are reported in the HC interrupt status reg, the
1686 * port error ints are reported in the higher level main
1687 * interrupt status register and thus are passed in via the
1688 * 'relevant' argument.
1689 *
1690 * LOCKING:
1691 * Inherited from caller.
1692 */
Jeff Garzikcca39742006-08-24 03:19:22 -04001693static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
Brett Russ20f733e2005-09-01 18:26:17 -04001694{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001695 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Brett Russ20f733e2005-09-01 18:26:17 -04001696 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
Brett Russ20f733e2005-09-01 18:26:17 -04001697 u32 hc_irq_cause;
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001698 int port, port0;
Brett Russ20f733e2005-09-01 18:26:17 -04001699
Jeff Garzik35177262007-02-24 21:26:42 -05001700 if (hc == 0)
Brett Russ20f733e2005-09-01 18:26:17 -04001701 port0 = 0;
Jeff Garzik35177262007-02-24 21:26:42 -05001702 else
Brett Russ20f733e2005-09-01 18:26:17 -04001703 port0 = MV_PORTS_PER_HC;
Brett Russ20f733e2005-09-01 18:26:17 -04001704
1705 /* we'll need the HC success int register in most cases */
1706 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001707 if (!hc_irq_cause)
1708 return;
1709
1710 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001711
1712 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001713 hc, relevant, hc_irq_cause);
Brett Russ20f733e2005-09-01 18:26:17 -04001714
1715 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001716 struct ata_port *ap = host->ports[port];
Mark Lord63af2a52006-03-29 09:50:31 -05001717 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001718 int have_err_bits, hard_port, shift;
Jeff Garzik55d8ca42006-03-29 19:43:31 -05001719
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001720 if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
Jeff Garzika2c91a82005-11-17 05:44:44 -05001721 continue;
1722
Brett Russ31961942005-09-30 01:36:00 -04001723 shift = port << 1; /* (port * 2) */
Brett Russ20f733e2005-09-01 18:26:17 -04001724 if (port >= MV_PORTS_PER_HC) {
1725 shift++; /* skip bit 8 in the HC Main IRQ reg */
1726 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001727 have_err_bits = ((PORT0_ERR << shift) & relevant);
1728
1729 if (unlikely(have_err_bits)) {
1730 struct ata_queued_cmd *qc;
1731
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001732 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001733 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1734 continue;
1735
1736 mv_err_intr(ap, qc);
1737 continue;
Brett Russ20f733e2005-09-01 18:26:17 -04001738 }
Jeff Garzik8b260242005-11-12 12:32:50 -05001739
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001740 hard_port = mv_hardport_from_port(port); /* range 0..3 */
1741
1742 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1743 if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
1744 mv_intr_edma(ap);
1745 } else {
1746 if ((DEV_IRQ << hard_port) & hc_irq_cause)
1747 mv_intr_pio(ap);
Brett Russ20f733e2005-09-01 18:26:17 -04001748 }
1749 }
1750 VPRINTK("EXIT\n");
1751}
1752
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001753static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
1754{
Mark Lord02a121d2007-12-01 13:07:22 -05001755 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001756 struct ata_port *ap;
1757 struct ata_queued_cmd *qc;
1758 struct ata_eh_info *ehi;
1759 unsigned int i, err_mask, printed = 0;
1760 u32 err_cause;
1761
Mark Lord02a121d2007-12-01 13:07:22 -05001762 err_cause = readl(mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001763
1764 dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
1765 err_cause);
1766
1767 DPRINTK("All regs @ PCI error\n");
1768 mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
1769
Mark Lord02a121d2007-12-01 13:07:22 -05001770 writelfl(0, mmio + hpriv->irq_cause_ofs);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001771
1772 for (i = 0; i < host->n_ports; i++) {
1773 ap = host->ports[i];
Tejun Heo936fd732007-08-06 18:36:23 +09001774 if (!ata_link_offline(&ap->link)) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001775 ehi = &ap->link.eh_info;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001776 ata_ehi_clear_desc(ehi);
1777 if (!printed++)
1778 ata_ehi_push_desc(ehi,
1779 "PCI err cause 0x%08x", err_cause);
1780 err_mask = AC_ERR_HOST_BUS;
1781 ehi->action = ATA_EH_HARDRESET;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001782 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001783 if (qc)
1784 qc->err_mask |= err_mask;
1785 else
1786 ehi->err_mask |= err_mask;
1787
1788 ata_port_freeze(ap);
1789 }
1790 }
1791}
1792
Brett Russ05b308e2005-10-05 17:08:53 -04001793/**
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001794 * mv_interrupt - Main interrupt event handler
Brett Russ05b308e2005-10-05 17:08:53 -04001795 * @irq: unused
1796 * @dev_instance: private data; in this case the host structure
Brett Russ05b308e2005-10-05 17:08:53 -04001797 *
1798 * Read the read only register to determine if any host
1799 * controllers have pending interrupts. If so, call lower level
1800 * routine to handle. Also check for PCI errors which are only
1801 * reported here.
1802 *
Jeff Garzik8b260242005-11-12 12:32:50 -05001803 * LOCKING:
Jeff Garzikcca39742006-08-24 03:19:22 -04001804 * This routine holds the host lock while processing pending
Brett Russ05b308e2005-10-05 17:08:53 -04001805 * interrupts.
1806 */
David Howells7d12e782006-10-05 14:55:46 +01001807static irqreturn_t mv_interrupt(int irq, void *dev_instance)
Brett Russ20f733e2005-09-01 18:26:17 -04001808{
Jeff Garzikcca39742006-08-24 03:19:22 -04001809 struct ata_host *host = dev_instance;
Brett Russ20f733e2005-09-01 18:26:17 -04001810 unsigned int hc, handled = 0, n_hcs;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001811 void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
Mark Lord646a4da2008-01-26 18:30:37 -05001812 u32 irq_stat, irq_mask;
Brett Russ20f733e2005-09-01 18:26:17 -04001813
Mark Lord646a4da2008-01-26 18:30:37 -05001814 spin_lock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001815 irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS);
Mark Lord646a4da2008-01-26 18:30:37 -05001816 irq_mask = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001817
1818 /* check the cases where we either have nothing pending or have read
1819 * a bogus register value which can indicate HW removal or PCI fault
1820 */
Mark Lord646a4da2008-01-26 18:30:37 -05001821 if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
1822 goto out_unlock;
Brett Russ20f733e2005-09-01 18:26:17 -04001823
Jeff Garzikcca39742006-08-24 03:19:22 -04001824 n_hcs = mv_get_hc_count(host->ports[0]->flags);
Brett Russ20f733e2005-09-01 18:26:17 -04001825
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001826 if (unlikely(irq_stat & PCI_ERR)) {
1827 mv_pci_error(host, mmio);
1828 handled = 1;
1829 goto out_unlock; /* skip all other HC irq handling */
1830 }
1831
Brett Russ20f733e2005-09-01 18:26:17 -04001832 for (hc = 0; hc < n_hcs; hc++) {
1833 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
1834 if (relevant) {
Jeff Garzikcca39742006-08-24 03:19:22 -04001835 mv_host_intr(host, relevant, hc);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001836 handled = 1;
Brett Russ20f733e2005-09-01 18:26:17 -04001837 }
1838 }
Mark Lord615ab952006-05-19 16:24:56 -04001839
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001840out_unlock:
Jeff Garzikcca39742006-08-24 03:19:22 -04001841 spin_unlock(&host->lock);
Brett Russ20f733e2005-09-01 18:26:17 -04001842
1843 return IRQ_RETVAL(handled);
1844}
1845
Jeff Garzikc9d39132005-11-13 17:47:51 -05001846static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1847{
1848 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1849 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1850
1851 return hc_mmio + ofs;
1852}
1853
1854static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1855{
1856 unsigned int ofs;
1857
1858 switch (sc_reg_in) {
1859 case SCR_STATUS:
1860 case SCR_ERROR:
1861 case SCR_CONTROL:
1862 ofs = sc_reg_in * sizeof(u32);
1863 break;
1864 default:
1865 ofs = 0xffffffffU;
1866 break;
1867 }
1868 return ofs;
1869}
1870
Tejun Heoda3dbb12007-07-16 14:29:40 +09001871static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001872{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001873 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1874 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001875 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1876
Tejun Heoda3dbb12007-07-16 14:29:40 +09001877 if (ofs != 0xffffffffU) {
1878 *val = readl(addr + ofs);
1879 return 0;
1880 } else
1881 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001882}
1883
Tejun Heoda3dbb12007-07-16 14:29:40 +09001884static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001885{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001886 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
1887 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001888 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1889
Tejun Heoda3dbb12007-07-16 14:29:40 +09001890 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001891 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001892 return 0;
1893 } else
1894 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001895}
1896
Jeff Garzik522479f2005-11-12 22:14:02 -05001897static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
1898{
Jeff Garzik522479f2005-11-12 22:14:02 -05001899 int early_5080;
1900
Auke Kok44c10132007-06-08 15:46:36 -07001901 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001902
1903 if (!early_5080) {
1904 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1905 tmp |= (1 << 0);
1906 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1907 }
1908
1909 mv_reset_pci_bus(pdev, mmio);
1910}
1911
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	/* NOTE(review): magic constant written to the flash control
	 * register; its meaning is not documented here -- confirm
	 * against the 50xx datasheet.
	 */
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1916
Jeff Garzik47c2b672005-11-12 21:13:17 -05001917static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001918 void __iomem *mmio)
1919{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001920 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1921 u32 tmp;
1922
1923 tmp = readl(phy_mmio + MV5_PHY_MODE);
1924
1925 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1926 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001927}
1928
Jeff Garzik47c2b672005-11-12 21:13:17 -05001929static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001930{
Jeff Garzik522479f2005-11-12 22:14:02 -05001931 u32 tmp;
1932
1933 writel(0, mmio + MV_GPIO_PORT_CTL);
1934
1935 /* FIXME: handle MV_HP_ERRATA_50XXB2 errata */
1936
1937 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1938 tmp |= ~(1 << 0);
1939 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001940}
1941
Jeff Garzik2a47ce02005-11-12 23:05:14 -05001942static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
1943 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001944{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001945 void __iomem *phy_mmio = mv5_phy_base(mmio, port);
1946 const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
1947 u32 tmp;
1948 int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);
1949
1950 if (fix_apm_sq) {
1951 tmp = readl(phy_mmio + MV5_LT_MODE);
1952 tmp |= (1 << 19);
1953 writel(tmp, phy_mmio + MV5_LT_MODE);
1954
1955 tmp = readl(phy_mmio + MV5_PHY_CTL);
1956 tmp &= ~0x3;
1957 tmp |= 0x1;
1958 writel(tmp, phy_mmio + MV5_PHY_CTL);
1959 }
1960
1961 tmp = readl(phy_mmio + MV5_PHY_MODE);
1962 tmp &= ~mask;
1963 tmp |= hpriv->signal[port].pre;
1964 tmp |= hpriv->signal[port].amps;
1965 writel(tmp, phy_mmio + MV5_PHY_MODE);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05001966}
1967
Jeff Garzikc9d39132005-11-13 17:47:51 -05001968
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one 50xx port: disable EDMA, reset the channel, then zero the
 * per-port EDMA registers and restore two non-zero defaults (EDMA
 * config and IORDY timeout).
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* EDMA_DS = disable EDMA before resetting the channel. */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1995
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one 50xx host controller block: zero four HC registers and
 * rewrite register 0x20 with fixed field values.  NOTE(review): the
 * raw offsets and the 0x1c1c1c1c/0x03030303 masks are undocumented
 * here -- confirm against the 50xx datasheet.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
2014
2015static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2016 unsigned int n_hc)
2017{
2018 unsigned int hc, port;
2019
2020 for (hc = 0; hc < n_hc; hc++) {
2021 for (port = 0; port < MV_PORTS_PER_HC; port++)
2022 mv5_reset_hc_port(hpriv, mmio,
2023 (hc * MV_PORTS_PER_HC) + port);
2024
2025 mv5_reset_one_hc(hpriv, mmio, hc);
2026 }
2027
2028 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002029}
2030
Jeff Garzik101ffae2005-11-12 22:17:49 -05002031#undef ZERO
2032#define ZERO(reg) writel(0, mmio + (reg))
2033static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio)
2034{
Mark Lord02a121d2007-12-01 13:07:22 -05002035 struct ata_host *host = dev_get_drvdata(&pdev->dev);
2036 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002037 u32 tmp;
2038
2039 tmp = readl(mmio + MV_PCI_MODE);
2040 tmp &= 0xff00ffff;
2041 writel(tmp, mmio + MV_PCI_MODE);
2042
2043 ZERO(MV_PCI_DISC_TIMER);
2044 ZERO(MV_PCI_MSI_TRIGGER);
2045 writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
2046 ZERO(HC_MAIN_IRQ_MASK_OFS);
2047 ZERO(MV_PCI_SERR_MASK);
Mark Lord02a121d2007-12-01 13:07:22 -05002048 ZERO(hpriv->irq_cause_ofs);
2049 ZERO(hpriv->irq_mask_ofs);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002050 ZERO(MV_PCI_ERR_LOW_ADDRESS);
2051 ZERO(MV_PCI_ERR_HIGH_ADDRESS);
2052 ZERO(MV_PCI_ERR_ATTRIBUTE);
2053 ZERO(MV_PCI_ERR_COMMAND);
2054}
2055#undef ZERO
2056
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	/* Start with the common 50xx flash reset. */
	mv5_reset_flash(hpriv, mmio);

	/* Then adjust the GPIO port control: keep the low two bits,
	 * set bits 5 and 6.
	 */
	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2068
2069/**
2070 * mv6_reset_hc - Perform the 6xxx global soft reset
2071 * @mmio: base address of the HBA
2072 *
2073 * This routine only applies to 6xxx parts.
2074 *
2075 * LOCKING:
2076 * Inherited from caller.
2077 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002078static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2079 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002080{
2081 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2082 int i, rc = 0;
2083 u32 t;
2084
2085 /* Following procedure defined in PCI "main command and status
2086 * register" table.
2087 */
2088 t = readl(reg);
2089 writel(t | STOP_PCI_MASTER, reg);
2090
2091 for (i = 0; i < 1000; i++) {
2092 udelay(1);
2093 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002094 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002095 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002096 }
2097 if (!(PCI_MASTER_EMPTY & t)) {
2098 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2099 rc = 1;
2100 goto done;
2101 }
2102
2103 /* set reset */
2104 i = 5;
2105 do {
2106 writel(t | GLOB_SFT_RST, reg);
2107 t = readl(reg);
2108 udelay(1);
2109 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2110
2111 if (!(GLOB_SFT_RST & t)) {
2112 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2113 rc = 1;
2114 goto done;
2115 }
2116
2117 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2118 i = 5;
2119 do {
2120 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2121 t = readl(reg);
2122 udelay(1);
2123 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2124
2125 if (GLOB_SFT_RST & t) {
2126 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2127 rc = 1;
2128 }
2129done:
2130 return rc;
2131}
2132
Jeff Garzik47c2b672005-11-12 21:13:17 -05002133static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002134 void __iomem *mmio)
2135{
2136 void __iomem *port_mmio;
2137 u32 tmp;
2138
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002139 tmp = readl(mmio + MV_RESET_CFG);
2140 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002141 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002142 hpriv->signal[idx].pre = 0x1 << 5;
2143 return;
2144 }
2145
2146 port_mmio = mv_port_base(mmio, idx);
2147 tmp = readl(port_mmio + PHY_MODE2);
2148
2149 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2150 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2151}
2152
/* Program the GPIO port control register.  The 0x60 value is a magic
 * constant; given the function name it presumably routes the GPIO
 * lines to drive the activity LEDs -- TODO: confirm against the
 * 60xx datasheet.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2157
/**
 * mv6_phy_errata - Apply Gen-II/IIE PHY workarounds to one port
 * @hpriv: host private data (supplies hp_flags and the saved signal values)
 * @mmio: base address of the chip's MMIO region
 * @port: port number to fix up
 *
 * For chips flagged with the 60X1B2/60X1C0 errata, toggles bits in
 * PHY_MODE2 and adjusts PHY_MODE4, then restores the pre-emphasis
 * and amplitude values previously captured by mv6_read_preamp().
 * Register write order and the udelay() pauses are part of the
 * workaround; do not reorder.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, then clear both */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 requires register 0x310 to be preserved across
		 * the PHY_MODE4 update */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2223
/**
 * mv_channel_reset - Pulse ATA_RST on one port's EDMA engine
 * @hpriv: host private data
 * @mmio: base address of the chip's MMIO region
 * @port_no: port number to reset
 *
 * Asserts ATA_RST in the EDMA command register, reprograms the SATA
 * interface control register on Gen-II parts, clears the reset after
 * a propagation delay, and re-applies the per-chip PHY errata fixups.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);		/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I parts need a short settling delay after the reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2250
/**
 * mv_phy_reset - Issue COMRESET via SControl, then classify the device
 * @ap: ATA channel to manipulate
 * @class: out: device class determined after the reset
 * @deadline: jiffies value by which the reset should complete
 *
 * Part of this is taken from __sata_phy_reset.
 *
 * LOCKING:
 * Inherited from caller.  NOTE(review): despite the historical claim
 * that this routine is safe at interrupt level, it calls msleep()
 * several times, so it must not be invoked from atomic context.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			 unsigned long deadline)
{
	struct mv_port_priv *pp	= ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll until SStatus DET settles: 0 = no device, 3 = link up */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* clear any latched EDMA error causes left over from the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2354
Tejun Heocc0680a2007-08-06 18:36:23 +09002355static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002356{
Tejun Heocc0680a2007-08-06 18:36:23 +09002357 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002358 struct mv_port_priv *pp = ap->private_data;
Tejun Heocc0680a2007-08-06 18:36:23 +09002359 struct ata_eh_context *ehc = &link->eh_context;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002360 int rc;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002361
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002362 rc = mv_stop_dma(ap);
2363 if (rc)
2364 ehc->i.action |= ATA_EH_HARDRESET;
2365
2366 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) {
2367 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
2368 ehc->i.action |= ATA_EH_HARDRESET;
2369 }
2370
2371 /* if we're about to do hardreset, nothing more to do */
2372 if (ehc->i.action & ATA_EH_HARDRESET)
2373 return 0;
2374
Tejun Heocc0680a2007-08-06 18:36:23 +09002375 if (ata_link_online(link))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002376 rc = ata_wait_ready(ap, deadline);
2377 else
2378 rc = -ENODEV;
2379
2380 return rc;
Jeff Garzik22374672005-11-17 10:59:48 -05002381}
2382
/* mv_hardreset - libata hardreset method.
 * Stops EDMA, pulses ATA_RST on the channel, then issues COMRESET on
 * the link and classifies the attached device via mv_phy_reset().
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	/* always reports success; *class carries the real outcome */
	return 0;
}
2398
Tejun Heocc0680a2007-08-06 18:36:23 +09002399static void mv_postreset(struct ata_link *link, unsigned int *classes)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002400{
Tejun Heocc0680a2007-08-06 18:36:23 +09002401 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002402 u32 serr;
2403
2404 /* print link status */
Tejun Heocc0680a2007-08-06 18:36:23 +09002405 sata_print_link_status(link);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002406
2407 /* clear SError */
Tejun Heocc0680a2007-08-06 18:36:23 +09002408 sata_scr_read(link, SCR_ERROR, &serr);
2409 sata_scr_write_flush(link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002410
2411 /* bail out if no device is present */
2412 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2413 DPRINTK("EXIT, no device\n");
2414 return;
2415 }
2416
2417 /* set up device control */
2418 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
2419}
2420
/* mv_error_handler - libata error handler entry point.
 * Runs EH using our prereset/hardreset/postreset methods together
 * with the standard softreset.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2426
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002427static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002428{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002429 void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002430 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
2431 u32 tmp, mask;
2432 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002433
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002434 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002435
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002436 shift = ap->port_no * 2;
2437 if (hc > 0)
2438 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002439
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002440 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002441
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002442 /* disable assertion of portN err, done events */
2443 tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
2444 writelfl(tmp & ~mask, mmio + HC_MAIN_IRQ_MASK_OFS);
2445}
2446
/* mv_eh_thaw - libata thaw method.
 * Clears any latched EDMA error and host-controller interrupt causes
 * for this port, then re-enables its err/done bits in the main IRQ
 * mask (undoing mv_eh_freeze).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;	/* host controller index */
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns a 2-bit err/done pair; HC1 ports sit one higher */
	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;	/* port number relative to its HC */
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(mmio + HC_MAIN_IRQ_MASK_OFS);
	writelfl(tmp | mask, mmio + HC_MAIN_IRQ_MASK_OFS);
}
2479
Brett Russ05b308e2005-10-05 17:08:53 -04002480/**
2481 * mv_port_init - Perform some early initialization on a single port.
2482 * @port: libata data structure storing shadow register addresses
2483 * @port_mmio: base address of the port
2484 *
2485 * Initialize shadow register mmio addresses, clear outstanding
2486 * interrupts on the port, and unmask interrupts for the future
2487 * start of the port.
2488 *
2489 * LOCKING:
2490 * Inherited from caller.
2491 */
Brett Russ31961942005-09-30 01:36:00 -04002492static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2493{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002494 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002495 unsigned serr_ofs;
2496
Jeff Garzik8b260242005-11-12 12:32:50 -05002497 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002498 */
2499 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002500 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002501 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2502 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2503 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2504 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2505 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2506 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002507 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002508 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2509 /* special case: control/altstatus doesn't have ATA_REG_ address */
2510 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2511
2512 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002513 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002514
Brett Russ31961942005-09-30 01:36:00 -04002515 /* Clear any currently outstanding port interrupt conditions */
2516 serr_ofs = mv_scr_offset(SCR_ERROR);
2517 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2518 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2519
Mark Lord646a4da2008-01-26 18:30:37 -05002520 /* unmask all non-transient EDMA error interrupts */
2521 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002522
Jeff Garzik8b260242005-11-12 12:32:50 -05002523 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002524 readl(port_mmio + EDMA_CFG_OFS),
2525 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2526 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002527}
2528
/**
 * mv_chip_id - Determine chip generation and apply errata flags.
 * @host: ATA host being initialized
 * @board_idx: board table index from the PCI match entry
 *
 * Selects the hpriv->ops vector, sets the generation flag
 * (GEN_I / GEN_II / GEN_IIE) plus per-stepping errata flags based on
 * the PCI revision ID, and records whether the PCI or PCIe interrupt
 * register offsets should be used.
 *
 * Returns 0 on success, 1 on an invalid board index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: chip_7042 also needs the GEN_IIE setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;

	default:
		dev_printk(KERN_ERR, &pdev->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts latch their interrupt registers at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2664
/**
 * mv_init_host - Perform some early initialization of the host.
 * @host: ATA host to initialize
 * @board_idx: controller index
 *
 * If possible, do an early global reset of the host.  Then do
 * our port init and clear/unmask all/relevant host interrupts.
 * The sequence is order-sensitive: preamp values must be sampled
 * before the host controller reset, and PHY errata fixups run after.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct pci_dev *pdev = to_pci_dev(host->dev);
	void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
	struct mv_host_priv *hpriv = host->private_data;

	/* global interrupt mask */
	writel(0, mmio + HC_MAIN_IRQ_MASK_OFS);

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* sample per-port signal config before anything is reset */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(pdev, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);		/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);
		unsigned int offset = port_mmio - mmio;

		mv_port_init(&ap->ioaddr, port_mmio);

		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
		ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	/* Clear any currently outstanding host interrupt conditions */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	/* and unmask interrupt generation for host regs */
	writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);

	/* Gen-I uses a reduced set of main IRQ mask bits */
	if (IS_GEN_I(hpriv))
		writelfl(~HC_MAIN_MASKED_IRQS_5, mmio + HC_MAIN_IRQ_MASK_OFS);
	else
		writelfl(~HC_MAIN_MASKED_IRQS, mmio + HC_MAIN_IRQ_MASK_OFS);

	VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
		"PCI int cause/mask=0x%08x/0x%08x\n",
		readl(mmio + HC_MAIN_IRQ_CAUSE_OFS),
		readl(mmio + HC_MAIN_IRQ_MASK_OFS),
		readl(mmio + hpriv->irq_cause_ofs),
		readl(mmio + hpriv->irq_mask_ofs));

done:
	return rc;
}
2760
Brett Russ05b308e2005-10-05 17:08:53 -04002761/**
2762 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002763 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002764 *
2765 * FIXME: complete this.
2766 *
2767 * LOCKING:
2768 * Inherited from caller.
2769 */
Tejun Heo4447d352007-04-17 23:44:08 +09002770static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002771{
Tejun Heo4447d352007-04-17 23:44:08 +09002772 struct pci_dev *pdev = to_pci_dev(host->dev);
2773 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002774 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002775 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002776
2777 /* Use this to determine the HW stepping of the chip so we know
2778 * what errata to workaround
2779 */
Brett Russ31961942005-09-30 01:36:00 -04002780 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2781 if (scc == 0)
2782 scc_s = "SCSI";
2783 else if (scc == 0x01)
2784 scc_s = "RAID";
2785 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002786 scc_s = "?";
2787
2788 if (IS_GEN_I(hpriv))
2789 gen = "I";
2790 else if (IS_GEN_II(hpriv))
2791 gen = "II";
2792 else if (IS_GEN_IIE(hpriv))
2793 gen = "IIE";
2794 else
2795 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002796
Jeff Garzika9524a72005-10-30 14:39:11 -05002797 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002798 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2799 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002800 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2801}
2802
Mark Lordda2fa9b2008-01-26 18:32:45 -05002803static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2804{
2805 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2806 MV_CRQB_Q_SZ, 0);
2807 if (!hpriv->crqb_pool)
2808 return -ENOMEM;
2809
2810 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2811 MV_CRPB_Q_SZ, 0);
2812 if (!hpriv->crpb_pool)
2813 return -ENOMEM;
2814
2815 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2816 MV_SG_TBL_SZ, 0);
2817 if (!hpriv->sg_tbl_pool)
2818 return -ENOMEM;
2819
2820 return 0;
2821}
2822
Brett Russ05b308e2005-10-05 17:08:53 -04002823/**
2824 * mv_init_one - handle a positive probe of a Marvell host
2825 * @pdev: PCI device found
2826 * @ent: PCI device ID entry for the matched host
2827 *
2828 * LOCKING:
2829 * Inherited from caller.
2830 */
Brett Russ20f733e2005-09-01 18:26:17 -04002831static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2832{
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002833 static int printed_version;
Brett Russ20f733e2005-09-01 18:26:17 -04002834 unsigned int board_idx = (unsigned int)ent->driver_data;
Tejun Heo4447d352007-04-17 23:44:08 +09002835 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
2836 struct ata_host *host;
2837 struct mv_host_priv *hpriv;
2838 int n_ports, rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002839
Jeff Garzika9524a72005-10-30 14:39:11 -05002840 if (!printed_version++)
2841 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
Brett Russ20f733e2005-09-01 18:26:17 -04002842
Tejun Heo4447d352007-04-17 23:44:08 +09002843 /* allocate host */
2844 n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;
2845
2846 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2847 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2848 if (!host || !hpriv)
2849 return -ENOMEM;
2850 host->private_data = hpriv;
2851
2852 /* acquire resources */
Tejun Heo24dc5f32007-01-20 16:00:28 +09002853 rc = pcim_enable_device(pdev);
2854 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002855 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002856
Tejun Heo0d5ff562007-02-01 15:06:36 +09002857 rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
2858 if (rc == -EBUSY)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002859 pcim_pin_device(pdev);
Tejun Heo0d5ff562007-02-01 15:06:36 +09002860 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002861 return rc;
Tejun Heo4447d352007-04-17 23:44:08 +09002862 host->iomap = pcim_iomap_table(pdev);
Brett Russ20f733e2005-09-01 18:26:17 -04002863
Jeff Garzikd88184f2007-02-26 01:26:06 -05002864 rc = pci_go_64(pdev);
2865 if (rc)
2866 return rc;
2867
Mark Lordda2fa9b2008-01-26 18:32:45 -05002868 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2869 if (rc)
2870 return rc;
2871
Brett Russ20f733e2005-09-01 18:26:17 -04002872 /* initialize adapter */
Tejun Heo4447d352007-04-17 23:44:08 +09002873 rc = mv_init_host(host, board_idx);
Tejun Heo24dc5f32007-01-20 16:00:28 +09002874 if (rc)
2875 return rc;
Brett Russ20f733e2005-09-01 18:26:17 -04002876
Brett Russ31961942005-09-30 01:36:00 -04002877 /* Enable interrupts */
Tejun Heo6a59dcf2007-02-24 15:12:31 +09002878 if (msi && pci_enable_msi(pdev))
Brett Russ31961942005-09-30 01:36:00 -04002879 pci_intx(pdev, 1);
Brett Russ20f733e2005-09-01 18:26:17 -04002880
Brett Russ31961942005-09-30 01:36:00 -04002881 mv_dump_pci_cfg(pdev, 0x68);
Tejun Heo4447d352007-04-17 23:44:08 +09002882 mv_print_info(host);
Brett Russ20f733e2005-09-01 18:26:17 -04002883
Tejun Heo4447d352007-04-17 23:44:08 +09002884 pci_set_master(pdev);
Jeff Garzikea8b4db2007-07-17 02:21:50 -04002885 pci_try_set_mwi(pdev);
Tejun Heo4447d352007-04-17 23:44:08 +09002886 return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
Jeff Garzikc5d3e452007-07-11 18:30:50 -04002887 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
Brett Russ20f733e2005-09-01 18:26:17 -04002888}
2889
2890static int __init mv_init(void)
2891{
Pavel Roskinb7887192006-08-10 18:13:18 +09002892 return pci_register_driver(&mv_pci_driver);
Brett Russ20f733e2005-09-01 18:26:17 -04002893}
2894
static void __exit mv_exit(void)
{
	/* Unregister the driver; the PCI core detaches any bound devices. */
	pci_unregister_driver(&mv_pci_driver);
}
2899
/* Module metadata and entry points. */
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);	/* export PCI IDs for module autoload */
MODULE_VERSION(DRV_VERSION);

/* "msi" parameter: mode 0444 = world-readable, not writable at runtime. */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");

module_init(mv_init);
module_exit(mv_exit);