blob: 52d41edadb72f0791861572af23f13e7552c8380 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Jeff Garzik8b260242005-11-12 12:32:50 -05004 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05005 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04006 *
7 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; version 2 of the License.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 *
22 */
23
Jeff Garzik4a05e202007-05-24 23:40:15 -040024/*
25 sata_mv TODO list:
26
27 1) Needs a full errata audit for all chipsets. I implemented most
28 of the errata workarounds found in the Marvell vendor driver, but
29 I distinctly remember a couple workarounds (one related to PCI-X)
30 are still needed.
31
Mark Lord1fd2e1c2008-01-26 18:33:59 -050032 2) Improve/fix IRQ and error handling sequences.
33
34 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
35
36 4) Think about TCQ support here, and for libata in general
	with controllers that support it via host-queuing hardware
38 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040039
40 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
41
42 6) Add port multiplier support (intermediate)
43
Jeff Garzik4a05e202007-05-24 23:40:15 -040044 8) Develop a low-power-consumption strategy, and implement it.
45
46 9) [Experiment, low priority] See if ATAPI can be supported using
47 "unknown FIS" or "vendor-specific FIS" support, or something creative
48 like that.
49
50 10) [Experiment, low priority] Investigate interrupt coalescing.
51 Quite often, especially with PCI Message Signalled Interrupts (MSI),
52 the overhead reduced by interrupt mitigation is quite often not
53 worth the latency cost.
54
55 11) [Experiment, Marvell value added] Is it possible to use target
56 mode to cross-connect two Linux boxes with Marvell cards? If so,
57 creating LibATA target mode support would be very interesting.
58
59 Target mode, for those without docs, is the ability to directly
60 connect two SATA controllers.
61
Jeff Garzik4a05e202007-05-24 23:40:15 -040062*/
63
64
Brett Russ20f733e2005-09-01 18:26:17 -040065#include <linux/kernel.h>
66#include <linux/module.h>
67#include <linux/pci.h>
68#include <linux/init.h>
69#include <linux/blkdev.h>
70#include <linux/delay.h>
71#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080072#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040073#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050074#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050075#include <linux/platform_device.h>
76#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040077#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050078#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040079#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040081
/* Driver identification strings used throughout this file */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040084
/*
 * Chip register offsets, sizes, and bit definitions for the Marvell
 * 50xx/60xx/70xx/SoC SATA controllers.  Offsets are relative to the
 * chip's MMIO BAR unless a comment says otherwise.
 *
 * NOTE(review): several constants below use (1 << 31), which shifts into
 * the sign bit of int; this is kernel-wide practice but strictly UB in
 * ISO C — confirm against the build's compiler flags.
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	MV_PORTS_PER_HC		= 4,
	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
	MV_PORT_HC_SHIFT	= 2,
	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
	MV_PORT_MASK		= 3,

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* Command ReQuest Block (CRQB) field shifts/bits */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* Command ResPonse Block (CRPB) field shifts */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host-controller "main" interrupt cause/mask registers */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	PORT0_ERR		= (1 << 0),	/* shift by port # */
	PORT0_DONE		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),	/* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CTL	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	/* Errors that come and go on their own; not worth freezing over */
	EDMA_ERR_IRQ_TRANSIENT	= EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX,

	/* Error bits that freeze the port on Gen II/IIE */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,
	/* Error bits that freeze the port on Gen I (50xx) */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
};
349
/* Chip-generation tests, keyed off mv_host_priv.hp_flags */
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* True unless port 0 carries MV_FLAG_SOC (SoC variant has no PCI bus) */
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500354
/* DMA-related constants (kept in a separate enum from the register map) */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
369
/*
 * Supported chip families; values index mv_port_info[] (entries there
 * carry matching /* chip_xxx */ comments).
 */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,	/* SoC-integrated variant, no PCI interface */
};
380
/* Command ReQuest Block: 32B
 * Hardware-defined layout (little-endian fields); do not reorder.
 */
struct mv_crqb {
	__le32			sg_addr;	/* low 32 bits of ePRD table address */
	__le32			sg_addr_hi;	/* high 32 bits of ePRD table address */
	__le16			ctrl_flags;	/* CRQB_FLAG_*/CRQB_*_SHIFT fields */
	__le16			ata_cmd[11];	/* encoded ATA register values */
};
388
/* Command ReQuest Block, Gen-IIE (6042/7042) variant: 32B
 * Hardware-defined layout (little-endian fields); do not reorder.
 */
struct mv_crqb_iie {
	__le32			addr;		/* low 32 bits of ePRD table address */
	__le32			addr_hi;	/* high 32 bits of ePRD table address */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* packed ATA taskfile */
};
396
/* Command ResPonse Block: 8B
 * Hardware-defined layout (little-endian fields); do not reorder.
 */
struct mv_crpb {
	__le16			id;	/* IO id; see CRPB_IOID_SHIFT_* */
	__le16			flags;	/* status in bits >= CRPB_FLAG_STATUS_SHIFT */
	__le32			tmstmp;
};
403
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG
 * Hardware-defined layout (little-endian fields); 16B per entry.
 */
struct mv_sg {
	__le32			addr;		/* low 32 bits of segment address */
	__le32			flags_size;	/* length + EPRD_FLAG_END_OF_TBL */
	__le32			addr_hi;	/* high 32 bits of segment address */
	__le32			reserved;
};
411
/* Per-port private data: EDMA queue memory and state flags */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue (CPU view) */
	dma_addr_t		crqb_dma;	/* request queue (device view) */
	struct mv_crpb		*crpb;		/* response queue (CPU view) */
	dma_addr_t		crpb_dma;	/* response queue (device view) */
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];	/* one ePRD table per tag */
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* next request queue slot */
	unsigned int		resp_idx;	/* next response queue slot */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
425
/* Per-port PHY signal amplitude/pre-emphasis settings */
struct mv_port_signal {
	u32			amps;
	u32			pre;
};
430
/* Per-host private data: chip flags, hw ops, and shared DMA pools */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* (generation, errata) */
	struct mv_port_signal	signal[8];	/* per-port PHY settings */
	const struct mv_hw_ops	*ops;		/* chip-family hardware hooks */
	int			n_ports;
	void __iomem		*base;
	void __iomem		*main_cause_reg_addr;
	void __iomem		*main_mask_reg_addr;
	u32			irq_cause_ofs;	/* PCI vs PCIe register offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
451
/* Chip-family-specific hardware hooks (mv5_*, mv6_*, mv_soc_* variants) */
struct mv_hw_ops {
	/* apply PHY errata workarounds for one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* read PHY preamp settings into hpriv->signal[idx] */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	/* reset n_hc host controllers; returns 0 on success */
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
463
/* Forward declarations for functions referenced by the ops tables below */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static void mv_error_handler(struct ata_port *ap);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

/* Gen I (50xx) hardware hooks */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Gen II/IIE (60xx/6042/7042) hardware hooks */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
/* SoC-integrated variant hardware hooks */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
			       void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
			   void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
			       void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
			void __iomem *port_mmio, int want_ncq);
static int __mv_stop_dma(struct ata_port *ap);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500511
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
/* SCSI host template for Gen I (50xx) chips: no NCQ */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
521
/* SCSI host template for Gen II/IIE chips: NCQ-capable */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,	/* see comment above mv5_sht */
	.dma_boundary		= MV_DMA_BOUNDARY,
};
528
/* libata port operations for Gen I (50xx) chips */
static const struct ata_port_operations mv5_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,

	/* Gen I uses its own SCR accessors */
	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
553
/* libata port operations for Gen II (60xx) chips */
static const struct ata_port_operations mv6_ops = {
	.dev_config		= mv6_dev_config,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,	/* needed for NCQ */

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
580
/* libata port operations for Gen IIE (6042/7042) and SoC chips;
 * differs from mv6_ops in using the IIE CRQB layout (mv_qc_prep_iie)
 * and having no .dev_config hook.
 */
static const struct ata_port_operations mv_iie_ops = {
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,

	.qc_prep		= mv_qc_prep_iie,
	.qc_issue		= mv_qc_issue,
	.data_xfer		= ata_data_xfer,

	.irq_clear		= ata_noop_irq_clear,
	.irq_on			= ata_irq_on,

	.error_handler		= mv_error_handler,
	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.qc_defer		= ata_std_qc_defer,

	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};
606
/* Per-family port info; entries correspond one-to-one with enum chip_type */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
661
Jeff Garzik3b7d6972005-11-10 11:04:11 -0500662static const struct pci_device_id mv_pci_tbl[] = {
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400663 { PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
664 { PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
665 { PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
666 { PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
Alan Coxcfbf7232007-07-09 14:38:41 +0100667 /* RocketRAID 1740/174x have different identifiers */
668 { PCI_VDEVICE(TTI, 0x1740), chip_508x },
669 { PCI_VDEVICE(TTI, 0x1742), chip_508x },
Brett Russ20f733e2005-09-01 18:26:17 -0400670
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400671 { PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
672 { PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
673 { PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
674 { PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
675 { PCI_VDEVICE(MARVELL, 0x6081), chip_608x },
Jeff Garzik29179532005-11-11 08:08:03 -0500676
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400677 { PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },
678
Florian Attenbergerd9f9c6b2007-07-02 17:09:29 +0200679 /* Adaptec 1430SA */
680 { PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },
681
Mark Lord02a121d2007-12-01 13:07:22 -0500682 /* Marvell 7042 support */
Morrison, Tom6a3d5862007-03-06 02:38:10 -0800683 { PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },
684
Mark Lord02a121d2007-12-01 13:07:22 -0500685 /* Highpoint RocketRAID PCIe series */
686 { PCI_VDEVICE(TTI, 0x2300), chip_7042 },
687 { PCI_VDEVICE(TTI, 0x2310), chip_7042 },
688
Jeff Garzik2d2744f2006-09-28 20:21:59 -0400689 { } /* terminate list */
Brett Russ20f733e2005-09-01 18:26:17 -0400690};
691
Jeff Garzik47c2b672005-11-12 21:13:17 -0500692static const struct mv_hw_ops mv5xxx_ops = {
693 .phy_errata = mv5_phy_errata,
694 .enable_leds = mv5_enable_leds,
695 .read_preamp = mv5_read_preamp,
696 .reset_hc = mv5_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500697 .reset_flash = mv5_reset_flash,
698 .reset_bus = mv5_reset_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500699};
700
701static const struct mv_hw_ops mv6xxx_ops = {
702 .phy_errata = mv6_phy_errata,
703 .enable_leds = mv6_enable_leds,
704 .read_preamp = mv6_read_preamp,
705 .reset_hc = mv6_reset_hc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500706 .reset_flash = mv6_reset_flash,
707 .reset_bus = mv_reset_pci_bus,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500708};
709
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500710static const struct mv_hw_ops mv_soc_ops = {
711 .phy_errata = mv6_phy_errata,
712 .enable_leds = mv_soc_enable_leds,
713 .read_preamp = mv_soc_read_preamp,
714 .reset_hc = mv_soc_reset_hc,
715 .reset_flash = mv_soc_reset_flash,
716 .reset_bus = mv_soc_reset_bus,
717};
718
Brett Russ20f733e2005-09-01 18:26:17 -0400719/*
720 * Functions
721 */
722
723static inline void writelfl(unsigned long data, void __iomem *addr)
724{
725 writel(data, addr);
726 (void) readl(addr); /* flush to avoid PCI posted write */
727}
728
Brett Russ20f733e2005-09-01 18:26:17 -0400729static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
730{
731 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
732}
733
Jeff Garzikc9d39132005-11-13 17:47:51 -0500734static inline unsigned int mv_hc_from_port(unsigned int port)
735{
736 return port >> MV_PORT_HC_SHIFT;
737}
738
739static inline unsigned int mv_hardport_from_port(unsigned int port)
740{
741 return port & MV_PORT_MASK;
742}
743
744static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
745 unsigned int port)
746{
747 return mv_hc_base(base, mv_hc_from_port(port));
748}
749
Brett Russ20f733e2005-09-01 18:26:17 -0400750static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
751{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500752 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500753 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500754 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400755}
756
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500757static inline void __iomem *mv_host_base(struct ata_host *host)
758{
759 struct mv_host_priv *hpriv = host->private_data;
760 return hpriv->base;
761}
762
Brett Russ20f733e2005-09-01 18:26:17 -0400763static inline void __iomem *mv_ap_base(struct ata_port *ap)
764{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500765 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400766}
767
Jeff Garzikcca39742006-08-24 03:19:22 -0400768static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400769{
Jeff Garzikcca39742006-08-24 03:19:22 -0400770 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400771}
772
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400773static void mv_set_edma_ptrs(void __iomem *port_mmio,
774 struct mv_host_priv *hpriv,
775 struct mv_port_priv *pp)
776{
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400777 u32 index;
778
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400779 /*
780 * initialize request queue
781 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400782 index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
783
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400784 WARN_ON(pp->crqb_dma & 0x3ff);
785 writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400786 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400787 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
788
789 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400790 writelfl((pp->crqb_dma & 0xffffffff) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400791 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
792 else
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400793 writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400794
795 /*
796 * initialize response queue
797 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400798 index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
799
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400800 WARN_ON(pp->crpb_dma & 0xff);
801 writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
802
803 if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400804 writelfl((pp->crpb_dma & 0xffffffff) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400805 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
806 else
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400807 writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400808
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400809 writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400810 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400811}
812
Brett Russ05b308e2005-10-05 17:08:53 -0400813/**
814 * mv_start_dma - Enable eDMA engine
815 * @base: port base address
816 * @pp: port private data
817 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900818 * Verify the local cache of the eDMA state is accurate with a
819 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400820 *
821 * LOCKING:
822 * Inherited from caller.
823 */
Mark Lord0c589122008-01-26 18:31:16 -0500824static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500825 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400826{
Mark Lord72109162008-01-26 18:31:33 -0500827 int want_ncq = (protocol == ATA_PROT_NCQ);
828
829 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
830 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
831 if (want_ncq != using_ncq)
832 __mv_stop_dma(ap);
833 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400834 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500835 struct mv_host_priv *hpriv = ap->host->private_data;
836 int hard_port = mv_hardport_from_port(ap->port_no);
837 void __iomem *hc_mmio = mv_hc_base_from_port(
Saeed Bishara0fca0d62008-02-13 10:09:09 -1100838 mv_host_base(ap->host), hard_port);
Mark Lord0c589122008-01-26 18:31:16 -0500839 u32 hc_irq_cause, ipending;
840
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400841 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500842 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400843
Mark Lord0c589122008-01-26 18:31:16 -0500844 /* clear EDMA interrupt indicator, if any */
845 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
846 ipending = (DEV_IRQ << hard_port) |
847 (CRPB_DMA_DONE << hard_port);
848 if (hc_irq_cause & ipending) {
849 writelfl(hc_irq_cause & ~ipending,
850 hc_mmio + HC_IRQ_CAUSE_OFS);
851 }
852
Mark Lord72109162008-01-26 18:31:33 -0500853 mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500854
855 /* clear FIS IRQ Cause */
856 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
857
Mark Lordf630d562008-01-26 18:31:00 -0500858 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400859
Mark Lordf630d562008-01-26 18:31:00 -0500860 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400861 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
862 }
Mark Lordf630d562008-01-26 18:31:00 -0500863 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
Brett Russ31961942005-09-30 01:36:00 -0400864}
865
Brett Russ05b308e2005-10-05 17:08:53 -0400866/**
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400867 * __mv_stop_dma - Disable eDMA engine
Brett Russ05b308e2005-10-05 17:08:53 -0400868 * @ap: ATA channel to manipulate
869 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900870 * Verify the local cache of the eDMA state is accurate with a
871 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400872 *
873 * LOCKING:
874 * Inherited from caller.
875 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400876static int __mv_stop_dma(struct ata_port *ap)
Brett Russ31961942005-09-30 01:36:00 -0400877{
878 void __iomem *port_mmio = mv_ap_base(ap);
879 struct mv_port_priv *pp = ap->private_data;
Brett Russ31961942005-09-30 01:36:00 -0400880 u32 reg;
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400881 int i, err = 0;
Brett Russ31961942005-09-30 01:36:00 -0400882
Jeff Garzik4537deb52007-07-12 14:30:19 -0400883 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
Brett Russafb0edd2005-10-05 17:08:42 -0400884 /* Disable EDMA if active. The disable bit auto clears.
Brett Russ31961942005-09-30 01:36:00 -0400885 */
Brett Russ31961942005-09-30 01:36:00 -0400886 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
887 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Brett Russafb0edd2005-10-05 17:08:42 -0400888 } else {
Tejun Heobeec7db2006-02-11 19:11:13 +0900889 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS));
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400890 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500891
Brett Russ31961942005-09-30 01:36:00 -0400892 /* now properly wait for the eDMA to stop */
893 for (i = 1000; i > 0; i--) {
894 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb52007-07-12 14:30:19 -0400895 if (!(reg & EDMA_EN))
Brett Russ31961942005-09-30 01:36:00 -0400896 break;
Jeff Garzik4537deb52007-07-12 14:30:19 -0400897
Brett Russ31961942005-09-30 01:36:00 -0400898 udelay(100);
899 }
900
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400901 if (reg & EDMA_EN) {
Tejun Heof15a1da2006-05-15 20:57:56 +0900902 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400903 err = -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400904 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400905
906 return err;
Brett Russ31961942005-09-30 01:36:00 -0400907}
908
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400909static int mv_stop_dma(struct ata_port *ap)
910{
911 unsigned long flags;
912 int rc;
913
914 spin_lock_irqsave(&ap->host->lock, flags);
915 rc = __mv_stop_dma(ap);
916 spin_unlock_irqrestore(&ap->host->lock, flags);
917
918 return rc;
919}
920
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, 4 words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b = 0;

	while (b < bytes) {
		int w;

		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
935
/* Hex-dump @bytes of PCI config space of @pdev, 4 dwords per line. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b = 0;
	u32 dw;

	while (b < bytes) {
		int w;

		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
952static void mv_dump_all_regs(void __iomem *mmio_base, int port,
953 struct pci_dev *pdev)
954{
955#ifdef ATA_DEBUG
Jeff Garzik8b260242005-11-12 12:32:50 -0500956 void __iomem *hc_base = mv_hc_base(mmio_base,
Brett Russ31961942005-09-30 01:36:00 -0400957 port >> MV_PORT_HC_SHIFT);
958 void __iomem *port_base;
959 int start_port, num_ports, p, start_hc, num_hcs, hc;
960
961 if (0 > port) {
962 start_hc = start_port = 0;
963 num_ports = 8; /* shld be benign for 4 port devs */
964 num_hcs = 2;
965 } else {
966 start_hc = port >> MV_PORT_HC_SHIFT;
967 start_port = port;
968 num_ports = num_hcs = 1;
969 }
Jeff Garzik8b260242005-11-12 12:32:50 -0500970 DPRINTK("All registers for port(s) %u-%u:\n", start_port,
Brett Russ31961942005-09-30 01:36:00 -0400971 num_ports > 1 ? num_ports - 1 : start_port);
972
973 if (NULL != pdev) {
974 DPRINTK("PCI config space regs:\n");
975 mv_dump_pci_cfg(pdev, 0x68);
976 }
977 DPRINTK("PCI regs:\n");
978 mv_dump_mem(mmio_base+0xc00, 0x3c);
979 mv_dump_mem(mmio_base+0xd00, 0x34);
980 mv_dump_mem(mmio_base+0xf00, 0x4);
981 mv_dump_mem(mmio_base+0x1d00, 0x6c);
982 for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
Dan Alonid220c37e2006-04-10 23:20:22 -0700983 hc_base = mv_hc_base(mmio_base, hc);
Brett Russ31961942005-09-30 01:36:00 -0400984 DPRINTK("HC regs (HC %i):\n", hc);
985 mv_dump_mem(hc_base, 0x1c);
986 }
987 for (p = start_port; p < start_port + num_ports; p++) {
988 port_base = mv_port_base(mmio_base, p);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400989 DPRINTK("EDMA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -0400990 mv_dump_mem(port_base, 0x54);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400991 DPRINTK("SATA regs (port %i):\n", p);
Brett Russ31961942005-09-30 01:36:00 -0400992 mv_dump_mem(port_base+0x300, 0x60);
993 }
994#endif
995}
996
Brett Russ20f733e2005-09-01 18:26:17 -0400997static unsigned int mv_scr_offset(unsigned int sc_reg_in)
998{
999 unsigned int ofs;
1000
1001 switch (sc_reg_in) {
1002 case SCR_STATUS:
1003 case SCR_CONTROL:
1004 case SCR_ERROR:
1005 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1006 break;
1007 case SCR_ACTIVE:
1008 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1009 break;
1010 default:
1011 ofs = 0xffffffffU;
1012 break;
1013 }
1014 return ofs;
1015}
1016
Tejun Heoda3dbb12007-07-16 14:29:40 +09001017static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001018{
1019 unsigned int ofs = mv_scr_offset(sc_reg_in);
1020
Tejun Heoda3dbb12007-07-16 14:29:40 +09001021 if (ofs != 0xffffffffU) {
1022 *val = readl(mv_ap_base(ap) + ofs);
1023 return 0;
1024 } else
1025 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001026}
1027
Tejun Heoda3dbb12007-07-16 14:29:40 +09001028static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001029{
1030 unsigned int ofs = mv_scr_offset(sc_reg_in);
1031
Tejun Heoda3dbb12007-07-16 14:29:40 +09001032 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001033 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001034 return 0;
1035 } else
1036 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001037}
1038
Mark Lordf2738272008-01-26 18:32:29 -05001039static void mv6_dev_config(struct ata_device *adev)
1040{
1041 /*
1042 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1043 * See mv_qc_prep() for more info.
1044 */
1045 if (adev->flags & ATA_DFLAG_NCQ)
1046 if (adev->max_sectors > ATA_MAX_SECTORS)
1047 adev->max_sectors = ATA_MAX_SECTORS;
1048}
1049
Mark Lord72109162008-01-26 18:31:33 -05001050static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
1051 void __iomem *port_mmio, int want_ncq)
Jeff Garzike4e7b892006-01-31 12:18:41 -05001052{
Mark Lord0c589122008-01-26 18:31:16 -05001053 u32 cfg;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001054
1055 /* set up non-NCQ EDMA configuration */
Mark Lord0c589122008-01-26 18:31:16 -05001056 cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001057
Mark Lord0c589122008-01-26 18:31:16 -05001058 if (IS_GEN_I(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001059 cfg |= (1 << 8); /* enab config burst size mask */
1060
Mark Lord0c589122008-01-26 18:31:16 -05001061 else if (IS_GEN_II(hpriv))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001062 cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
1063
1064 else if (IS_GEN_IIE(hpriv)) {
Jeff Garzike728eab2007-02-25 02:53:41 -05001065 cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */
1066 cfg |= (1 << 22); /* enab 4-entry host queue cache */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001067 cfg |= (1 << 18); /* enab early completion */
Jeff Garzike728eab2007-02-25 02:53:41 -05001068 cfg |= (1 << 17); /* enab cut-through (dis stor&forwrd) */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001069 }
1070
Mark Lord72109162008-01-26 18:31:33 -05001071 if (want_ncq) {
1072 cfg |= EDMA_CFG_NCQ;
1073 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
1074 } else
1075 pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
1076
Jeff Garzike4e7b892006-01-31 12:18:41 -05001077 writelfl(cfg, port_mmio + EDMA_CFG_OFS);
1078}
1079
Mark Lordda2fa9b2008-01-26 18:32:45 -05001080static void mv_port_free_dma_mem(struct ata_port *ap)
1081{
1082 struct mv_host_priv *hpriv = ap->host->private_data;
1083 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001084 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001085
1086 if (pp->crqb) {
1087 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1088 pp->crqb = NULL;
1089 }
1090 if (pp->crpb) {
1091 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1092 pp->crpb = NULL;
1093 }
Mark Lordeb73d552008-01-29 13:24:00 -05001094 /*
1095 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1096 * For later hardware, we have one unique sg_tbl per NCQ tag.
1097 */
1098 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1099 if (pp->sg_tbl[tag]) {
1100 if (tag == 0 || !IS_GEN_I(hpriv))
1101 dma_pool_free(hpriv->sg_tbl_pool,
1102 pp->sg_tbl[tag],
1103 pp->sg_tbl_dma[tag]);
1104 pp->sg_tbl[tag] = NULL;
1105 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001106 }
1107}
1108
Brett Russ05b308e2005-10-05 17:08:53 -04001109/**
1110 * mv_port_start - Port specific init/start routine.
1111 * @ap: ATA channel to manipulate
1112 *
1113 * Allocate and point to DMA memory, init port private memory,
1114 * zero indices.
1115 *
1116 * LOCKING:
1117 * Inherited from caller.
1118 */
Brett Russ31961942005-09-30 01:36:00 -04001119static int mv_port_start(struct ata_port *ap)
1120{
Jeff Garzikcca39742006-08-24 03:19:22 -04001121 struct device *dev = ap->host->dev;
1122 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001123 struct mv_port_priv *pp;
1124 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001125 unsigned long flags;
James Bottomleydde20202008-02-19 11:36:56 +01001126 int tag;
Brett Russ31961942005-09-30 01:36:00 -04001127
Tejun Heo24dc5f32007-01-20 16:00:28 +09001128 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001129 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001130 return -ENOMEM;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001131 ap->private_data = pp;
Brett Russ31961942005-09-30 01:36:00 -04001132
Mark Lordda2fa9b2008-01-26 18:32:45 -05001133 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1134 if (!pp->crqb)
1135 return -ENOMEM;
1136 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001137
Mark Lordda2fa9b2008-01-26 18:32:45 -05001138 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1139 if (!pp->crpb)
1140 goto out_port_free_dma_mem;
1141 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001142
Mark Lordeb73d552008-01-29 13:24:00 -05001143 /*
1144 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1145 * For later hardware, we need one unique sg_tbl per NCQ tag.
1146 */
1147 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1148 if (tag == 0 || !IS_GEN_I(hpriv)) {
1149 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1150 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1151 if (!pp->sg_tbl[tag])
1152 goto out_port_free_dma_mem;
1153 } else {
1154 pp->sg_tbl[tag] = pp->sg_tbl[0];
1155 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1156 }
1157 }
Brett Russ31961942005-09-30 01:36:00 -04001158
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001159 spin_lock_irqsave(&ap->host->lock, flags);
1160
Mark Lord72109162008-01-26 18:31:33 -05001161 mv_edma_cfg(pp, hpriv, port_mmio, 0);
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001162 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Brett Russ31961942005-09-30 01:36:00 -04001163
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001164 spin_unlock_irqrestore(&ap->host->lock, flags);
1165
Brett Russ31961942005-09-30 01:36:00 -04001166 /* Don't turn on EDMA here...do it before DMA commands only. Else
1167 * we'll be unable to send non-data, PIO, etc due to restricted access
1168 * to shadow regs.
1169 */
Brett Russ31961942005-09-30 01:36:00 -04001170 return 0;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001171
1172out_port_free_dma_mem:
1173 mv_port_free_dma_mem(ap);
1174 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001175}
1176
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_dma(ap);
	mv_port_free_dma_mem(ap);
}
1191
Brett Russ05b308e2005-10-05 17:08:53 -04001192/**
1193 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1194 * @qc: queued command whose SG list to source from
1195 *
1196 * Populate the SG list and mark the last entry.
1197 *
1198 * LOCKING:
1199 * Inherited from caller.
1200 */
Jeff Garzik6c087722007-10-12 00:16:23 -04001201static void mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001202{
1203 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001204 struct scatterlist *sg;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001205 struct mv_sg *mv_sg, *last_sg = NULL;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001206 unsigned int si;
Brett Russ31961942005-09-30 01:36:00 -04001207
Mark Lordeb73d552008-01-29 13:24:00 -05001208 mv_sg = pp->sg_tbl[qc->tag];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001209 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001210 dma_addr_t addr = sg_dma_address(sg);
1211 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001212
Olof Johansson4007b492007-10-02 20:45:27 -05001213 while (sg_len) {
1214 u32 offset = addr & 0xffff;
1215 u32 len = sg_len;
Brett Russ31961942005-09-30 01:36:00 -04001216
Olof Johansson4007b492007-10-02 20:45:27 -05001217 if ((offset + sg_len > 0x10000))
1218 len = 0x10000 - offset;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001219
Olof Johansson4007b492007-10-02 20:45:27 -05001220 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1221 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
Jeff Garzik6c087722007-10-12 00:16:23 -04001222 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
Olof Johansson4007b492007-10-02 20:45:27 -05001223
1224 sg_len -= len;
1225 addr += len;
1226
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001227 last_sg = mv_sg;
Olof Johansson4007b492007-10-02 20:45:27 -05001228 mv_sg++;
Olof Johansson4007b492007-10-02 20:45:27 -05001229 }
Brett Russ31961942005-09-30 01:36:00 -04001230 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001231
1232 if (likely(last_sg))
1233 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Brett Russ31961942005-09-30 01:36:00 -04001234}
1235
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001236static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001237{
Mark Lord559eeda2006-05-19 16:40:15 -04001238 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001239 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001240 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001241}
1242
Brett Russ05b308e2005-10-05 17:08:53 -04001243/**
1244 * mv_qc_prep - Host specific command preparation.
1245 * @qc: queued command to prepare
1246 *
1247 * This routine simply redirects to the general purpose routine
1248 * if command is not DMA. Else, it handles prep of the CRQB
1249 * (command request block), does some sanity checking, and calls
1250 * the SG load routine.
1251 *
1252 * LOCKING:
1253 * Inherited from caller.
1254 */
Brett Russ31961942005-09-30 01:36:00 -04001255static void mv_qc_prep(struct ata_queued_cmd *qc)
1256{
1257 struct ata_port *ap = qc->ap;
1258 struct mv_port_priv *pp = ap->private_data;
Mark Lorde1469872006-05-22 19:02:03 -04001259 __le16 *cw;
Brett Russ31961942005-09-30 01:36:00 -04001260 struct ata_taskfile *tf;
1261 u16 flags = 0;
Mark Lorda6432432006-05-19 16:36:36 -04001262 unsigned in_index;
Brett Russ31961942005-09-30 01:36:00 -04001263
Mark Lord138bfdd2008-01-26 18:33:18 -05001264 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1265 (qc->tf.protocol != ATA_PROT_NCQ))
Brett Russ31961942005-09-30 01:36:00 -04001266 return;
Brett Russ20f733e2005-09-01 18:26:17 -04001267
Brett Russ31961942005-09-30 01:36:00 -04001268 /* Fill in command request block
1269 */
Jeff Garzike4e7b892006-01-31 12:18:41 -05001270 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
Brett Russ31961942005-09-30 01:36:00 -04001271 flags |= CRQB_FLAG_READ;
Tejun Heobeec7db2006-02-11 19:11:13 +09001272 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Brett Russ31961942005-09-30 01:36:00 -04001273 flags |= qc->tag << CRQB_TAG_SHIFT;
1274
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001275 /* get current queue index from software */
1276 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Brett Russ31961942005-09-30 01:36:00 -04001277
Mark Lorda6432432006-05-19 16:36:36 -04001278 pp->crqb[in_index].sg_addr =
Mark Lordeb73d552008-01-29 13:24:00 -05001279 cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
Mark Lorda6432432006-05-19 16:36:36 -04001280 pp->crqb[in_index].sg_addr_hi =
Mark Lordeb73d552008-01-29 13:24:00 -05001281 cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Mark Lorda6432432006-05-19 16:36:36 -04001282 pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);
1283
1284 cw = &pp->crqb[in_index].ata_cmd[0];
Brett Russ31961942005-09-30 01:36:00 -04001285 tf = &qc->tf;
1286
1287 /* Sadly, the CRQB cannot accomodate all registers--there are
1288 * only 11 bytes...so we must pick and choose required
1289 * registers based on the command. So, we drop feature and
1290 * hob_feature for [RW] DMA commands, but they are needed for
1291 * NCQ. NCQ will drop hob_nsect.
1292 */
1293 switch (tf->command) {
1294 case ATA_CMD_READ:
1295 case ATA_CMD_READ_EXT:
1296 case ATA_CMD_WRITE:
1297 case ATA_CMD_WRITE_EXT:
Jens Axboec15d85c2006-02-15 15:59:25 +01001298 case ATA_CMD_WRITE_FUA_EXT:
Brett Russ31961942005-09-30 01:36:00 -04001299 mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
1300 break;
Brett Russ31961942005-09-30 01:36:00 -04001301 case ATA_CMD_FPDMA_READ:
1302 case ATA_CMD_FPDMA_WRITE:
Jeff Garzik8b260242005-11-12 12:32:50 -05001303 mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
Brett Russ31961942005-09-30 01:36:00 -04001304 mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
1305 break;
Brett Russ31961942005-09-30 01:36:00 -04001306 default:
1307 /* The only other commands EDMA supports in non-queued and
1308 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
1309 * of which are defined/used by Linux. If we get here, this
1310 * driver needs work.
1311 *
1312 * FIXME: modify libata to give qc_prep a return value and
1313 * return error here.
1314 */
1315 BUG_ON(tf->command);
1316 break;
1317 }
1318 mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
1319 mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
1320 mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
1321 mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
1322 mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
1323 mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
1324 mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
1325 mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
1326 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */
1327
Jeff Garzike4e7b892006-01-31 12:18:41 -05001328 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Brett Russ31961942005-09-30 01:36:00 -04001329 return;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001330 mv_fill_sg(qc);
1331}
1332
1333/**
1334 * mv_qc_prep_iie - Host specific command preparation.
1335 * @qc: queued command to prepare
1336 *
1337 * This routine simply redirects to the general purpose routine
1338 * if command is not DMA. Else, it handles prep of the CRQB
1339 * (command request block), does some sanity checking, and calls
1340 * the SG load routine.
1341 *
1342 * LOCKING:
1343 * Inherited from caller.
1344 */
1345static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
1346{
1347 struct ata_port *ap = qc->ap;
1348 struct mv_port_priv *pp = ap->private_data;
1349 struct mv_crqb_iie *crqb;
1350 struct ata_taskfile *tf;
Mark Lorda6432432006-05-19 16:36:36 -04001351 unsigned in_index;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001352 u32 flags = 0;
1353
Mark Lord138bfdd2008-01-26 18:33:18 -05001354 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1355 (qc->tf.protocol != ATA_PROT_NCQ))
Jeff Garzike4e7b892006-01-31 12:18:41 -05001356 return;
1357
Jeff Garzike4e7b892006-01-31 12:18:41 -05001358 /* Fill in Gen IIE command request block
1359 */
1360 if (!(qc->tf.flags & ATA_TFLAG_WRITE))
1361 flags |= CRQB_FLAG_READ;
1362
Tejun Heobeec7db2006-02-11 19:11:13 +09001363 WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001364 flags |= qc->tag << CRQB_TAG_SHIFT;
Mark Lord8c0aeb42008-01-26 18:31:48 -05001365 flags |= qc->tag << CRQB_HOSTQ_SHIFT;
Jeff Garzike4e7b892006-01-31 12:18:41 -05001366
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001367 /* get current queue index from software */
1368 in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
Mark Lorda6432432006-05-19 16:36:36 -04001369
1370 crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
Mark Lordeb73d552008-01-29 13:24:00 -05001371 crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
1372 crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
Jeff Garzike4e7b892006-01-31 12:18:41 -05001373 crqb->flags = cpu_to_le32(flags);
1374
1375 tf = &qc->tf;
1376 crqb->ata_cmd[0] = cpu_to_le32(
1377 (tf->command << 16) |
1378 (tf->feature << 24)
1379 );
1380 crqb->ata_cmd[1] = cpu_to_le32(
1381 (tf->lbal << 0) |
1382 (tf->lbam << 8) |
1383 (tf->lbah << 16) |
1384 (tf->device << 24)
1385 );
1386 crqb->ata_cmd[2] = cpu_to_le32(
1387 (tf->hob_lbal << 0) |
1388 (tf->hob_lbam << 8) |
1389 (tf->hob_lbah << 16) |
1390 (tf->hob_feature << 24)
1391 );
1392 crqb->ata_cmd[3] = cpu_to_le32(
1393 (tf->nsect << 0) |
1394 (tf->hob_nsect << 8)
1395 );
1396
1397 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1398 return;
Brett Russ31961942005-09-30 01:36:00 -04001399 mv_fill_sg(qc);
1400}
1401
Brett Russ05b308e2005-10-05 17:08:53 -04001402/**
1403 * mv_qc_issue - Initiate a command to the host
1404 * @qc: queued command to start
1405 *
1406 * This routine simply redirects to the general purpose routine
1407 * if command is not DMA. Else, it sanity checks our local
1408 * caches of the request producer/consumer indices then enables
1409 * DMA and bumps the request producer index.
1410 *
1411 * LOCKING:
1412 * Inherited from caller.
1413 */
Tejun Heo9a3d9eb2006-01-23 13:09:36 +09001414static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001415{
Jeff Garzikc5d3e452007-07-11 18:30:50 -04001416 struct ata_port *ap = qc->ap;
1417 void __iomem *port_mmio = mv_ap_base(ap);
1418 struct mv_port_priv *pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001419 u32 in_index;
Brett Russ31961942005-09-30 01:36:00 -04001420
Mark Lord138bfdd2008-01-26 18:33:18 -05001421 if ((qc->tf.protocol != ATA_PROT_DMA) &&
1422 (qc->tf.protocol != ATA_PROT_NCQ)) {
Brett Russ31961942005-09-30 01:36:00 -04001423 /* We're about to send a non-EDMA capable command to the
1424 * port. Turn off EDMA so there won't be problems accessing
1425 * shadow block, etc registers.
1426 */
Jeff Garzik0ea9e172007-07-13 17:06:45 -04001427 __mv_stop_dma(ap);
Brett Russ31961942005-09-30 01:36:00 -04001428 return ata_qc_issue_prot(qc);
1429 }
1430
Mark Lord72109162008-01-26 18:31:33 -05001431 mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001432
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001433 pp->req_idx++;
Brett Russ31961942005-09-30 01:36:00 -04001434
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001435 in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
Brett Russ31961942005-09-30 01:36:00 -04001436
1437 /* and write the request in pointer to kick the EDMA to life */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001438 writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
1439 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
Brett Russ31961942005-09-30 01:36:00 -04001440
1441 return 0;
1442}
1443
Brett Russ05b308e2005-10-05 17:08:53 -04001444/**
Brett Russ05b308e2005-10-05 17:08:53 -04001445 * mv_err_intr - Handle error interrupts on the port
1446 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001447 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001448 *
1449 * In most cases, just clear the interrupt and move on. However,
1450 * some cases require an eDMA reset, which is done right before
1451 * the COMRESET in mv_phy_reset(). The SERR case requires a
1452 * clear of pending errors in the SATA SERROR register. Finally,
1453 * if the port disabled DMA, update our cached copy to match.
1454 *
1455 * LOCKING:
1456 * Inherited from caller.
1457 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001458static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001459{
Brett Russ31961942005-09-30 01:36:00 -04001460 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001461 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1462 struct mv_port_priv *pp = ap->private_data;
1463 struct mv_host_priv *hpriv = ap->host->private_data;
1464 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1465 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001466 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001467
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001468 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001469
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001470 if (!edma_enabled) {
1471 /* just a guess: do we need to do this? should we
1472 * expand this, and do it in all cases?
1473 */
Tejun Heo936fd732007-08-06 18:36:23 +09001474 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1475 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001476 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001477
1478 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1479
1480 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1481
1482 /*
1483 * all generations share these EDMA error cause bits
1484 */
1485
1486 if (edma_err_cause & EDMA_ERR_DEV)
1487 err_mask |= AC_ERR_DEV;
1488 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001489 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001490 EDMA_ERR_INTRL_PAR)) {
1491 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001492 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001493 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001494 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001495 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1496 ata_ehi_hotplugged(ehi);
1497 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001498 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001499 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001500 }
1501
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001502 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001503 eh_freeze_mask = EDMA_EH_FREEZE_5;
1504
1505 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001506 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001507 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001508 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001509 }
1510 } else {
1511 eh_freeze_mask = EDMA_EH_FREEZE;
1512
1513 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001514 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001515 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001516 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001517 }
1518
1519 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001520 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1521 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001522 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001523 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001524 }
1525 }
Brett Russ20f733e2005-09-01 18:26:17 -04001526
1527 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001528 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001529
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001530 if (!err_mask) {
1531 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001532 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001533 }
1534
1535 ehi->serror |= serr;
1536 ehi->action |= action;
1537
1538 if (qc)
1539 qc->err_mask |= err_mask;
1540 else
1541 ehi->err_mask |= err_mask;
1542
1543 if (edma_err_cause & eh_freeze_mask)
1544 ata_port_freeze(ap);
1545 else
1546 ata_port_abort(ap);
1547}
1548
1549static void mv_intr_pio(struct ata_port *ap)
1550{
1551 struct ata_queued_cmd *qc;
1552 u8 ata_status;
1553
1554 /* ignore spurious intr if drive still BUSY */
1555 ata_status = readb(ap->ioaddr.status_addr);
1556 if (unlikely(ata_status & ATA_BUSY))
1557 return;
1558
1559 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001560 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001561 if (unlikely(!qc)) /* no active tag */
1562 return;
1563 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1564 return;
1565
1566 /* and finally, complete the ATA command */
1567 qc->err_mask |= ac_err_mask(ata_status);
1568 ata_qc_complete(qc);
1569}
1570
/**
 * mv_intr_edma - Handle EDMA completion interrupts on a port
 * @ap: ATA channel to service
 *
 * Walks the hardware response queue (CRPB ring), completing each
 * finished command until the software consumer index catches up
 * with the hardware producer index.  On the first errored status
 * it hands off to mv_err_intr() and returns immediately.  The
 * hardware out-pointer is written back only once, after the loop,
 * and only if at least one entry was consumed.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* tell hardware how far we have consumed */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1636
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* HC 0 serves the first MV_PORTS_PER_HC ports; HC 1 the next group */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* on non-PCI hosts the port count comes from hpriv->n_ports */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* write-to-clear the bits we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns two bits in 'relevant' (err + done) */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC) {
			shift++;	/* skip bit 8 in the HC Main IRQ reg */
		}
		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands don't belong to us; skip them */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch to the EDMA or PIO completion handler */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1719
/**
 * mv_pci_error - Handle a PCI-level error interrupt
 * @host: host to service
 * @mmio: base address of the host registers
 *
 * Logs and clears the PCI error cause register, then freezes
 * every port whose link is not offline, marking the active
 * command (if any) with AC_ERR_HOST_BUS so EH performs a reset.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* clear the latched error cause */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* describe the cause only once, on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1759
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* take the host lock before touching any shared state */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* a PCI error short-circuits all per-HC servicing */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each HC that has relevant pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1814
Jeff Garzikc9d39132005-11-13 17:47:51 -05001815static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
1816{
1817 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
1818 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
1819
1820 return hc_mmio + ofs;
1821}
1822
1823static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1824{
1825 unsigned int ofs;
1826
1827 switch (sc_reg_in) {
1828 case SCR_STATUS:
1829 case SCR_ERROR:
1830 case SCR_CONTROL:
1831 ofs = sc_reg_in * sizeof(u32);
1832 break;
1833 default:
1834 ofs = 0xffffffffU;
1835 break;
1836 }
1837 return ofs;
1838}
1839
Tejun Heoda3dbb12007-07-16 14:29:40 +09001840static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001841{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001842 struct mv_host_priv *hpriv = ap->host->private_data;
1843 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001844 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001845 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1846
Tejun Heoda3dbb12007-07-16 14:29:40 +09001847 if (ofs != 0xffffffffU) {
1848 *val = readl(addr + ofs);
1849 return 0;
1850 } else
1851 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001852}
1853
Tejun Heoda3dbb12007-07-16 14:29:40 +09001854static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001855{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001856 struct mv_host_priv *hpriv = ap->host->private_data;
1857 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001858 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001859 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1860
Tejun Heoda3dbb12007-07-16 14:29:40 +09001861 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001862 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001863 return 0;
1864 } else
1865 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001866}
1867
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001868static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001869{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001870 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001871 int early_5080;
1872
Auke Kok44c10132007-06-08 15:46:36 -07001873 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001874
1875 if (!early_5080) {
1876 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1877 tmp |= (1 << 0);
1878 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1879 }
1880
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001881 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001882}
1883
/* 50xx: program MV_FLASH_CTL with its (magic) reset value */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1888
Jeff Garzik47c2b672005-11-12 21:13:17 -05001889static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001890 void __iomem *mmio)
1891{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001892 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1893 u32 tmp;
1894
1895 tmp = readl(phy_mmio + MV5_PHY_MODE);
1896
1897 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1898 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001899}
1900
/* 50xx: set up the LED/GPIO port control and ROM BAR control registers */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which
	 * looks like it may have been intended as "&= ~(1 << 0)" (clear
	 * bit 0).  Left as-is; confirm against the Marvell datasheet
	 * before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1913
/* 50xx: apply PHY errata fixups for one port.  When the 50XXB0
 * erratum flag is set, the LT-mode and PHY control registers are
 * adjusted first; then the cached pre-emphasis/amplitude values
 * (from mv5_read_preamp()) are merged into MV5_PHY_MODE.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* bits 12:11 (pre) and 7:5 (amps) get replaced below */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* replace the pre/amps fields with the cached values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1939
Jeff Garzikc9d39132005-11-13 17:47:51 -05001940
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* 50xx: bring one port back to a clean state: disable EDMA, reset
 * the channel, then zero the per-port EDMA registers and restore
 * EDMA_CFG and the IORDY timeout to their startup values.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1967
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* 50xx: zero a set of per-HC registers, then read-modify-write the
 * register at HC offset 0x20.
 * NOTE(review): the 0x20 field semantics are not visible here; the
 * 0x1c1c1c1c mask / 0x03030303 value pattern presumably follows the
 * Marvell vendor driver -- confirm against the datasheet.
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1986
1987static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1988 unsigned int n_hc)
1989{
1990 unsigned int hc, port;
1991
1992 for (hc = 0; hc < n_hc; hc++) {
1993 for (port = 0; port < MV_PORTS_PER_HC; port++)
1994 mv5_reset_hc_port(hpriv, mmio,
1995 (hc * MV_PORTS_PER_HC) + port);
1996
1997 mv5_reset_one_hc(hpriv, mmio, hc);
1998 }
1999
2000 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002001}
2002
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Quiesce the PCI interface: mask off the upper PCI-mode byte,
 * zero the timers, interrupt masks and error-latch registers, and
 * restore the crossbar timeout default.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;	/* clear bits 23:16 */
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);	/* chip-specific irq cause offset */
	ZERO(hpriv->irq_mask_ofs);	/* chip-specific irq mask offset */
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2027
2028static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
2029{
2030 u32 tmp;
2031
2032 mv5_reset_flash(hpriv, mmio);
2033
2034 tmp = readl(mmio + MV_GPIO_PORT_CTL);
2035 tmp &= 0x3;
2036 tmp |= (1 << 5) | (1 << 6);
2037 writel(tmp, mmio + MV_GPIO_PORT_CTL);
2038}
2039
/**
 * mv6_reset_hc - Perform the 6xxx global soft reset
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA
 * @n_hc: number of host controllers (unused here)
 *
 * This routine only applies to 6xxx parts.  Returns 0 on success,
 * 1 if any step of the reset procedure times out.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* wait up to ~1ms for the PCI master to go idle */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (retry up to 5 times) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
done:
	return rc;
}
2103
Jeff Garzik47c2b672005-11-12 21:13:17 -05002104static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002105 void __iomem *mmio)
2106{
2107 void __iomem *port_mmio;
2108 u32 tmp;
2109
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002110 tmp = readl(mmio + MV_RESET_CFG);
2111 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002112 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002113 hpriv->signal[idx].pre = 0x1 << 5;
2114 return;
2115 }
2116
2117 port_mmio = mv_port_base(mmio, idx);
2118 tmp = readl(port_mmio + PHY_MODE2);
2119
2120 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2121 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2122}
2123
/* Enable the board LEDs: 0x60 == (1 << 5) | (1 << 6), the same GPIO
 * control bits mv6_reset_flash sets; all other bits are cleared.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2128
/* Apply 60x1 (Gen-II/IIE) PHY errata workarounds to one port: massage
 * PHY_MODE2/3/4 per the 60X1B2/60X1C0 errata, then restore the
 * pre-emphasis/amplitude values captured earlier by read_preamp.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	/* both fixups trigger on the same errata revs; kept separate
	 * so future errata can diverge without restructuring
	 */
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 (with bit 16 cleared), 200us each phase */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2, register 0x310 must be saved before and
		 * restored after the PHY_MODE4 update (tmp reused as scratch)
		 */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + 0x310);

		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + 0x310);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2194
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002195/* TODO: use the generic LED interface to configure the SATA Presence */
2196/* & Acitivy LEDs on the board */
2197static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2198 void __iomem *mmio)
2199{
2200 return;
2201}
2202
2203static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2204 void __iomem *mmio)
2205{
2206 void __iomem *port_mmio;
2207 u32 tmp;
2208
2209 port_mmio = mv_port_base(mmio, idx);
2210 tmp = readl(port_mmio + PHY_MODE2);
2211
2212 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2213 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2214}
2215
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Quiesce and re-initialize one SoC SATA port: disable EDMA, reset the
 * channel, then zero/program the per-port EDMA registers in the same
 * order as the 5xxx/6xxx reset paths (order preserved deliberately).
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/* disable EDMA before touching the channel */
	writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);

	mv_channel_reset(hpriv, mmio, port);

	ZERO(0x028); /* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004); /* timer */
	ZERO(0x008); /* irq err cause */
	ZERO(0x00c); /* irq err mask */
	ZERO(0x010); /* rq bah */
	ZERO(0x014); /* rq inp */
	ZERO(0x018); /* rq outp */
	ZERO(0x01c); /* respq bah */
	ZERO(0x024); /* respq outp */
	ZERO(0x020); /* respq inp */
	ZERO(0x02c); /* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2241
#undef ZERO

#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Zero three host-controller-level registers of the (single) SoC HC.
 * NOTE(review): offsets 0x00c/0x010/0x014 are presumably IRQ/coalescing
 * control -- register names are not visible in this hunk; confirm
 * against the HC register map before changing.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2255
2256#undef ZERO
2257
2258static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2259 void __iomem *mmio, unsigned int n_hc)
2260{
2261 unsigned int port;
2262
2263 for (port = 0; port < hpriv->n_ports; port++)
2264 mv_soc_reset_hc_port(hpriv, mmio, port);
2265
2266 mv_soc_reset_one_hc(hpriv, mmio);
2267
2268 return 0;
2269}
2270
2271static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2272 void __iomem *mmio)
2273{
2274 return;
2275}
2276
2277static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2278{
2279 return;
2280}
2281
/* Hard-reset one SATA channel: assert the EDMA ATA_RST bit, fix up the
 * SATA interface control register on Gen-II parts, release the reset,
 * and re-apply the chip-specific PHY errata workarounds.
 */
static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (IS_GEN_II(hpriv)) {
		u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
		ifctl |= (1 << 7);	/* enable gen2i speed */
		ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
		writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
	}

	udelay(25);		/* allow reset propagation */

	/* Spec never mentions clearing the bit.  Marvell's driver does
	 * clear the bit, however.
	 */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	/* reset disturbs the PHY; re-apply the errata workarounds */
	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen-I parts get an extra settle delay -- rationale not
	 * documented here; TODO confirm against 50xx errata
	 */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2308
/**
 *      mv_phy_reset - Perform eDMA reset followed by COMRESET
 *      @ap: ATA channel to manipulate
 *      @class: out: device class from signature (ATA_DEV_NONE if offline)
 *      @deadline: jiffies value bounding the link-ready polling loops
 *
 *      Part of this is taken from __sata_phy_reset.
 *
 *      LOCKING:
 *      Inherited from caller.  NOTE(review): older comments claimed this
 *      never sleeps, but the current implementation calls msleep() --
 *      it must NOT be called from interrupt/atomic context.
 */
static void mv_phy_reset(struct ata_port *ap, unsigned int *class,
			unsigned long deadline)
{
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio = mv_ap_base(ap);
	int retry = 5;
	u32 sstatus;

	VPRINTK("ENTER, port %u, mmio 0x%p\n", ap->port_no, port_mmio);

#ifdef DEBUG
	{
		/* shadows the outer sstatus; debug-only snapshot */
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after ATA_RST: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	/* Issue COMRESET via SControl */
comreset_retry:
	/* DET=1: assert COMRESET */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x301);
	msleep(1);

	/* DET=0: release COMRESET, let the link renegotiate */
	sata_scr_write_flush(&ap->link, SCR_CONTROL, 0x300);
	msleep(20);

	/* poll SStatus DET until link established (3) or no device (0) */
	do {
		sata_scr_read(&ap->link, SCR_STATUS, &sstatus);
		if (((sstatus & 0x3) == 3) || ((sstatus & 0x3) == 0))
			break;

		msleep(1);
	} while (time_before(jiffies, deadline));

	/* work around errata */
	if (IS_GEN_II(hpriv) &&
	    (sstatus != 0x0) && (sstatus != 0x113) && (sstatus != 0x123) &&
	    (retry-- > 0))
		goto comreset_retry;

#ifdef DEBUG
	{
		u32 sstatus, serror, scontrol;

		mv_scr_read(ap, SCR_STATUS, &sstatus);
		mv_scr_read(ap, SCR_ERROR, &serror);
		mv_scr_read(ap, SCR_CONTROL, &scontrol);
		DPRINTK("S-regs after PHY wake: SStat 0x%08x SErr 0x%08x "
			"SCtrl 0x%08x\n", sstatus, serror, scontrol);
	}
#endif

	if (ata_link_offline(&ap->link)) {
		*class = ATA_DEV_NONE;
		return;
	}

	/* even after SStatus reflects that device is ready,
	 * it seems to take a while for link to be fully
	 * established (and thus Status no longer 0x80/0x7F),
	 * so we poll a bit for that, here.
	 */
	retry = 20;
	while (1) {
		u8 drv_stat = ata_check_status(ap);
		if ((drv_stat != 0x80) && (drv_stat != 0x7f))
			break;
		msleep(500);
		if (retry-- <= 0)
			break;
		if (time_after(jiffies, deadline))
			break;
	}

	/* FIXME: if we passed the deadline, the following
	 * code probably produces an invalid result
	 */

	/* finally, read device signature from TF registers */
	*class = ata_dev_try_classify(ap->link.device, 1, NULL);

	/* discard any EDMA error state accumulated during the reset */
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* EDMA must have been stopped by the caller before resetting */
	WARN_ON(pp->pp_flags & MV_PP_FLAG_EDMA_EN);

	VPRINTK("EXIT\n");
}
2412
Tejun Heocc0680a2007-08-06 18:36:23 +09002413static int mv_prereset(struct ata_link *link, unsigned long deadline)
Jeff Garzik22374672005-11-17 10:59:48 -05002414{
Tejun Heocc0680a2007-08-06 18:36:23 +09002415 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002416 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002417
Tejun Heocf480622008-01-24 00:05:14 +09002418 mv_stop_dma(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002419
Tejun Heocf480622008-01-24 00:05:14 +09002420 if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET))
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002421 pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002422
Tejun Heocf480622008-01-24 00:05:14 +09002423 return 0;
Jeff Garzik22374672005-11-17 10:59:48 -05002424}
2425
/* libata hardreset hook: stop EDMA, hard-reset the channel hardware,
 * then COMRESET the PHY and classify the attached device via @class.
 * Always returns 0; call order (stop -> channel reset -> phy reset)
 * is deliberate and must be preserved.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;

	mv_stop_dma(ap);

	mv_channel_reset(hpriv, mmio, ap->port_no);

	mv_phy_reset(ap, class, deadline);

	return 0;
}
2441
/* libata postreset hook: report link status, clear accumulated SError
 * bits (write-back of the read value), and program the device control
 * register unless no device was found on either link class slot.
 */
static void mv_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serr;

	/* print link status */
	sata_print_link_status(link);

	/* clear SError */
	sata_scr_read(link, SCR_ERROR, &serr);
	sata_scr_write_flush(link, SCR_ERROR, serr);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
}
2463
/* libata error-handler entry point: run standard EH with this driver's
 * prereset/hardreset/postreset hooks; softreset is libata's stock one.
 */
static void mv_error_handler(struct ata_port *ap)
{
	ata_do_eh(ap, mv_prereset, ata_std_softreset,
		  mv_hardreset, mv_postreset);
}
2469
/* libata freeze hook: mask this port's err+done bits in the chip's
 * main interrupt mask so the port raises no further interrupts.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	/* ports 0-3 live on HC 0, ports 4+ on HC 1 */
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	u32 tmp, mask;
	unsigned int shift;

	/* FIXME: handle coalescing completion events properly */

	/* each port owns two adjacent mask bits (err, done); ports on
	 * the second HC are offset by one extra bit
	 */
	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;

	mask = 0x3 << shift;

	/* disable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
}
2489
/* libata thaw hook: clear any stale EDMA/HC interrupt state for this
 * port, then re-enable its err+done bits in the main interrupt mask
 * (mirror image of mv_eh_freeze's shift computation).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 tmp, mask, hc_irq_cause;
	/* port number relative to its host controller (0-3) */
	unsigned int shift, hc_port_no = ap->port_no;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;
		hc_port_no -= 4;
	}

	mask = 0x3 << shift;

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	tmp = readl(hpriv->main_mask_reg_addr);
	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
}
2523
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: the taskfile shadow registers live in the
	 * SHD block, one u32 slot per ATA register.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions
	 * (writing the read-back SERR value clears the latched bits)
	 */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2572
/* Identify the controller variant: select the hw-ops vtable, set the
 * generation and per-revision errata flags, and choose PCI vs PCIe
 * interrupt register offsets.  Returns 0 on success, 1 on a bogus
 * board index.
 *
 * NOTE(review): pdev is computed unconditionally via to_pci_dev() but
 * is only dereferenced in the PCI board cases; chip_soc never uses it.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 is otherwise handled like 6042 */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe parts expose the IRQ cause/mask registers at different
	 * offsets than conventional-PCI parts
	 */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs	= PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs	= PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs	= PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs	= PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2712
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Returns 0 on success, nonzero from mv_chip_id()/reset_hc()
 *      on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* PCI hosts and SoC hosts expose the main IRQ cause/mask
	 * registers at different offsets; cache the resolved addresses
	 */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = hpriv->base +
		  HC_SOC_MAIN_IRQ_MASK_OFS;
	}
	/* global interrupt mask: silence everything until setup is done */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save per-port PHY settings before the resets disturb them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		if (IS_GEN_II(hpriv)) {
			void __iomem *port_mmio = mv_port_base(mmio, port);

			u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL);
			ifctl |= (1 << 7);	/* enable gen2i speed */
			ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */
			writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL);
		}

		hpriv->ops->phy_errata(hpriv, mmio, port);
	}

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2829
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002830static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2831{
2832 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2833 MV_CRQB_Q_SZ, 0);
2834 if (!hpriv->crqb_pool)
2835 return -ENOMEM;
2836
2837 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2838 MV_CRPB_Q_SZ, 0);
2839 if (!hpriv->crpb_pool)
2840 return -ENOMEM;
2841
2842 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2843 MV_SG_TBL_SZ, 0);
2844 if (!hpriv->sg_tbl_pool)
2845 return -ENOMEM;
2846
2847 return 0;
2848}
2849
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002850/**
2851 * mv_platform_probe - handle a positive probe of an soc Marvell
2852 * host
2853 * @pdev: platform device found
2854 *
2855 * LOCKING:
2856 * Inherited from caller.
2857 */
2858static int mv_platform_probe(struct platform_device *pdev)
2859{
2860 static int printed_version;
2861 const struct mv_sata_platform_data *mv_platform_data;
2862 const struct ata_port_info *ppi[] =
2863 { &mv_port_info[chip_soc], NULL };
2864 struct ata_host *host;
2865 struct mv_host_priv *hpriv;
2866 struct resource *res;
2867 int n_ports, rc;
2868
2869 if (!printed_version++)
2870 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2871
2872 /*
2873 * Simple resource validation ..
2874 */
2875 if (unlikely(pdev->num_resources != 2)) {
2876 dev_err(&pdev->dev, "invalid number of resources\n");
2877 return -EINVAL;
2878 }
2879
2880 /*
2881 * Get the register base first
2882 */
2883 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2884 if (res == NULL)
2885 return -EINVAL;
2886
2887 /* allocate host */
2888 mv_platform_data = pdev->dev.platform_data;
2889 n_ports = mv_platform_data->n_ports;
2890
2891 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2892 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2893
2894 if (!host || !hpriv)
2895 return -ENOMEM;
2896 host->private_data = hpriv;
2897 hpriv->n_ports = n_ports;
2898
2899 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002900 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2901 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002902 hpriv->base -= MV_SATAHC0_REG_BASE;
2903
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002904 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2905 if (rc)
2906 return rc;
2907
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002908 /* initialize adapter */
2909 rc = mv_init_host(host, chip_soc);
2910 if (rc)
2911 return rc;
2912
2913 dev_printk(KERN_INFO, &pdev->dev,
2914 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2915 host->n_ports);
2916
2917 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2918 IRQF_SHARED, &mv6_sht);
2919}
2920
2921/*
2922 *
2923 * mv_platform_remove - unplug a platform interface
2924 * @pdev: platform device
2925 *
2926 * A platform bus SATA device has been unplugged. Perform the needed
2927 * cleanup. Also called on module unload for any active devices.
2928 */
2929static int __devexit mv_platform_remove(struct platform_device *pdev)
2930{
2931 struct device *dev = &pdev->dev;
2932 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002933
2934 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002935 return 0;
2936}
2937
/* Platform-bus glue for the SoC-integrated (non-PCI) SATA controllers. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2946
2947
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002948#ifdef CONFIG_PCI
/* Forward declaration: the PCI probe routine is defined later in this file. */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


/* PCI-bus glue for the discrete Marvell SATA chips (see mv_pci_tbl). */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2959
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2964
2965
2966/* move to PCI layer or libata core? */
2967static int pci_go_64(struct pci_dev *pdev)
2968{
2969 int rc;
2970
2971 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2972 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2973 if (rc) {
2974 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2975 if (rc) {
2976 dev_printk(KERN_ERR, &pdev->dev,
2977 "64-bit DMA enable failed\n");
2978 return rc;
2979 }
2980 }
2981 } else {
2982 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2983 if (rc) {
2984 dev_printk(KERN_ERR, &pdev->dev,
2985 "32-bit DMA enable failed\n");
2986 return rc;
2987 }
2988 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2989 if (rc) {
2990 dev_printk(KERN_ERR, &pdev->dev,
2991 "32-bit consistent DMA enable failed\n");
2992 return rc;
2993 }
2994 }
2995
2996 return rc;
2997}
2998
Brett Russ05b308e2005-10-05 17:08:53 -04002999/**
3000 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09003001 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04003002 *
3003 * FIXME: complete this.
3004 *
3005 * LOCKING:
3006 * Inherited from caller.
3007 */
Tejun Heo4447d352007-04-17 23:44:08 +09003008static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04003009{
Tejun Heo4447d352007-04-17 23:44:08 +09003010 struct pci_dev *pdev = to_pci_dev(host->dev);
3011 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07003012 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003013 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04003014
3015 /* Use this to determine the HW stepping of the chip so we know
3016 * what errata to workaround
3017 */
Brett Russ31961942005-09-30 01:36:00 -04003018 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
3019 if (scc == 0)
3020 scc_s = "SCSI";
3021 else if (scc == 0x01)
3022 scc_s = "RAID";
3023 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003024 scc_s = "?";
3025
3026 if (IS_GEN_I(hpriv))
3027 gen = "I";
3028 else if (IS_GEN_II(hpriv))
3029 gen = "II";
3030 else if (IS_GEN_IIE(hpriv))
3031 gen = "IIE";
3032 else
3033 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04003034
Jeff Garzika9524a72005-10-30 14:39:11 -05003035 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04003036 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
3037 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04003038 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3039}
3040
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host and private data, acquires PCI resources,
 *      configures DMA masks, creates the DMA pools, initializes the
 *      adapter for the matched chip variant and activates the host.
 *      NOTE: the call order below (enable -> iomap -> DMA mask -> pools
 *      -> init -> IRQ) is significant; do not reorder.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* chip variant index stashed in the PCI ID table entry */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* region held by someone else: pin the device enabled */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* if MSI was requested but could not be enabled, force legacy INTx */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	/* Gen-I chips use the reduced sht (no NCQ-sized queue) */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003110#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003111
/* NOTE(review): redundant forward declarations -- both functions are
 * already defined earlier in this file; harmless but removable. */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3114
/*
 * mv_init - module load entry point.
 *
 * Registers the PCI driver (when built with CONFIG_PCI) and then the
 * platform driver.  If the platform registration fails, the PCI
 * registration is rolled back so the module load fails cleanly.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* undo the PCI registration if the platform driver failed */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3131
/* mv_exit - module unload: unregister both drivers (reverse of mv_init). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3139
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);	/* autoload on matching PCI IDs */
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:sata_mv");	/* autoload for the SoC platform device */

#ifdef CONFIG_PCI
/* the "msi" option only exists for PCI hosts */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);