blob: 05ff8c776497be69a2b05846674ac94574682c86 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
38 with controllers that suppport it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
Mark Lorde49856d2008-04-16 14:59:07 -040043 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
Jeff Garzik4a05e202007-05-24 23:40:15 -040044
Mark Lord40f0bc22008-04-16 14:57:25 -040045 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
46
Jeff Garzik4a05e202007-05-24 23:40:15 -040047 8) Develop a low-power-consumption strategy, and implement it.
48
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 like that.
52
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is quite often not
56 worth the latency cost.
57
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
61
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
64
Jeff Garzik4a05e202007-05-24 23:40:15 -040065*/
66
Brett Russ20f733e2005-09-01 18:26:17 -040067#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/init.h>
71#include <linux/blkdev.h>
72#include <linux/delay.h>
73#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080074#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050077#include <linux/platform_device.h>
78#include <linux/ata_platform.h>
Brett Russ20f733e2005-09-01 18:26:17 -040079#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050080#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040081#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040082#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040083
84#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050085#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040086
87enum {
88 /* BAR's are enumerated in terms of pci_resource_start() terms */
89 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
90 MV_IO_BAR = 2, /* offset 0x18: IO space */
91 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
92
93 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
94 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
95
96 MV_PCI_REG_BASE = 0,
97 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040098 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
99 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
100 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
101 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
102 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
103
Brett Russ20f733e2005-09-01 18:26:17 -0400104 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500105 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500106 MV_GPIO_PORT_CTL = 0x104f0,
107 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400108
109 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
110 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
112 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
113
Brett Russ31961942005-09-30 01:36:00 -0400114 MV_MAX_Q_DEPTH = 32,
115 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
116
117 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
118 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400119 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
120 */
121 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
122 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500123 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400124 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400125
Brett Russ20f733e2005-09-01 18:26:17 -0400126 MV_PORTS_PER_HC = 4,
127 /* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
128 MV_PORT_HC_SHIFT = 2,
Brett Russ31961942005-09-30 01:36:00 -0400129 /* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
Brett Russ20f733e2005-09-01 18:26:17 -0400130 MV_PORT_MASK = 3,
131
132 /* Host Flags */
133 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
134 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100135 /* SoC integrated controllers, no PCI interface */
Mark Lorde12bef52008-03-31 19:33:56 -0400136 MV_FLAG_SOC = (1 << 28),
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100137
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400138 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400139 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
140 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500141 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400142
Brett Russ31961942005-09-30 01:36:00 -0400143 CRQB_FLAG_READ = (1 << 0),
144 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400145 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400146 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400147 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400148 CRQB_CMD_ADDR_SHIFT = 8,
149 CRQB_CMD_CS = (0x2 << 11),
150 CRQB_CMD_LAST = (1 << 15),
151
152 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400153 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
154 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400155
156 EPRD_FLAG_END_OF_TBL = (1 << 31),
157
Brett Russ20f733e2005-09-01 18:26:17 -0400158 /* PCI interface registers */
159
Brett Russ31961942005-09-30 01:36:00 -0400160 PCI_COMMAND_OFS = 0xc00,
161
Brett Russ20f733e2005-09-01 18:26:17 -0400162 PCI_MAIN_CMD_STS_OFS = 0xd30,
163 STOP_PCI_MASTER = (1 << 2),
164 PCI_MASTER_EMPTY = (1 << 3),
165 GLOB_SFT_RST = (1 << 4),
166
Jeff Garzik522479f2005-11-12 22:14:02 -0500167 MV_PCI_MODE = 0xd00,
168 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
169 MV_PCI_DISC_TIMER = 0xd04,
170 MV_PCI_MSI_TRIGGER = 0xc38,
171 MV_PCI_SERR_MASK = 0xc28,
172 MV_PCI_XBAR_TMOUT = 0x1d04,
173 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
174 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
175 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
176 MV_PCI_ERR_COMMAND = 0x1d50,
177
Mark Lord02a121d2007-12-01 13:07:22 -0500178 PCI_IRQ_CAUSE_OFS = 0x1d58,
179 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400180 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
181
Mark Lord02a121d2007-12-01 13:07:22 -0500182 PCIE_IRQ_CAUSE_OFS = 0x1900,
183 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500184 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500185
Brett Russ20f733e2005-09-01 18:26:17 -0400186 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
187 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500188 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
189 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Brett Russ20f733e2005-09-01 18:26:17 -0400190 PORT0_ERR = (1 << 0), /* shift by port # */
191 PORT0_DONE = (1 << 1), /* shift by port # */
192 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
193 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
194 PCI_ERR = (1 << 18),
195 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
196 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500197 PORTS_0_3_COAL_DONE = (1 << 8),
198 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400199 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
200 GPIO_INT = (1 << 22),
201 SELF_INT = (1 << 23),
202 TWSI_INT = (1 << 24),
203 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500204 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400205 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500206 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400207 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
208 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500209 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
210 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500211 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400212
213 /* SATAHC registers */
214 HC_CFG_OFS = 0,
215
216 HC_IRQ_CAUSE_OFS = 0x14,
Brett Russ31961942005-09-30 01:36:00 -0400217 CRPB_DMA_DONE = (1 << 0), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400218 HC_IRQ_COAL = (1 << 4), /* IRQ coalescing */
219 DEV_IRQ = (1 << 8), /* shift by port # */
220
221 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400222 SHD_BLK_OFS = 0x100,
223 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400224
225 /* SATA registers */
226 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
227 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500228 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Mark Lord17c5aab2008-04-16 14:56:51 -0400229
Mark Lorde12bef52008-03-31 19:33:56 -0400230 LTMODE_OFS = 0x30c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400231 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
232
Jeff Garzik47c2b672005-11-12 21:13:17 -0500233 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500234 PHY_MODE4 = 0x314,
235 PHY_MODE2 = 0x330,
Mark Lorde12bef52008-03-31 19:33:56 -0400236 SATA_IFCTL_OFS = 0x344,
237 SATA_IFSTAT_OFS = 0x34c,
238 VENDOR_UNIQUE_FIS_OFS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400239
Mark Lorde12bef52008-03-31 19:33:56 -0400240 FIS_CFG_OFS = 0x360,
Mark Lord17c5aab2008-04-16 14:56:51 -0400241 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
242
Jeff Garzikc9d39132005-11-13 17:47:51 -0500243 MV5_PHY_MODE = 0x74,
244 MV5_LT_MODE = 0x30,
245 MV5_PHY_CTL = 0x0C,
Mark Lorde12bef52008-03-31 19:33:56 -0400246 SATA_INTERFACE_CFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500247
248 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400249
250 /* Port registers */
251 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500252 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
253 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
254 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
255 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
256 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400257 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
258 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400259
260 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
261 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400262 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
263 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
264 EDMA_ERR_DEV = (1 << 2), /* device error */
265 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
266 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
267 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400268 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
269 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400270 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400271 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
273 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
274 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
275 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500276
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400277 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500278 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
279 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
280 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
281 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
282
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400283 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500284
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400285 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500286 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
287 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
288 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
289 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
290 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
291
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400292 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500293
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400295 EDMA_ERR_OVERRUN_5 = (1 << 5),
296 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500297
298 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
299 EDMA_ERR_LNK_CTRL_RX_1 |
300 EDMA_ERR_LNK_CTRL_RX_3 |
Mark Lord40f0bc22008-04-16 14:57:25 -0400301 EDMA_ERR_LNK_CTRL_TX |
302 /* temporary, until we fix hotplug: */
303 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
Mark Lord646a4da2008-01-26 18:30:37 -0500304
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400305 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
306 EDMA_ERR_PRD_PAR |
307 EDMA_ERR_DEV_DCON |
308 EDMA_ERR_DEV_CON |
309 EDMA_ERR_SERR |
310 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400311 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400312 EDMA_ERR_CRPB_PAR |
313 EDMA_ERR_INTRL_PAR |
314 EDMA_ERR_IORDY |
315 EDMA_ERR_LNK_CTRL_RX_2 |
316 EDMA_ERR_LNK_DATA_RX |
317 EDMA_ERR_LNK_DATA_TX |
318 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400319
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400320 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
321 EDMA_ERR_PRD_PAR |
322 EDMA_ERR_DEV_DCON |
323 EDMA_ERR_DEV_CON |
324 EDMA_ERR_OVERRUN_5 |
325 EDMA_ERR_UNDERRUN_5 |
326 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400327 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400328 EDMA_ERR_CRPB_PAR |
329 EDMA_ERR_INTRL_PAR |
330 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400331
Brett Russ31961942005-09-30 01:36:00 -0400332 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
333 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400334
335 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
336 EDMA_REQ_Q_PTR_SHIFT = 5,
337
338 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
339 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
340 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400341 EDMA_RSP_Q_PTR_SHIFT = 3,
342
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400343 EDMA_CMD_OFS = 0x28, /* EDMA command register */
344 EDMA_EN = (1 << 0), /* enable EDMA */
345 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
346 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400347
Jeff Garzikc9d39132005-11-13 17:47:51 -0500348 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500349 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500350
Brett Russ31961942005-09-30 01:36:00 -0400351 /* Host private flags (hp_flags) */
352 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500353 MV_HP_ERRATA_50XXB0 = (1 << 1),
354 MV_HP_ERRATA_50XXB2 = (1 << 2),
355 MV_HP_ERRATA_60X1B2 = (1 << 3),
356 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500357 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400358 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
359 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
360 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500361 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400362
Brett Russ31961942005-09-30 01:36:00 -0400363 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400364 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500365 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Brett Russ31961942005-09-30 01:36:00 -0400366};
367
Jeff Garzikee9ccdf2007-07-12 15:51:22 -0400368#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
369#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
Jeff Garzike4e7b892006-01-31 12:18:41 -0500370#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100371#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500372
Jeff Garzik095fec82005-11-12 09:50:49 -0500373enum {
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400374 /* DMA boundary 0xffff is required by the s/g splitting
375 * we need on /length/ in mv_fill-sg().
376 */
377 MV_DMA_BOUNDARY = 0xffffU,
Jeff Garzik095fec82005-11-12 09:50:49 -0500378
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400379 /* mask of register bits containing lower 32 bits
380 * of EDMA request queue DMA address
381 */
Jeff Garzik095fec82005-11-12 09:50:49 -0500382 EDMA_REQ_Q_BASE_LO_MASK = 0xfffffc00U,
383
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400384 /* ditto, for response queue */
Jeff Garzik095fec82005-11-12 09:50:49 -0500385 EDMA_RSP_Q_BASE_LO_MASK = 0xffffff00U,
386};
387
Jeff Garzik522479f2005-11-12 22:14:02 -0500388enum chip_type {
389 chip_504x,
390 chip_508x,
391 chip_5080,
392 chip_604x,
393 chip_608x,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500394 chip_6042,
395 chip_7042,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500396 chip_soc,
Jeff Garzik522479f2005-11-12 22:14:02 -0500397};
398
Brett Russ31961942005-09-30 01:36:00 -0400399/* Command ReQuest Block: 32B */
400struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400401 __le32 sg_addr;
402 __le32 sg_addr_hi;
403 __le16 ctrl_flags;
404 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400405};
406
Jeff Garzike4e7b892006-01-31 12:18:41 -0500407struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400408 __le32 addr;
409 __le32 addr_hi;
410 __le32 flags;
411 __le32 len;
412 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500413};
414
Brett Russ31961942005-09-30 01:36:00 -0400415/* Command ResPonse Block: 8B */
416struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400417 __le16 id;
418 __le16 flags;
419 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400420};
421
422/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
423struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400424 __le32 addr;
425 __le32 flags_size;
426 __le32 addr_hi;
427 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400428};
429
430struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400431 struct mv_crqb *crqb;
432 dma_addr_t crqb_dma;
433 struct mv_crpb *crpb;
434 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500435 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
436 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400437
438 unsigned int req_idx;
439 unsigned int resp_idx;
440
Brett Russ31961942005-09-30 01:36:00 -0400441 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400442};
443
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500444struct mv_port_signal {
445 u32 amps;
446 u32 pre;
447};
448
Mark Lord02a121d2007-12-01 13:07:22 -0500449struct mv_host_priv {
450 u32 hp_flags;
451 struct mv_port_signal signal[8];
452 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500453 int n_ports;
454 void __iomem *base;
455 void __iomem *main_cause_reg_addr;
456 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500457 u32 irq_cause_ofs;
458 u32 irq_mask_ofs;
459 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500460 /*
461 * These consistent DMA memory pools give us guaranteed
462 * alignment for hardware-accessed data structures,
463 * and less memory waste in accomplishing the alignment.
464 */
465 struct dma_pool *crqb_pool;
466 struct dma_pool *crpb_pool;
467 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500468};
469
Jeff Garzik47c2b672005-11-12 21:13:17 -0500470struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500471 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
472 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500473 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
474 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
475 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500476 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
477 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500478 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100479 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480};
481
Tejun Heoda3dbb12007-07-16 14:29:40 +0900482static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
483static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
484static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
485static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400486static int mv_port_start(struct ata_port *ap);
487static void mv_port_stop(struct ata_port *ap);
488static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500489static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900490static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900491static int mv_hardreset(struct ata_link *link, unsigned int *class,
492 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400493static void mv_eh_freeze(struct ata_port *ap);
494static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500495static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400496
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500497static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
498 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500499static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
500static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
501 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500502static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
503 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500504static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100505static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500506
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500507static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
508 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500509static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
510static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
511 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500512static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
513 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500514static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500515static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
516 void __iomem *mmio);
517static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
518 void __iomem *mmio);
519static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
520 void __iomem *mmio, unsigned int n_hc);
521static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
522 void __iomem *mmio);
523static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100524static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400525static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500526 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400527static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400528static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400529static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500530
Mark Lorde49856d2008-04-16 14:59:07 -0400531static void mv_pmp_select(struct ata_port *ap, int pmp);
532static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
533 unsigned long deadline);
534static int mv_softreset(struct ata_link *link, unsigned int *class,
535 unsigned long deadline);
536
Mark Lordeb73d552008-01-29 13:24:00 -0500537/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
538 * because we have to allow room for worst case splitting of
539 * PRDs for 64K boundaries in mv_fill_sg().
540 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400541static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900542 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400543 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400544 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400545};
546
547static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900548 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500549 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400550 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400551 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400552};
553
Tejun Heo029cfd62008-03-25 12:22:49 +0900554static struct ata_port_operations mv5_ops = {
555 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500556
Jeff Garzikc9d39132005-11-13 17:47:51 -0500557 .qc_prep = mv_qc_prep,
558 .qc_issue = mv_qc_issue,
559
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400560 .freeze = mv_eh_freeze,
561 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900562 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900563 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900564 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400565
Jeff Garzikc9d39132005-11-13 17:47:51 -0500566 .scr_read = mv5_scr_read,
567 .scr_write = mv5_scr_write,
568
569 .port_start = mv_port_start,
570 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500571};
572
Tejun Heo029cfd62008-03-25 12:22:49 +0900573static struct ata_port_operations mv6_ops = {
574 .inherits = &mv5_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400575 .qc_defer = sata_pmp_qc_defer_cmd_switch,
Tejun Heo029cfd62008-03-25 12:22:49 +0900576 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400577 .scr_read = mv_scr_read,
578 .scr_write = mv_scr_write,
Mark Lorde49856d2008-04-16 14:59:07 -0400579
580 .pmp_hardreset = mv_pmp_hardreset,
581 .pmp_softreset = mv_softreset,
582 .softreset = mv_softreset,
583 .error_handler = sata_pmp_error_handler,
Brett Russ20f733e2005-09-01 18:26:17 -0400584};
585
Tejun Heo029cfd62008-03-25 12:22:49 +0900586static struct ata_port_operations mv_iie_ops = {
587 .inherits = &mv6_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400588 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
Tejun Heo029cfd62008-03-25 12:22:49 +0900589 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500590 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500591};
592
/*
 * Per-variant capabilities, indexed by the chip_* board id that the
 * PCI table (mv_pci_tbl) or the SoC platform probe assigns.
 */
static const struct ata_port_info mv_port_info[] = {
	{ /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{ /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{ /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{ /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{ /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{ /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
653
/*
 * PCI device IDs; driver_data is the chip_* board id used to index
 * mv_port_info[] above.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
683
/* Low-level hardware methods for the Gen-I (50xx) family */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
692
/* Low-level hardware methods for the Gen-II/IIe (60xx/70xx) families */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
701
/* Low-level hardware methods for the SoC-integrated variant;
 * shares the Gen-II PHY errata handler, everything else is SoC-specific.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
710
Brett Russ20f733e2005-09-01 18:26:17 -0400711/*
712 * Functions
713 */
714
/*
 * writelfl - MMIO write followed by a read-back of the same register.
 * The read forces the write out of any PCI posted-write buffers, so the
 * register is guaranteed to have been updated before we proceed.  The
 * write/read order is essential; do not reorder or drop the readl().
 */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
720
Brett Russ20f733e2005-09-01 18:26:17 -0400721static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
722{
723 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
724}
725
Jeff Garzikc9d39132005-11-13 17:47:51 -0500726static inline unsigned int mv_hc_from_port(unsigned int port)
727{
728 return port >> MV_PORT_HC_SHIFT;
729}
730
731static inline unsigned int mv_hardport_from_port(unsigned int port)
732{
733 return port & MV_PORT_MASK;
734}
735
736static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
737 unsigned int port)
738{
739 return mv_hc_base(base, mv_hc_from_port(port));
740}
741
Brett Russ20f733e2005-09-01 18:26:17 -0400742static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
743{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500744 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500745 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500746 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400747}
748
Mark Lorde12bef52008-03-31 19:33:56 -0400749static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
750{
751 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
752 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
753
754 return hc_mmio + ofs;
755}
756
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500757static inline void __iomem *mv_host_base(struct ata_host *host)
758{
759 struct mv_host_priv *hpriv = host->private_data;
760 return hpriv->base;
761}
762
Brett Russ20f733e2005-09-01 18:26:17 -0400763static inline void __iomem *mv_ap_base(struct ata_port *ap)
764{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500765 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400766}
767
Jeff Garzikcca39742006-08-24 03:19:22 -0400768static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400769{
Jeff Garzikcca39742006-08-24 03:19:22 -0400770 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400771}
772
/**
 * mv_set_edma_ptrs - program EDMA request/response queue pointers
 * @port_mmio: per-port register base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data (queue DMA addresses and software indices)
 *
 * Loads the hardware in/out pointer registers for both the command
 * request queue (CRQB) and command response queue (CRPB) from the
 * driver's cached indices, so hardware and software agree on queue
 * state before EDMA is (re)enabled.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	WARN_ON(pp->crqb_dma & 0x3ff);	/* CRQB ring must be 1KB aligned */
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* XX42A0 errata: the out-pointer register also carries base bits */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	WARN_ON(pp->crpb_dma & 0xff);	/* CRPB ring must be 256B aligned */
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
812
Brett Russ05b308e2005-10-05 17:08:53 -0400813/**
814 * mv_start_dma - Enable eDMA engine
815 * @base: port base address
816 * @pp: port private data
817 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900818 * Verify the local cache of the eDMA state is accurate with a
819 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400820 *
821 * LOCKING:
822 * Inherited from caller.
823 */
Mark Lord0c589122008-01-26 18:31:16 -0500824static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500825 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400826{
Mark Lord72109162008-01-26 18:31:33 -0500827 int want_ncq = (protocol == ATA_PROT_NCQ);
828
829 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
830 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
831 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -0400832 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -0500833 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400834 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500835 struct mv_host_priv *hpriv = ap->host->private_data;
836 int hard_port = mv_hardport_from_port(ap->port_no);
837 void __iomem *hc_mmio = mv_hc_base_from_port(
Saeed Bishara0fca0d62008-02-13 10:09:09 -1100838 mv_host_base(ap->host), hard_port);
Mark Lord0c589122008-01-26 18:31:16 -0500839 u32 hc_irq_cause, ipending;
840
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400841 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500842 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400843
Mark Lord0c589122008-01-26 18:31:16 -0500844 /* clear EDMA interrupt indicator, if any */
845 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
846 ipending = (DEV_IRQ << hard_port) |
847 (CRPB_DMA_DONE << hard_port);
848 if (hc_irq_cause & ipending) {
849 writelfl(hc_irq_cause & ~ipending,
850 hc_mmio + HC_IRQ_CAUSE_OFS);
851 }
852
Mark Lorde12bef52008-03-31 19:33:56 -0400853 mv_edma_cfg(ap, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500854
855 /* clear FIS IRQ Cause */
856 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
857
Mark Lordf630d562008-01-26 18:31:00 -0500858 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400859
Mark Lordf630d562008-01-26 18:31:00 -0500860 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400861 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
862 }
Mark Lordf630d562008-01-26 18:31:00 -0500863 WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
Brett Russ31961942005-09-30 01:36:00 -0400864}
865
Brett Russ05b308e2005-10-05 17:08:53 -0400866/**
Mark Lorde12bef52008-03-31 19:33:56 -0400867 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -0400868 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -0400869 *
870 * LOCKING:
871 * Inherited from caller.
872 */
Mark Lordb5624682008-03-31 19:34:40 -0400873static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -0400874{
Mark Lordb5624682008-03-31 19:34:40 -0400875 int i;
Brett Russ31961942005-09-30 01:36:00 -0400876
Mark Lordb5624682008-03-31 19:34:40 -0400877 /* Disable eDMA. The disable bit auto clears. */
878 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -0500879
Mark Lordb5624682008-03-31 19:34:40 -0400880 /* Wait for the chip to confirm eDMA is off. */
881 for (i = 10000; i > 0; i--) {
882 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb52007-07-12 14:30:19 -0400883 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -0400884 return 0;
885 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -0400886 }
Mark Lordb5624682008-03-31 19:34:40 -0400887 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400888}
889
Mark Lorde12bef52008-03-31 19:33:56 -0400890static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400891{
Mark Lordb5624682008-03-31 19:34:40 -0400892 void __iomem *port_mmio = mv_ap_base(ap);
893 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400894
Mark Lordb5624682008-03-31 19:34:40 -0400895 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
896 return 0;
897 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
898 if (mv_stop_edma_engine(port_mmio)) {
899 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
900 return -EIO;
901 }
902 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400903}
904
#ifdef ATA_DEBUG
/* Hex-dump @bytes of MMIO space starting at @start, four words per line.
 * Note: @b advances inside the inner loop, so both loop conditions
 * depend on it -- keep the interleaving as-is.
 */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
919
/* Hex-dump @bytes of PCI config space, four dwords per line (debug only). */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}

/*
 * Debug helper: dump PCI config space, chip-global registers, and the
 * per-HC and per-port register blocks.  A negative @port means "all
 * ports and HCs"; otherwise only @port's own HC and port are dumped.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
980
Brett Russ20f733e2005-09-01 18:26:17 -0400981static unsigned int mv_scr_offset(unsigned int sc_reg_in)
982{
983 unsigned int ofs;
984
985 switch (sc_reg_in) {
986 case SCR_STATUS:
987 case SCR_CONTROL:
988 case SCR_ERROR:
989 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
990 break;
991 case SCR_ACTIVE:
992 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
993 break;
994 default:
995 ofs = 0xffffffffU;
996 break;
997 }
998 return ofs;
999}
1000
Tejun Heoda3dbb12007-07-16 14:29:40 +09001001static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001002{
1003 unsigned int ofs = mv_scr_offset(sc_reg_in);
1004
Tejun Heoda3dbb12007-07-16 14:29:40 +09001005 if (ofs != 0xffffffffU) {
1006 *val = readl(mv_ap_base(ap) + ofs);
1007 return 0;
1008 } else
1009 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001010}
1011
Tejun Heoda3dbb12007-07-16 14:29:40 +09001012static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001013{
1014 unsigned int ofs = mv_scr_offset(sc_reg_in);
1015
Tejun Heoda3dbb12007-07-16 14:29:40 +09001016 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001017 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001018 return 0;
1019 } else
1020 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001021}
1022
Mark Lordf2738272008-01-26 18:32:29 -05001023static void mv6_dev_config(struct ata_device *adev)
1024{
1025 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001026 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1027 *
1028 * Gen-II does not support NCQ over a port multiplier
1029 * (no FIS-based switching).
1030 *
Mark Lordf2738272008-01-26 18:32:29 -05001031 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1032 * See mv_qc_prep() for more info.
1033 */
Mark Lorde49856d2008-04-16 14:59:07 -04001034 if (adev->flags & ATA_DFLAG_NCQ) {
1035 if (sata_pmp_attached(adev->link->ap))
1036 adev->flags &= ~ATA_DFLAG_NCQ;
1037 else if (adev->max_sectors > ATA_MAX_SECTORS)
Mark Lordf2738272008-01-26 18:32:29 -05001038 adev->max_sectors = ATA_MAX_SECTORS;
Mark Lorde49856d2008-04-16 14:59:07 -04001039 }
1040}
1041
1042static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1043{
1044 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1045 /*
1046 * Various bit settings required for operation
1047 * in FIS-based switching (fbs) mode on GenIIe:
1048 */
1049 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1050 old_ltmode = readl(port_mmio + LTMODE_OFS);
1051 if (enable_fbs) {
1052 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1053 new_ltmode = old_ltmode | LTMODE_BIT8;
1054 } else { /* disable fbs */
1055 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1056 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1057 }
1058 if (new_fcfg != old_fcfg)
1059 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1060 if (new_ltmode != old_ltmode)
1061 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
Mark Lordf2738272008-01-26 18:32:29 -05001062}
1063
/**
 * mv_edma_cfg - program the EDMA configuration register for a port
 * @ap: ATA channel
 * @want_ncq: non-zero to configure for NCQ operation
 *
 * Builds the EDMA_CFG value appropriate to the chip generation
 * (Gen-I / Gen-II / Gen-IIe) and the requested queueing mode, updates
 * the FIS-based-switching setup on Gen-IIe, and caches the NCQ state
 * in the port private flags.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		/* FIS-based switching only used for NCQ behind a PMP */
		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1102
Mark Lordda2fa9b2008-01-26 18:32:45 -05001103static void mv_port_free_dma_mem(struct ata_port *ap)
1104{
1105 struct mv_host_priv *hpriv = ap->host->private_data;
1106 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001107 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001108
1109 if (pp->crqb) {
1110 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1111 pp->crqb = NULL;
1112 }
1113 if (pp->crpb) {
1114 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1115 pp->crpb = NULL;
1116 }
Mark Lordeb73d552008-01-29 13:24:00 -05001117 /*
1118 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1119 * For later hardware, we have one unique sg_tbl per NCQ tag.
1120 */
1121 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1122 if (pp->sg_tbl[tag]) {
1123 if (tag == 0 || !IS_GEN_I(hpriv))
1124 dma_pool_free(hpriv->sg_tbl_pool,
1125 pp->sg_tbl[tag],
1126 pp->sg_tbl_dma[tag]);
1127 pp->sg_tbl[tag] = NULL;
1128 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001129 }
1130}
1131
/**
 * mv_port_start - Port specific init/start routine.
 * @ap: ATA channel to manipulate
 *
 * Allocate and point to DMA memory, init port private memory,
 * zero indices.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (partially
 * allocated memory is released via mv_port_free_dma_mem(); pp itself
 * is devm-managed).
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* command request ring */
	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	/* command response ring */
	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			/* Gen-I: every tag shares tag 0's table */
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1185
/**
 * mv_port_stop - Port specific cleanup/stop routine.
 * @ap: ATA channel to manipulate
 *
 * Stop DMA, cleanup port memory.
 *
 * LOCKING:
 * This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* quiesce the eDMA engine before its rings are freed */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1200
/**
 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 * @qc: queued command whose SG list to source from
 *
 * Populate the SG list and mark the last entry.
 *
 * Each ePRD entry carries only a 16-bit length, so DMA chunks that
 * would cross a 64KB boundary are split across multiple entries.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	mv_sg = pp->sg_tbl[qc->tag];	/* per-tag table (aliased on Gen-I) */
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip this entry at the next 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* flag end-of-table in the final entry actually written */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1244
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001245static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001246{
Mark Lord559eeda2006-05-19 16:40:15 -04001247 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001248 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001249 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001250}
1251
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA. Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* only DMA and NCQ protocols use the CRQB/EDMA path */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point this CRQB slot at the command's per-tag SG table */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command. So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ. NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux. If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* remaining registers, in the order the hardware expects */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1342
/**
 * mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 * @qc: queued command to prepare
 *
 * Builds a Gen-IIE format CRQB (command request block) in the
 * software request queue for EDMA-capable protocols.  For any
 * protocol other than DMA/NCQ this is a no-op: no CRQB is needed,
 * and mv_qc_issue() will later send the command down the non-EDMA
 * path instead.  If the command has mapped SG data, the SG table
 * is filled in as well.
 *
 * LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* only DMA and NCQ protocols go through the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* Gen IIE also carries the tag in the host-queue field */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	/* (>> 16) >> 16 instead of >> 32: avoids an undefined 32-bit
	 * shift when dma_addr_t is only 32 bits wide
	 */
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* pack the taskfile registers into the four little-endian
	 * ata_cmd words of the Gen IIE CRQB layout
	 */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	/* non-data commands have no SG table to fill */
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1411
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	/* ensure EDMA is running in the mode required by this protocol */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software producer index; the CRQB for this tag
	 * was already written at in_index by mv_qc_prep/_iie
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1455
Brett Russ05b308e2005-10-05 17:08:53 -04001456/**
Brett Russ05b308e2005-10-05 17:08:53 -04001457 * mv_err_intr - Handle error interrupts on the port
1458 * @ap: ATA channel to manipulate
Mark Lord9b358e32006-05-19 16:21:03 -04001459 * @reset_allowed: bool: 0 == don't trigger from reset here
Brett Russ05b308e2005-10-05 17:08:53 -04001460 *
1461 * In most cases, just clear the interrupt and move on. However,
Mark Lorde12bef52008-03-31 19:33:56 -04001462 * some cases require an eDMA reset, which also performs a COMRESET.
1463 * The SERR case requires a clear of pending errors in the SATA
1464 * SERROR register. Finally, if the port disabled DMA,
1465 * update our cached copy to match.
Brett Russ05b308e2005-10-05 17:08:53 -04001466 *
1467 * LOCKING:
1468 * Inherited from caller.
1469 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001470static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
Brett Russ20f733e2005-09-01 18:26:17 -04001471{
Brett Russ31961942005-09-30 01:36:00 -04001472 void __iomem *port_mmio = mv_ap_base(ap);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001473 u32 edma_err_cause, eh_freeze_mask, serr = 0;
1474 struct mv_port_priv *pp = ap->private_data;
1475 struct mv_host_priv *hpriv = ap->host->private_data;
1476 unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
1477 unsigned int action = 0, err_mask = 0;
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001478 struct ata_eh_info *ehi = &ap->link.eh_info;
Brett Russ20f733e2005-09-01 18:26:17 -04001479
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001480 ata_ehi_clear_desc(ehi);
Brett Russ20f733e2005-09-01 18:26:17 -04001481
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001482 if (!edma_enabled) {
1483 /* just a guess: do we need to do this? should we
1484 * expand this, and do it in all cases?
1485 */
Tejun Heo936fd732007-08-06 18:36:23 +09001486 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1487 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Brett Russ20f733e2005-09-01 18:26:17 -04001488 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001489
1490 edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
1491
1492 ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
1493
1494 /*
1495 * all generations share these EDMA error cause bits
1496 */
1497
1498 if (edma_err_cause & EDMA_ERR_DEV)
1499 err_mask |= AC_ERR_DEV;
1500 if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
Jeff Garzik6c1153e2007-07-13 15:20:15 -04001501 EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001502 EDMA_ERR_INTRL_PAR)) {
1503 err_mask |= AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001504 action |= ATA_EH_RESET;
Tejun Heob64bbc32007-07-16 14:29:39 +09001505 ata_ehi_push_desc(ehi, "parity error");
Brett Russafb0edd2005-10-05 17:08:42 -04001506 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001507 if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
1508 ata_ehi_hotplugged(ehi);
1509 ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
Tejun Heob64bbc32007-07-16 14:29:39 +09001510 "dev disconnect" : "dev connect");
Tejun Heocf480622008-01-24 00:05:14 +09001511 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001512 }
1513
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04001514 if (IS_GEN_I(hpriv)) {
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001515 eh_freeze_mask = EDMA_EH_FREEZE_5;
1516
1517 if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001518 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001519 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001520 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001521 }
1522 } else {
1523 eh_freeze_mask = EDMA_EH_FREEZE;
1524
1525 if (edma_err_cause & EDMA_ERR_SELF_DIS) {
Harvey Harrison5ab063e2008-02-13 21:14:14 -08001526 pp = ap->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001527 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Tejun Heob64bbc32007-07-16 14:29:39 +09001528 ata_ehi_push_desc(ehi, "EDMA self-disable");
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001529 }
1530
1531 if (edma_err_cause & EDMA_ERR_SERR) {
Tejun Heo936fd732007-08-06 18:36:23 +09001532 sata_scr_read(&ap->link, SCR_ERROR, &serr);
1533 sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001534 err_mask = AC_ERR_ATA_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09001535 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001536 }
1537 }
Brett Russ20f733e2005-09-01 18:26:17 -04001538
1539 /* Clear EDMA now that SERR cleanup done */
Mark Lord3606a382008-01-26 18:28:23 -05001540 writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04001541
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001542 if (!err_mask) {
1543 err_mask = AC_ERR_OTHER;
Tejun Heocf480622008-01-24 00:05:14 +09001544 action |= ATA_EH_RESET;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001545 }
1546
1547 ehi->serror |= serr;
1548 ehi->action |= action;
1549
1550 if (qc)
1551 qc->err_mask |= err_mask;
1552 else
1553 ehi->err_mask |= err_mask;
1554
1555 if (edma_err_cause & eh_freeze_mask)
1556 ata_port_freeze(ap);
1557 else
1558 ata_port_abort(ap);
1559}
1560
/* Complete a PIO-mode command on behalf of the interrupt handler.
 * Reads the ATA status register (which also acknowledges the device
 * interrupt), then completes the active qc unless the drive is still
 * busy, no command is active, or libata is polling the command itself.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1582
/* Drain the EDMA response queue and complete the corresponding commands.
 *
 * Walks the software consumer index (pp->resp_idx) up to the hardware
 * producer index read from EDMA_RSP_Q_IN_PTR, completing one qc per
 * CRPB entry.  Only after the loop does it write the OUT pointer back,
 * telling the hardware how many entries were consumed.  On an error
 * status it bails out to mv_err_intr() without updating the pointer.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: leave OUT pointer untouched and let
			 * EH sort out the queue; qc may be NULL here
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1648
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* each HC serves MV_PORTS_PER_HC consecutive ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may have fewer than a full HC's worth */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* write-to-clear the cause bits before servicing the ports */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hard_port, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((PORT0_ERR << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			/* polled commands are handled by their issuer */
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hard_port = mv_hardport_from_port(port); /* range 0..3 */

		/* dispatch on the port's current mode: EDMA completions
		 * vs. legacy per-device interrupts
		 */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hard_port) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1731
/* Handle a PCI bus error interrupt.
 *
 * Dumps diagnostics, clears the PCI interrupt cause register, then
 * marks every online port with AC_ERR_HOST_BUS and freezes it so
 * libata EH performs a full recovery.  The raw cause word is pushed
 * into the first affected port's EH description only (printed once).
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* clear the cause register so the interrupt de-asserts */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1771
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 irq_stat, irq_mask;

	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
	spin_lock(&host->lock);

	irq_stat = readl(hpriv->main_cause_reg_addr);
	irq_mask = readl(hpriv->main_mask_reg_addr);

	/* check the cases where we either have nothing pending or have read
	 * a bogus register value which can indicate HW removal or PCI fault
	 */
	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors are reported only at this top level */
	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* fan out to each host controller with pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1827
Jeff Garzikc9d39132005-11-13 17:47:51 -05001828static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1829{
1830 unsigned int ofs;
1831
1832 switch (sc_reg_in) {
1833 case SCR_STATUS:
1834 case SCR_ERROR:
1835 case SCR_CONTROL:
1836 ofs = sc_reg_in * sizeof(u32);
1837 break;
1838 default:
1839 ofs = 0xffffffffU;
1840 break;
1841 }
1842 return ofs;
1843}
1844
Tejun Heoda3dbb12007-07-16 14:29:40 +09001845static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001846{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001847 struct mv_host_priv *hpriv = ap->host->private_data;
1848 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001849 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001850 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1851
Tejun Heoda3dbb12007-07-16 14:29:40 +09001852 if (ofs != 0xffffffffU) {
1853 *val = readl(addr + ofs);
1854 return 0;
1855 } else
1856 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001857}
1858
Tejun Heoda3dbb12007-07-16 14:29:40 +09001859static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001860{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001861 struct mv_host_priv *hpriv = ap->host->private_data;
1862 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001863 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001864 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1865
Tejun Heoda3dbb12007-07-16 14:29:40 +09001866 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001867 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001868 return 0;
1869 } else
1870 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001871}
1872
/* Gen-I (50xx) bus reset.
 *
 * On all but the earliest 5080 silicon (revision 0), set bit 0 of the
 * PCI expansion-ROM BAR control register before performing the common
 * PCI bus reset.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	/* revision-0 5080 parts must skip the ROM BAR tweak below */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv5_reset_pci_bus(host, mmio);
}
1888
/* Reset the flash controller by writing a fixed value to MV_FLASH_CTL.
 * The 0x0fcfffff constant presumably comes from the Marvell datasheet --
 * its individual bit meanings are not documented here (TODO: confirm).
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1893
Jeff Garzik47c2b672005-11-12 21:13:17 -05001894static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001895 void __iomem *mmio)
1896{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001897 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1898 u32 tmp;
1899
1900 tmp = readl(phy_mmio + MV5_PHY_MODE);
1901
1902 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1903 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001904}
1905
/* Enable the activity LEDs on Gen-I (50xx) parts by clearing the GPIO
 * port control register and modifying the expansion-ROM BAR control.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "tmp |= ~(1 << 0)" sets every bit EXCEPT bit 0,
	 * which looks suspicious next to mv5_reset_bus()'s "tmp |= (1 << 0)".
	 * Possibly "tmp &= ~(1 << 0)" (clear bit 0) was intended -- verify
	 * against the Marvell datasheet before changing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1918
/* Apply Gen-I PHY errata workarounds and program the cached signal
 * pre-emphasis/amplitude values (captured by mv5_read_preamp) for @port.
 * The 50XXB0 errata additionally adjusts the LT-mode and PHY control
 * registers before the mode register is rewritten.
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (12:11) and amplitude (7:5) fields to replace */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* replace the preamp/amplitude fields with the cached values */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1944
Jeff Garzikc9d39132005-11-13 17:47:51 -05001945
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one Gen-I port: stop/reset the EDMA channel, then zero out the
 * per-port EDMA registers and restore default config/timeout values.
 * Register offsets are raw because they predate the named OFS constants.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1975
#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset one Gen-I host controller block: zero four HC registers and
 * adjust the register at offset 0x20 (keep the 0x1c1c1c1c field,
 * force 0x03030303 in; exact bit semantics per Marvell datasheet).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
#undef ZERO
1994
1995static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
1996 unsigned int n_hc)
1997{
1998 unsigned int hc, port;
1999
2000 for (hc = 0; hc < n_hc; hc++) {
2001 for (port = 0; port < MV_PORTS_PER_HC; port++)
2002 mv5_reset_hc_port(hpriv, mmio,
2003 (hc * MV_PORTS_PER_HC) + port);
2004
2005 mv5_reset_one_hc(hpriv, mmio, hc);
2006 }
2007
2008 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002009}
2010
#undef ZERO
#define ZERO(reg) writel(0, mmio + (reg))
/* Reset the chip's PCI interface: clear the upper PCI mode byte,
 * zero the timers/masks/error-latch registers (including the
 * chip-variant-specific cause/mask offsets from hpriv), and restore
 * the crossbar timeout default.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
#undef ZERO
2035
/* 60xx flash reset: do the common 5xxx sequence, then set bits 5 and 6
 * in the GPIO port control register (keeping only the low 2 bits).
 */
 2036static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
 2037{
 2038 u32 tmp;
 2039
 2040 mv5_reset_flash(hpriv, mmio);
 2041
 2042 tmp = readl(mmio + MV_GPIO_PORT_CTL);
 2043 tmp &= 0x3;
 2044 tmp |= (1 << 5) | (1 << 6);
 2045 writel(tmp, mmio + MV_GPIO_PORT_CTL);
 2046}
2047
2048/**
2049 * mv6_reset_hc - Perform the 6xxx global soft reset
2050 * @mmio: base address of the HBA
2051 *
2052 * This routine only applies to 6xxx parts.
2053 *
2054 * LOCKING:
2055 * Inherited from caller.
2056 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002057static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2058 unsigned int n_hc)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002059{
2060 void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
2061 int i, rc = 0;
2062 u32 t;
2063
2064 /* Following procedure defined in PCI "main command and status
2065 * register" table.
2066 */
2067 t = readl(reg);
2068 writel(t | STOP_PCI_MASTER, reg);
2069
2070 for (i = 0; i < 1000; i++) {
2071 udelay(1);
2072 t = readl(reg);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04002073 if (PCI_MASTER_EMPTY & t)
Jeff Garzik101ffae2005-11-12 22:17:49 -05002074 break;
Jeff Garzik101ffae2005-11-12 22:17:49 -05002075 }
2076 if (!(PCI_MASTER_EMPTY & t)) {
2077 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
2078 rc = 1;
2079 goto done;
2080 }
2081
2082 /* set reset */
2083 i = 5;
2084 do {
2085 writel(t | GLOB_SFT_RST, reg);
2086 t = readl(reg);
2087 udelay(1);
2088 } while (!(GLOB_SFT_RST & t) && (i-- > 0));
2089
2090 if (!(GLOB_SFT_RST & t)) {
2091 printk(KERN_ERR DRV_NAME ": can't set global reset\n");
2092 rc = 1;
2093 goto done;
2094 }
2095
2096 /* clear reset and *reenable the PCI master* (not mentioned in spec) */
2097 i = 5;
2098 do {
2099 writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
2100 t = readl(reg);
2101 udelay(1);
2102 } while ((GLOB_SFT_RST & t) && (i-- > 0));
2103
2104 if (GLOB_SFT_RST & t) {
2105 printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
2106 rc = 1;
2107 }
Mark Lord094e50b2008-04-16 15:01:19 -04002108 /*
2109 * Temporary: wait 3 seconds before port-probing can happen,
2110 * so that we don't miss finding sleepy SilXXXX port-multipliers.
2111 * This can go away once hotplug is fully/correctly implemented.
2112 */
2113 if (rc == 0)
2114 msleep(3000);
Jeff Garzik101ffae2005-11-12 22:17:49 -05002115done:
2116 return rc;
2117}
2118
/* Capture per-port PHY signal amplitude (bits 10:8) and pre-emphasis
 * (bits 7:5) for later restore by mv6_phy_errata().  If MV_RESET_CFG
 * bit 0 is clear the PHY_MODE2 values are not valid, so fall back to
 * fixed defaults instead of reading the port register.
 */
Jeff Garzik47c2b672005-11-12 21:13:17 -05002119static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002120 void __iomem *mmio)
 2121{
 2122 void __iomem *port_mmio;
 2123 u32 tmp;
 2124
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002125 tmp = readl(mmio + MV_RESET_CFG);
 2126 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002127 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002128 hpriv->signal[idx].pre = 0x1 << 5;
 2129 return;
 2130 }
 2131
 2132 port_mmio = mv_port_base(mmio, idx);
 2133 tmp = readl(port_mmio + PHY_MODE2);
 2134
 2135 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
 2136 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
 2137}
2138
/* Enable the SATA activity LEDs on 60xx via GPIO port control. */
Jeff Garzik47c2b672005-11-12 21:13:17 -05002139static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002140{
Jeff Garzik47c2b672005-11-12 21:13:17 -05002141 writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002142}
2143
/* Apply the 60xx PHY errata workarounds for one port (B2/C0 steppings),
 * then restore the pre-emphasis/amplitude values saved by
 * mv6_read_preamp().  Register magic values come from the Marvell
 * vendor driver; do not reorder these writes.
 */
Jeff Garzikc9d39132005-11-13 17:47:51 -05002144static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002145 unsigned int port)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002146{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002147 void __iomem *port_mmio = mv_port_base(mmio, port);
 2148
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002149 u32 hp_flags = hpriv->hp_flags;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002150 int fix_phy_mode2 =
 2151 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002152 int fix_phy_mode4 =
Jeff Garzik47c2b672005-11-12 21:13:17 -05002153 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
 2154 u32 m2, tmp;
 2155
	/* pulse bit 31 (with bit 16 cleared) in PHY_MODE2, then clear both */
 2156 if (fix_phy_mode2) {
 2157 m2 = readl(port_mmio + PHY_MODE2);
 2158 m2 &= ~(1 << 16);
 2159 m2 |= (1 << 31);
 2160 writel(m2, port_mmio + PHY_MODE2);
 2161
 2162 udelay(200);
 2163
 2164 m2 = readl(port_mmio + PHY_MODE2);
 2165 m2 &= ~((1 << 16) | (1 << 31));
 2166 writel(m2, port_mmio + PHY_MODE2);
 2167
 2168 udelay(200);
 2169 }
 2170
 2171 /* who knows what this magic does */
 2172 tmp = readl(port_mmio + PHY_MODE3);
 2173 tmp &= ~0x7F800000;
 2174 tmp |= 0x2A800000;
 2175 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002176
 2177 if (fix_phy_mode4) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002178 u32 m4;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002179
 2180 m4 = readl(port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002181
		/* on B2, PHY_MODE3 must be saved before touching PHY_MODE4
		 * and restored afterwards (see matching writel below) */
 2182 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002183 tmp = readl(port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002184
Mark Lorde12bef52008-03-31 19:33:56 -04002185 /* workaround for errata FEr SATA#10 (part 1) */
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002186 m4 = (m4 & ~(1 << 1)) | (1 << 0);
 2187
 2188 writel(m4, port_mmio + PHY_MODE4);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002189
 2190 if (hp_flags & MV_HP_ERRATA_60X1B2)
Mark Lorde12bef52008-03-31 19:33:56 -04002191 writel(tmp, port_mmio + PHY_MODE3);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002192 }
 2193
 2194 /* Revert values of pre-emphasis and signal amps to the saved ones */
 2195 m2 = readl(port_mmio + PHY_MODE2);
 2196
 2197 m2 &= ~MV_M2_PREAMP_MASK;
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002198 m2 |= hpriv->signal[port].amps;
 2199 m2 |= hpriv->signal[port].pre;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002200 m2 &= ~(1 << 16);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002201
Jeff Garzike4e7b892006-01-31 12:18:41 -05002202 /* according to mvSata 3.6.1, some IIE values are fixed */
 2203 if (IS_GEN_IIE(hpriv)) {
 2204 m2 &= ~0xC30FF01F;
 2205 m2 |= 0x0000900F;
 2206 }
 2207
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002208 writel(m2, port_mmio + PHY_MODE2);
 2209}
2210
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002211/* TODO: use the generic LED interface to configure the SATA Presence */
 2212/* & Acitivy LEDs on the board */
/* SoC variant has no LED control here: intentional no-op ops hook. */
 2213static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
 2214 void __iomem *mmio)
 2215{
 2216 return;
 2217}
2218
/* SoC version of read_preamp: always read amplitude/pre-emphasis
 * directly from the port's PHY_MODE2 (no MV_RESET_CFG validity check
 * as on 60xx).
 */
 2219static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
 2220 void __iomem *mmio)
 2221{
 2222 void __iomem *port_mmio;
 2223 u32 tmp;
 2224
 2225 port_mmio = mv_port_base(mmio, idx);
 2226 tmp = readl(port_mmio + PHY_MODE2);
 2227
 2228 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
 2229 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
 2230}
2231
 2232#undef ZERO
 2233#define ZERO(reg) writel(0, port_mmio + (reg))
/* Reset one SoC SATA port: stop EDMA, zero the EDMA queue/irq
 * registers, program the default EDMA config (0x101f) and the IORDY
 * timeout.  Mirrors mv5_reset_hc_port but with the SoC EDMA_CFG value.
 */
 2234static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
 2235 void __iomem *mmio, unsigned int port)
 2236{
 2237 void __iomem *port_mmio = mv_port_base(mmio, port);
 2238
Mark Lordb5624682008-03-31 19:34:40 -04002239 /*
 2240 * The datasheet warns against setting ATA_RST when EDMA is active
 2241 * (but doesn't say what the problem might be). So we first try
 2242 * to disable the EDMA engine before doing the ATA_RST operation.
 2243 */
Mark Lorde12bef52008-03-31 19:33:56 -04002244 mv_reset_channel(hpriv, mmio, port);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002245
 2246 ZERO(0x028); /* command */
 2247 writel(0x101f, port_mmio + EDMA_CFG_OFS);
 2248 ZERO(0x004); /* timer */
 2249 ZERO(0x008); /* irq err cause */
 2250 ZERO(0x00c); /* irq err mask */
 2251 ZERO(0x010); /* rq bah */
 2252 ZERO(0x014); /* rq inp */
 2253 ZERO(0x018); /* rq outp */
 2254 ZERO(0x01c); /* respq bah */
 2255 ZERO(0x024); /* respq outp */
 2256 ZERO(0x020); /* respq inp */
 2257 ZERO(0x02c); /* test control */
 2258 writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
 2259}
 2260
 2261#undef ZERO
2262
 2263#define ZERO(reg) writel(0, hc_mmio + (reg))
/* Reset the single SoC host controller: clear its per-HC irq regs.
 * NOTE(review): unlike mv5_reset_one_hc this does not touch 0x018 or
 * the config reg at 0x20 — presumably not present on SoC; confirm.
 */
 2264static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
 2265 void __iomem *mmio)
 2266{
 2267 void __iomem *hc_mmio = mv_hc_base(mmio, 0);
 2268
 2269 ZERO(0x00c);
 2270 ZERO(0x010);
 2271 ZERO(0x014);
 2272
 2273}
 2274
 2275#undef ZERO
2276
/* Reset every SoC port, then the (single) host controller.
 * n_hc is unused on SoC; always returns 0.
 */
 2277static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
 2278 void __iomem *mmio, unsigned int n_hc)
 2279{
 2280 unsigned int port;
 2281
 2282 for (port = 0; port < hpriv->n_ports; port++)
 2283 mv_soc_reset_hc_port(hpriv, mmio, port);
 2284
 2285 mv_soc_reset_one_hc(hpriv, mmio);
 2286
 2287 return 0;
 2288}
2289
/* No flash controller on SoC: intentional no-op ops hook. */
 2290static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
 2291 void __iomem *mmio)
 2292{
 2293 return;
 2294}
2295
/* No PCI bus to reset on SoC: intentional no-op ops hook. */
 2296static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
 2297{
 2298 return;
 2299}
2300
/* Program SATA_INTERFACE_CFG for a port.  @want_gen2i selects 3.0Gb/s
 * (gen2i) capability via bit 7; the 0x9b1000/0xf7f mask values come
 * from the chip spec.  Caller must ensure EDMA is stopped first.
 */
Mark Lordb67a1062008-03-31 19:35:13 -04002301static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
 2302{
 2303 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
 2304
 2305 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
 2306 if (want_gen2i)
 2307 ifctl |= (1 << 7); /* enable gen2i speed */
 2308 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
 2309}
2310
Mark Lordb5624682008-03-31 19:34:40 -04002311/*
 2312 * Caller must ensure that EDMA is not active,
 2313 * by first doing mv_stop_edma() where needed.
 2314 *
 2315 * Hard-resets one SATA channel: stops the EDMA engine, strobes
 * ATA_RST (issuing a COMRESET to the device), re-enables gen2i speed
 * on Gen II+ parts, and re-applies the per-port PHY errata fixes.
 */
Mark Lorde12bef52008-03-31 19:33:56 -04002315static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -05002316 unsigned int port_no)
Brett Russ20f733e2005-09-01 18:26:17 -04002317{
Jeff Garzikc9d39132005-11-13 17:47:51 -05002318 void __iomem *port_mmio = mv_port_base(mmio, port_no);
Brett Russ20f733e2005-09-01 18:26:17 -04002319
Mark Lord0d8be5c2008-04-16 14:56:12 -04002320 mv_stop_edma_engine(port_mmio);
Brett Russ31961942005-09-30 01:36:00 -04002321 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002322
Mark Lordb67a1062008-03-31 19:35:13 -04002323 if (!IS_GEN_I(hpriv)) {
 2324 /* Enable 3.0gb/s link speed */
 2325 mv_setup_ifctl(port_mmio, 1);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002326 }
Mark Lordb67a1062008-03-31 19:35:13 -04002327 /*
 2328 * Strobing ATA_RST here causes a hard reset of the SATA transport,
 2329 * link, and physical layers. It resets all SATA interface registers
 2330 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
Brett Russ20f733e2005-09-01 18:26:17 -04002331 */
Mark Lordb67a1062008-03-31 19:35:13 -04002332 writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
 2333 udelay(25); /* allow reset propagation */
Brett Russ31961942005-09-30 01:36:00 -04002334 writelfl(0, port_mmio + EDMA_CMD_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002335
Jeff Garzikc9d39132005-11-13 17:47:51 -05002336 hpriv->ops->phy_errata(hpriv, mmio, port_no);
 2337
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002338 if (IS_GEN_I(hpriv))
Jeff Garzikc9d39132005-11-13 17:47:51 -05002339 mdelay(1);
 2340}
2341
/* Select a port-multiplier target port by writing its number into the
 * low nibble of SATA_IFCTL.  No-op if PMP isn't supported, and skips
 * the write when the nibble already matches (avoids a redundant
 * flushing write).
 */
Mark Lorde49856d2008-04-16 14:59:07 -04002342static void mv_pmp_select(struct ata_port *ap, int pmp)
 2343{
 2344 if (sata_pmp_supported(ap)) {
 2345 void __iomem *port_mmio = mv_ap_base(ap);
 2346 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
 2347 int old = reg & 0xf;
 2348
 2349 if (old != pmp) {
 2350 reg = (reg & ~0xf) | pmp;
 2351 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
 2352 }
 2353 }
 2354}
2355
/* libata hardreset hook for PMP links: route the reset to the right
 * PMP port, then use the generic SATA hardreset.
 */
 2356static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
 2357 unsigned long deadline)
 2358{
 2359 mv_pmp_select(link->ap, sata_srst_pmp(link));
 2360 return sata_std_hardreset(link, class, deadline);
 2361}
2362
/* libata softreset hook: select the PMP target port, then do the
 * standard SFF softreset.
 */
 2363static int mv_softreset(struct ata_link *link, unsigned int *class,
 2364 unsigned long deadline)
 2365{
 2366 mv_pmp_select(link->ap, sata_srst_pmp(link));
 2367 return ata_sff_softreset(link, class, deadline);
 2368}
2369
/* Controller hardreset hook: hard-reset the channel, then retry
 * sata_link_hardreset until SStatus reports a usable link.  Implements
 * part 2 of errata FEr SATA#10: after 5 failed attempts with
 * SStatus == 0x121, drop to 1.5Gb/s and extend the deadline once.
 */
Tejun Heocc0680a2007-08-06 18:36:23 +09002370static int mv_hardreset(struct ata_link *link, unsigned int *class,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002371 unsigned long deadline)
 2372{
Tejun Heocc0680a2007-08-06 18:36:23 +09002373 struct ata_port *ap = link->ap;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002374 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lordb5624682008-03-31 19:34:40 -04002375 struct mv_port_priv *pp = ap->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002376 void __iomem *mmio = hpriv->base;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002377 int rc, attempts = 0, extra = 0;
 2378 u32 sstatus;
 2379 bool online;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002380
Mark Lorde12bef52008-03-31 19:33:56 -04002381 mv_reset_channel(hpriv, mmio, ap->port_no);
Mark Lordb5624682008-03-31 19:34:40 -04002382 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002383
Mark Lord0d8be5c2008-04-16 14:56:12 -04002384 /* Workaround for errata FEr SATA#10 (part 2) */
 2385 do {
Mark Lord17c5aab2008-04-16 14:56:51 -04002386 const unsigned long *timing =
 2387 sata_ehc_deb_timing(&link->eh_context);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002388
Mark Lord17c5aab2008-04-16 14:56:51 -04002389 rc = sata_link_hardreset(link, timing, deadline + extra,
 2390 &online, NULL);
 2391 if (rc)
Mark Lord0d8be5c2008-04-16 14:56:12 -04002392 return rc;
Mark Lord0d8be5c2008-04-16 14:56:12 -04002393 sata_scr_read(link, SCR_STATUS, &sstatus);
 2394 if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
 2395 /* Force 1.5gb/s link speed and try again */
 2396 mv_setup_ifctl(mv_ap_base(ap), 0);
 2397 if (time_after(jiffies + HZ, deadline))
 2398 extra = HZ; /* only extend it once, max */
 2399 }
	/* loop until SStatus shows no device (0x0) or an established link
	 * (0x113 = gen1 active, 0x123 = gen2 active) */
 2400 } while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002401
Mark Lord17c5aab2008-04-16 14:56:51 -04002402 return rc;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002403}
2404
/* libata freeze hook: mask this port's err/done bits (2 bits per port,
 * plus one extra shift for ports on the second HC) in the main irq
 * mask register.
 */
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002405static void mv_eh_freeze(struct ata_port *ap)
Brett Russ20f733e2005-09-01 18:26:17 -04002406{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002407 struct mv_host_priv *hpriv = ap->host->private_data;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002408 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
 2409 u32 tmp, mask;
 2410 unsigned int shift;
Brett Russ31961942005-09-30 01:36:00 -04002411
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002412 /* FIXME: handle coalescing completion events properly */
Brett Russ31961942005-09-30 01:36:00 -04002413
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002414 shift = ap->port_no * 2;
 2415 if (hc > 0)
 2416 shift++;
Brett Russ31961942005-09-30 01:36:00 -04002417
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002418 mask = 0x3 << shift;
Brett Russ31961942005-09-30 01:36:00 -04002419
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002420 /* disable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002421 tmp = readl(hpriv->main_mask_reg_addr);
 2422 writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002423}
2424
/* libata thaw hook: clear any latched EDMA errors and pending HC irq
 * cause bits for this port, then re-enable its err/done bits in the
 * main irq mask (inverse of mv_eh_freeze).
 */
 2425static void mv_eh_thaw(struct ata_port *ap)
 2426{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002427 struct mv_host_priv *hpriv = ap->host->private_data;
 2428 void __iomem *mmio = hpriv->base;
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002429 unsigned int hc = (ap->port_no > 3) ? 1 : 0;
 2430 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
 2431 void __iomem *port_mmio = mv_ap_base(ap);
 2432 u32 tmp, mask, hc_irq_cause;
 2433 unsigned int shift, hc_port_no = ap->port_no;
 2434
 2435 /* FIXME: handle coalescing completion events properly */
 2436
 2437 shift = ap->port_no * 2;
 2438 if (hc > 0) {
 2439 shift++;
 2440 hc_port_no -= 4;
Mark Lord9b358e32006-05-19 16:21:03 -04002441 }
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04002442
 2443 mask = 0x3 << shift;
 2444
 2445 /* clear EDMA errors on this port */
 2446 writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 2447
 2448 /* clear pending irq events */
 2449 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
 2450 hc_irq_cause &= ~(1 << hc_port_no); /* clear CRPB-done */
 2451 hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
 2452 writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 2453
 2454 /* enable assertion of portN err, done events */
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002455 tmp = readl(hpriv->main_mask_reg_addr);
 2456 writelfl(tmp | mask, hpriv->main_mask_reg_addr);
Brett Russ31961942005-09-30 01:36:00 -04002457}
2458
Brett Russ05b308e2005-10-05 17:08:53 -04002459/**
2460 * mv_port_init - Perform some early initialization on a single port.
2461 * @port: libata data structure storing shadow register addresses
2462 * @port_mmio: base address of the port
2463 *
2464 * Initialize shadow register mmio addresses, clear outstanding
2465 * interrupts on the port, and unmask interrupts for the future
2466 * start of the port.
2467 *
2468 * LOCKING:
2469 * Inherited from caller.
2470 */
Brett Russ31961942005-09-30 01:36:00 -04002471static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
2472{
Tejun Heo0d5ff562007-02-01 15:06:36 +09002473 void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
Brett Russ31961942005-09-30 01:36:00 -04002474 unsigned serr_ofs;
2475
Jeff Garzik8b260242005-11-12 12:32:50 -05002476 /* PIO related setup
Brett Russ31961942005-09-30 01:36:00 -04002477 */
2478 port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
Jeff Garzik8b260242005-11-12 12:32:50 -05002479 port->error_addr =
Brett Russ31961942005-09-30 01:36:00 -04002480 port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
2481 port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
2482 port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
2483 port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
2484 port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
2485 port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
Jeff Garzik8b260242005-11-12 12:32:50 -05002486 port->status_addr =
Brett Russ31961942005-09-30 01:36:00 -04002487 port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
2488 /* special case: control/altstatus doesn't have ATA_REG_ address */
2489 port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;
2490
2491 /* unused: */
Randy Dunlap8d9db2d2007-02-16 01:40:06 -08002492 port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;
Brett Russ20f733e2005-09-01 18:26:17 -04002493
Brett Russ31961942005-09-30 01:36:00 -04002494 /* Clear any currently outstanding port interrupt conditions */
2495 serr_ofs = mv_scr_offset(SCR_ERROR);
2496 writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
2497 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
2498
Mark Lord646a4da2008-01-26 18:30:37 -05002499 /* unmask all non-transient EDMA error interrupts */
2500 writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002501
Jeff Garzik8b260242005-11-12 12:32:50 -05002502 VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
Brett Russ31961942005-09-30 01:36:00 -04002503 readl(port_mmio + EDMA_CFG_OFS),
2504 readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
2505 readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
Brett Russ20f733e2005-09-01 18:26:17 -04002506}
2507
/* Identify the chip generation from the board index and PCI revision,
 * select the matching ops table, and record errata/hp_flags plus the
 * PCI-vs-PCIe irq register offsets in hpriv.  Returns 0 on success,
 * 1 for an unknown board index.
 */
Tejun Heo4447d352007-04-17 23:44:08 +09002508static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002509{
Tejun Heo4447d352007-04-17 23:44:08 +09002510 struct pci_dev *pdev = to_pci_dev(host->dev);
 2511 struct mv_host_priv *hpriv = host->private_data;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002512 u32 hp_flags = hpriv->hp_flags;
 2513
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002514 switch (board_idx) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002515 case chip_5080:
 2516 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002517 hp_flags |= MV_HP_GEN_I;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002518
Auke Kok44c10132007-06-08 15:46:36 -07002519 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002520 case 0x1:
 2521 hp_flags |= MV_HP_ERRATA_50XXB0;
 2522 break;
 2523 case 0x3:
 2524 hp_flags |= MV_HP_ERRATA_50XXB2;
 2525 break;
 2526 default:
 2527 dev_printk(KERN_WARNING, &pdev->dev,
 2528 "Applying 50XXB2 workarounds to unknown rev\n");
 2529 hp_flags |= MV_HP_ERRATA_50XXB2;
 2530 break;
 2531 }
 2532 break;
 2533
 2534 case chip_504x:
 2535 case chip_508x:
 2536 hpriv->ops = &mv5xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002537 hp_flags |= MV_HP_GEN_I;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002538
Auke Kok44c10132007-06-08 15:46:36 -07002539 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002540 case 0x0:
 2541 hp_flags |= MV_HP_ERRATA_50XXB0;
 2542 break;
 2543 case 0x3:
 2544 hp_flags |= MV_HP_ERRATA_50XXB2;
 2545 break;
 2546 default:
 2547 dev_printk(KERN_WARNING, &pdev->dev,
 2548 "Applying B2 workarounds to unknown rev\n");
 2549 hp_flags |= MV_HP_ERRATA_50XXB2;
 2550 break;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002551 }
 2552 break;
 2553
 2554 case chip_604x:
 2555 case chip_608x:
Jeff Garzik47c2b672005-11-12 21:13:17 -05002556 hpriv->ops = &mv6xxx_ops;
Jeff Garzikee9ccdf2007-07-12 15:51:22 -04002557 hp_flags |= MV_HP_GEN_II;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002558
Auke Kok44c10132007-06-08 15:46:36 -07002559 switch (pdev->revision) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002560 case 0x7:
 2561 hp_flags |= MV_HP_ERRATA_60X1B2;
 2562 break;
 2563 case 0x9:
 2564 hp_flags |= MV_HP_ERRATA_60X1C0;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002565 break;
 2566 default:
 2567 dev_printk(KERN_WARNING, &pdev->dev,
Jeff Garzik47c2b672005-11-12 21:13:17 -05002568 "Applying B2 workarounds to unknown rev\n");
 2569 hp_flags |= MV_HP_ERRATA_60X1B2;
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002570 break;
 2571 }
 2572 break;
 2573
Jeff Garzike4e7b892006-01-31 12:18:41 -05002574 case chip_7042:
Mark Lord02a121d2007-12-01 13:07:22 -05002575 hp_flags |= MV_HP_PCIE;
Mark Lord306b30f2007-12-04 14:07:52 -05002576 if (pdev->vendor == PCI_VENDOR_ID_TTI &&
 2577 (pdev->device == 0x2300 || pdev->device == 0x2310))
 2578 {
Mark Lord4e520032007-12-11 12:58:05 -05002579 /*
 2580 * Highpoint RocketRAID PCIe 23xx series cards:
 2581 *
 2582 * Unconfigured drives are treated as "Legacy"
 2583 * by the BIOS, and it overwrites sector 8 with
 2584 * a "Lgcy" metadata block prior to Linux boot.
 2585 *
 2586 * Configured drives (RAID or JBOD) leave sector 8
 2587 * alone, but instead overwrite a high numbered
 2588 * sector for the RAID metadata. This sector can
 2589 * be determined exactly, by truncating the physical
 2590 * drive capacity to a nice even GB value.
 2591 *
 2592 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
 2593 *
 2594 * Warn the user, lest they think we're just buggy.
 2595 */
 2596 printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
 2597 " BIOS CORRUPTS DATA on all attached drives,"
 2598 " regardless of if/how they are configured."
 2599 " BEWARE!\n");
 2600 printk(KERN_WARNING DRV_NAME ": For data safety, do not"
 2601 " use sectors 8-9 on \"Legacy\" drives,"
 2602 " and avoid the final two gigabytes on"
 2603 " all RocketRAID BIOS initialized drives.\n");
Mark Lord306b30f2007-12-04 14:07:52 -05002604 }
		/* fallthrough: 7042 shares all 6042 setup below */
Jeff Garzike4e7b892006-01-31 12:18:41 -05002605 case chip_6042:
 2606 hpriv->ops = &mv6xxx_ops;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002607 hp_flags |= MV_HP_GEN_IIE;
 2608
Auke Kok44c10132007-06-08 15:46:36 -07002609 switch (pdev->revision) {
Jeff Garzike4e7b892006-01-31 12:18:41 -05002610 case 0x0:
 2611 hp_flags |= MV_HP_ERRATA_XX42A0;
 2612 break;
 2613 case 0x1:
 2614 hp_flags |= MV_HP_ERRATA_60X1C0;
 2615 break;
 2616 default:
 2617 dev_printk(KERN_WARNING, &pdev->dev,
 2618 "Applying 60X1C0 workarounds to unknown rev\n");
 2619 hp_flags |= MV_HP_ERRATA_60X1C0;
 2620 break;
 2621 }
 2622 break;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002623 case chip_soc:
 2624 hpriv->ops = &mv_soc_ops;
 2625 hp_flags |= MV_HP_ERRATA_60X1C0;
 2626 break;
Jeff Garzike4e7b892006-01-31 12:18:41 -05002627
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002628 default:
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002629 dev_printk(KERN_ERR, host->dev,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002630 "BUG: invalid board index %u\n", board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002631 return 1;
 2632 }
 2633
 2634 hpriv->hp_flags = hp_flags;
	/* PCIe parts latch their irq cause/mask at different offsets */
Mark Lord02a121d2007-12-01 13:07:22 -05002635 if (hp_flags & MV_HP_PCIE) {
 2636 hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
 2637 hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
 2638 hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
 2639 } else {
 2640 hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
 2641 hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
 2642 hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
 2643 }
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002644
 2645 return 0;
 2646}
2647
Brett Russ05b308e2005-10-05 17:08:53 -04002648/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002649 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002650 * @host: ATA host to initialize
2651 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002652 *
2653 * If possible, do an early global reset of the host. Then do
2654 * our port init and clear/unmask all/relevant host interrupts.
2655 *
2656 * LOCKING:
2657 * Inherited from caller.
2658 */
Tejun Heo4447d352007-04-17 23:44:08 +09002659static int mv_init_host(struct ata_host *host, unsigned int board_idx)
Brett Russ20f733e2005-09-01 18:26:17 -04002660{
2661 int rc = 0, n_hc, port, hc;
Tejun Heo4447d352007-04-17 23:44:08 +09002662 struct mv_host_priv *hpriv = host->private_data;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002663 void __iomem *mmio = hpriv->base;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002664
Tejun Heo4447d352007-04-17 23:44:08 +09002665 rc = mv_chip_id(host, board_idx);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002666 if (rc)
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002667 goto done;
2668
2669 if (HAS_PCI(host)) {
2670 hpriv->main_cause_reg_addr = hpriv->base +
2671 HC_MAIN_IRQ_CAUSE_OFS;
2672 hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
2673 } else {
2674 hpriv->main_cause_reg_addr = hpriv->base +
2675 HC_SOC_MAIN_IRQ_CAUSE_OFS;
2676 hpriv->main_mask_reg_addr = hpriv->base +
2677 HC_SOC_MAIN_IRQ_MASK_OFS;
2678 }
2679 /* global interrupt mask */
2680 writel(0, hpriv->main_mask_reg_addr);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002681
Tejun Heo4447d352007-04-17 23:44:08 +09002682 n_hc = mv_get_hc_count(host->ports[0]->flags);
Jeff Garzikbca1c4e2005-11-12 12:48:15 -05002683
Tejun Heo4447d352007-04-17 23:44:08 +09002684 for (port = 0; port < host->n_ports; port++)
Jeff Garzik47c2b672005-11-12 21:13:17 -05002685 hpriv->ops->read_preamp(hpriv, port, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002686
Jeff Garzikc9d39132005-11-13 17:47:51 -05002687 rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002688 if (rc)
Brett Russ20f733e2005-09-01 18:26:17 -04002689 goto done;
Brett Russ20f733e2005-09-01 18:26:17 -04002690
Jeff Garzik522479f2005-11-12 22:14:02 -05002691 hpriv->ops->reset_flash(hpriv, mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002692 hpriv->ops->reset_bus(host, mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -05002693 hpriv->ops->enable_leds(hpriv, mmio);
Brett Russ20f733e2005-09-01 18:26:17 -04002694
Tejun Heo4447d352007-04-17 23:44:08 +09002695 for (port = 0; port < host->n_ports; port++) {
Tejun Heocbcdd872007-08-18 13:14:55 +09002696 struct ata_port *ap = host->ports[port];
Jeff Garzik2a47ce02005-11-12 23:05:14 -05002697 void __iomem *port_mmio = mv_port_base(mmio, port);
Tejun Heocbcdd872007-08-18 13:14:55 +09002698
2699 mv_port_init(&ap->ioaddr, port_mmio);
2700
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002701#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002702 if (HAS_PCI(host)) {
2703 unsigned int offset = port_mmio - mmio;
2704 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
2705 ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
2706 }
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002707#endif
Brett Russ20f733e2005-09-01 18:26:17 -04002708 }
2709
2710 for (hc = 0; hc < n_hc; hc++) {
Brett Russ31961942005-09-30 01:36:00 -04002711 void __iomem *hc_mmio = mv_hc_base(mmio, hc);
2712
2713 VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
2714 "(before clear)=0x%08x\n", hc,
2715 readl(hc_mmio + HC_CFG_OFS),
2716 readl(hc_mmio + HC_IRQ_CAUSE_OFS));
2717
2718 /* Clear any currently outstanding hc interrupt conditions */
2719 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
Brett Russ20f733e2005-09-01 18:26:17 -04002720 }
2721
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002722 if (HAS_PCI(host)) {
2723 /* Clear any currently outstanding host interrupt conditions */
2724 writelfl(0, mmio + hpriv->irq_cause_ofs);
Brett Russ31961942005-09-30 01:36:00 -04002725
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002726 /* and unmask interrupt generation for host regs */
2727 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
2728 if (IS_GEN_I(hpriv))
2729 writelfl(~HC_MAIN_MASKED_IRQS_5,
2730 hpriv->main_mask_reg_addr);
2731 else
2732 writelfl(~HC_MAIN_MASKED_IRQS,
2733 hpriv->main_mask_reg_addr);
Jeff Garzikfb621e22007-02-25 04:19:45 -05002734
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002735 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
2736 "PCI int cause/mask=0x%08x/0x%08x\n",
2737 readl(hpriv->main_cause_reg_addr),
2738 readl(hpriv->main_mask_reg_addr),
2739 readl(mmio + hpriv->irq_cause_ofs),
2740 readl(mmio + hpriv->irq_mask_ofs));
2741 } else {
2742 writelfl(~HC_MAIN_MASKED_IRQS_SOC,
2743 hpriv->main_mask_reg_addr);
2744 VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
2745 readl(hpriv->main_cause_reg_addr),
2746 readl(hpriv->main_mask_reg_addr));
2747 }
Brett Russ31961942005-09-30 01:36:00 -04002748done:
Brett Russ20f733e2005-09-01 18:26:17 -04002749 return rc;
2750}
2751
/* Create the managed (devm) DMA pools for CRQB/CRPB queues and the SG
 * tables.  Returns 0 or -ENOMEM; pools are freed automatically on
 * device teardown, so no cleanup is needed on partial failure.
 */
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002752static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
 2753{
 2754 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
 2755 MV_CRQB_Q_SZ, 0);
 2756 if (!hpriv->crqb_pool)
 2757 return -ENOMEM;
 2758
 2759 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
 2760 MV_CRPB_Q_SZ, 0);
 2761 if (!hpriv->crpb_pool)
 2762 return -ENOMEM;
 2763
 2764 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
 2765 MV_SG_TBL_SZ, 0);
 2766 if (!hpriv->sg_tbl_pool)
 2767 return -ENOMEM;
 2768
 2769 return 0;
 2770}
2771
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002772/**
 2773 * mv_platform_probe - handle a positive probe of an soc Marvell
 2774 * host
 2775 * @pdev: platform device found
 2776 *
 2777 * Validates resources, maps the registers, allocates the host and
 * DMA pools, initializes the adapter and activates it with a shared
 * IRQ.  Returns 0 or a negative errno.
 2778 *
 2779 * LOCKING:
 2780 * Inherited from caller.
 2781 */
 2782static int mv_platform_probe(struct platform_device *pdev)
 2783{
 2784 static int printed_version;
 2785 const struct mv_sata_platform_data *mv_platform_data;
 2786 const struct ata_port_info *ppi[] =
 2787 { &mv_port_info[chip_soc], NULL };
 2788 struct ata_host *host;
 2789 struct mv_host_priv *hpriv;
 2790 struct resource *res;
 2791 int n_ports, rc;
 2792
 2793 if (!printed_version++)
 2794 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
 2795
 2796 /*
 2797 * Simple resource validation ..
 2798 */
 2799 if (unlikely(pdev->num_resources != 2)) {
 2800 dev_err(&pdev->dev, "invalid number of resources\n");
 2801 return -EINVAL;
 2802 }
 2803
 2804 /*
 2805 * Get the register base first
 2806 */
 2807 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 2808 if (res == NULL)
 2809 return -EINVAL;
 2810
 2811 /* allocate host */
	/* NOTE(review): platform_data is dereferenced without a NULL
	 * check — verify all board files always supply it. */
 2812 mv_platform_data = pdev->dev.platform_data;
 2813 n_ports = mv_platform_data->n_ports;
 2814
 2815 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 2816 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
 2817
 2818 if (!host || !hpriv)
 2819 return -ENOMEM;
 2820 host->private_data = hpriv;
 2821 hpriv->n_ports = n_ports;
 2822
 2823 host->iomap = NULL;
	/* NOTE(review): devm_ioremap() result is not checked for NULL
	 * before the MV_SATAHC0_REG_BASE adjustment — confirm/fix. */
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002822 hpriv->base = devm_ioremap(&pdev->dev, res->start,
 2823 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002824 hpriv->base -= MV_SATAHC0_REG_BASE;
 2825
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002826 rc = mv_create_dma_pools(hpriv, &pdev->dev);
 2827 if (rc)
 2828 return rc;
 2829
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002830 /* initialize adapter */
 2831 rc = mv_init_host(host, chip_soc);
 2832 if (rc)
 2833 return rc;
 2834
 2835 dev_printk(KERN_INFO, &pdev->dev,
 2836 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
 2837 host->n_ports);
 2838
 2839 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
 2840 IRQF_SHARED, &mv6_sht);
 2841}
2842
2843/*
2844 *
2845 * mv_platform_remove - unplug a platform interface
2846 * @pdev: platform device
2847 *
2848 * A platform bus SATA device has been unplugged. Perform the needed
2849 * cleanup. Also called on module unload for any active devices.
2850 */
2851static int __devexit mv_platform_remove(struct platform_device *pdev)
2852{
2853 struct device *dev = &pdev->dev;
2854 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002855
2856 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002857 return 0;
2858}
2859
/* Platform-bus glue: matched by name (DRV_NAME) against platform
 * devices registered by board/SoC setup code; see also the
 * "platform:" MODULE_ALIAS at the bottom of the file. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2868
2869
/* Everything from here to the matching #endif is compiled only when
 * PCI support is configured. */
#ifdef CONFIG_PCI
/* Forward declaration: the probe routine is defined after the
 * PCI-only helpers below. */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent);


static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2881
/*
 * module options
 */
/* Read in mv_pci_init_one(); the module_param()/MODULE_PARM_DESC()
 * registration lives at the bottom of the file, inside CONFIG_PCI. */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2886
2887
2888/* move to PCI layer or libata core? */
2889static int pci_go_64(struct pci_dev *pdev)
2890{
2891 int rc;
2892
2893 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2894 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2895 if (rc) {
2896 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2897 if (rc) {
2898 dev_printk(KERN_ERR, &pdev->dev,
2899 "64-bit DMA enable failed\n");
2900 return rc;
2901 }
2902 }
2903 } else {
2904 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2905 if (rc) {
2906 dev_printk(KERN_ERR, &pdev->dev,
2907 "32-bit DMA enable failed\n");
2908 return rc;
2909 }
2910 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2911 if (rc) {
2912 dev_printk(KERN_ERR, &pdev->dev,
2913 "32-bit consistent DMA enable failed\n");
2914 return rc;
2915 }
2916 }
2917
2918 return rc;
2919}
2920
Brett Russ05b308e2005-10-05 17:08:53 -04002921/**
2922 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002923 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002924 *
2925 * FIXME: complete this.
2926 *
2927 * LOCKING:
2928 * Inherited from caller.
2929 */
Tejun Heo4447d352007-04-17 23:44:08 +09002930static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002931{
Tejun Heo4447d352007-04-17 23:44:08 +09002932 struct pci_dev *pdev = to_pci_dev(host->dev);
2933 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002934 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002935 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002936
2937 /* Use this to determine the HW stepping of the chip so we know
2938 * what errata to workaround
2939 */
Brett Russ31961942005-09-30 01:36:00 -04002940 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2941 if (scc == 0)
2942 scc_s = "SCSI";
2943 else if (scc == 0x01)
2944 scc_s = "RAID";
2945 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002946 scc_s = "?";
2947
2948 if (IS_GEN_I(hpriv))
2949 gen = "I";
2950 else if (IS_GEN_II(hpriv))
2951 gen = "II";
2952 else if (IS_GEN_IIE(hpriv))
2953 gen = "IIE";
2954 else
2955 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002956
Jeff Garzika9524a72005-10-30 14:39:11 -05002957 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002958 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2959 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002960 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2961}
2962
/**
 *	mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *	@pdev: PCI device found
 *	@ent: PCI device ID entry for the matched host
 *
 *	The whole probe uses devm/pcim managed APIs, so each early
 *	"return rc" unwinds everything acquired so far automatically.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* driver_data selects the chip_* entry in mv_port_info[] */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	/* print the version banner once, on first probe only */
	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);	/* BAR busy: keep device enabled */
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	/* prefer 64-bit DMA masks, falling back to 32-bit */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);	/* MSI requested but failed: use INTx */

	mv_dump_pci_cfg(pdev, 0x68);
	/* NOTE(review): nothing here sets MV_HP_FLAG_MSI, so
	 * mv_print_info() reports "INTx" even when MSI was enabled
	 * above -- confirm whether that is intended. */
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003032#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003033
/* NOTE(review): these forward declarations appear redundant -- both
 * functions are defined earlier in this file -- but they are harmless
 * and kept for safety. */
static int mv_platform_probe(struct platform_device *pdev);
static int __devexit mv_platform_remove(struct platform_device *pdev);
3036
/*
 * Register the PCI driver (when configured) and the platform driver.
 * If the platform registration fails after a successful PCI
 * registration, the PCI driver is unregistered again so the module
 * load fails cleanly.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	/* rc is overwritten here in both configurations */
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* roll back the PCI registration on platform failure */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3053
/* Tear down both bus bindings registered by mv_init(). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3061
MODULE_AUTHOR("Brett Russ");
MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
MODULE_LICENSE("GPL");
/* NOTE(review): mv_pci_tbl is referenced unconditionally here but the
 * PCI driver bits above are under CONFIG_PCI -- confirm the table is
 * defined (earlier in this file) even when CONFIG_PCI is unset. */
MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_NAME);	/* autoload on platform-device match */

#ifdef CONFIG_PCI
/* msi defaults to 0 (INTx); world-readable, not writable at runtime */
module_param(msi, int, 0444);
MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
#endif

module_init(mv_init);
module_exit(mv_exit);