blob: b3f35a6af32beead6be82a1e61ce1a1b20bd05b9 [file] [log] [blame]
/*
 * sata_mv.c - Marvell SATA support
 *
 * Copyright 2008: Marvell Corporation, all rights reserved.
 * Copyright 2005: EMC Corporation, all rights reserved.
 * Copyright 2005 Red Hat, Inc.  All rights reserved.
 *
 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
24
/*
  sata_mv TODO list:

  1) Needs a full errata audit for all chipsets.  I implemented most
  of the errata workarounds found in the Marvell vendor driver, but
  I distinctly remember a couple workarounds (one related to PCI-X)
  are still needed.

  2) Improve/fix IRQ and error handling sequences.

  3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).

  4) Think about TCQ support here, and for libata in general
  with controllers that support it via host-queuing hardware
  (a software-only implementation could be a nightmare).

  5) Investigate problems with PCI Message Signalled Interrupts (MSI).

  6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.

  7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).

  8) Develop a low-power-consumption strategy, and implement it.

  9) [Experiment, low priority] See if ATAPI can be supported using
  "unknown FIS" or "vendor-specific FIS" support, or something creative
  like that.

  10) [Experiment, low priority] Investigate interrupt coalescing.
  Quite often, especially with PCI Message Signalled Interrupts (MSI),
  the overhead reduced by interrupt mitigation is not worth the
  latency cost.

  11) [Experiment, Marvell value added] Is it possible to use target
  mode to cross-connect two Linux boxes with Marvell cards?  If so,
  creating LibATA target mode support would be very interesting.

  Target mode, for those without docs, is the ability to directly
  connect two SATA controllers.
*/
66
Brett Russ20f733e2005-09-01 18:26:17 -040067#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/init.h>
71#include <linux/blkdev.h>
72#include <linux/delay.h>
73#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080074#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050077#include <linux/platform_device.h>
78#include <linux/ata_platform.h>
Lennert Buytenhek15a32632008-03-27 14:51:39 -040079#include <linux/mbus.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050081#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040082#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040083#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040084
/* Driver identity, reported to libata and sysfs. */
#define DRV_NAME	"sata_mv"
#define DRV_VERSION	"1.20"
88enum {
89 /* BAR's are enumerated in terms of pci_resource_start() terms */
90 MV_PRIMARY_BAR = 0, /* offset 0x10: memory space */
91 MV_IO_BAR = 2, /* offset 0x18: IO space */
92 MV_MISC_BAR = 3, /* offset 0x1c: FLASH, NVRAM, SRAM */
93
94 MV_MAJOR_REG_AREA_SZ = 0x10000, /* 64KB */
95 MV_MINOR_REG_AREA_SZ = 0x2000, /* 8KB */
96
97 MV_PCI_REG_BASE = 0,
98 MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */
Mark Lord615ab952006-05-19 16:24:56 -040099 MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08),
100 MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88),
101 MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c),
102 MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc),
103 MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0),
104
Brett Russ20f733e2005-09-01 18:26:17 -0400105 MV_SATAHC0_REG_BASE = 0x20000,
Jeff Garzik522479f2005-11-12 22:14:02 -0500106 MV_FLASH_CTL = 0x1046c,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500107 MV_GPIO_PORT_CTL = 0x104f0,
108 MV_RESET_CFG = 0x180d8,
Brett Russ20f733e2005-09-01 18:26:17 -0400109
110 MV_PCI_REG_SZ = MV_MAJOR_REG_AREA_SZ,
111 MV_SATAHC_REG_SZ = MV_MAJOR_REG_AREA_SZ,
112 MV_SATAHC_ARBTR_REG_SZ = MV_MINOR_REG_AREA_SZ, /* arbiter */
113 MV_PORT_REG_SZ = MV_MINOR_REG_AREA_SZ,
114
Brett Russ31961942005-09-30 01:36:00 -0400115 MV_MAX_Q_DEPTH = 32,
116 MV_MAX_Q_DEPTH_MASK = MV_MAX_Q_DEPTH - 1,
117
118 /* CRQB needs alignment on a 1KB boundary. Size == 1KB
119 * CRPB needs alignment on a 256B boundary. Size == 256B
Brett Russ31961942005-09-30 01:36:00 -0400120 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
121 */
122 MV_CRQB_Q_SZ = (32 * MV_MAX_Q_DEPTH),
123 MV_CRPB_Q_SZ = (8 * MV_MAX_Q_DEPTH),
Mark Lordda2fa9b2008-01-26 18:32:45 -0500124 MV_MAX_SG_CT = 256,
Brett Russ31961942005-09-30 01:36:00 -0400125 MV_SG_TBL_SZ = (16 * MV_MAX_SG_CT),
Brett Russ31961942005-09-30 01:36:00 -0400126
Mark Lord352fab72008-04-19 14:43:42 -0400127 /* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
Brett Russ20f733e2005-09-01 18:26:17 -0400128 MV_PORT_HC_SHIFT = 2,
Mark Lord352fab72008-04-19 14:43:42 -0400129 MV_PORTS_PER_HC = (1 << MV_PORT_HC_SHIFT), /* 4 */
130 /* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
131 MV_PORT_MASK = (MV_PORTS_PER_HC - 1), /* 3 */
Brett Russ20f733e2005-09-01 18:26:17 -0400132
133 /* Host Flags */
134 MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */
135 MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100136 /* SoC integrated controllers, no PCI interface */
Mark Lorde12bef52008-03-31 19:33:56 -0400137 MV_FLAG_SOC = (1 << 28),
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100138
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400139 MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400140 ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
141 ATA_FLAG_PIO_POLLING,
Jeff Garzik47c2b672005-11-12 21:13:17 -0500142 MV_6XXX_FLAGS = MV_FLAG_IRQ_COALESCE,
Brett Russ20f733e2005-09-01 18:26:17 -0400143
Brett Russ31961942005-09-30 01:36:00 -0400144 CRQB_FLAG_READ = (1 << 0),
145 CRQB_TAG_SHIFT = 1,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400146 CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */
Mark Lorde12bef52008-03-31 19:33:56 -0400147 CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400148 CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */
Brett Russ31961942005-09-30 01:36:00 -0400149 CRQB_CMD_ADDR_SHIFT = 8,
150 CRQB_CMD_CS = (0x2 << 11),
151 CRQB_CMD_LAST = (1 << 15),
152
153 CRPB_FLAG_STATUS_SHIFT = 8,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400154 CRPB_IOID_SHIFT_6 = 5, /* CRPB Gen-II IO Id shift */
155 CRPB_IOID_SHIFT_7 = 7, /* CRPB Gen-IIE IO Id shift */
Brett Russ31961942005-09-30 01:36:00 -0400156
157 EPRD_FLAG_END_OF_TBL = (1 << 31),
158
Brett Russ20f733e2005-09-01 18:26:17 -0400159 /* PCI interface registers */
160
Brett Russ31961942005-09-30 01:36:00 -0400161 PCI_COMMAND_OFS = 0xc00,
162
Brett Russ20f733e2005-09-01 18:26:17 -0400163 PCI_MAIN_CMD_STS_OFS = 0xd30,
164 STOP_PCI_MASTER = (1 << 2),
165 PCI_MASTER_EMPTY = (1 << 3),
166 GLOB_SFT_RST = (1 << 4),
167
Jeff Garzik522479f2005-11-12 22:14:02 -0500168 MV_PCI_MODE = 0xd00,
169 MV_PCI_EXP_ROM_BAR_CTL = 0xd2c,
170 MV_PCI_DISC_TIMER = 0xd04,
171 MV_PCI_MSI_TRIGGER = 0xc38,
172 MV_PCI_SERR_MASK = 0xc28,
173 MV_PCI_XBAR_TMOUT = 0x1d04,
174 MV_PCI_ERR_LOW_ADDRESS = 0x1d40,
175 MV_PCI_ERR_HIGH_ADDRESS = 0x1d44,
176 MV_PCI_ERR_ATTRIBUTE = 0x1d48,
177 MV_PCI_ERR_COMMAND = 0x1d50,
178
Mark Lord02a121d2007-12-01 13:07:22 -0500179 PCI_IRQ_CAUSE_OFS = 0x1d58,
180 PCI_IRQ_MASK_OFS = 0x1d5c,
Brett Russ20f733e2005-09-01 18:26:17 -0400181 PCI_UNMASK_ALL_IRQS = 0x7fffff, /* bits 22-0 */
182
Mark Lord02a121d2007-12-01 13:07:22 -0500183 PCIE_IRQ_CAUSE_OFS = 0x1900,
184 PCIE_IRQ_MASK_OFS = 0x1910,
Mark Lord646a4da2008-01-26 18:30:37 -0500185 PCIE_UNMASK_ALL_IRQS = 0x40a, /* assorted bits */
Mark Lord02a121d2007-12-01 13:07:22 -0500186
Brett Russ20f733e2005-09-01 18:26:17 -0400187 HC_MAIN_IRQ_CAUSE_OFS = 0x1d60,
188 HC_MAIN_IRQ_MASK_OFS = 0x1d64,
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500189 HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
190 HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
Mark Lord352fab72008-04-19 14:43:42 -0400191 ERR_IRQ = (1 << 0), /* shift by port # */
192 DONE_IRQ = (1 << 1), /* shift by port # */
Brett Russ20f733e2005-09-01 18:26:17 -0400193 HC0_IRQ_PEND = 0x1ff, /* bits 0-8 = HC0's ports */
194 HC_SHIFT = 9, /* bits 9-17 = HC1's ports */
195 PCI_ERR = (1 << 18),
196 TRAN_LO_DONE = (1 << 19), /* 6xxx: IRQ coalescing */
197 TRAN_HI_DONE = (1 << 20), /* 6xxx: IRQ coalescing */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500198 PORTS_0_3_COAL_DONE = (1 << 8),
199 PORTS_4_7_COAL_DONE = (1 << 17),
Brett Russ20f733e2005-09-01 18:26:17 -0400200 PORTS_0_7_COAL_DONE = (1 << 21), /* 6xxx: IRQ coalescing */
201 GPIO_INT = (1 << 22),
202 SELF_INT = (1 << 23),
203 TWSI_INT = (1 << 24),
204 HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */
Jeff Garzikfb621e22007-02-25 04:19:45 -0500205 HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */
Mark Lorde12bef52008-03-31 19:33:56 -0400206 HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */
Jeff Garzik8b260242005-11-12 12:32:50 -0500207 HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE |
Mark Lordf9f7fe02008-04-19 14:44:42 -0400208 PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
Brett Russ20f733e2005-09-01 18:26:17 -0400209 PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
210 HC_MAIN_RSVD),
Jeff Garzikfb621e22007-02-25 04:19:45 -0500211 HC_MAIN_MASKED_IRQS_5 = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
212 HC_MAIN_RSVD_5),
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500213 HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
Brett Russ20f733e2005-09-01 18:26:17 -0400214
215 /* SATAHC registers */
216 HC_CFG_OFS = 0,
217
218 HC_IRQ_CAUSE_OFS = 0x14,
Mark Lord352fab72008-04-19 14:43:42 -0400219 DMA_IRQ = (1 << 0), /* shift by port # */
220 HC_COAL_IRQ = (1 << 4), /* IRQ coalescing */
Brett Russ20f733e2005-09-01 18:26:17 -0400221 DEV_IRQ = (1 << 8), /* shift by port # */
222
223 /* Shadow block registers */
Brett Russ31961942005-09-30 01:36:00 -0400224 SHD_BLK_OFS = 0x100,
225 SHD_CTL_AST_OFS = 0x20, /* ofs from SHD_BLK_OFS */
Brett Russ20f733e2005-09-01 18:26:17 -0400226
227 /* SATA registers */
228 SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */
229 SATA_ACTIVE_OFS = 0x350,
Mark Lord0c589122008-01-26 18:31:16 -0500230 SATA_FIS_IRQ_CAUSE_OFS = 0x364,
Mark Lord17c5aab2008-04-16 14:56:51 -0400231
Mark Lorde12bef52008-03-31 19:33:56 -0400232 LTMODE_OFS = 0x30c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400233 LTMODE_BIT8 = (1 << 8), /* unknown, but necessary */
234
Jeff Garzik47c2b672005-11-12 21:13:17 -0500235 PHY_MODE3 = 0x310,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500236 PHY_MODE4 = 0x314,
237 PHY_MODE2 = 0x330,
Mark Lorde12bef52008-03-31 19:33:56 -0400238 SATA_IFCTL_OFS = 0x344,
239 SATA_IFSTAT_OFS = 0x34c,
240 VENDOR_UNIQUE_FIS_OFS = 0x35c,
Mark Lord17c5aab2008-04-16 14:56:51 -0400241
Mark Lorde12bef52008-03-31 19:33:56 -0400242 FIS_CFG_OFS = 0x360,
Mark Lord17c5aab2008-04-16 14:56:51 -0400243 FIS_CFG_SINGLE_SYNC = (1 << 16), /* SYNC on DMA activation */
244
Jeff Garzikc9d39132005-11-13 17:47:51 -0500245 MV5_PHY_MODE = 0x74,
246 MV5_LT_MODE = 0x30,
247 MV5_PHY_CTL = 0x0C,
Mark Lorde12bef52008-03-31 19:33:56 -0400248 SATA_INTERFACE_CFG = 0x050,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500249
250 MV_M2_PREAMP_MASK = 0x7e0,
Brett Russ20f733e2005-09-01 18:26:17 -0400251
252 /* Port registers */
253 EDMA_CFG_OFS = 0,
Mark Lord0c589122008-01-26 18:31:16 -0500254 EDMA_CFG_Q_DEPTH = 0x1f, /* max device queue depth */
255 EDMA_CFG_NCQ = (1 << 5), /* for R/W FPDMA queued */
256 EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */
257 EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */
258 EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */
Mark Lorde12bef52008-03-31 19:33:56 -0400259 EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */
260 EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */
Brett Russ20f733e2005-09-01 18:26:17 -0400261
262 EDMA_ERR_IRQ_CAUSE_OFS = 0x8,
263 EDMA_ERR_IRQ_MASK_OFS = 0xc,
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400264 EDMA_ERR_D_PAR = (1 << 0), /* UDMA data parity err */
265 EDMA_ERR_PRD_PAR = (1 << 1), /* UDMA PRD parity err */
266 EDMA_ERR_DEV = (1 << 2), /* device error */
267 EDMA_ERR_DEV_DCON = (1 << 3), /* device disconnect */
268 EDMA_ERR_DEV_CON = (1 << 4), /* device connected */
269 EDMA_ERR_SERR = (1 << 5), /* SError bits [WBDST] raised */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400270 EDMA_ERR_SELF_DIS = (1 << 7), /* Gen II/IIE self-disable */
271 EDMA_ERR_SELF_DIS_5 = (1 << 8), /* Gen I self-disable */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400272 EDMA_ERR_BIST_ASYNC = (1 << 8), /* BIST FIS or Async Notify */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400273 EDMA_ERR_TRANS_IRQ_7 = (1 << 8), /* Gen IIE transprt layer irq */
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400274 EDMA_ERR_CRQB_PAR = (1 << 9), /* CRQB parity error */
275 EDMA_ERR_CRPB_PAR = (1 << 10), /* CRPB parity error */
276 EDMA_ERR_INTRL_PAR = (1 << 11), /* internal parity error */
277 EDMA_ERR_IORDY = (1 << 12), /* IORdy timeout */
Mark Lord646a4da2008-01-26 18:30:37 -0500278
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400279 EDMA_ERR_LNK_CTRL_RX = (0xf << 13), /* link ctrl rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500280 EDMA_ERR_LNK_CTRL_RX_0 = (1 << 13), /* transient: CRC err */
281 EDMA_ERR_LNK_CTRL_RX_1 = (1 << 14), /* transient: FIFO err */
282 EDMA_ERR_LNK_CTRL_RX_2 = (1 << 15), /* fatal: caught SYNC */
283 EDMA_ERR_LNK_CTRL_RX_3 = (1 << 16), /* transient: FIS rx err */
284
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400285 EDMA_ERR_LNK_DATA_RX = (0xf << 17), /* link data rx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500286
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400287 EDMA_ERR_LNK_CTRL_TX = (0x1f << 21), /* link ctrl tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500288 EDMA_ERR_LNK_CTRL_TX_0 = (1 << 21), /* transient: CRC err */
289 EDMA_ERR_LNK_CTRL_TX_1 = (1 << 22), /* transient: FIFO err */
290 EDMA_ERR_LNK_CTRL_TX_2 = (1 << 23), /* transient: caught SYNC */
291 EDMA_ERR_LNK_CTRL_TX_3 = (1 << 24), /* transient: caught DMAT */
292 EDMA_ERR_LNK_CTRL_TX_4 = (1 << 25), /* transient: FIS collision */
293
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400294 EDMA_ERR_LNK_DATA_TX = (0x1f << 26), /* link data tx error */
Mark Lord646a4da2008-01-26 18:30:37 -0500295
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400296 EDMA_ERR_TRANS_PROTO = (1 << 31), /* transport protocol error */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400297 EDMA_ERR_OVERRUN_5 = (1 << 5),
298 EDMA_ERR_UNDERRUN_5 = (1 << 6),
Mark Lord646a4da2008-01-26 18:30:37 -0500299
300 EDMA_ERR_IRQ_TRANSIENT = EDMA_ERR_LNK_CTRL_RX_0 |
301 EDMA_ERR_LNK_CTRL_RX_1 |
302 EDMA_ERR_LNK_CTRL_RX_3 |
Mark Lord40f0bc22008-04-16 14:57:25 -0400303 EDMA_ERR_LNK_CTRL_TX |
304 /* temporary, until we fix hotplug: */
305 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),
Mark Lord646a4da2008-01-26 18:30:37 -0500306
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400307 EDMA_EH_FREEZE = EDMA_ERR_D_PAR |
308 EDMA_ERR_PRD_PAR |
309 EDMA_ERR_DEV_DCON |
310 EDMA_ERR_DEV_CON |
311 EDMA_ERR_SERR |
312 EDMA_ERR_SELF_DIS |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400313 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400314 EDMA_ERR_CRPB_PAR |
315 EDMA_ERR_INTRL_PAR |
316 EDMA_ERR_IORDY |
317 EDMA_ERR_LNK_CTRL_RX_2 |
318 EDMA_ERR_LNK_DATA_RX |
319 EDMA_ERR_LNK_DATA_TX |
320 EDMA_ERR_TRANS_PROTO,
Mark Lorde12bef52008-03-31 19:33:56 -0400321
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400322 EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR |
323 EDMA_ERR_PRD_PAR |
324 EDMA_ERR_DEV_DCON |
325 EDMA_ERR_DEV_CON |
326 EDMA_ERR_OVERRUN_5 |
327 EDMA_ERR_UNDERRUN_5 |
328 EDMA_ERR_SELF_DIS_5 |
Jeff Garzik6c1153e2007-07-13 15:20:15 -0400329 EDMA_ERR_CRQB_PAR |
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400330 EDMA_ERR_CRPB_PAR |
331 EDMA_ERR_INTRL_PAR |
332 EDMA_ERR_IORDY,
Brett Russ20f733e2005-09-01 18:26:17 -0400333
Brett Russ31961942005-09-30 01:36:00 -0400334 EDMA_REQ_Q_BASE_HI_OFS = 0x10,
335 EDMA_REQ_Q_IN_PTR_OFS = 0x14, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400336
337 EDMA_REQ_Q_OUT_PTR_OFS = 0x18,
338 EDMA_REQ_Q_PTR_SHIFT = 5,
339
340 EDMA_RSP_Q_BASE_HI_OFS = 0x1c,
341 EDMA_RSP_Q_IN_PTR_OFS = 0x20,
342 EDMA_RSP_Q_OUT_PTR_OFS = 0x24, /* also contains BASE_LO */
Brett Russ31961942005-09-30 01:36:00 -0400343 EDMA_RSP_Q_PTR_SHIFT = 3,
344
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400345 EDMA_CMD_OFS = 0x28, /* EDMA command register */
346 EDMA_EN = (1 << 0), /* enable EDMA */
347 EDMA_DS = (1 << 1), /* disable EDMA; self-negated */
348 ATA_RST = (1 << 2), /* reset trans/link/phy */
Brett Russ20f733e2005-09-01 18:26:17 -0400349
Jeff Garzikc9d39132005-11-13 17:47:51 -0500350 EDMA_IORDY_TMOUT = 0x34,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500351 EDMA_ARB_CFG = 0x38,
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500352
Mark Lord352fab72008-04-19 14:43:42 -0400353 GEN_II_NCQ_MAX_SECTORS = 256, /* max sects/io on Gen2 w/NCQ */
354
Brett Russ31961942005-09-30 01:36:00 -0400355 /* Host private flags (hp_flags) */
356 MV_HP_FLAG_MSI = (1 << 0),
Jeff Garzik47c2b672005-11-12 21:13:17 -0500357 MV_HP_ERRATA_50XXB0 = (1 << 1),
358 MV_HP_ERRATA_50XXB2 = (1 << 2),
359 MV_HP_ERRATA_60X1B2 = (1 << 3),
360 MV_HP_ERRATA_60X1C0 = (1 << 4),
Jeff Garzike4e7b892006-01-31 12:18:41 -0500361 MV_HP_ERRATA_XX42A0 = (1 << 5),
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400362 MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */
363 MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */
364 MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */
Mark Lord02a121d2007-12-01 13:07:22 -0500365 MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */
Brett Russ20f733e2005-09-01 18:26:17 -0400366
Brett Russ31961942005-09-30 01:36:00 -0400367 /* Port private flags (pp_flags) */
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400368 MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */
Mark Lord72109162008-01-26 18:31:33 -0500369 MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */
Brett Russ31961942005-09-30 01:36:00 -0400370};
371
/* Chip-generation and bus-type tests on mv_host_priv::hp_flags. */
#define IS_GEN_I(hpriv)		((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv)	((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* SoC variants have no PCI interface (MV_FLAG_SOC set in port flags). */
#define HAS_PCI(host)		(!((host)->ports[0]->flags & MV_FLAG_SOC))

/* Mbus address-decode window registers, indexed 0..n. */
#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
379
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill-sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
394
/* Index into mv_port_info[] / per-chip setup tables. */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
405
Brett Russ31961942005-09-30 01:36:00 -0400406/* Command ReQuest Block: 32B */
407struct mv_crqb {
Mark Lorde1469872006-05-22 19:02:03 -0400408 __le32 sg_addr;
409 __le32 sg_addr_hi;
410 __le16 ctrl_flags;
411 __le16 ata_cmd[11];
Brett Russ31961942005-09-30 01:36:00 -0400412};
413
Jeff Garzike4e7b892006-01-31 12:18:41 -0500414struct mv_crqb_iie {
Mark Lorde1469872006-05-22 19:02:03 -0400415 __le32 addr;
416 __le32 addr_hi;
417 __le32 flags;
418 __le32 len;
419 __le32 ata_cmd[4];
Jeff Garzike4e7b892006-01-31 12:18:41 -0500420};
421
Brett Russ31961942005-09-30 01:36:00 -0400422/* Command ResPonse Block: 8B */
423struct mv_crpb {
Mark Lorde1469872006-05-22 19:02:03 -0400424 __le16 id;
425 __le16 flags;
426 __le32 tmstmp;
Brett Russ31961942005-09-30 01:36:00 -0400427};
428
429/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG */
430struct mv_sg {
Mark Lorde1469872006-05-22 19:02:03 -0400431 __le32 addr;
432 __le32 flags_size;
433 __le32 addr_hi;
434 __le32 reserved;
Brett Russ20f733e2005-09-01 18:26:17 -0400435};
436
437struct mv_port_priv {
Brett Russ31961942005-09-30 01:36:00 -0400438 struct mv_crqb *crqb;
439 dma_addr_t crqb_dma;
440 struct mv_crpb *crpb;
441 dma_addr_t crpb_dma;
Mark Lordeb73d552008-01-29 13:24:00 -0500442 struct mv_sg *sg_tbl[MV_MAX_Q_DEPTH];
443 dma_addr_t sg_tbl_dma[MV_MAX_Q_DEPTH];
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400444
445 unsigned int req_idx;
446 unsigned int resp_idx;
447
Brett Russ31961942005-09-30 01:36:00 -0400448 u32 pp_flags;
Brett Russ20f733e2005-09-01 18:26:17 -0400449};
450
Jeff Garzikbca1c4e2005-11-12 12:48:15 -0500451struct mv_port_signal {
452 u32 amps;
453 u32 pre;
454};
455
Mark Lord02a121d2007-12-01 13:07:22 -0500456struct mv_host_priv {
457 u32 hp_flags;
458 struct mv_port_signal signal[8];
459 const struct mv_hw_ops *ops;
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500460 int n_ports;
461 void __iomem *base;
462 void __iomem *main_cause_reg_addr;
463 void __iomem *main_mask_reg_addr;
Mark Lord02a121d2007-12-01 13:07:22 -0500464 u32 irq_cause_ofs;
465 u32 irq_mask_ofs;
466 u32 unmask_all_irqs;
Mark Lordda2fa9b2008-01-26 18:32:45 -0500467 /*
468 * These consistent DMA memory pools give us guaranteed
469 * alignment for hardware-accessed data structures,
470 * and less memory waste in accomplishing the alignment.
471 */
472 struct dma_pool *crqb_pool;
473 struct dma_pool *crpb_pool;
474 struct dma_pool *sg_tbl_pool;
Mark Lord02a121d2007-12-01 13:07:22 -0500475};
476
Jeff Garzik47c2b672005-11-12 21:13:17 -0500477struct mv_hw_ops {
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500478 void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
479 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500480 void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
481 void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
482 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500483 int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
484 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500485 void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100486 void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500487};
488
Tejun Heoda3dbb12007-07-16 14:29:40 +0900489static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
490static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
491static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
492static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
Brett Russ31961942005-09-30 01:36:00 -0400493static int mv_port_start(struct ata_port *ap);
494static void mv_port_stop(struct ata_port *ap);
495static void mv_qc_prep(struct ata_queued_cmd *qc);
Jeff Garzike4e7b892006-01-31 12:18:41 -0500496static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
Tejun Heo9a3d9eb2006-01-23 13:09:36 +0900497static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
Tejun Heoa1efdab2008-03-25 12:22:50 +0900498static int mv_hardreset(struct ata_link *link, unsigned int *class,
499 unsigned long deadline);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400500static void mv_eh_freeze(struct ata_port *ap);
501static void mv_eh_thaw(struct ata_port *ap);
Mark Lordf2738272008-01-26 18:32:29 -0500502static void mv6_dev_config(struct ata_device *dev);
Brett Russ20f733e2005-09-01 18:26:17 -0400503
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500504static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
505 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500506static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
507static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
508 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500509static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
510 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500511static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100512static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500513
Jeff Garzik2a47ce02005-11-12 23:05:14 -0500514static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
515 unsigned int port);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500516static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
517static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
518 void __iomem *mmio);
Jeff Garzikc9d39132005-11-13 17:47:51 -0500519static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
520 unsigned int n_hc);
Jeff Garzik522479f2005-11-12 22:14:02 -0500521static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500522static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
523 void __iomem *mmio);
524static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
525 void __iomem *mmio);
526static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
527 void __iomem *mmio, unsigned int n_hc);
528static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
529 void __iomem *mmio);
530static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);
Saeed Bishara7bb3c522008-01-30 11:50:45 -1100531static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400532static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500533 unsigned int port_no);
Mark Lorde12bef52008-03-31 19:33:56 -0400534static int mv_stop_edma(struct ata_port *ap);
Mark Lordb5624682008-03-31 19:34:40 -0400535static int mv_stop_edma_engine(void __iomem *port_mmio);
Mark Lorde12bef52008-03-31 19:33:56 -0400536static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
Jeff Garzik47c2b672005-11-12 21:13:17 -0500537
Mark Lorde49856d2008-04-16 14:59:07 -0400538static void mv_pmp_select(struct ata_port *ap, int pmp);
539static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
540 unsigned long deadline);
541static int mv_softreset(struct ata_link *link, unsigned int *class,
542 unsigned long deadline);
Brett Russ20f733e2005-09-01 18:26:17 -0400543
Mark Lordeb73d552008-01-29 13:24:00 -0500544/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
545 * because we have to allow room for worst case splitting of
546 * PRDs for 64K boundaries in mv_fill_sg().
547 */
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400548static struct scsi_host_template mv5_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900549 ATA_BASE_SHT(DRV_NAME),
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400550 .sg_tablesize = MV_MAX_SG_CT / 2,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400551 .dma_boundary = MV_DMA_BOUNDARY,
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400552};
553
554static struct scsi_host_template mv6_sht = {
Tejun Heo68d1d072008-03-25 12:22:49 +0900555 ATA_NCQ_SHT(DRV_NAME),
Mark Lord138bfdd2008-01-26 18:33:18 -0500556 .can_queue = MV_MAX_Q_DEPTH - 1,
Jeff Garzikbaf14aa2007-10-09 13:51:57 -0400557 .sg_tablesize = MV_MAX_SG_CT / 2,
Brett Russ20f733e2005-09-01 18:26:17 -0400558 .dma_boundary = MV_DMA_BOUNDARY,
Brett Russ20f733e2005-09-01 18:26:17 -0400559};
560
Tejun Heo029cfd62008-03-25 12:22:49 +0900561static struct ata_port_operations mv5_ops = {
562 .inherits = &ata_sff_port_ops,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500563
564 .qc_prep = mv_qc_prep,
565 .qc_issue = mv_qc_issue,
566
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400567 .freeze = mv_eh_freeze,
568 .thaw = mv_eh_thaw,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900569 .hardreset = mv_hardreset,
Tejun Heoa1efdab2008-03-25 12:22:50 +0900570 .error_handler = ata_std_error_handler, /* avoid SFF EH */
Tejun Heo029cfd62008-03-25 12:22:49 +0900571 .post_internal_cmd = ATA_OP_NULL,
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400572
Jeff Garzikc9d39132005-11-13 17:47:51 -0500573 .scr_read = mv5_scr_read,
574 .scr_write = mv5_scr_write,
575
576 .port_start = mv_port_start,
577 .port_stop = mv_port_stop,
Jeff Garzikc9d39132005-11-13 17:47:51 -0500578};
579
Tejun Heo029cfd62008-03-25 12:22:49 +0900580static struct ata_port_operations mv6_ops = {
581 .inherits = &mv5_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400582 .qc_defer = sata_pmp_qc_defer_cmd_switch,
Mark Lordf2738272008-01-26 18:32:29 -0500583 .dev_config = mv6_dev_config,
Brett Russ20f733e2005-09-01 18:26:17 -0400584 .scr_read = mv_scr_read,
585 .scr_write = mv_scr_write,
586
Mark Lorde49856d2008-04-16 14:59:07 -0400587 .pmp_hardreset = mv_pmp_hardreset,
588 .pmp_softreset = mv_softreset,
589 .softreset = mv_softreset,
590 .error_handler = sata_pmp_error_handler,
Brett Russ20f733e2005-09-01 18:26:17 -0400591};
592
Tejun Heo029cfd62008-03-25 12:22:49 +0900593static struct ata_port_operations mv_iie_ops = {
594 .inherits = &mv6_ops,
Mark Lorde49856d2008-04-16 14:59:07 -0400595 .qc_defer = ata_std_qc_defer, /* FIS-based switching */
Tejun Heo029cfd62008-03-25 12:22:49 +0900596 .dev_config = ATA_OP_NULL,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500597 .qc_prep = mv_qc_prep_iie,
Jeff Garzike4e7b892006-01-31 12:18:41 -0500598};
599
/*
 * Per-variant port attributes, indexed by the chip_* board ids that
 * mv_pci_tbl[] maps PCI devices to.  All variants use PIO0-4 and
 * UDMA6; they differ in HC count, NCQ/PMP capability, and port_ops.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
660
/*
 * PCI IDs handled by this driver.  The driver_data value is a
 * chip_* index into mv_port_info[] above.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
690
/* Low-level PHY/LED/reset helpers for Gen-I (50xx) chips */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
699
/* Low-level PHY/LED/reset helpers for Gen-II/IIe (60xx/70xx) PCI chips */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
708
/* Low-level helpers for the SoC (non-PCI) variant; shares mv6 PHY errata */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
717
Brett Russ20f733e2005-09-01 18:26:17 -0400718/*
719 * Functions
720 */
721
/* Write a register, then read it back so the PCI posted write is flushed. */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
727
Jeff Garzikc9d39132005-11-13 17:47:51 -0500728static inline unsigned int mv_hc_from_port(unsigned int port)
729{
730 return port >> MV_PORT_HC_SHIFT;
731}
732
733static inline unsigned int mv_hardport_from_port(unsigned int port)
734{
735 return port & MV_PORT_MASK;
736}
737
Mark Lord352fab72008-04-19 14:43:42 -0400738static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
739{
740 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
741}
742
Jeff Garzikc9d39132005-11-13 17:47:51 -0500743static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
744 unsigned int port)
745{
746 return mv_hc_base(base, mv_hc_from_port(port));
747}
748
Brett Russ20f733e2005-09-01 18:26:17 -0400749static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
750{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500751 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500752 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500753 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400754}
755
Mark Lorde12bef52008-03-31 19:33:56 -0400756static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
757{
758 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
759 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
760
761 return hc_mmio + ofs;
762}
763
/* MMIO base for the whole device, stashed in host private data. */
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}
769
Brett Russ20f733e2005-09-01 18:26:17 -0400770static inline void __iomem *mv_ap_base(struct ata_port *ap)
771{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -0500772 return mv_port_base(mv_host_base(ap->host), ap->port_no);
Brett Russ20f733e2005-09-01 18:26:17 -0400773}
774
Jeff Garzikcca39742006-08-24 03:19:22 -0400775static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400776{
Jeff Garzikcca39742006-08-24 03:19:22 -0400777 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400778}
779
/*
 * mv_set_edma_ptrs - program the EDMA request/response queue registers
 * @port_mmio: port MMIO base
 * @hpriv: host private data (consulted for errata flags)
 * @pp: port private data holding queue DMA addresses and sw indices
 *
 * Loads the hardware base/in/out pointer registers for both the
 * command request queue (CRQB) and command response queue (CRPB)
 * from the cached copies in @pp.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* CRQB ring must be 1KB aligned */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/*
	 * NOTE(review): XX42A0 parts apparently need the full low 32 bits
	 * of the queue base in the OUT (resp: IN) pointer register, not
	 * just the index field -- errata workaround, not verified here.
	 */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* CRPB ring must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
819
Brett Russ05b308e2005-10-05 17:08:53 -0400820/**
821 * mv_start_dma - Enable eDMA engine
822 * @base: port base address
823 * @pp: port private data
824 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900825 * Verify the local cache of the eDMA state is accurate with a
826 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400827 *
828 * LOCKING:
829 * Inherited from caller.
830 */
Mark Lord0c589122008-01-26 18:31:16 -0500831static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500832 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400833{
Mark Lord72109162008-01-26 18:31:33 -0500834 int want_ncq = (protocol == ATA_PROT_NCQ);
835
836 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
837 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
838 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -0400839 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -0500840 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400841 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500842 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lord352fab72008-04-19 14:43:42 -0400843 int hardport = mv_hardport_from_port(ap->port_no);
Mark Lord0c589122008-01-26 18:31:16 -0500844 void __iomem *hc_mmio = mv_hc_base_from_port(
Mark Lord352fab72008-04-19 14:43:42 -0400845 mv_host_base(ap->host), hardport);
Mark Lord0c589122008-01-26 18:31:16 -0500846 u32 hc_irq_cause, ipending;
847
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400848 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500849 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400850
Mark Lord0c589122008-01-26 18:31:16 -0500851 /* clear EDMA interrupt indicator, if any */
852 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Mark Lord352fab72008-04-19 14:43:42 -0400853 ipending = (DEV_IRQ | DMA_IRQ) << hardport;
Mark Lord0c589122008-01-26 18:31:16 -0500854 if (hc_irq_cause & ipending) {
855 writelfl(hc_irq_cause & ~ipending,
856 hc_mmio + HC_IRQ_CAUSE_OFS);
857 }
858
Mark Lorde12bef52008-03-31 19:33:56 -0400859 mv_edma_cfg(ap, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500860
861 /* clear FIS IRQ Cause */
862 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
863
Mark Lordf630d562008-01-26 18:31:00 -0500864 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400865
Mark Lordf630d562008-01-26 18:31:00 -0500866 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400867 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
868 }
Brett Russ31961942005-09-30 01:36:00 -0400869}
870
Brett Russ05b308e2005-10-05 17:08:53 -0400871/**
Mark Lorde12bef52008-03-31 19:33:56 -0400872 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -0400873 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -0400874 *
875 * LOCKING:
876 * Inherited from caller.
877 */
Mark Lordb5624682008-03-31 19:34:40 -0400878static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -0400879{
Mark Lordb5624682008-03-31 19:34:40 -0400880 int i;
Brett Russ31961942005-09-30 01:36:00 -0400881
Mark Lordb5624682008-03-31 19:34:40 -0400882 /* Disable eDMA. The disable bit auto clears. */
883 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -0500884
Mark Lordb5624682008-03-31 19:34:40 -0400885 /* Wait for the chip to confirm eDMA is off. */
886 for (i = 10000; i > 0; i--) {
887 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb52007-07-12 14:30:19 -0400888 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -0400889 return 0;
890 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -0400891 }
Mark Lordb5624682008-03-31 19:34:40 -0400892 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400893}
894
Mark Lorde12bef52008-03-31 19:33:56 -0400895static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400896{
Mark Lordb5624682008-03-31 19:34:40 -0400897 void __iomem *port_mmio = mv_ap_base(ap);
898 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400899
Mark Lordb5624682008-03-31 19:34:40 -0400900 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
901 return 0;
902 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
903 if (mv_stop_edma_engine(port_mmio)) {
904 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
905 return -EIO;
906 }
907 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400908}
909
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space at @start, 4 words per line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int b, w;
	for (b = 0; b < bytes; ) {
		DPRINTK("%p: ", start + b);
		for (w = 0; b < bytes && w < 4; w++) {
			printk("%08x ", readl(start + b));
			b += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
924
/* Debug helper: hex-dump @bytes of @pdev's PCI config space (no-op unless ATA_DEBUG). */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int b, w;
	u32 dw;
	for (b = 0; b < bytes; ) {
		DPRINTK("%02x: ", b);
		for (w = 0; b < bytes && w < 4; w++) {
			/* read errors are ignored; this is best-effort debug */
			(void) pci_read_config_dword(pdev, b, &dw);
			printk("%08x ", dw);
			b += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space plus the PCI, HC, EDMA and SATA
 * register blocks for @port, or for all ports when @port is negative.
 * Compiles to a no-op unless ATA_DEBUG is defined.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		/* negative port: dump everything */
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
985
Brett Russ20f733e2005-09-01 18:26:17 -0400986static unsigned int mv_scr_offset(unsigned int sc_reg_in)
987{
988 unsigned int ofs;
989
990 switch (sc_reg_in) {
991 case SCR_STATUS:
992 case SCR_CONTROL:
993 case SCR_ERROR:
994 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
995 break;
996 case SCR_ACTIVE:
997 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
998 break;
999 default:
1000 ofs = 0xffffffffU;
1001 break;
1002 }
1003 return ofs;
1004}
1005
Tejun Heoda3dbb12007-07-16 14:29:40 +09001006static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001007{
1008 unsigned int ofs = mv_scr_offset(sc_reg_in);
1009
Tejun Heoda3dbb12007-07-16 14:29:40 +09001010 if (ofs != 0xffffffffU) {
1011 *val = readl(mv_ap_base(ap) + ofs);
1012 return 0;
1013 } else
1014 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001015}
1016
Tejun Heoda3dbb12007-07-16 14:29:40 +09001017static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001018{
1019 unsigned int ofs = mv_scr_offset(sc_reg_in);
1020
Tejun Heoda3dbb12007-07-16 14:29:40 +09001021 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001022 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001023 return 0;
1024 } else
1025 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001026}
1027
Mark Lordf2738272008-01-26 18:32:29 -05001028static void mv6_dev_config(struct ata_device *adev)
1029{
1030 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001031 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1032 *
1033 * Gen-II does not support NCQ over a port multiplier
1034 * (no FIS-based switching).
1035 *
Mark Lordf2738272008-01-26 18:32:29 -05001036 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1037 * See mv_qc_prep() for more info.
1038 */
Mark Lorde49856d2008-04-16 14:59:07 -04001039 if (adev->flags & ATA_DFLAG_NCQ) {
Mark Lord352fab72008-04-19 14:43:42 -04001040 if (sata_pmp_attached(adev->link->ap)) {
Mark Lorde49856d2008-04-16 14:59:07 -04001041 adev->flags &= ~ATA_DFLAG_NCQ;
Mark Lord352fab72008-04-19 14:43:42 -04001042 ata_dev_printk(adev, KERN_INFO,
1043 "NCQ disabled for command-based switching\n");
1044 } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
1045 adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
1046 ata_dev_printk(adev, KERN_INFO,
1047 "max_sectors limited to %u for NCQ\n",
1048 adev->max_sectors);
1049 }
Mark Lorde49856d2008-04-16 14:59:07 -04001050 }
Mark Lordf2738272008-01-26 18:32:29 -05001051}
1052
Mark Lorde49856d2008-04-16 14:59:07 -04001053static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1054{
1055 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1056 /*
1057 * Various bit settings required for operation
1058 * in FIS-based switching (fbs) mode on GenIIe:
1059 */
1060 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1061 old_ltmode = readl(port_mmio + LTMODE_OFS);
1062 if (enable_fbs) {
1063 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1064 new_ltmode = old_ltmode | LTMODE_BIT8;
1065 } else { /* disable fbs */
1066 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1067 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1068 }
1069 if (new_fcfg != old_fcfg)
1070 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1071 if (new_ltmode != old_ltmode)
1072 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
Mark Lord0c589122008-01-26 18:31:16 -05001073}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001074
/*
 * mv_edma_cfg - program the EDMA_CFG register for this port
 * @ap: port to configure
 * @want_ncq: nonzero to configure the engine for NCQ operation
 *
 * Builds the chip-generation-specific EDMA configuration word and
 * writes it to EDMA_CFG.  Updates pp->pp_flags to track NCQ state,
 * and on Gen-IIe also switches FIS-based switching on/off.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		/* FBS is engaged only when doing NCQ through a PMP */
		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1113
Mark Lordda2fa9b2008-01-26 18:32:45 -05001114static void mv_port_free_dma_mem(struct ata_port *ap)
1115{
1116 struct mv_host_priv *hpriv = ap->host->private_data;
1117 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001118 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001119
1120 if (pp->crqb) {
1121 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1122 pp->crqb = NULL;
1123 }
1124 if (pp->crpb) {
1125 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1126 pp->crpb = NULL;
1127 }
Mark Lordeb73d552008-01-29 13:24:00 -05001128 /*
1129 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1130 * For later hardware, we have one unique sg_tbl per NCQ tag.
1131 */
1132 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1133 if (pp->sg_tbl[tag]) {
1134 if (tag == 0 || !IS_GEN_I(hpriv))
1135 dma_pool_free(hpriv->sg_tbl_pool,
1136 pp->sg_tbl[tag],
1137 pp->sg_tbl_dma[tag]);
1138 pp->sg_tbl[tag] = NULL;
1139 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001140 }
1141}
1142
Brett Russ05b308e2005-10-05 17:08:53 -04001143/**
1144 * mv_port_start - Port specific init/start routine.
1145 * @ap: ATA channel to manipulate
1146 *
1147 * Allocate and point to DMA memory, init port private memory,
1148 * zero indices.
1149 *
1150 * LOCKING:
1151 * Inherited from caller.
1152 */
Brett Russ31961942005-09-30 01:36:00 -04001153static int mv_port_start(struct ata_port *ap)
1154{
Jeff Garzikcca39742006-08-24 03:19:22 -04001155 struct device *dev = ap->host->dev;
1156 struct mv_host_priv *hpriv = ap->host->private_data;
Brett Russ31961942005-09-30 01:36:00 -04001157 struct mv_port_priv *pp;
James Bottomleydde20202008-02-19 11:36:56 +01001158 int tag;
Brett Russ31961942005-09-30 01:36:00 -04001159
Tejun Heo24dc5f32007-01-20 16:00:28 +09001160 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
Jeff Garzik6037d6b2005-11-04 22:08:00 -05001161 if (!pp)
Tejun Heo24dc5f32007-01-20 16:00:28 +09001162 return -ENOMEM;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001163 ap->private_data = pp;
Brett Russ31961942005-09-30 01:36:00 -04001164
Mark Lordda2fa9b2008-01-26 18:32:45 -05001165 pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
1166 if (!pp->crqb)
1167 return -ENOMEM;
1168 memset(pp->crqb, 0, MV_CRQB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001169
Mark Lordda2fa9b2008-01-26 18:32:45 -05001170 pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
1171 if (!pp->crpb)
1172 goto out_port_free_dma_mem;
1173 memset(pp->crpb, 0, MV_CRPB_Q_SZ);
Brett Russ31961942005-09-30 01:36:00 -04001174
Mark Lordeb73d552008-01-29 13:24:00 -05001175 /*
1176 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
1177 * For later hardware, we need one unique sg_tbl per NCQ tag.
1178 */
1179 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1180 if (tag == 0 || !IS_GEN_I(hpriv)) {
1181 pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
1182 GFP_KERNEL, &pp->sg_tbl_dma[tag]);
1183 if (!pp->sg_tbl[tag])
1184 goto out_port_free_dma_mem;
1185 } else {
1186 pp->sg_tbl[tag] = pp->sg_tbl[0];
1187 pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
1188 }
1189 }
Brett Russ31961942005-09-30 01:36:00 -04001190 return 0;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001191
1192out_port_free_dma_mem:
1193 mv_port_free_dma_mem(ap);
1194 return -ENOMEM;
Brett Russ31961942005-09-30 01:36:00 -04001195}
1196
/**
 *      mv_port_stop - Port specific cleanup/stop routine.
 *      @ap: ATA channel to manipulate
 *
 *      Stop DMA, cleanup port memory.
 *
 *      LOCKING:
 *      This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	/* quiesce the eDMA engine before releasing its queue memory */
	mv_stop_edma(ap);
	mv_port_free_dma_mem(ap);
}
1211
Brett Russ05b308e2005-10-05 17:08:53 -04001212/**
1213 * mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
1214 * @qc: queued command whose SG list to source from
1215 *
1216 * Populate the SG list and mark the last entry.
1217 *
1218 * LOCKING:
1219 * Inherited from caller.
1220 */
Jeff Garzik6c087722007-10-12 00:16:23 -04001221static void mv_fill_sg(struct ata_queued_cmd *qc)
Brett Russ31961942005-09-30 01:36:00 -04001222{
1223 struct mv_port_priv *pp = qc->ap->private_data;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001224 struct scatterlist *sg;
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001225 struct mv_sg *mv_sg, *last_sg = NULL;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001226 unsigned int si;
Brett Russ31961942005-09-30 01:36:00 -04001227
Mark Lordeb73d552008-01-29 13:24:00 -05001228 mv_sg = pp->sg_tbl[qc->tag];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001229 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Jeff Garzikd88184f2007-02-26 01:26:06 -05001230 dma_addr_t addr = sg_dma_address(sg);
1231 u32 sg_len = sg_dma_len(sg);
Brett Russ31961942005-09-30 01:36:00 -04001232
Olof Johansson4007b492007-10-02 20:45:27 -05001233 while (sg_len) {
1234 u32 offset = addr & 0xffff;
1235 u32 len = sg_len;
Brett Russ31961942005-09-30 01:36:00 -04001236
Olof Johansson4007b492007-10-02 20:45:27 -05001237 if ((offset + sg_len > 0x10000))
1238 len = 0x10000 - offset;
Jeff Garzik972c26b2005-10-18 22:14:54 -04001239
Olof Johansson4007b492007-10-02 20:45:27 -05001240 mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
1241 mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
Jeff Garzik6c087722007-10-12 00:16:23 -04001242 mv_sg->flags_size = cpu_to_le32(len & 0xffff);
Olof Johansson4007b492007-10-02 20:45:27 -05001243
1244 sg_len -= len;
1245 addr += len;
1246
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001247 last_sg = mv_sg;
Olof Johansson4007b492007-10-02 20:45:27 -05001248 mv_sg++;
Olof Johansson4007b492007-10-02 20:45:27 -05001249 }
Brett Russ31961942005-09-30 01:36:00 -04001250 }
Jeff Garzik3be6cbd2007-10-18 16:21:18 -04001251
1252 if (likely(last_sg))
1253 last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
Brett Russ31961942005-09-30 01:36:00 -04001254}
1255
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001256static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001257{
Mark Lord559eeda2006-05-19 16:40:15 -04001258 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001259 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001260 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001261}
1262
/**
 *      mv_qc_prep - Host specific command preparation.
 *      @qc: queued command to prepare
 *
 *      Does nothing unless the command uses the EDMA engine
 *      (DMA or NCQ protocol); non-EDMA commands are issued via the
 *      normal SFF path later in mv_qc_issue().  For EDMA commands,
 *      this builds the CRQB (command request block) in the software
 *      request queue slot, does some sanity checking, and calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* EDMA only handles DMA and NCQ protocols; everything else
	 * goes through the shadow (SFF) registers at issue time.
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Per-tag SG table address, split into low/high 32 bits.
	 * The double 16-bit shift (instead of ">> 32") avoids undefined
	 * behaviour when dma_addr_t is only 32 bits wide.
	 */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accommodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * Note: BUG_ON(tf->command) fires for any non-zero command
		 * byte, i.e. effectively always in this branch.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* Remaining taskfile registers, packed in hardware-required order;
	 * the final entry carries the "last" flag so EDMA knows to stop.
	 */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1353
/**
 *      mv_qc_prep_iie - Host specific command preparation (Gen IIE).
 *      @qc: queued command to prepare
 *
 *      Gen-IIE counterpart of mv_qc_prep(): does nothing unless the
 *      command uses the EDMA engine (DMA or NCQ protocol).  For EDMA
 *      commands it fills the fixed-layout Gen-IIE CRQB (which packs
 *      the whole taskfile into four 32-bit words instead of the
 *      variable-length register list used by older chips), then calls
 *      the SG load routine.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* non-EDMA protocols are issued via the SFF path instead */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;	/* Gen-IIE also wants the tag in the host-queue field */
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Per-tag SG table address; double 16-bit shift avoids UB
	 * when dma_addr_t is only 32 bits wide.
	 */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* Pack the entire taskfile into the four fixed CRQB words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1422
/**
 *      mv_qc_issue - Initiate a command to the host
 *      @qc: queued command to start
 *
 *      This routine simply redirects to the general purpose routine
 *      if command is not DMA.  Else, it sanity checks our local
 *      caches of the request producer/consumer indices then enables
 *      DMA and bumps the request producer index.
 *
 *      Returns 0 on success, or whatever ata_sff_qc_issue() returns
 *      for the non-EDMA path.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	/* (Re)start EDMA in the mode matching this command's protocol */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance software producer index; mv_qc_prep() already filled
	 * the CRQB slot for this index
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1466
/**
 *      mv_err_intr - Handle error interrupts on the port
 *      @ap: ATA channel to manipulate
 *      @qc: active command, or NULL if none; errors are attached to
 *           @qc->err_mask when present, otherwise to the link's eh_info
 *
 *      In most cases, just clear the interrupt and move on.  However,
 *      some cases require an eDMA reset, which also performs a COMRESET.
 *      The SERR case requires a clear of pending errors in the SATA
 *      SERROR register.  Finally, if the port disabled DMA,
 *      update our cached copy to match.
 *
 *      (Note: the kernel-doc previously described a stale
 *      @reset_allowed parameter; the current signature takes @qc.)
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			/* hardware turned EDMA off; keep our flag in sync */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			/* hardware turned EDMA off; keep our flag in sync */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze for the serious causes; otherwise just abort the qc */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1569
/* Handle a device interrupt while the port is in PIO (non-EDMA) mode:
 * read the status register (which also acks the interrupt) and, if a
 * non-polled command is active, complete it with that status.
 */
static void mv_intr_pio(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 ata_status;

	/* ignore spurious intr if drive still BUSY */
	ata_status = readb(ap->ioaddr.status_addr);
	if (unlikely(ata_status & ATA_BUSY))
		return;

	/* get active ATA command */
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (unlikely(!qc))			/* no active tag */
		return;
	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
		return;

	/* and finally, complete the ATA command */
	qc->err_mask |= ac_err_mask(ata_status);
	ata_qc_complete(qc);
}
1591
/* Drain the EDMA response queue: complete every CRPB entry the hardware
 * has produced since our last visit, then (if anything was consumed)
 * publish the new software out-pointer back to the hardware.  Bails out
 * to mv_err_intr() on the first errored response.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;		/* queue drained */

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: hand off without consuming the entry */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1658
/**
 *      mv_host_intr - Handle all interrupts on the given host controller
 *      @host: host specific structure
 *      @relevant: port error bits relevant to this host controller
 *      @hc: which host controller we're to look at
 *
 *      Read then write clear the HC interrupt status then walk each
 *      port connected to the HC and see if it needs servicing.  Port
 *      success ints are reported in the HC interrupt status reg, the
 *      port error ints are reported in the higher level main
 *      interrupt status register and thus are passed in via the
 *      'relevant' argument.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may have fewer ports than a full HC */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack the bits we are about to service */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hardport, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns two bits in the main cause register */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((ERR_IRQ << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hardport = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch on the port's current mode: EDMA vs PIO */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((DMA_IRQ << hardport) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hardport) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1741
/* Handle a PCI error interrupt: log and dump registers, clear the
 * cause register, then freeze every port that still has a live link,
 * attaching AC_ERR_HOST_BUS to the active qc (or the link's eh_info
 * when no command is active) and requesting a reset.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack the PCI error before touching the ports */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			if (!printed++)	/* describe the cause only once */
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1781
/**
 *      mv_interrupt - Main interrupt event handler
 *      @irq: unused
 *      @dev_instance: private data; in this case the host structure
 *
 *      Read the read only register to determine if any host
 *      controllers have pending interrupts.  If so, call lower level
 *      routine to handle.  Also check for PCI errors which are only
 *      reported here.
 *
 *      LOCKING:
 *      This routine holds the host lock while processing pending
 *      interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 main_cause, main_mask;

	spin_lock(&host->lock);
	main_cause = readl(hpriv->main_cause_reg_addr);
	main_mask = readl(hpriv->main_mask_reg_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 * (all-ones is what a dead/removed PCI device reads back as)
	 */
	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* fan out to each host controller with pending bits */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
1834
Jeff Garzikc9d39132005-11-13 17:47:51 -05001835static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1836{
1837 unsigned int ofs;
1838
1839 switch (sc_reg_in) {
1840 case SCR_STATUS:
1841 case SCR_ERROR:
1842 case SCR_CONTROL:
1843 ofs = sc_reg_in * sizeof(u32);
1844 break;
1845 default:
1846 ofs = 0xffffffffU;
1847 break;
1848 }
1849 return ofs;
1850}
1851
/* Gen-I SCR read: look up the register's offset in the per-port PHY
 * block and read it.  Returns 0 on success, -EINVAL for an unmapped
 * SCR register.
 */
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {	/* sentinel from mv5_scr_offset() */
		*val = readl(addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
1865
/* Gen-I SCR write: counterpart of mv5_scr_read().  Returns 0 on
 * success, -EINVAL for an unmapped SCR register.
 */
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
	unsigned int ofs = mv5_scr_offset(sc_reg_in);

	if (ofs != 0xffffffffU) {	/* sentinel from mv5_scr_offset() */
		writelfl(val, addr + ofs);
		return 0;
	} else
		return -EINVAL;
}
1879
/* Gen-I bus reset: set bit 0 of the expansion-ROM BAR control register
 * (skipped on revision-0 5080 chips), then perform the common PCI bus
 * reset sequence.
 */
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	int early_5080;

	/* revision-0 5080 parts must not have this bit touched */
	early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);

	if (!early_5080) {
		u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
		tmp |= (1 << 0);
		writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
	}

	mv5_reset_pci_bus(host, mmio);
}
1895
/* Gen-I flash-controller reset: restore the flash control register to
 * its default value.  (0x0fcfffff presumably matches the chip's
 * power-on default -- TODO confirm against the 50xx datasheet.)
 */
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x0fcfffff, mmio + MV_FLASH_CTL);
}
1900
/* Gen-I: sample the PHY mode register for port @idx and cache the
 * pre-emphasis and amplitude fields in hpriv->signal[] so they can be
 * re-applied after a PHY reset (see mv5_phy_errata()).
 */
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			    void __iomem *mmio)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
	u32 tmp;

	tmp = readl(phy_mmio + MV5_PHY_MODE);

	hpriv->signal[idx].pre = tmp & 0x1800;	/* bits 12:11 */
	hpriv->signal[idx].amps = tmp & 0xe0;	/* bits 7:5 */
}
1912
/* Gen-I LED setup: put the GPIO port control register in its default
 * state and adjust the expansion-ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets every bit EXCEPT bit 0, which
	 * looks like it may have been intended as "&= ~(1 << 0)" (clear
	 * bit 0, the inverse of mv5_reset_bus()).  Left untouched because
	 * changing it alters hardware behaviour -- confirm against the
	 * 50xx datasheet before fixing.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1925
/* Gen-I PHY errata workarounds for one port: optionally apply the
 * 50XXB0 APM/squelch fix, then rewrite the PHY mode register with the
 * pre-emphasis/amplitude values cached earlier by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	/* pre-emphasis (bits 12:11) + amplitude (bits 7:5) field mask */
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* restore cached signal settings into the PHY mode register */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1951
Jeff Garzikc9d39132005-11-13 17:47:51 -05001952
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/* Gen-I: reset one port's EDMA channel and restore its EDMA registers
 * to sane defaults.  The ZERO() offsets are EDMA registers relative to
 * the port's MMIO base; the inline comments name each one.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
1982
1983#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv5_reset_one_hc - reset the shared registers of one 50xx host controller.
 * @hpriv: host private data (unused here, kept for ops-vector signature)
 * @mmio: HBA base address
 * @hc: host controller index
 *
 * Zeroes the HC interrupt/coalescing registers at offsets 0x0c-0x18 and
 * rewrites the register at 0x20 with fixed magic bits (meaning not
 * documented here; values taken from the vendor driver — do not change).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	/* preserve bits 0x1c1c1c1c, force 0x03030303 — per vendor init */
	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
2000#undef ZERO
2001
2002static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2003 unsigned int n_hc)
2004{
2005 unsigned int hc, port;
2006
2007 for (hc = 0; hc < n_hc; hc++) {
2008 for (port = 0; port < MV_PORTS_PER_HC; port++)
2009 mv5_reset_hc_port(hpriv, mmio,
2010 (hc * MV_PORTS_PER_HC) + port);
2011
2012 mv5_reset_one_hc(hpriv, mmio, hc);
2013 }
2014
2015 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002016}
2017
Jeff Garzik101ffae2005-11-12 22:17:49 -05002018#undef ZERO
2019#define ZERO(reg) writel(0, mmio + (reg))
/*
 * mv_reset_pci_bus - reinitialize the PCI-side registers of the HBA.
 * @host: ATA host
 * @mmio: HBA base address
 *
 * Clears PCI timers, error latches, and IRQ cause/mask registers (via the
 * file-local ZERO() macro), and restores the crossbar timeout default.
 * The irq_cause/irq_mask offsets come from hpriv so the same code serves
 * both conventional-PCI and PCIe flavors of the chip.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	/* clear the upper byte region of MV_PCI_MODE, keep the rest */
	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);	/* crossbar timeout default */
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
2041#undef ZERO
2042
/*
 * mv6_reset_flash - 60xx flash-interface reset.
 * @hpriv: host private data
 * @mmio: HBA base address
 *
 * Performs the common 50xx flash reset, then additionally sets bits 5 and 6
 * in the GPIO port control register while keeping only the low two bits
 * (values per vendor init sequence; exact GPIO pin meaning not documented
 * here).
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2054
2055/**
2056 * mv6_reset_hc - Perform the 6xxx global soft reset
2057 * @mmio: base address of the HBA
2058 *
2059 * This routine only applies to 6xxx parts.
2060 *
2061 * LOCKING:
2062 * Inherited from caller.
2063 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;	/* rc: 0 = success, 1 = reset step failed */
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	/* Step 1: stop the PCI master and wait (up to ~1ms) for it to drain */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset */
	/* Step 2: assert global soft reset; retry up to 5 times */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}
2125
Jeff Garzik47c2b672005-11-12 21:13:17 -05002126static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002127 void __iomem *mmio)
2128{
2129 void __iomem *port_mmio;
2130 u32 tmp;
2131
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002132 tmp = readl(mmio + MV_RESET_CFG);
2133 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002134 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002135 hpriv->signal[idx].pre = 0x1 << 5;
2136 return;
2137 }
2138
2139 port_mmio = mv_port_base(mmio, idx);
2140 tmp = readl(port_mmio + PHY_MODE2);
2141
2142 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2143 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2144}
2145
/*
 * mv6_enable_leds - enable the SATA LEDs on 60xx parts via a single write
 * to the GPIO port control register (value per vendor init sequence).
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2150
/*
 * mv6_phy_errata - apply 60xx/IIE PHY errata workarounds to one port.
 * @hpriv: host private data (errata flags and saved signal values)
 * @mmio: HBA base address
 * @port: port number
 *
 * Sequence and magic values come from the vendor driver/errata docs; the
 * ordering of reads, writes, and udelay()s is significant — do not reorder.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 with bit 16 cleared, 200us in each state */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* 60X1B2 requires PHY_MODE3 to be saved across the MODE4 fix */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		/* ...and restored afterwards (writing MODE4 can clobber it) */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2217
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002218/* TODO: use the generic LED interface to configure the SATA Presence */
2219/* & Acitivy LEDs on the board */
2220static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2221 void __iomem *mmio)
2222{
2223 return;
2224}
2225
2226static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2227 void __iomem *mmio)
2228{
2229 void __iomem *port_mmio;
2230 u32 tmp;
2231
2232 port_mmio = mv_port_base(mmio, idx);
2233 tmp = readl(port_mmio + PHY_MODE2);
2234
2235 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2236 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2237}
2238
2239#undef ZERO
2240#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - hard-reset one SoC port and zero its EDMA state.
 *
 * Same sequence as mv5_reset_hc_port() but with the SoC EDMA config value
 * (0x101f instead of 0x11f).  Register write order follows the vendor
 * sequence.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);	/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);	/* SoC EDMA config default */
	ZERO(0x004);	/* timer */
	ZERO(0x008);	/* irq err cause */
	ZERO(0x00c);	/* irq err mask */
	ZERO(0x010);	/* rq bah */
	ZERO(0x014);	/* rq inp */
	ZERO(0x018);	/* rq outp */
	ZERO(0x01c);	/* respq bah */
	ZERO(0x024);	/* respq outp */
	ZERO(0x020);	/* respq inp */
	ZERO(0x02c);	/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);	/* IORDY timeout default */
}
2267
2268#undef ZERO
2269
2270#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - zero the shared registers of the single SoC host
 * controller (offsets 0x0c-0x14, via the file-local ZERO() macro).
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2281
2282#undef ZERO
2283
2284static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2285 void __iomem *mmio, unsigned int n_hc)
2286{
2287 unsigned int port;
2288
2289 for (port = 0; port < hpriv->n_ports; port++)
2290 mv_soc_reset_hc_port(hpriv, mmio, port);
2291
2292 mv_soc_reset_one_hc(hpriv, mmio);
2293
2294 return 0;
2295}
2296
2297static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2298 void __iomem *mmio)
2299{
2300 return;
2301}
2302
2303static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2304{
2305 return;
2306}
2307
Mark Lordb67a1062008-03-31 19:35:13 -04002308static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2309{
2310 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2311
2312 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2313 if (want_gen2i)
2314 ifctl |= (1 << 7); /* enable gen2i speed */
2315 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2316}
2317
Mark Lordb5624682008-03-31 19:34:40 -04002318/*
2319 * Caller must ensure that EDMA is not active,
2320 * by first doing mv_stop_edma() where needed.
2321 */
/*
 * mv_reset_channel - hard-reset one SATA channel (transport/link/phy).
 * @hpriv: host private data (generation checks, phy_errata op)
 * @mmio: HBA base address
 * @port_no: port to reset
 *
 * Caller must have already made sure EDMA is inactive (see the comment
 * above): stops the EDMA engine, strobes ATA_RST (which issues a COMRESET
 * to the device), then reapplies the per-generation PHY errata fixups.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* GEN_I parts need extra settling time after the reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2348
Mark Lorde49856d2008-04-16 14:59:07 -04002349static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002350{
Mark Lorde49856d2008-04-16 14:59:07 -04002351 if (sata_pmp_supported(ap)) {
2352 void __iomem *port_mmio = mv_ap_base(ap);
2353 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2354 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002355
Mark Lorde49856d2008-04-16 14:59:07 -04002356 if (old != pmp) {
2357 reg = (reg & ~0xf) | pmp;
2358 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2359 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09002360 }
Brett Russ20f733e2005-09-01 18:26:17 -04002361}
2362
/*
 * mv_pmp_hardreset - libata hardreset op for links behind a port
 * multiplier: select the PMP port for this link, then run the standard
 * SATA hardreset.
 */
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002369
/*
 * mv_softreset - libata softreset op: select the PMP port for this link,
 * then run the generic SFF softreset sequence.
 */
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
2376
/*
 * mv_hardreset - channel-level hardreset with errata FEr SATA#10 retry.
 * @link: link to reset
 * @class: out: device class from the reset
 * @deadline: jiffies deadline for the operation
 *
 * Resets the whole channel (which disables EDMA), then retries the link
 * hardreset until SStatus settles to a stable value (0x0, 0x113, 0x123).
 * If a GEN_II+ part repeatedly reports 0x121, the link speed is forced
 * down to 1.5Gb/s and the deadline extended once by up to HZ.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;	/* channel reset killed EDMA */

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
			sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
2411
/*
 * mv_eh_freeze - libata freeze op: mask this port's done/err interrupt
 * bits in the chip's main IRQ mask register.
 *
 * Each port owns two bits (DONE_IRQ | ERR_IRQ) in the main mask; ports on
 * the second host controller (port_no > 3) are shifted up by one extra bit.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	unsigned int shift;
	u32 main_mask;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0)
		shift++;	/* HC1 bit fields start one bit higher */

	/* disable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
2430
/*
 * mv_eh_thaw - libata thaw op: clear any stale port/HC interrupt causes,
 * then re-enable this port's done/err bits in the main IRQ mask register.
 *
 * Mirror image of mv_eh_freeze(); the shift/hc_port_no math maps the port
 * number onto the main-mask bit pair and the per-HC cause register bits.
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *mmio = hpriv->base;
	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	void __iomem *port_mmio = mv_ap_base(ap);
	unsigned int shift, hc_port_no = ap->port_no;
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	shift = ap->port_no * 2;
	if (hc > 0) {
		shift++;		/* HC1 bit fields start one bit higher */
		hc_port_no -= 4;	/* port index within its own HC */
	}

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hc_port_no);
	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
2462
Brett Russ05b308e2005-10-05 17:08:53 -04002463/**
2464 * mv_port_init - Perform some early initialization on a single port.
2465 * @port: libata data structure storing shadow register addresses
2466 * @port_mmio: base address of the port
2467 *
2468 * Initialize shadow register mmio addresses, clear outstanding
2469 * interrupts on the port, and unmask interrupts for the future
2470 * start of the port.
2471 *
2472 * LOCKING:
2473 * Inherited from caller.
2474 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	/* shadow register block holds the taskfile registers, one u32 apart */
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	/* SError is write-1-to-clear: write back whatever is currently set */
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2511
/*
 * mv_chip_id - identify the chip family/revision and record errata flags.
 * @host: ATA host
 * @board_idx: board table index (chip_5080, chip_604x, ...)
 *
 * Selects the hw-ops vector, sets generation and per-revision errata bits
 * in hpriv->hp_flags, and chooses PCI vs PCIe IRQ register offsets.
 * Returns 0 on success, 1 for an invalid board index.
 *
 * NOTE: chip_7042 intentionally falls through into chip_6042 below —
 * the 7042 is a PCIe 6042 and shares all of its setup.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* fall through: 7042 shares all chip_6042 setup below */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2651
Brett Russ05b308e2005-10-05 17:08:53 -04002652/**
Jeff Garzik47c2b672005-11-12 21:13:17 -05002653 * mv_init_host - Perform some early initialization of the host.
Tejun Heo4447d352007-04-17 23:44:08 +09002654 * @host: ATA host to initialize
2655 * @board_idx: controller index
Brett Russ05b308e2005-10-05 17:08:53 -04002656 *
2657 * If possible, do an early global reset of the host. Then do
2658 * our port init and clear/unmask all/relevant host interrupts.
2659 *
2660 * LOCKING:
2661 * Inherited from caller.
2662 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* main IRQ registers live at different offsets on PCI vs SoC */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	/* save per-port signal settings before resets clobber them */
	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2753
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002754static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2755{
2756 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2757 MV_CRQB_Q_SZ, 0);
2758 if (!hpriv->crqb_pool)
2759 return -ENOMEM;
2760
2761 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2762 MV_CRPB_Q_SZ, 0);
2763 if (!hpriv->crpb_pool)
2764 return -ENOMEM;
2765
2766 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2767 MV_SG_TBL_SZ, 0);
2768 if (!hpriv->sg_tbl_pool)
2769 return -ENOMEM;
2770
2771 return 0;
2772}
2773
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002774static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2775 struct mbus_dram_target_info *dram)
2776{
2777 int i;
2778
2779 for (i = 0; i < 4; i++) {
2780 writel(0, hpriv->base + WINDOW_CTRL(i));
2781 writel(0, hpriv->base + WINDOW_BASE(i));
2782 }
2783
2784 for (i = 0; i < dram->num_cs; i++) {
2785 struct mbus_dram_window *cs = dram->cs + i;
2786
2787 writel(((cs->size - 1) & 0xffff0000) |
2788 (cs->mbus_attr << 8) |
2789 (dram->mbus_dram_target_id << 4) | 1,
2790 hpriv->base + WINDOW_CTRL(i));
2791 writel(cs->base, hpriv->base + WINDOW_BASE(i));
2792 }
2793}
2794
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002795/**
2796 * mv_platform_probe - handle a positive probe of an soc Marvell
2797 * host
2798 * @pdev: platform device found
2799 *
2800 * LOCKING:
2801 * Inherited from caller.
2802 */
2803static int mv_platform_probe(struct platform_device *pdev)
2804{
2805 static int printed_version;
2806 const struct mv_sata_platform_data *mv_platform_data;
2807 const struct ata_port_info *ppi[] =
2808 { &mv_port_info[chip_soc], NULL };
2809 struct ata_host *host;
2810 struct mv_host_priv *hpriv;
2811 struct resource *res;
2812 int n_ports, rc;
2813
2814 if (!printed_version++)
2815 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2816
2817 /*
2818 * Simple resource validation ..
2819 */
2820 if (unlikely(pdev->num_resources != 2)) {
2821 dev_err(&pdev->dev, "invalid number of resources\n");
2822 return -EINVAL;
2823 }
2824
2825 /*
2826 * Get the register base first
2827 */
2828 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2829 if (res == NULL)
2830 return -EINVAL;
2831
2832 /* allocate host */
2833 mv_platform_data = pdev->dev.platform_data;
2834 n_ports = mv_platform_data->n_ports;
2835
2836 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2837 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2838
2839 if (!host || !hpriv)
2840 return -ENOMEM;
2841 host->private_data = hpriv;
2842 hpriv->n_ports = n_ports;
2843
2844 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002845 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2846 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002847 hpriv->base -= MV_SATAHC0_REG_BASE;
2848
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002849 /*
2850 * (Re-)program MBUS remapping windows if we are asked to.
2851 */
2852 if (mv_platform_data->dram != NULL)
2853 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2854
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002855 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2856 if (rc)
2857 return rc;
2858
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002859 /* initialize adapter */
2860 rc = mv_init_host(host, chip_soc);
2861 if (rc)
2862 return rc;
2863
2864 dev_printk(KERN_INFO, &pdev->dev,
2865 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2866 host->n_ports);
2867
2868 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2869 IRQF_SHARED, &mv6_sht);
2870}
2871
2872/*
2873 *
2874 * mv_platform_remove - unplug a platform interface
2875 * @pdev: platform device
2876 *
2877 * A platform bus SATA device has been unplugged. Perform the needed
2878 * cleanup. Also called on module unload for any active devices.
2879 */
2880static int __devexit mv_platform_remove(struct platform_device *pdev)
2881{
2882 struct device *dev = &pdev->dev;
2883 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002884
2885 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002886 return 0;
2887}
2888
/* SoC (platform bus) front end: binds by driver name, no ID table. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2897
2898
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002899#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002900static int mv_pci_init_one(struct pci_dev *pdev,
2901 const struct pci_device_id *ent);
2902
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002903
/* PCI front end: matches the chips listed in mv_pci_tbl. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2910
/*
 * module options
 */
static int msi;	      /* Use PCI msi; either zero (off, default) or non-zero */
2915
2916
2917/* move to PCI layer or libata core? */
2918static int pci_go_64(struct pci_dev *pdev)
2919{
2920 int rc;
2921
2922 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2923 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2924 if (rc) {
2925 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2926 if (rc) {
2927 dev_printk(KERN_ERR, &pdev->dev,
2928 "64-bit DMA enable failed\n");
2929 return rc;
2930 }
2931 }
2932 } else {
2933 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2934 if (rc) {
2935 dev_printk(KERN_ERR, &pdev->dev,
2936 "32-bit DMA enable failed\n");
2937 return rc;
2938 }
2939 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2940 if (rc) {
2941 dev_printk(KERN_ERR, &pdev->dev,
2942 "32-bit consistent DMA enable failed\n");
2943 return rc;
2944 }
2945 }
2946
2947 return rc;
2948}
2949
Brett Russ05b308e2005-10-05 17:08:53 -04002950/**
2951 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002952 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002953 *
2954 * FIXME: complete this.
2955 *
2956 * LOCKING:
2957 * Inherited from caller.
2958 */
Tejun Heo4447d352007-04-17 23:44:08 +09002959static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002960{
Tejun Heo4447d352007-04-17 23:44:08 +09002961 struct pci_dev *pdev = to_pci_dev(host->dev);
2962 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002963 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002964 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002965
2966 /* Use this to determine the HW stepping of the chip so we know
2967 * what errata to workaround
2968 */
Brett Russ31961942005-09-30 01:36:00 -04002969 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2970 if (scc == 0)
2971 scc_s = "SCSI";
2972 else if (scc == 0x01)
2973 scc_s = "RAID";
2974 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002975 scc_s = "?";
2976
2977 if (IS_GEN_I(hpriv))
2978 gen = "I";
2979 else if (IS_GEN_II(hpriv))
2980 gen = "II";
2981 else if (IS_GEN_IIE(hpriv))
2982 gen = "IIE";
2983 else
2984 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002985
Jeff Garzika9524a72005-10-30 14:39:11 -05002986 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002987 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2988 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002989 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
2990}
2991
/**
 * mv_pci_init_one - handle a positive probe of a PCI Marvell host
 * @pdev: PCI device found
 * @ent: PCI device ID entry for the matched host
 *
 * Allocates the libata host and private data, acquires the PCI BAR,
 * sets up DMA masks and pools, initializes the adapter and activates
 * it.  All resources are devres-managed, so each error path can just
 * return and devres unwinds.
 *
 * LOCKING:
 * Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	/* both allocations are devres-managed; no explicit free needed */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* BAR is busy: pin the device so it isn't disabled on us */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts; fall back to legacy INTx if MSI setup fails */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003061#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003062
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003063static int mv_platform_probe(struct platform_device *pdev);
3064static int __devexit mv_platform_remove(struct platform_device *pdev);
3065
/*
 * mv_init - register the PCI (if configured) and platform front ends.
 * If the platform driver fails to register, the PCI driver is unwound
 * so module init is all-or-nothing.
 */
static int __init mv_init(void)
{
	/* -ENODEV is only reachable on a PCI-less build where the
	 * platform register below overwrites it */
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* keep registration all-or-nothing */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3082
/*
 * mv_exit - unregister both front ends registered by mv_init().
 */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3090
3091MODULE_AUTHOR("Brett Russ");
3092MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3093MODULE_LICENSE("GPL");
3094MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3095MODULE_VERSION(DRV_VERSION);
Mark Lord17c5aab2008-04-16 14:56:51 -04003096MODULE_ALIAS("platform:" DRV_NAME);
Brett Russ20f733e2005-09-01 18:26:17 -04003097
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003098#ifdef CONFIG_PCI
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003099module_param(msi, int, 0444);
3100MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003101#endif
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003102
Brett Russ20f733e2005-09-01 18:26:17 -04003103module_init(mv_init);
3104module_exit(mv_exit);