blob: 552006853cd718944867d4c26d861c6c33ea4a62 [file] [log] [blame]
Brett Russ20f733e2005-09-01 18:26:17 -04001/*
2 * sata_mv.c - Marvell SATA support
3 *
Mark Lorde12bef52008-03-31 19:33:56 -04004 * Copyright 2008: Marvell Corporation, all rights reserved.
Jeff Garzik8b260242005-11-12 12:32:50 -05005 * Copyright 2005: EMC Corporation, all rights reserved.
Jeff Garzike2b1be52005-11-18 14:04:23 -05006 * Copyright 2005 Red Hat, Inc. All rights reserved.
Brett Russ20f733e2005-09-01 18:26:17 -04007 *
8 * Please ALWAYS copy linux-ide@vger.kernel.org on emails.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; version 2 of the License.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
Jeff Garzik4a05e202007-05-24 23:40:15 -040025/*
26 sata_mv TODO list:
27
28 1) Needs a full errata audit for all chipsets. I implemented most
29 of the errata workarounds found in the Marvell vendor driver, but
30 I distinctly remember a couple workarounds (one related to PCI-X)
31 are still needed.
32
Mark Lord1fd2e1c2008-01-26 18:33:59 -050033 2) Improve/fix IRQ and error handling sequences.
34
35 3) ATAPI support (Marvell claims the 60xx/70xx chips can do it).
36
37 4) Think about TCQ support here, and for libata in general
     with controllers that support it via host-queuing hardware
39 (a software-only implementation could be a nightmare).
Jeff Garzik4a05e202007-05-24 23:40:15 -040040
41 5) Investigate problems with PCI Message Signalled Interrupts (MSI).
42
Mark Lorde49856d2008-04-16 14:59:07 -040043 6) Cache frequently-accessed registers in mv_port_priv to reduce overhead.
Jeff Garzik4a05e202007-05-24 23:40:15 -040044
Mark Lord40f0bc22008-04-16 14:57:25 -040045 7) Fix/reenable hot plug/unplug (should happen as a side-effect of (2) above).
Jeff Garzik4a05e202007-05-24 23:40:15 -040046
Jeff Garzik4a05e202007-05-24 23:40:15 -040047 8) Develop a low-power-consumption strategy, and implement it.
48
49 9) [Experiment, low priority] See if ATAPI can be supported using
50 "unknown FIS" or "vendor-specific FIS" support, or something creative
51 like that.
52
53 10) [Experiment, low priority] Investigate interrupt coalescing.
54 Quite often, especially with PCI Message Signalled Interrupts (MSI),
55 the overhead reduced by interrupt mitigation is quite often not
56 worth the latency cost.
57
58 11) [Experiment, Marvell value added] Is it possible to use target
59 mode to cross-connect two Linux boxes with Marvell cards? If so,
60 creating LibATA target mode support would be very interesting.
61
62 Target mode, for those without docs, is the ability to directly
63 connect two SATA controllers.
64
Jeff Garzik4a05e202007-05-24 23:40:15 -040065*/
66
Brett Russ20f733e2005-09-01 18:26:17 -040067#include <linux/kernel.h>
68#include <linux/module.h>
69#include <linux/pci.h>
70#include <linux/init.h>
71#include <linux/blkdev.h>
72#include <linux/delay.h>
73#include <linux/interrupt.h>
Andrew Morton8d8b6002008-02-04 23:43:44 -080074#include <linux/dmapool.h>
Brett Russ20f733e2005-09-01 18:26:17 -040075#include <linux/dma-mapping.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050076#include <linux/device.h>
Saeed Bisharaf351b2d2008-02-01 18:08:03 -050077#include <linux/platform_device.h>
78#include <linux/ata_platform.h>
Lennert Buytenhek15a32632008-03-27 14:51:39 -040079#include <linux/mbus.h>
Brett Russ20f733e2005-09-01 18:26:17 -040080#include <scsi/scsi_host.h>
Jeff Garzik193515d2005-11-07 00:59:37 -050081#include <scsi/scsi_cmnd.h>
Jeff Garzik6c087722007-10-12 00:16:23 -040082#include <scsi/scsi_device.h>
Brett Russ20f733e2005-09-01 18:26:17 -040083#include <linux/libata.h>
Brett Russ20f733e2005-09-01 18:26:17 -040084
85#define DRV_NAME "sata_mv"
Mark Lord1fd2e1c2008-01-26 18:33:59 -050086#define DRV_VERSION "1.20"
Brett Russ20f733e2005-09-01 18:26:17 -040087
/*
 * Chip register map and bit definitions.
 * Offsets are relative to the controller's memory-mapped BAR unless a
 * comment says otherwise; values/order must not change (hardware ABI).
 */
enum {
	/* BAR's are enumerated in terms of pci_resource_start() terms */
	MV_PRIMARY_BAR		= 0,	/* offset 0x10: memory space */
	MV_IO_BAR		= 2,	/* offset 0x18: IO space */
	MV_MISC_BAR		= 3,	/* offset 0x1c: FLASH, NVRAM, SRAM */

	MV_MAJOR_REG_AREA_SZ	= 0x10000,	/* 64KB */
	MV_MINOR_REG_AREA_SZ	= 0x2000,	/* 8KB */

	MV_PCI_REG_BASE		= 0,
	MV_IRQ_COAL_REG_BASE	= 0x18000,	/* 6xxx part only */
	MV_IRQ_COAL_CAUSE		= (MV_IRQ_COAL_REG_BASE + 0x08),
	MV_IRQ_COAL_CAUSE_LO		= (MV_IRQ_COAL_REG_BASE + 0x88),
	MV_IRQ_COAL_CAUSE_HI		= (MV_IRQ_COAL_REG_BASE + 0x8c),
	MV_IRQ_COAL_THRESHOLD		= (MV_IRQ_COAL_REG_BASE + 0xcc),
	MV_IRQ_COAL_TIME_THRESHOLD	= (MV_IRQ_COAL_REG_BASE + 0xd0),

	MV_SATAHC0_REG_BASE	= 0x20000,
	MV_FLASH_CTL		= 0x1046c,
	MV_GPIO_PORT_CTL	= 0x104f0,
	MV_RESET_CFG		= 0x180d8,

	MV_PCI_REG_SZ		= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_REG_SZ	= MV_MAJOR_REG_AREA_SZ,
	MV_SATAHC_ARBTR_REG_SZ	= MV_MINOR_REG_AREA_SZ,		/* arbiter */
	MV_PORT_REG_SZ		= MV_MINOR_REG_AREA_SZ,

	MV_MAX_Q_DEPTH		= 32,
	MV_MAX_Q_DEPTH_MASK	= MV_MAX_Q_DEPTH - 1,

	/* CRQB needs alignment on a 1KB boundary. Size == 1KB
	 * CRPB needs alignment on a 256B boundary. Size == 256B
	 * ePRD (SG) entries need alignment on a 16B boundary. Size == 16B
	 */
	MV_CRQB_Q_SZ		= (32 * MV_MAX_Q_DEPTH),
	MV_CRPB_Q_SZ		= (8 * MV_MAX_Q_DEPTH),
	MV_MAX_SG_CT		= 256,
	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),

	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
	MV_PORT_HC_SHIFT	= 2,
	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */

	/* Host Flags */
	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
	/* SoC integrated controllers, no PCI interface */
	MV_FLAG_SOC		= (1 << 28),

	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
				  ATA_FLAG_PIO_POLLING,
	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,

	/* CRQB (Command ReQuest Block) field layout */
	CRQB_FLAG_READ		= (1 << 0),
	CRQB_TAG_SHIFT		= 1,
	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
	CRQB_PMP_SHIFT		= 12,	/* CRQB Gen-II/IIE PMP shift */
	CRQB_HOSTQ_SHIFT	= 17,	/* CRQB Gen-II/IIE HostQueTag shift */
	CRQB_CMD_ADDR_SHIFT	= 8,
	CRQB_CMD_CS		= (0x2 << 11),
	CRQB_CMD_LAST		= (1 << 15),

	/* CRPB (Command ResPonse Block) field layout */
	CRPB_FLAG_STATUS_SHIFT	= 8,
	CRPB_IOID_SHIFT_6	= 5,	/* CRPB Gen-II IO Id shift */
	CRPB_IOID_SHIFT_7	= 7,	/* CRPB Gen-IIE IO Id shift */

	EPRD_FLAG_END_OF_TBL	= (1 << 31),

	/* PCI interface registers */

	PCI_COMMAND_OFS		= 0xc00,

	PCI_MAIN_CMD_STS_OFS	= 0xd30,
	STOP_PCI_MASTER		= (1 << 2),
	PCI_MASTER_EMPTY	= (1 << 3),
	GLOB_SFT_RST		= (1 << 4),

	MV_PCI_MODE		= 0xd00,
	MV_PCI_EXP_ROM_BAR_CTL	= 0xd2c,
	MV_PCI_DISC_TIMER	= 0xd04,
	MV_PCI_MSI_TRIGGER	= 0xc38,
	MV_PCI_SERR_MASK	= 0xc28,
	MV_PCI_XBAR_TMOUT	= 0x1d04,
	MV_PCI_ERR_LOW_ADDRESS	= 0x1d40,
	MV_PCI_ERR_HIGH_ADDRESS	= 0x1d44,
	MV_PCI_ERR_ATTRIBUTE	= 0x1d48,
	MV_PCI_ERR_COMMAND	= 0x1d50,

	PCI_IRQ_CAUSE_OFS	= 0x1d58,
	PCI_IRQ_MASK_OFS	= 0x1d5c,
	PCI_UNMASK_ALL_IRQS	= 0x7fffff,	/* bits 22-0 */

	/* 7042 uses PCIe-specific interrupt cause/mask registers */
	PCIE_IRQ_CAUSE_OFS	= 0x1900,
	PCIE_IRQ_MASK_OFS	= 0x1910,
	PCIE_UNMASK_ALL_IRQS	= 0x40a,	/* assorted bits */

	/* Host controller main interrupt cause/mask */
	HC_MAIN_IRQ_CAUSE_OFS	= 0x1d60,
	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
	ERR_IRQ			= (1 << 0),	/* shift by port # */
	DONE_IRQ		= (1 << 1),	/* shift by port # */
	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
	PCI_ERR			= (1 << 18),
	TRAN_LO_DONE		= (1 << 19),	/* 6xxx: IRQ coalescing */
	TRAN_HI_DONE		= (1 << 20),	/* 6xxx: IRQ coalescing */
	PORTS_0_3_COAL_DONE	= (1 << 8),
	PORTS_4_7_COAL_DONE	= (1 << 17),
	PORTS_0_7_COAL_DONE	= (1 << 21),	/* 6xxx: IRQ coalescing */
	GPIO_INT		= (1 << 22),
	SELF_INT		= (1 << 23),
	TWSI_INT		= (1 << 24),
	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
				   HC_MAIN_RSVD),
	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
				   HC_MAIN_RSVD_5),
	HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),

	/* SATAHC registers */
	HC_CFG_OFS		= 0,

	HC_IRQ_CAUSE_OFS	= 0x14,
	DMA_IRQ			= (1 << 0),	/* shift by port # */
	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
	DEV_IRQ			= (1 << 8),	/* shift by port # */

	/* Shadow block registers */
	SHD_BLK_OFS		= 0x100,
	SHD_CTL_AST_OFS		= 0x20,		/* ofs from SHD_BLK_OFS */

	/* SATA registers */
	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
	SATA_ACTIVE_OFS		= 0x350,
	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,

	LTMODE_OFS		= 0x30c,
	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */

	PHY_MODE3		= 0x310,
	PHY_MODE4		= 0x314,
	PHY_MODE2		= 0x330,
	SATA_IFCTL_OFS		= 0x344,
	SATA_IFSTAT_OFS		= 0x34c,
	VENDOR_UNIQUE_FIS_OFS	= 0x35c,

	FIS_CFG_OFS		= 0x360,
	FIS_CFG_SINGLE_SYNC	= (1 << 16),	/* SYNC on DMA activation */

	/* Gen-I (50xx) chips use different PHY/link register offsets */
	MV5_PHY_MODE		= 0x74,
	MV5_LT_MODE		= 0x30,
	MV5_PHY_CTL		= 0x0C,
	SATA_INTERFACE_CFG	= 0x050,

	MV_M2_PREAMP_MASK	= 0x7e0,

	/* Port registers */
	EDMA_CFG_OFS		= 0,
	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
	EDMA_CFG_EDMA_FBS	= (1 << 16),	/* EDMA FIS-Based Switching */
	EDMA_CFG_FBS		= (1 << 26),	/* FIS-Based Switching */

	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
	EDMA_ERR_D_PAR		= (1 << 0),	/* UDMA data parity err */
	EDMA_ERR_PRD_PAR	= (1 << 1),	/* UDMA PRD parity err */
	EDMA_ERR_DEV		= (1 << 2),	/* device error */
	EDMA_ERR_DEV_DCON	= (1 << 3),	/* device disconnect */
	EDMA_ERR_DEV_CON	= (1 << 4),	/* device connected */
	EDMA_ERR_SERR		= (1 << 5),	/* SError bits [WBDST] raised */
	EDMA_ERR_SELF_DIS	= (1 << 7),	/* Gen II/IIE self-disable */
	EDMA_ERR_SELF_DIS_5	= (1 << 8),	/* Gen I self-disable */
	EDMA_ERR_BIST_ASYNC	= (1 << 8),	/* BIST FIS or Async Notify */
	EDMA_ERR_TRANS_IRQ_7	= (1 << 8),	/* Gen IIE transport layer irq */
	EDMA_ERR_CRQB_PAR	= (1 << 9),	/* CRQB parity error */
	EDMA_ERR_CRPB_PAR	= (1 << 10),	/* CRPB parity error */
	EDMA_ERR_INTRL_PAR	= (1 << 11),	/* internal parity error */
	EDMA_ERR_IORDY		= (1 << 12),	/* IORdy timeout */

	EDMA_ERR_LNK_CTRL_RX	= (0xf << 13),	/* link ctrl rx error */
	EDMA_ERR_LNK_CTRL_RX_0	= (1 << 13),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_RX_1	= (1 << 14),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_RX_2	= (1 << 15),	/* fatal: caught SYNC */
	EDMA_ERR_LNK_CTRL_RX_3	= (1 << 16),	/* transient: FIS rx err */

	EDMA_ERR_LNK_DATA_RX	= (0xf << 17),	/* link data rx error */

	EDMA_ERR_LNK_CTRL_TX	= (0x1f << 21),	/* link ctrl tx error */
	EDMA_ERR_LNK_CTRL_TX_0	= (1 << 21),	/* transient: CRC err */
	EDMA_ERR_LNK_CTRL_TX_1	= (1 << 22),	/* transient: FIFO err */
	EDMA_ERR_LNK_CTRL_TX_2	= (1 << 23),	/* transient: caught SYNC */
	EDMA_ERR_LNK_CTRL_TX_3	= (1 << 24),	/* transient: caught DMAT */
	EDMA_ERR_LNK_CTRL_TX_4	= (1 << 25),	/* transient: FIS collision */

	EDMA_ERR_LNK_DATA_TX	= (0x1f << 26),	/* link data tx error */

	EDMA_ERR_TRANS_PROTO	= (1 << 31),	/* transport protocol error */
	EDMA_ERR_OVERRUN_5	= (1 << 5),
	EDMA_ERR_UNDERRUN_5	= (1 << 6),

	/* Errors that do not (by themselves) warrant freezing the port */
	EDMA_ERR_IRQ_TRANSIENT  = EDMA_ERR_LNK_CTRL_RX_0 |
				  EDMA_ERR_LNK_CTRL_RX_1 |
				  EDMA_ERR_LNK_CTRL_RX_3 |
				  EDMA_ERR_LNK_CTRL_TX |
				 /* temporary, until we fix hotplug: */
				 (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON),

	/* Error bits that freeze the port: Gen II/IIE ... */
	EDMA_EH_FREEZE		= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_SERR |
				  EDMA_ERR_SELF_DIS |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY |
				  EDMA_ERR_LNK_CTRL_RX_2 |
				  EDMA_ERR_LNK_DATA_RX |
				  EDMA_ERR_LNK_DATA_TX |
				  EDMA_ERR_TRANS_PROTO,

	/* ... and Gen I (50xx) */
	EDMA_EH_FREEZE_5	= EDMA_ERR_D_PAR |
				  EDMA_ERR_PRD_PAR |
				  EDMA_ERR_DEV_DCON |
				  EDMA_ERR_DEV_CON |
				  EDMA_ERR_OVERRUN_5 |
				  EDMA_ERR_UNDERRUN_5 |
				  EDMA_ERR_SELF_DIS_5 |
				  EDMA_ERR_CRQB_PAR |
				  EDMA_ERR_CRPB_PAR |
				  EDMA_ERR_INTRL_PAR |
				  EDMA_ERR_IORDY,

	/* EDMA request/response queue pointer registers */
	EDMA_REQ_Q_BASE_HI_OFS	= 0x10,
	EDMA_REQ_Q_IN_PTR_OFS	= 0x14,		/* also contains BASE_LO */

	EDMA_REQ_Q_OUT_PTR_OFS	= 0x18,
	EDMA_REQ_Q_PTR_SHIFT	= 5,

	EDMA_RSP_Q_BASE_HI_OFS	= 0x1c,
	EDMA_RSP_Q_IN_PTR_OFS	= 0x20,
	EDMA_RSP_Q_OUT_PTR_OFS	= 0x24,		/* also contains BASE_LO */
	EDMA_RSP_Q_PTR_SHIFT	= 3,

	EDMA_CMD_OFS		= 0x28,		/* EDMA command register */
	EDMA_EN			= (1 << 0),	/* enable EDMA */
	EDMA_DS			= (1 << 1),	/* disable EDMA; self-negated */
	ATA_RST			= (1 << 2),	/* reset trans/link/phy */

	EDMA_IORDY_TMOUT	= 0x34,
	EDMA_ARB_CFG		= 0x38,

	GEN_II_NCQ_MAX_SECTORS	= 256,		/* max sects/io on Gen2 w/NCQ */

	/* Host private flags (hp_flags) */
	MV_HP_FLAG_MSI		= (1 << 0),
	MV_HP_ERRATA_50XXB0	= (1 << 1),
	MV_HP_ERRATA_50XXB2	= (1 << 2),
	MV_HP_ERRATA_60X1B2	= (1 << 3),
	MV_HP_ERRATA_60X1C0	= (1 << 4),
	MV_HP_ERRATA_XX42A0	= (1 << 5),
	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */

	/* Port private flags (pp_flags) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};
371
/* Chip-generation tests on mv_host_priv.hp_flags (set at probe time) */
#define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I)
#define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
#define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
/* SoC variants carry MV_FLAG_SOC in their port flags; everything else is PCI */
#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))

/* Address-window control/base register pair for window i (16 bytes apart) */
#define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
#define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
379
/* DMA-related constants, kept in a separate enum (unsigned values) */
enum {
	/* DMA boundary 0xffff is required by the s/g splitting
	 * we need on /length/ in mv_fill_sg().
	 */
	MV_DMA_BOUNDARY		= 0xffffU,

	/* mask of register bits containing lower 32 bits
	 * of EDMA request queue DMA address
	 */
	EDMA_REQ_Q_BASE_LO_MASK	= 0xfffffc00U,

	/* ditto, for response queue */
	EDMA_RSP_Q_BASE_LO_MASK	= 0xffffff00U,
};
394
/* Supported controller families; used as index into mv_port_info[] */
enum chip_type {
	chip_504x,
	chip_508x,
	chip_5080,
	chip_604x,
	chip_608x,
	chip_6042,
	chip_7042,
	chip_soc,
};
405
/* Command ReQuest Block: 32B
 * Hardware-consumed DMA structure; all fields little-endian.
 * Layout must not change.
 */
struct mv_crqb {
	__le32			sg_addr;	/* low 32 bits of SG table DMA address */
	__le32			sg_addr_hi;	/* high 32 bits of SG table DMA address */
	__le16			ctrl_flags;	/* CRQB_* flag/shift fields */
	__le16			ata_cmd[11];	/* ATA register values for the command */
};
413
/* Gen-IIE (6042/7042) variant of the command request block.
 * Hardware-consumed DMA structure; all fields little-endian.
 */
struct mv_crqb_iie {
	__le32			addr;		/* low 32 bits of SG table DMA address */
	__le32			addr_hi;	/* high 32 bits of SG table DMA address */
	__le32			flags;
	__le32			len;
	__le32			ata_cmd[4];	/* packed ATA register values */
};
421
/* Command ResPonse Block: 8B
 * Written by the hardware on command completion; little-endian.
 */
struct mv_crpb {
	__le16			id;		/* tag/IO id (see CRPB_IOID_SHIFT_*) */
	__le16			flags;		/* status (see CRPB_FLAG_STATUS_SHIFT) */
	__le32			tmstmp;		/* completion timestamp */
};
428
/* EDMA Physical Region Descriptor (ePRD); A.K.A. SG
 * One scatter/gather entry as consumed by the EDMA engine; little-endian.
 */
struct mv_sg {
	__le32			addr;		/* low 32 bits of segment address */
	__le32			flags_size;	/* length + EPRD_FLAG_END_OF_TBL */
	__le32			addr_hi;	/* high 32 bits of segment address */
	__le32			reserved;
};
436
/* Per-port driver-private state (ata_port->private_data) */
struct mv_port_priv {
	struct mv_crqb		*crqb;		/* request queue (CPU view) */
	dma_addr_t		crqb_dma;	/* request queue (device view) */
	struct mv_crpb		*crpb;		/* response queue (CPU view) */
	dma_addr_t		crpb_dma;	/* response queue (device view) */
	struct mv_sg		*sg_tbl[MV_MAX_Q_DEPTH];	/* one SG table per tag */
	dma_addr_t		sg_tbl_dma[MV_MAX_Q_DEPTH];

	unsigned int		req_idx;	/* request queue producer index */
	unsigned int		resp_idx;	/* response queue consumer index */

	u32			pp_flags;	/* MV_PP_FLAG_* */
};
450
/* Per-port PHY signal tuning values, read at probe and re-applied on reset */
struct mv_port_signal {
	u32			amps;	/* amplitude */
	u32			pre;	/* pre-emphasis */
};
455
/* Per-host driver-private state (ata_host->private_data) */
struct mv_host_priv {
	u32			hp_flags;	/* MV_HP_* flags and errata bits */
	struct mv_port_signal	signal[8];	/* per-port PHY tuning */
	const struct mv_hw_ops	*ops;		/* chip-family hook table */
	int			n_ports;
	void __iomem		*base;		/* controller register base */
	void __iomem		*main_cause_reg_addr;	/* main IRQ cause reg */
	void __iomem		*main_mask_reg_addr;	/* main IRQ mask reg */
	u32			irq_cause_ofs;	/* PCI vs PCIe IRQ reg offsets */
	u32			irq_mask_ofs;
	u32			unmask_all_irqs;
	/*
	 * These consistent DMA memory pools give us guaranteed
	 * alignment for hardware-accessed data structures,
	 * and less memory waste in accomplishing the alignment.
	 */
	struct dma_pool		*crqb_pool;
	struct dma_pool		*crpb_pool;
	struct dma_pool		*sg_tbl_pool;
};
476
/* Chip-family-specific low-level operations (mv5/mv6/soc variants below) */
struct mv_hw_ops {
	/* apply PHY errata workarounds for one port */
	void (*phy_errata)(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
	void (*enable_leds)(struct mv_host_priv *hpriv, void __iomem *mmio);
	/* read PHY pre-amp/signal settings into hpriv->signal[idx] */
	void (*read_preamp)(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
	int (*reset_hc)(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
	void (*reset_flash)(struct mv_host_priv *hpriv, void __iomem *mmio);
	void (*reset_bus)(struct ata_host *host, void __iomem *mmio);
};
488
/* libata callbacks and shared helpers */
static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val);
static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
static int mv_port_start(struct ata_port *ap);
static void mv_port_stop(struct ata_port *ap);
static void mv_qc_prep(struct ata_queued_cmd *qc);
static void mv_qc_prep_iie(struct ata_queued_cmd *qc);
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc);
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void mv_eh_freeze(struct ata_port *ap);
static void mv_eh_thaw(struct ata_port *ap);
static void mv6_dev_config(struct ata_device *dev);

/* Gen-I (50xx) mv_hw_ops implementations */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio);

/* Gen-II/IIE (60xx/6042/7042) mv_hw_ops implementations */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port);
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio);
static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
			   void __iomem *mmio);
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc);
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);

/* SoC (no-PCI) mv_hw_ops implementations */
static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
				      void __iomem *mmio);
static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
				  void __iomem *mmio, unsigned int n_hc);
static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
				      void __iomem *mmio);
static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio);

static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio);
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no);
static int mv_stop_edma(struct ata_port *ap);
static int mv_stop_edma_engine(void __iomem *port_mmio);
static void mv_edma_cfg(struct ata_port *ap, int want_ncq);

/* Port-multiplier (PMP) support */
static void mv_pmp_select(struct ata_port *ap, int pmp);
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline);
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
Brett Russ20f733e2005-09-01 18:26:17 -0400543
/* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
 * because we have to allow room for worst case splitting of
 * PRDs for 64K boundaries in mv_fill_sg().
 */
/* SCSI host template for Gen-I chips (no NCQ) */
static struct scsi_host_template mv5_sht = {
	ATA_BASE_SHT(DRV_NAME),
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};

/* SCSI host template for Gen-II/IIE chips (NCQ-capable) */
static struct scsi_host_template mv6_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= MV_MAX_Q_DEPTH - 1,
	.sg_tablesize		= MV_MAX_SG_CT / 2,
	.dma_boundary		= MV_DMA_BOUNDARY,
};
560
/* libata port operations for Gen-I (50xx) chips */
static struct ata_port_operations mv5_ops = {
	.inherits		= &ata_sff_port_ops,

	.qc_prep		= mv_qc_prep,
	.qc_issue		= mv_qc_issue,

	.freeze			= mv_eh_freeze,
	.thaw			= mv_eh_thaw,
	.hardreset		= mv_hardreset,
	.error_handler		= ata_std_error_handler, /* avoid SFF EH */
	.post_internal_cmd	= ATA_OP_NULL,

	.scr_read		= mv5_scr_read,
	.scr_write		= mv5_scr_write,

	.port_start		= mv_port_start,
	.port_stop		= mv_port_stop,
};

/* Gen-II (60xx): adds PMP support on top of mv5_ops */
static struct ata_port_operations mv6_ops = {
	.inherits		= &mv5_ops,
	.qc_defer		= sata_pmp_qc_defer_cmd_switch,
	.dev_config		= mv6_dev_config,
	.scr_read		= mv_scr_read,
	.scr_write		= mv_scr_write,

	.pmp_hardreset		= mv_pmp_hardreset,
	.pmp_softreset		= mv_softreset,
	.softreset		= mv_softreset,
	.error_handler		= sata_pmp_error_handler,
};

/* Gen-IIE (6042/7042): different CRQB format, FIS-based switching */
static struct ata_port_operations mv_iie_ops = {
	.inherits		= &mv6_ops,
	.qc_defer		= ata_std_qc_defer, /* FIS-based switching */
	.dev_config		= ATA_OP_NULL,
	.qc_prep		= mv_qc_prep_iie,
};
599
/*
 * Per-chip-family configuration, indexed by the chip_* board id that
 * mv_pci_tbl[] (below) supplies as driver_data for each PCI device.
 */
static const struct ata_port_info mv_port_info[] = {
	{  /* chip_504x */
		.flags		= MV_COMMON_FLAGS,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_508x */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_5080 */
		.flags		= MV_COMMON_FLAGS | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv5_ops,
	},
	{  /* chip_604x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_608x */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_DUAL_HC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv6_ops,
	},
	{  /* chip_6042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_7042 */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
	{  /* chip_soc */
		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
				  ATA_FLAG_NCQ | MV_FLAG_SOC,
		.pio_mask	= 0x1f,	/* pio0-4 */
		.udma_mask	= ATA_UDMA6,
		.port_ops	= &mv_iie_ops,
	},
};
660
/*
 * PCI devices handled by this driver.  The final field of each entry
 * is the board id used to index mv_port_info[] above.
 */
static const struct pci_device_id mv_pci_tbl[] = {
	{ PCI_VDEVICE(MARVELL, 0x5040), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5041), chip_504x },
	{ PCI_VDEVICE(MARVELL, 0x5080), chip_5080 },
	{ PCI_VDEVICE(MARVELL, 0x5081), chip_508x },
	/* RocketRAID 1740/174x have different identifiers */
	{ PCI_VDEVICE(TTI, 0x1740), chip_508x },
	{ PCI_VDEVICE(TTI, 0x1742), chip_508x },

	{ PCI_VDEVICE(MARVELL, 0x6040), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6041), chip_604x },
	{ PCI_VDEVICE(MARVELL, 0x6042), chip_6042 },
	{ PCI_VDEVICE(MARVELL, 0x6080), chip_608x },
	{ PCI_VDEVICE(MARVELL, 0x6081), chip_608x },

	{ PCI_VDEVICE(ADAPTEC2, 0x0241), chip_604x },

	/* Adaptec 1430SA */
	{ PCI_VDEVICE(ADAPTEC2, 0x0243), chip_7042 },

	/* Marvell 7042 support */
	{ PCI_VDEVICE(MARVELL, 0x7042), chip_7042 },

	/* Highpoint RocketRAID PCIe series */
	{ PCI_VDEVICE(TTI, 0x2300), chip_7042 },
	{ PCI_VDEVICE(TTI, 0x2310), chip_7042 },

	{ }			/* terminate list */
};
690
/* Low-level hardware hooks for Gen-I (50xx) chips. */
static const struct mv_hw_ops mv5xxx_ops = {
	.phy_errata		= mv5_phy_errata,
	.enable_leds		= mv5_enable_leds,
	.read_preamp		= mv5_read_preamp,
	.reset_hc		= mv5_reset_hc,
	.reset_flash		= mv5_reset_flash,
	.reset_bus		= mv5_reset_bus,
};
699
/* Low-level hardware hooks for Gen-II/IIe (60xx/70xx) PCI chips. */
static const struct mv_hw_ops mv6xxx_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv6_enable_leds,
	.read_preamp		= mv6_read_preamp,
	.reset_hc		= mv6_reset_hc,
	.reset_flash		= mv6_reset_flash,
	.reset_bus		= mv_reset_pci_bus,
};
708
/*
 * Low-level hardware hooks for SoC-integrated controllers; shares the
 * Gen-II PHY errata handler but uses SoC-specific reset/LED/preamp code.
 */
static const struct mv_hw_ops mv_soc_ops = {
	.phy_errata		= mv6_phy_errata,
	.enable_leds		= mv_soc_enable_leds,
	.read_preamp		= mv_soc_read_preamp,
	.reset_hc		= mv_soc_reset_hc,
	.reset_flash		= mv_soc_reset_flash,
	.reset_bus		= mv_soc_reset_bus,
};
717
Brett Russ20f733e2005-09-01 18:26:17 -0400718/*
719 * Functions
720 */
721
/* Write a register, then read it back to force out posted writes. */
static inline void writelfl(unsigned long data, void __iomem *addr)
{
	writel(data, addr);
	(void) readl(addr);	/* flush to avoid PCI posted write */
}
727
/* Host controller (SATAHC) index owning chip-wide port @port (0..7). */
static inline unsigned int mv_hc_from_port(unsigned int port)
{
	return port >> MV_PORT_HC_SHIFT;
}
732
/* Port index (0..3) within its host controller. */
static inline unsigned int mv_hardport_from_port(unsigned int port)
{
	return port & MV_PORT_MASK;
}
737
/*
 * Consolidate some rather tricky bit shift calculations.
 * This is hot-path stuff, so not a function.
 * Simple code, with two return values, so macro rather than inline.
 *
 * port is the sole input, in range 0..7.
 * shift is one output, for use with the main_cause and main_mask registers.
 * hardport is the other output, in range 0..3.
 *
 * Note that port and hardport may be the same variable in some cases.
 * (Statement macro, not an expression: both outputs are assigned.)
 */
#define MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport)	\
{								\
	shift = mv_hc_from_port(port) * HC_SHIFT;		\
	hardport = mv_hardport_from_port(port);			\
	shift += hardport * 2;	/* 2 main-cause bits per port */ \
}
755
Mark Lord352fab72008-04-19 14:43:42 -0400756static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
757{
758 return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
759}
760
/* MMIO base of the host controller that owns chip-wide port @port. */
static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
						 unsigned int port)
{
	return mv_hc_base(base, mv_hc_from_port(port));
}
766
Brett Russ20f733e2005-09-01 18:26:17 -0400767static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
768{
Jeff Garzikc9d39132005-11-13 17:47:51 -0500769 return mv_hc_base_from_port(base, port) +
Jeff Garzik8b260242005-11-12 12:32:50 -0500770 MV_SATAHC_ARBTR_REG_SZ +
Jeff Garzikc9d39132005-11-13 17:47:51 -0500771 (mv_hardport_from_port(port) * MV_PORT_REG_SZ);
Brett Russ20f733e2005-09-01 18:26:17 -0400772}
773
Mark Lorde12bef52008-03-31 19:33:56 -0400774static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port)
775{
776 void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port);
777 unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL;
778
779 return hc_mmio + ofs;
780}
781
/* Chip MMIO base, stashed in the host private data. */
static inline void __iomem *mv_host_base(struct ata_host *host)
{
	struct mv_host_priv *hpriv = host->private_data;
	return hpriv->base;
}
787
/* Per-port register block base for ATA port @ap. */
static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
	return mv_port_base(mv_host_base(ap->host), ap->port_no);
}
792
Jeff Garzikcca39742006-08-24 03:19:22 -0400793static inline int mv_get_hc_count(unsigned long port_flags)
Brett Russ20f733e2005-09-01 18:26:17 -0400794{
Jeff Garzikcca39742006-08-24 03:19:22 -0400795 return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
Brett Russ20f733e2005-09-01 18:26:17 -0400796}
797
/**
 *	mv_set_edma_ptrs - program eDMA request/response queue registers
 *	@port_mmio: port base address
 *	@hpriv: host private data (consulted for errata flags)
 *	@pp: port private data holding queue DMA addresses and sw indices
 *
 *	Writes the base/in/out pointer registers for both the command
 *	request queue (CRQB) and command response queue (CRPB) from the
 *	software-cached indices in @pp.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_set_edma_ptrs(void __iomem *port_mmio,
			     struct mv_host_priv *hpriv,
			     struct mv_port_priv *pp)
{
	u32 index;

	/*
	 * initialize request queue
	 */
	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* queue base must be 1KB aligned (low 10 address bits clear) */
	WARN_ON(pp->crqb_dma & 0x3ff);
	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	/* errata workaround: XX42A0 wants the full low dword here, not
	 * just the index field */
	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crqb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);

	/*
	 * initialize response queue
	 */
	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;

	/* response queue base must be 256-byte aligned */
	WARN_ON(pp->crpb_dma & 0xff);
	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);

	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
		writelfl((pp->crpb_dma & 0xffffffff) | index,
			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
	else
		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);

	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
837
Brett Russ05b308e2005-10-05 17:08:53 -0400838/**
839 * mv_start_dma - Enable eDMA engine
840 * @base: port base address
841 * @pp: port private data
842 *
Tejun Heobeec7db2006-02-11 19:11:13 +0900843 * Verify the local cache of the eDMA state is accurate with a
844 * WARN_ON.
Brett Russ05b308e2005-10-05 17:08:53 -0400845 *
846 * LOCKING:
847 * Inherited from caller.
848 */
Mark Lord0c589122008-01-26 18:31:16 -0500849static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
Mark Lord72109162008-01-26 18:31:33 -0500850 struct mv_port_priv *pp, u8 protocol)
Brett Russ31961942005-09-30 01:36:00 -0400851{
Mark Lord72109162008-01-26 18:31:33 -0500852 int want_ncq = (protocol == ATA_PROT_NCQ);
853
854 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
855 int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
856 if (want_ncq != using_ncq)
Mark Lordb5624682008-03-31 19:34:40 -0400857 mv_stop_edma(ap);
Mark Lord72109162008-01-26 18:31:33 -0500858 }
Jeff Garzikc5d3e452007-07-11 18:30:50 -0400859 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
Mark Lord0c589122008-01-26 18:31:16 -0500860 struct mv_host_priv *hpriv = ap->host->private_data;
Mark Lord352fab72008-04-19 14:43:42 -0400861 int hardport = mv_hardport_from_port(ap->port_no);
Mark Lord0c589122008-01-26 18:31:16 -0500862 void __iomem *hc_mmio = mv_hc_base_from_port(
Mark Lord352fab72008-04-19 14:43:42 -0400863 mv_host_base(ap->host), hardport);
Mark Lord0c589122008-01-26 18:31:16 -0500864 u32 hc_irq_cause, ipending;
865
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400866 /* clear EDMA event indicators, if any */
Mark Lordf630d562008-01-26 18:31:00 -0500867 writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400868
Mark Lord0c589122008-01-26 18:31:16 -0500869 /* clear EDMA interrupt indicator, if any */
870 hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
Mark Lord352fab72008-04-19 14:43:42 -0400871 ipending = (DEV_IRQ | DMA_IRQ) << hardport;
Mark Lord0c589122008-01-26 18:31:16 -0500872 if (hc_irq_cause & ipending) {
873 writelfl(hc_irq_cause & ~ipending,
874 hc_mmio + HC_IRQ_CAUSE_OFS);
875 }
876
Mark Lorde12bef52008-03-31 19:33:56 -0400877 mv_edma_cfg(ap, want_ncq);
Mark Lord0c589122008-01-26 18:31:16 -0500878
879 /* clear FIS IRQ Cause */
880 writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
881
Mark Lordf630d562008-01-26 18:31:00 -0500882 mv_set_edma_ptrs(port_mmio, hpriv, pp);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -0400883
Mark Lordf630d562008-01-26 18:31:00 -0500884 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
Brett Russafb0edd2005-10-05 17:08:42 -0400885 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
886 }
Brett Russ31961942005-09-30 01:36:00 -0400887}
888
Brett Russ05b308e2005-10-05 17:08:53 -0400889/**
Mark Lorde12bef52008-03-31 19:33:56 -0400890 * mv_stop_edma_engine - Disable eDMA engine
Mark Lordb5624682008-03-31 19:34:40 -0400891 * @port_mmio: io base address
Brett Russ05b308e2005-10-05 17:08:53 -0400892 *
893 * LOCKING:
894 * Inherited from caller.
895 */
Mark Lordb5624682008-03-31 19:34:40 -0400896static int mv_stop_edma_engine(void __iomem *port_mmio)
Brett Russ31961942005-09-30 01:36:00 -0400897{
Mark Lordb5624682008-03-31 19:34:40 -0400898 int i;
Brett Russ31961942005-09-30 01:36:00 -0400899
Mark Lordb5624682008-03-31 19:34:40 -0400900 /* Disable eDMA. The disable bit auto clears. */
901 writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS);
Jeff Garzik8b260242005-11-12 12:32:50 -0500902
Mark Lordb5624682008-03-31 19:34:40 -0400903 /* Wait for the chip to confirm eDMA is off. */
904 for (i = 10000; i > 0; i--) {
905 u32 reg = readl(port_mmio + EDMA_CMD_OFS);
Jeff Garzik4537deb52007-07-12 14:30:19 -0400906 if (!(reg & EDMA_EN))
Mark Lordb5624682008-03-31 19:34:40 -0400907 return 0;
908 udelay(10);
Brett Russ31961942005-09-30 01:36:00 -0400909 }
Mark Lordb5624682008-03-31 19:34:40 -0400910 return -EIO;
Brett Russ31961942005-09-30 01:36:00 -0400911}
912
Mark Lorde12bef52008-03-31 19:33:56 -0400913static int mv_stop_edma(struct ata_port *ap)
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400914{
Mark Lordb5624682008-03-31 19:34:40 -0400915 void __iomem *port_mmio = mv_ap_base(ap);
916 struct mv_port_priv *pp = ap->private_data;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400917
Mark Lordb5624682008-03-31 19:34:40 -0400918 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
919 return 0;
920 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
921 if (mv_stop_edma_engine(port_mmio)) {
922 ata_port_printk(ap, KERN_ERR, "Unable to stop eDMA\n");
923 return -EIO;
924 }
925 return 0;
Jeff Garzik0ea9e172007-07-13 17:06:45 -0400926}
927
#ifdef ATA_DEBUG
/* Debug helper: hex-dump @bytes of MMIO space at @start, 4 words/line. */
static void mv_dump_mem(void __iomem *start, unsigned bytes)
{
	int off = 0;

	while (off < bytes) {
		int col;

		DPRINTK("%p: ", start + off);
		for (col = 0; off < bytes && col < 4; col++) {
			printk("%08x ", readl(start + off));
			off += sizeof(u32);
		}
		printk("\n");
	}
}
#endif
942
/* Debug helper: hex-dump the first @bytes of PCI config space. */
static void mv_dump_pci_cfg(struct pci_dev *pdev, unsigned bytes)
{
#ifdef ATA_DEBUG
	int off = 0;

	while (off < bytes) {
		int col;
		u32 dw;

		DPRINTK("%02x: ", off);
		for (col = 0; off < bytes && col < 4; col++) {
			(void) pci_read_config_dword(pdev, off, &dw);
			printk("%08x ", dw);
			off += sizeof(u32);
		}
		printk("\n");
	}
#endif
}
/*
 * Debug helper: dump PCI config space (when @pdev is given), global chip
 * registers, and per-HC/per-port register blocks.  A negative @port
 * means "dump everything"; otherwise only @port's HC and port regs.
 */
static void mv_dump_all_regs(void __iomem *mmio_base, int port,
			     struct pci_dev *pdev)
{
#ifdef ATA_DEBUG
	void __iomem *hc_base = mv_hc_base(mmio_base,
					   port >> MV_PORT_HC_SHIFT);
	void __iomem *port_base;
	int start_port, num_ports, p, start_hc, num_hcs, hc;

	if (0 > port) {
		start_hc = start_port = 0;
		num_ports = 8;		/* shld be benign for 4 port devs */
		num_hcs = 2;
	} else {
		start_hc = port >> MV_PORT_HC_SHIFT;
		start_port = port;
		num_ports = num_hcs = 1;
	}
	DPRINTK("All registers for port(s) %u-%u:\n", start_port,
		num_ports > 1 ? num_ports - 1 : start_port);

	if (NULL != pdev) {
		DPRINTK("PCI config space regs:\n");
		mv_dump_pci_cfg(pdev, 0x68);
	}
	/* chip-global register ranges (offsets per Marvell datasheet) */
	DPRINTK("PCI regs:\n");
	mv_dump_mem(mmio_base+0xc00, 0x3c);
	mv_dump_mem(mmio_base+0xd00, 0x34);
	mv_dump_mem(mmio_base+0xf00, 0x4);
	mv_dump_mem(mmio_base+0x1d00, 0x6c);
	for (hc = start_hc; hc < start_hc + num_hcs; hc++) {
		hc_base = mv_hc_base(mmio_base, hc);
		DPRINTK("HC regs (HC %i):\n", hc);
		mv_dump_mem(hc_base, 0x1c);
	}
	for (p = start_port; p < start_port + num_ports; p++) {
		port_base = mv_port_base(mmio_base, p);
		DPRINTK("EDMA regs (port %i):\n", p);
		mv_dump_mem(port_base, 0x54);
		DPRINTK("SATA regs (port %i):\n", p);
		mv_dump_mem(port_base+0x300, 0x60);
	}
#endif
}
1003
Brett Russ20f733e2005-09-01 18:26:17 -04001004static unsigned int mv_scr_offset(unsigned int sc_reg_in)
1005{
1006 unsigned int ofs;
1007
1008 switch (sc_reg_in) {
1009 case SCR_STATUS:
1010 case SCR_CONTROL:
1011 case SCR_ERROR:
1012 ofs = SATA_STATUS_OFS + (sc_reg_in * sizeof(u32));
1013 break;
1014 case SCR_ACTIVE:
1015 ofs = SATA_ACTIVE_OFS; /* active is not with the others */
1016 break;
1017 default:
1018 ofs = 0xffffffffU;
1019 break;
1020 }
1021 return ofs;
1022}
1023
Tejun Heoda3dbb12007-07-16 14:29:40 +09001024static int mv_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Brett Russ20f733e2005-09-01 18:26:17 -04001025{
1026 unsigned int ofs = mv_scr_offset(sc_reg_in);
1027
Tejun Heoda3dbb12007-07-16 14:29:40 +09001028 if (ofs != 0xffffffffU) {
1029 *val = readl(mv_ap_base(ap) + ofs);
1030 return 0;
1031 } else
1032 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001033}
1034
Tejun Heoda3dbb12007-07-16 14:29:40 +09001035static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Brett Russ20f733e2005-09-01 18:26:17 -04001036{
1037 unsigned int ofs = mv_scr_offset(sc_reg_in);
1038
Tejun Heoda3dbb12007-07-16 14:29:40 +09001039 if (ofs != 0xffffffffU) {
Brett Russ20f733e2005-09-01 18:26:17 -04001040 writelfl(val, mv_ap_base(ap) + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001041 return 0;
1042 } else
1043 return -EINVAL;
Brett Russ20f733e2005-09-01 18:26:17 -04001044}
1045
Mark Lordf2738272008-01-26 18:32:29 -05001046static void mv6_dev_config(struct ata_device *adev)
1047{
1048 /*
Mark Lorde49856d2008-04-16 14:59:07 -04001049 * Deal with Gen-II ("mv6") hardware quirks/restrictions:
1050 *
1051 * Gen-II does not support NCQ over a port multiplier
1052 * (no FIS-based switching).
1053 *
Mark Lordf2738272008-01-26 18:32:29 -05001054 * We don't have hob_nsect when doing NCQ commands on Gen-II.
1055 * See mv_qc_prep() for more info.
1056 */
Mark Lorde49856d2008-04-16 14:59:07 -04001057 if (adev->flags & ATA_DFLAG_NCQ) {
Mark Lord352fab72008-04-19 14:43:42 -04001058 if (sata_pmp_attached(adev->link->ap)) {
Mark Lorde49856d2008-04-16 14:59:07 -04001059 adev->flags &= ~ATA_DFLAG_NCQ;
Mark Lord352fab72008-04-19 14:43:42 -04001060 ata_dev_printk(adev, KERN_INFO,
1061 "NCQ disabled for command-based switching\n");
1062 } else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
1063 adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
1064 ata_dev_printk(adev, KERN_INFO,
1065 "max_sectors limited to %u for NCQ\n",
1066 adev->max_sectors);
1067 }
Mark Lorde49856d2008-04-16 14:59:07 -04001068 }
Mark Lordf2738272008-01-26 18:32:29 -05001069}
1070
Mark Lorde49856d2008-04-16 14:59:07 -04001071static void mv_config_fbs(void __iomem *port_mmio, int enable_fbs)
1072{
1073 u32 old_fcfg, new_fcfg, old_ltmode, new_ltmode;
1074 /*
1075 * Various bit settings required for operation
1076 * in FIS-based switching (fbs) mode on GenIIe:
1077 */
1078 old_fcfg = readl(port_mmio + FIS_CFG_OFS);
1079 old_ltmode = readl(port_mmio + LTMODE_OFS);
1080 if (enable_fbs) {
1081 new_fcfg = old_fcfg | FIS_CFG_SINGLE_SYNC;
1082 new_ltmode = old_ltmode | LTMODE_BIT8;
1083 } else { /* disable fbs */
1084 new_fcfg = old_fcfg & ~FIS_CFG_SINGLE_SYNC;
1085 new_ltmode = old_ltmode & ~LTMODE_BIT8;
1086 }
1087 if (new_fcfg != old_fcfg)
1088 writelfl(new_fcfg, port_mmio + FIS_CFG_OFS);
1089 if (new_ltmode != old_ltmode)
1090 writelfl(new_ltmode, port_mmio + LTMODE_OFS);
Mark Lord0c589122008-01-26 18:31:16 -05001091}
Jeff Garzike4e7b892006-01-31 12:18:41 -05001092
/**
 *	mv_edma_cfg - program the EDMA configuration register for a port
 *	@ap: ATA channel
 *	@want_ncq: nonzero to configure for NCQ operation
 *
 *	Builds the chip-generation-specific EDMA_CFG value, toggles
 *	FIS-based switching on GenIIe, and caches the resulting NCQ
 *	state in pp->pp_flags for mv_start_dma() to consult.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
{
	u32 cfg;
	struct mv_port_priv *pp    = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	void __iomem *port_mmio    = mv_ap_base(ap);

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */

		/* FIS-based switching is only used for NCQ behind a PMP */
		if (want_ncq && sata_pmp_attached(ap)) {
			cfg |= EDMA_CFG_EDMA_FBS; /* FIS-based switching */
			mv_config_fbs(port_mmio, 1);
		} else {
			mv_config_fbs(port_mmio, 0);
		}
	}

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |=  MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}
1131
Mark Lordda2fa9b2008-01-26 18:32:45 -05001132static void mv_port_free_dma_mem(struct ata_port *ap)
1133{
1134 struct mv_host_priv *hpriv = ap->host->private_data;
1135 struct mv_port_priv *pp = ap->private_data;
Mark Lordeb73d552008-01-29 13:24:00 -05001136 int tag;
Mark Lordda2fa9b2008-01-26 18:32:45 -05001137
1138 if (pp->crqb) {
1139 dma_pool_free(hpriv->crqb_pool, pp->crqb, pp->crqb_dma);
1140 pp->crqb = NULL;
1141 }
1142 if (pp->crpb) {
1143 dma_pool_free(hpriv->crpb_pool, pp->crpb, pp->crpb_dma);
1144 pp->crpb = NULL;
1145 }
Mark Lordeb73d552008-01-29 13:24:00 -05001146 /*
1147 * For GEN_I, there's no NCQ, so we have only a single sg_tbl.
1148 * For later hardware, we have one unique sg_tbl per NCQ tag.
1149 */
1150 for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
1151 if (pp->sg_tbl[tag]) {
1152 if (tag == 0 || !IS_GEN_I(hpriv))
1153 dma_pool_free(hpriv->sg_tbl_pool,
1154 pp->sg_tbl[tag],
1155 pp->sg_tbl_dma[tag]);
1156 pp->sg_tbl[tag] = NULL;
1157 }
Mark Lordda2fa9b2008-01-26 18:32:45 -05001158 }
1159}
1160
/**
 *	mv_port_start - Port specific init/start routine.
 *	@ap: ATA channel to manipulate
 *
 *	Allocate and point to DMA memory, init port private memory,
 *	zero indices.  Returns 0 on success, -ENOMEM on any allocation
 *	failure (partially-allocated pools are released before return;
 *	@pp itself is devm-managed).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static int mv_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp;
	int tag;

	/* devm allocation: freed automatically with the device */
	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;
	ap->private_data = pp;

	/* command request queue (dma_pool_alloc does not zero) */
	pp->crqb = dma_pool_alloc(hpriv->crqb_pool, GFP_KERNEL, &pp->crqb_dma);
	if (!pp->crqb)
		return -ENOMEM;
	memset(pp->crqb, 0, MV_CRQB_Q_SZ);

	/* command response queue */
	pp->crpb = dma_pool_alloc(hpriv->crpb_pool, GFP_KERNEL, &pp->crpb_dma);
	if (!pp->crpb)
		goto out_port_free_dma_mem;
	memset(pp->crpb, 0, MV_CRPB_Q_SZ);

	/*
	 * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
	 * For later hardware, we need one unique sg_tbl per NCQ tag.
	 * (GEN_I slots 1..N alias slot 0; mv_port_free_dma_mem knows.)
	 */
	for (tag = 0; tag < MV_MAX_Q_DEPTH; ++tag) {
		if (tag == 0 || !IS_GEN_I(hpriv)) {
			pp->sg_tbl[tag] = dma_pool_alloc(hpriv->sg_tbl_pool,
					      GFP_KERNEL, &pp->sg_tbl_dma[tag]);
			if (!pp->sg_tbl[tag])
				goto out_port_free_dma_mem;
		} else {
			pp->sg_tbl[tag] = pp->sg_tbl[0];
			pp->sg_tbl_dma[tag] = pp->sg_tbl_dma[0];
		}
	}
	return 0;

out_port_free_dma_mem:
	mv_port_free_dma_mem(ap);
	return -ENOMEM;
}
1214
/**
 *	mv_port_stop - Port specific cleanup/stop routine.
 *	@ap: ATA channel to manipulate
 *
 *	Stop DMA, cleanup port memory.
 *
 *	LOCKING:
 *	This routine uses the host lock to protect the DMA stop.
 */
static void mv_port_stop(struct ata_port *ap)
{
	mv_stop_edma(ap);		/* quiesce the eDMA engine first */
	mv_port_free_dma_mem(ap);	/* then release queue/SG DMA memory */
}
1229
/**
 *	mv_fill_sg - Fill out the Marvell ePRD (scatter gather) entries
 *	@qc: queued command whose SG list to source from
 *
 *	Populate the SG list and mark the last entry.  Segments that
 *	would cross a 64KB boundary are split into multiple ePRD
 *	entries, since an entry's length field is only 16 bits.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void mv_fill_sg(struct ata_queued_cmd *qc)
{
	struct mv_port_priv *pp = qc->ap->private_data;
	struct scatterlist *sg;
	struct mv_sg *mv_sg, *last_sg = NULL;
	unsigned int si;

	/* one SG table per command tag (see mv_port_start) */
	mv_sg = pp->sg_tbl[qc->tag];
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 sg_len = sg_dma_len(sg);

		while (sg_len) {
			u32 offset = addr & 0xffff;
			u32 len = sg_len;

			/* clip so this entry ends at the 64KB boundary */
			if ((offset + sg_len > 0x10000))
				len = 0x10000 - offset;

			mv_sg->addr = cpu_to_le32(addr & 0xffffffff);
			mv_sg->addr_hi = cpu_to_le32((addr >> 16) >> 16);
			mv_sg->flags_size = cpu_to_le32(len & 0xffff);

			sg_len -= len;
			addr += len;

			last_sg = mv_sg;
			mv_sg++;
		}
	}

	/* mark the final entry so the hardware stops there */
	if (likely(last_sg))
		last_sg->flags_size |= cpu_to_le32(EPRD_FLAG_END_OF_TBL);
}
1273
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001274static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
Brett Russ31961942005-09-30 01:36:00 -04001275{
Mark Lord559eeda2006-05-19 16:40:15 -04001276 u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS |
Brett Russ31961942005-09-30 01:36:00 -04001277 (last ? CRQB_CMD_LAST : 0);
Mark Lord559eeda2006-05-19 16:40:15 -04001278 *cmdw = cpu_to_le16(tmp);
Brett Russ31961942005-09-30 01:36:00 -04001279}
1280
/**
 * mv_qc_prep - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	__le16 *cw;
	struct ata_taskfile *tf;
	u16 flags = 0;
	unsigned in_index;

	/* Only DMA and NCQ protocols go through the EDMA request queue;
	 * everything else is issued via the legacy path in mv_qc_issue().
	 */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in command request block
	 */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;
	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* low 4 bits of the PM port number ride along in the flags word */
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* point the CRQB at the per-tag scatter/gather table (64-bit DMA
	 * address split into low/high 32-bit halves)
	 */
	pp->crqb[in_index].sg_addr =
		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	pp->crqb[in_index].sg_addr_hi =
		cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags);

	cw = &pp->crqb[in_index].ata_cmd[0];
	tf = &qc->tf;

	/* Sadly, the CRQB cannot accomodate all registers--there are
	 * only 11 bytes...so we must pick and choose required
	 * registers based on the command.  So, we drop feature and
	 * hob_feature for [RW] DMA commands, but they are needed for
	 * NCQ.  NCQ will drop hob_nsect.
	 */
	switch (tf->command) {
	case ATA_CMD_READ:
	case ATA_CMD_READ_EXT:
	case ATA_CMD_WRITE:
	case ATA_CMD_WRITE_EXT:
	case ATA_CMD_WRITE_FUA_EXT:
		mv_crqb_pack_cmd(cw++, tf->hob_nsect, ATA_REG_NSECT, 0);
		break;
	case ATA_CMD_FPDMA_READ:
	case ATA_CMD_FPDMA_WRITE:
		mv_crqb_pack_cmd(cw++, tf->hob_feature, ATA_REG_FEATURE, 0);
		mv_crqb_pack_cmd(cw++, tf->feature, ATA_REG_FEATURE, 0);
		break;
	default:
		/* The only other commands EDMA supports in non-queued and
		 * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
		 * of which are defined/used by Linux.  If we get here, this
		 * driver needs work.
		 *
		 * FIXME: modify libata to give qc_prep a return value and
		 * return error here.
		 */
		BUG_ON(tf->command);
		break;
	}
	/* common tail: remaining taskfile registers, command byte last */
	mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->lbal, ATA_REG_LBAL, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->lbam, ATA_REG_LBAM, 0);
	mv_crqb_pack_cmd(cw++, tf->hob_lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->lbah, ATA_REG_LBAH, 0);
	mv_crqb_pack_cmd(cw++, tf->device, ATA_REG_DEVICE, 0);
	mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1);	/* last */

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1371
/**
 * mv_qc_prep_iie - Host specific command preparation.
 * @qc: queued command to prepare
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it handles prep of the CRQB
 * (command request block), does some sanity checking, and calls
 * the SG load routine.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_crqb_iie *crqb;
	struct ata_taskfile *tf;
	unsigned in_index;
	u32 flags = 0;

	/* Only DMA and NCQ protocols use the EDMA request queue */
	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ))
		return;

	/* Fill in Gen IIE command request block */
	if (!(qc->tf.flags & ATA_TFLAG_WRITE))
		flags |= CRQB_FLAG_READ;

	WARN_ON(MV_MAX_Q_DEPTH <= qc->tag);
	flags |= qc->tag << CRQB_TAG_SHIFT;
	/* Gen IIE carries the tag in a second (host-queue) field as well */
	flags |= qc->tag << CRQB_HOSTQ_SHIFT;
	flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;

	/* get current queue index from software */
	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;

	/* Gen IIE uses a different CRQB layout, overlaid on the same ring */
	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
	crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma[qc->tag] >> 16) >> 16);
	crqb->flags = cpu_to_le32(flags);

	/* taskfile registers are packed into four 32-bit words */
	tf = &qc->tf;
	crqb->ata_cmd[0] = cpu_to_le32(
			(tf->command << 16) |
			(tf->feature << 24)
		);
	crqb->ata_cmd[1] = cpu_to_le32(
			(tf->lbal << 0) |
			(tf->lbam << 8) |
			(tf->lbah << 16) |
			(tf->device << 24)
		);
	crqb->ata_cmd[2] = cpu_to_le32(
			(tf->hob_lbal << 0) |
			(tf->hob_lbam << 8) |
			(tf->hob_lbah << 16) |
			(tf->hob_feature << 24)
		);
	crqb->ata_cmd[3] = cpu_to_le32(
			(tf->nsect << 0) |
			(tf->hob_nsect << 8)
		);

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;
	mv_fill_sg(qc);
}
1440
/**
 * mv_qc_issue - Initiate a command to the host
 * @qc: queued command to start
 *
 * This routine simply redirects to the general purpose routine
 * if command is not DMA.  Else, it sanity checks our local
 * caches of the request producer/consumer indices then enables
 * DMA and bumps the request producer index.
 *
 * LOCKING:
 * Inherited from caller.
 */
static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_port_priv *pp = ap->private_data;
	u32 in_index;

	if ((qc->tf.protocol != ATA_PROT_DMA) &&
	    (qc->tf.protocol != ATA_PROT_NCQ)) {
		/*
		 * We're about to send a non-EDMA capable command to the
		 * port.  Turn off EDMA so there won't be problems accessing
		 * shadow block, etc registers.
		 */
		mv_stop_edma(ap);
		mv_pmp_select(ap, qc->dev->link->pmp);
		return ata_sff_qc_issue(qc);
	}

	/* (re)enable EDMA in the mode required by this protocol */
	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);

	/* advance the software producer index; mv_qc_prep() already
	 * filled the CRQB slot at the old index value
	 */
	pp->req_idx++;

	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;

	/* and write the request in pointer to kick the EDMA to life */
	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

	return 0;
}
1484
/**
 * mv_err_intr - Handle error interrupts on the port
 * @ap: ATA channel to manipulate
 * @qc: affected command (may be NULL), receives the error mask when set
 *
 * In most cases, just clear the interrupt and move on.  However,
 * some cases require an eDMA reset, which also performs a COMRESET.
 * The SERR case requires a clear of pending errors in the SATA
 * SERROR register.  Finally, if the port disabled DMA,
 * update our cached copy to match.
 *
 * NOTE(review): the original kernel-doc described a @reset_allowed
 * parameter that does not exist in this signature; corrected to @qc.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 edma_err_cause, eh_freeze_mask, serr = 0;
	struct mv_port_priv *pp = ap->private_data;
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int edma_enabled = (pp->pp_flags & MV_PP_FLAG_EDMA_EN);
	unsigned int action = 0, err_mask = 0;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	if (!edma_enabled) {
		/* just a guess: do we need to do this? should we
		 * expand this, and do it in all cases?
		 */
		sata_scr_read(&ap->link, SCR_ERROR, &serr);
		sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
	}

	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);

	/*
	 * All generations share these EDMA error cause bits:
	 */
	if (edma_err_cause & EDMA_ERR_DEV)
		err_mask |= AC_ERR_DEV;
	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
			EDMA_ERR_INTRL_PAR)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
		ata_ehi_push_desc(ehi, "parity error");
	}
	if (edma_err_cause & (EDMA_ERR_DEV_DCON | EDMA_ERR_DEV_CON)) {
		ata_ehi_hotplugged(ehi);
		ata_ehi_push_desc(ehi, edma_err_cause & EDMA_ERR_DEV_DCON ?
			"dev disconnect" : "dev connect");
		action |= ATA_EH_RESET;
	}

	/*
	 * Gen-I has a different SELF_DIS bit,
	 * different FREEZE bits, and no SERR bit:
	 */
	if (IS_GEN_I(hpriv)) {
		eh_freeze_mask = EDMA_EH_FREEZE_5;
		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
			/* hardware disabled EDMA; keep our cached flag in sync */
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
	} else {
		eh_freeze_mask = EDMA_EH_FREEZE;
		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
			ata_ehi_push_desc(ehi, "EDMA self-disable");
		}
		if (edma_err_cause & EDMA_ERR_SERR) {
			/* read-then-write-back clears the pending SERROR bits */
			sata_scr_read(&ap->link, SCR_ERROR, &serr);
			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
			err_mask = AC_ERR_ATA_BUS;
			action |= ATA_EH_RESET;
		}
	}

	/* Clear EDMA now that SERR cleanup done */
	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	if (!err_mask) {
		err_mask = AC_ERR_OTHER;
		action |= ATA_EH_RESET;
	}

	ehi->serror |= serr;
	ehi->action |= action;

	if (qc)
		qc->err_mask |= err_mask;
	else
		ehi->err_mask |= err_mask;

	/* freeze the port for fatal causes, otherwise just abort commands */
	if (edma_err_cause & eh_freeze_mask)
		ata_port_freeze(ap);
	else
		ata_port_abort(ap);
}
1587
1588static void mv_intr_pio(struct ata_port *ap)
1589{
1590 struct ata_queued_cmd *qc;
1591 u8 ata_status;
1592
1593 /* ignore spurious intr if drive still BUSY */
1594 ata_status = readb(ap->ioaddr.status_addr);
1595 if (unlikely(ata_status & ATA_BUSY))
1596 return;
1597
1598 /* get active ATA command */
Tejun Heo9af5c9c2007-08-06 18:36:22 +09001599 qc = ata_qc_from_tag(ap, ap->link.active_tag);
Jeff Garzikbdd4ddd2007-07-12 14:34:26 -04001600 if (unlikely(!qc)) /* no active tag */
1601 return;
1602 if (qc->tf.flags & ATA_TFLAG_POLLING) /* polling; we don't own qc */
1603 return;
1604
1605 /* and finally, complete the ATA command */
1606 qc->err_mask |= ac_err_mask(ata_status);
1607 ata_qc_complete(qc);
1608}
1609
/*
 * mv_intr_edma - Drain the EDMA response queue for @ap.
 *
 * Walks the CRPB response ring from the software consumer index up to
 * the hardware producer index, completing one qc per entry.  On an
 * error status (non-NCQ mode), hands off to mv_err_intr() and returns
 * without updating the hardware consumer pointer.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_intr_edma(struct ata_port *ap)
{
	void __iomem *port_mmio = mv_ap_base(ap);
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	u32 out_index, in_index;
	bool work_done = false;

	/* get h/w response queue pointer */
	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;

	while (1) {
		u16 status;
		unsigned int tag;

		/* get s/w response queue last-read pointer, and compare */
		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
		if (in_index == out_index)
			break;

		/* 50xx: get active ATA command */
		if (IS_GEN_I(hpriv))
			tag = ap->link.active_tag;

		/* Gen II/IIE: get active ATA command via tag, to enable
		 * support for queueing.  this works transparently for
		 * queued and non-queued modes.
		 */
		else
			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;

		qc = ata_qc_from_tag(ap, tag);

		/* For non-NCQ mode, the lower 8 bits of status
		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
		 * which should be zero if all went well.
		 */
		status = le16_to_cpu(pp->crpb[out_index].flags);
		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
			/* error path: note we return WITHOUT writing the
			 * hardware consumer pointer back
			 */
			mv_err_intr(ap, qc);
			return;
		}

		/* and finally, complete the ATA command */
		if (qc) {
			qc->err_mask |=
				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
			ata_qc_complete(qc);
		}

		/* advance software response queue pointer, to
		 * indicate (after the loop completes) to hardware
		 * that we have consumed a response queue entry.
		 */
		work_done = true;
		pp->resp_idx++;
	}

	/* Update the software queue position index in hardware */
	if (work_done)
		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
}
1676
/**
 * mv_host_intr - Handle all interrupts on the given host controller
 * @host: host specific structure
 * @relevant: port error bits relevant to this host controller
 * @hc: which host controller we're to look at
 *
 * Read then write clear the HC interrupt status then walk each
 * port connected to the HC and see if it needs servicing.  Port
 * success ints are reported in the HC interrupt status reg, the
 * port error ints are reported in the higher level main
 * interrupt status register and thus are passed in via the
 * 'relevant' argument.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 hc_irq_cause;
	int port, port0, last_port;

	/* each HC serves MV_PORTS_PER_HC consecutive ports */
	if (hc == 0)
		port0 = 0;
	else
		port0 = MV_PORTS_PER_HC;

	/* non-PCI (SoC) hosts may expose fewer than a full HC's worth */
	if (HAS_PCI(host))
		last_port = port0 + MV_PORTS_PER_HC;
	else
		last_port = port0 + hpriv->n_ports;
	/* we'll need the HC success int register in most cases */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	if (!hc_irq_cause)
		return;

	/* ack the bits we just read */
	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
		hc, relevant, hc_irq_cause);

	for (port = port0; port < last_port; port++) {
		struct ata_port *ap = host->ports[port];
		struct mv_port_priv *pp;
		int have_err_bits, hardport, shift;

		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		pp = ap->private_data;

		/* each port owns two bits in the main cause register */
		shift = port << 1;		/* (port * 2) */
		if (port >= MV_PORTS_PER_HC)
			shift++;	/* skip bit 8 in the HC Main IRQ reg */

		have_err_bits = ((ERR_IRQ << shift) & relevant);

		if (unlikely(have_err_bits)) {
			struct ata_queued_cmd *qc;

			/* polled commands handle their own errors */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
				continue;

			mv_err_intr(ap, qc);
			continue;
		}

		hardport = mv_hardport_from_port(port);	/* range 0..3 */

		/* dispatch completion handling by current EDMA state */
		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
			if ((DMA_IRQ << hardport) & hc_irq_cause)
				mv_intr_edma(ap);
		} else {
			if ((DEV_IRQ << hardport) & hc_irq_cause)
				mv_intr_pio(ap);
		}
	}
	VPRINTK("EXIT\n");
}
1759
/*
 * mv_pci_error - Handle a chip-level PCI error interrupt.
 * @host: host structure
 * @mmio: chip register base
 *
 * Logs and clears the PCI error cause, then freezes every online
 * port for error handling, attributing the error to the active
 * command where one exists.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	struct ata_port *ap;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi;
	unsigned int i, err_mask, printed = 0;
	u32 err_cause;

	err_cause = readl(mmio + hpriv->irq_cause_ofs);

	dev_printk(KERN_ERR, host->dev, "PCI ERROR; PCI IRQ cause=0x%08x\n",
		   err_cause);

	DPRINTK("All regs @ PCI error\n");
	mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));

	/* ack/clear the cause bits before freezing the ports */
	writelfl(0, mmio + hpriv->irq_cause_ofs);

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		if (!ata_link_offline(&ap->link)) {
			ehi = &ap->link.eh_info;
			ata_ehi_clear_desc(ehi);
			/* describe the cause only once, on the first port */
			if (!printed++)
				ata_ehi_push_desc(ehi,
					"PCI err cause 0x%08x", err_cause);
			err_mask = AC_ERR_HOST_BUS;
			ehi->action = ATA_EH_RESET;
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc)
				qc->err_mask |= err_mask;
			else
				ehi->err_mask |= err_mask;

			ata_port_freeze(ap);
		}
	}
}
1799
/**
 * mv_interrupt - Main interrupt event handler
 * @irq: unused
 * @dev_instance: private data; in this case the host structure
 *
 * Read the read only register to determine if any host
 * controllers have pending interrupts.  If so, call lower level
 * routine to handle.  Also check for PCI errors which are only
 * reported here.
 *
 * LOCKING:
 * This routine holds the host lock while processing pending
 * interrupts.
 */
static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	struct mv_host_priv *hpriv = host->private_data;
	unsigned int hc, handled = 0, n_hcs;
	void __iomem *mmio = hpriv->base;
	u32 main_cause, main_mask;

	spin_lock(&host->lock);
	main_cause = readl(hpriv->main_cause_reg_addr);
	main_mask = readl(hpriv->main_mask_reg_addr);
	/*
	 * Deal with cases where we either have nothing pending, or have read
	 * a bogus register value which can indicate HW removal or PCI fault.
	 */
	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
		goto out_unlock;

	n_hcs = mv_get_hc_count(host->ports[0]->flags);

	/* PCI errors are only reported at this level */
	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
		mv_pci_error(host, mmio);
		handled = 1;
		goto out_unlock;	/* skip all other HC irq handling */
	}

	/* dispatch each HC whose cause bits are pending */
	for (hc = 0; hc < n_hcs; hc++) {
		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
		if (relevant) {
			mv_host_intr(host, relevant, hc);
			handled = 1;
		}
	}

out_unlock:
	spin_unlock(&host->lock);
	return IRQ_RETVAL(handled);
}
1852
Jeff Garzikc9d39132005-11-13 17:47:51 -05001853static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
1854{
1855 unsigned int ofs;
1856
1857 switch (sc_reg_in) {
1858 case SCR_STATUS:
1859 case SCR_ERROR:
1860 case SCR_CONTROL:
1861 ofs = sc_reg_in * sizeof(u32);
1862 break;
1863 default:
1864 ofs = 0xffffffffU;
1865 break;
1866 }
1867 return ofs;
1868}
1869
Tejun Heoda3dbb12007-07-16 14:29:40 +09001870static int mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in, u32 *val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001871{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001872 struct mv_host_priv *hpriv = ap->host->private_data;
1873 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001874 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001875 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1876
Tejun Heoda3dbb12007-07-16 14:29:40 +09001877 if (ofs != 0xffffffffU) {
1878 *val = readl(addr + ofs);
1879 return 0;
1880 } else
1881 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001882}
1883
Tejun Heoda3dbb12007-07-16 14:29:40 +09001884static int mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
Jeff Garzikc9d39132005-11-13 17:47:51 -05001885{
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05001886 struct mv_host_priv *hpriv = ap->host->private_data;
1887 void __iomem *mmio = hpriv->base;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001888 void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
Jeff Garzikc9d39132005-11-13 17:47:51 -05001889 unsigned int ofs = mv5_scr_offset(sc_reg_in);
1890
Tejun Heoda3dbb12007-07-16 14:29:40 +09001891 if (ofs != 0xffffffffU) {
Tejun Heo0d5ff562007-02-01 15:06:36 +09001892 writelfl(val, addr + ofs);
Tejun Heoda3dbb12007-07-16 14:29:40 +09001893 return 0;
1894 } else
1895 return -EINVAL;
Jeff Garzikc9d39132005-11-13 17:47:51 -05001896}
1897
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001898static void mv5_reset_bus(struct ata_host *host, void __iomem *mmio)
Jeff Garzik522479f2005-11-12 22:14:02 -05001899{
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001900 struct pci_dev *pdev = to_pci_dev(host->dev);
Jeff Garzik522479f2005-11-12 22:14:02 -05001901 int early_5080;
1902
Auke Kok44c10132007-06-08 15:46:36 -07001903 early_5080 = (pdev->device == 0x5080) && (pdev->revision == 0);
Jeff Garzik522479f2005-11-12 22:14:02 -05001904
1905 if (!early_5080) {
1906 u32 tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
1907 tmp |= (1 << 0);
1908 writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
1909 }
1910
Saeed Bishara7bb3c522008-01-30 11:50:45 -11001911 mv_reset_pci_bus(host, mmio);
Jeff Garzik522479f2005-11-12 22:14:02 -05001912}
1913
1914static void mv5_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
1915{
1916 writel(0x0fcfffff, mmio + MV_FLASH_CTL);
1917}
1918
Jeff Garzik47c2b672005-11-12 21:13:17 -05001919static void mv5_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001920 void __iomem *mmio)
1921{
Jeff Garzikc9d39132005-11-13 17:47:51 -05001922 void __iomem *phy_mmio = mv5_phy_base(mmio, idx);
1923 u32 tmp;
1924
1925 tmp = readl(phy_mmio + MV5_PHY_MODE);
1926
1927 hpriv->signal[idx].pre = tmp & 0x1800; /* bits 12:11 */
1928 hpriv->signal[idx].amps = tmp & 0xe0; /* bits 7:5 */
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05001929}
1930
/*
 * mv5_enable_leds - LED setup for 50xx chips.
 *
 * Zeroes the GPIO port control register, then modifies the
 * EXP ROM BAR control register.
 */
static void mv5_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	writel(0, mmio + MV_GPIO_PORT_CTL);

	/* FIXME: handle MV_HP_ERRATA_50XXB2 errata */

	tmp = readl(mmio + MV_PCI_EXP_ROM_BAR_CTL);
	/* NOTE(review): "|= ~(1 << 0)" sets EVERY bit except bit 0, whereas
	 * mv5_reset_bus() sets only bit 0 of this same register.  If the
	 * intent here was to clear bit 0, this should be "&= ~(1 << 0)" —
	 * verify against the Marvell 5080 register documentation before
	 * changing hardware behavior.
	 */
	tmp |= ~(1 << 0);
	writel(tmp, mmio + MV_PCI_EXP_ROM_BAR_CTL);
}
1943
/*
 * mv5_phy_errata - Apply 50xx PHY errata fixes and signal settings.
 * @hpriv: host private data (holds hp_flags and cached signal values)
 * @mmio: chip register base
 * @port: port number whose PHY is configured
 *
 * For chips with the 50XXB0 erratum, tweaks the LT mode and PHY
 * control registers first, then (for all chips) rewrites the
 * pre-emphasis/amplitude fields of the PHY mode register from the
 * values cached by mv5_read_preamp().
 */
static void mv5_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *phy_mmio = mv5_phy_base(mmio, port);
	const u32 mask = (1<<12) | (1<<11) | (1<<7) | (1<<6) | (1<<5);
	u32 tmp;
	int fix_apm_sq = (hpriv->hp_flags & MV_HP_ERRATA_50XXB0);

	if (fix_apm_sq) {
		tmp = readl(phy_mmio + MV5_LT_MODE);
		tmp |= (1 << 19);
		writel(tmp, phy_mmio + MV5_LT_MODE);

		/* force the low two bits of PHY_CTL to 01 */
		tmp = readl(phy_mmio + MV5_PHY_CTL);
		tmp &= ~0x3;
		tmp |= 0x1;
		writel(tmp, phy_mmio + MV5_PHY_CTL);
	}

	/* replace the masked signal bits with the cached settings */
	tmp = readl(phy_mmio + MV5_PHY_MODE);
	tmp &= ~mask;
	tmp |= hpriv->signal[port].pre;
	tmp |= hpriv->signal[port].amps;
	writel(tmp, phy_mmio + MV5_PHY_MODE);
}
1969
Jeff Garzikc9d39132005-11-13 17:47:51 -05001970
#undef ZERO
#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv5_reset_hc_port - Reset and re-initialize one 50xx port.
 *
 * Disables EDMA, resets the channel, then clears/initializes the
 * per-port EDMA registers to a known state.
 */
static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x11f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
#undef ZERO
2000
2001#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv5_reset_one_hc - reset the per-host-controller registers (50xx).
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA registers
 * @hc: host controller index
 *
 * Zeroes four HC registers via the file-scope ZERO() macro, then does a
 * read-modify-write of the register at offset 0x20 (mask 0x1c1c1c1c kept,
 * 0x03030303 set).
 */
static void mv5_reset_one_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int hc)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
	u32 tmp;

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);
	ZERO(0x018);

	tmp = readl(hc_mmio + 0x20);
	tmp &= 0x1c1c1c1c;
	tmp |= 0x03030303;
	writel(tmp, hc_mmio + 0x20);
}
2018#undef ZERO
2019
2020static int mv5_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
2021 unsigned int n_hc)
2022{
2023 unsigned int hc, port;
2024
2025 for (hc = 0; hc < n_hc; hc++) {
2026 for (port = 0; port < MV_PORTS_PER_HC; port++)
2027 mv5_reset_hc_port(hpriv, mmio,
2028 (hc * MV_PORTS_PER_HC) + port);
2029
2030 mv5_reset_one_hc(hpriv, mmio, hc);
2031 }
2032
2033 return 0;
Jeff Garzik47c2b672005-11-12 21:13:17 -05002034}
2035
Jeff Garzik101ffae2005-11-12 22:17:49 -05002036#undef ZERO
2037#define ZERO(reg) writel(0, mmio + (reg))
/*
 * mv_reset_pci_bus - reinitialize PCI-side registers after a global reset.
 * @host: ATA host
 * @mmio: base address of the HBA registers
 *
 * Clears PCI mode bits 23:16, zeroes the PCI error/IRQ bookkeeping
 * registers (via the file-scope ZERO() macro), and programs the
 * crossbar timeout.  irq_cause_ofs/irq_mask_ofs differ between
 * conventional-PCI and PCIe chips, hence the indirection via hpriv.
 */
static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
{
	struct mv_host_priv *hpriv = host->private_data;
	u32 tmp;

	tmp = readl(mmio + MV_PCI_MODE);
	tmp &= 0xff00ffff;
	writel(tmp, mmio + MV_PCI_MODE);

	ZERO(MV_PCI_DISC_TIMER);
	ZERO(MV_PCI_MSI_TRIGGER);
	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT);
	ZERO(HC_MAIN_IRQ_MASK_OFS);
	ZERO(MV_PCI_SERR_MASK);
	ZERO(hpriv->irq_cause_ofs);
	ZERO(hpriv->irq_mask_ofs);
	ZERO(MV_PCI_ERR_LOW_ADDRESS);
	ZERO(MV_PCI_ERR_HIGH_ADDRESS);
	ZERO(MV_PCI_ERR_ATTRIBUTE);
	ZERO(MV_PCI_ERR_COMMAND);
}
2059#undef ZERO
2060
/*
 * mv6_reset_flash - flash-interface reset for 60xx-family chips.
 * @hpriv: host private data
 * @mmio: base address of the HBA registers
 *
 * Performs the common 50xx flash reset, then adjusts the GPIO port
 * control register: keep only the low two bits, set bits 5 and 6.
 */
static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	u32 tmp;

	mv5_reset_flash(hpriv, mmio);

	tmp = readl(mmio + MV_GPIO_PORT_CTL);
	tmp &= 0x3;
	tmp |= (1 << 5) | (1 << 6);
	writel(tmp, mmio + MV_GPIO_PORT_CTL);
}
2072
/**
 *      mv6_reset_hc - Perform the 6xxx global soft reset
 *      @hpriv: host private data
 *      @mmio: base address of the HBA
 *      @n_hc: number of host controllers (unused by this routine)
 *
 *      This routine only applies to 6xxx parts.  Stops the PCI master,
 *      waits for it to drain, strobes the global soft-reset bit, then
 *      clears it and re-enables the PCI master.
 *
 *      Returns 0 on success, 1 on any timeout.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv6_reset_hc(struct mv_host_priv *hpriv, void __iomem *mmio,
			unsigned int n_hc)
{
	void __iomem *reg = mmio + PCI_MAIN_CMD_STS_OFS;
	int i, rc = 0;
	u32 t;

	/* Following procedure defined in PCI "main command and status
	 * register" table.
	 */
	t = readl(reg);
	writel(t | STOP_PCI_MASTER, reg);

	/* poll up to ~1ms for the PCI master queue to drain */
	for (i = 0; i < 1000; i++) {
		udelay(1);
		t = readl(reg);
		if (PCI_MASTER_EMPTY & t)
			break;
	}
	if (!(PCI_MASTER_EMPTY & t)) {
		printk(KERN_ERR DRV_NAME ": PCI master won't flush\n");
		rc = 1;
		goto done;
	}

	/* set reset (up to 5 write/readback attempts) */
	i = 5;
	do {
		writel(t | GLOB_SFT_RST, reg);
		t = readl(reg);
		udelay(1);
	} while (!(GLOB_SFT_RST & t) && (i-- > 0));

	if (!(GLOB_SFT_RST & t)) {
		printk(KERN_ERR DRV_NAME ": can't set global reset\n");
		rc = 1;
		goto done;
	}

	/* clear reset and *reenable the PCI master* (not mentioned in spec) */
	i = 5;
	do {
		writel(t & ~(GLOB_SFT_RST | STOP_PCI_MASTER), reg);
		t = readl(reg);
		udelay(1);
	} while ((GLOB_SFT_RST & t) && (i-- > 0));

	if (GLOB_SFT_RST & t) {
		printk(KERN_ERR DRV_NAME ": can't clear global reset\n");
		rc = 1;
	}
	/*
	 * Temporary: wait 3 seconds before port-probing can happen,
	 * so that we don't miss finding sleepy SilXXXX port-multipliers.
	 * This can go away once hotplug is fully/correctly implemented.
	 */
	if (rc == 0)
		msleep(3000);
done:
	return rc;
}
2143
Jeff Garzik47c2b672005-11-12 21:13:17 -05002144static void mv6_read_preamp(struct mv_host_priv *hpriv, int idx,
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002145 void __iomem *mmio)
2146{
2147 void __iomem *port_mmio;
2148 u32 tmp;
2149
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002150 tmp = readl(mmio + MV_RESET_CFG);
2151 if ((tmp & (1 << 0)) == 0) {
Jeff Garzik47c2b672005-11-12 21:13:17 -05002152 hpriv->signal[idx].amps = 0x7 << 8;
Jeff Garzikba3fe8f2005-11-12 19:08:48 -05002153 hpriv->signal[idx].pre = 0x1 << 5;
2154 return;
2155 }
2156
2157 port_mmio = mv_port_base(mmio, idx);
2158 tmp = readl(port_mmio + PHY_MODE2);
2159
2160 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2161 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2162}
2163
/* mv6_enable_leds - board LED setup for 60xx chips: a single write of
 * 0x00000060 to the GPIO port control register.
 */
static void mv6_enable_leds(struct mv_host_priv *hpriv, void __iomem *mmio)
{
	writel(0x00000060, mmio + MV_GPIO_PORT_CTL);
}
2168
/*
 * mv6_phy_errata - apply 60xx/IIE PHY errata workarounds and signal values.
 * @hpriv: host private data (errata flags, saved signal values)
 * @mmio: base address of the HBA registers
 * @port: port number whose PHY is being configured
 *
 * Sequence: optional PHY_MODE2 strobe (B2/C0 errata), a fixed PHY_MODE3
 * adjustment, the FEr SATA#10 PHY_MODE4 workaround (with PHY_MODE3
 * save/restore on 60X1B2 parts), then restoration of the saved
 * pre-emphasis/amplitude values into PHY_MODE2.
 */
static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
			   unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	u32 hp_flags = hpriv->hp_flags;
	int fix_phy_mode2 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	int fix_phy_mode4 =
		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
	u32 m2, tmp;

	if (fix_phy_mode2) {
		/* pulse bit 31 high (with bit 16 cleared), then both low */
		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~(1 << 16);
		m2 |= (1 << 31);
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);

		m2 = readl(port_mmio + PHY_MODE2);
		m2 &= ~((1 << 16) | (1 << 31));
		writel(m2, port_mmio + PHY_MODE2);

		udelay(200);
	}

	/* who knows what this magic does */
	tmp = readl(port_mmio + PHY_MODE3);
	tmp &= ~0x7F800000;
	tmp |= 0x2A800000;
	writel(tmp, port_mmio + PHY_MODE3);

	if (fix_phy_mode4) {
		u32 m4;

		m4 = readl(port_mmio + PHY_MODE4);

		/* on 60X1B2 the PHY_MODE4 write clobbers PHY_MODE3,
		 * so save it here and restore it below */
		if (hp_flags & MV_HP_ERRATA_60X1B2)
			tmp = readl(port_mmio + PHY_MODE3);

		/* workaround for errata FEr SATA#10 (part 1) */
		m4 = (m4 & ~(1 << 1)) | (1 << 0);

		writel(m4, port_mmio + PHY_MODE4);

		if (hp_flags & MV_HP_ERRATA_60X1B2)
			writel(tmp, port_mmio + PHY_MODE3);
	}

	/* Revert values of pre-emphasis and signal amps to the saved ones */
	m2 = readl(port_mmio + PHY_MODE2);

	m2 &= ~MV_M2_PREAMP_MASK;
	m2 |= hpriv->signal[port].amps;
	m2 |= hpriv->signal[port].pre;
	m2 &= ~(1 << 16);

	/* according to mvSata 3.6.1, some IIE values are fixed */
	if (IS_GEN_IIE(hpriv)) {
		m2 &= ~0xC30FF01F;
		m2 |= 0x0000900F;
	}

	writel(m2, port_mmio + PHY_MODE2);
}
2235
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002236/* TODO: use the generic LED interface to configure the SATA Presence */
2237/* & Acitivy LEDs on the board */
2238static void mv_soc_enable_leds(struct mv_host_priv *hpriv,
2239 void __iomem *mmio)
2240{
2241 return;
2242}
2243
2244static void mv_soc_read_preamp(struct mv_host_priv *hpriv, int idx,
2245 void __iomem *mmio)
2246{
2247 void __iomem *port_mmio;
2248 u32 tmp;
2249
2250 port_mmio = mv_port_base(mmio, idx);
2251 tmp = readl(port_mmio + PHY_MODE2);
2252
2253 hpriv->signal[idx].amps = tmp & 0x700; /* bits 10:8 */
2254 hpriv->signal[idx].pre = tmp & 0xe0; /* bits 7:5 */
2255}
2256
2257#undef ZERO
2258#define ZERO(reg) writel(0, port_mmio + (reg))
/*
 * mv_soc_reset_hc_port - reset one SoC port and reinitialize its EDMA regs.
 * @hpriv: host private data
 * @mmio: base address of the HBA registers
 * @port: port number to reset
 *
 * Mirrors mv5_reset_hc_port() except for the EDMA_CFG value (0x101f).
 * Uses the file-scope ZERO() macro defined just above.
 */
static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv,
				 void __iomem *mmio, unsigned int port)
{
	void __iomem *port_mmio = mv_port_base(mmio, port);

	/*
	 * The datasheet warns against setting ATA_RST when EDMA is active
	 * (but doesn't say what the problem might be).  So we first try
	 * to disable the EDMA engine before doing the ATA_RST operation.
	 */
	mv_reset_channel(hpriv, mmio, port);

	ZERO(0x028);		/* command */
	writel(0x101f, port_mmio + EDMA_CFG_OFS);
	ZERO(0x004);		/* timer */
	ZERO(0x008);		/* irq err cause */
	ZERO(0x00c);		/* irq err mask */
	ZERO(0x010);		/* rq bah */
	ZERO(0x014);		/* rq inp */
	ZERO(0x018);		/* rq outp */
	ZERO(0x01c);		/* respq bah */
	ZERO(0x024);		/* respq outp */
	ZERO(0x020);		/* respq inp */
	ZERO(0x02c);		/* test control */
	writel(0xbc, port_mmio + EDMA_IORDY_TMOUT);
}
2285
2286#undef ZERO
2287
2288#define ZERO(reg) writel(0, hc_mmio + (reg))
/*
 * mv_soc_reset_one_hc - reset host-controller registers on SoC chips.
 * @hpriv: host private data (unused here)
 * @mmio: base address of the HBA registers
 *
 * SoC parts have a single HC (index 0); zero three of its registers
 * via the file-scope ZERO() macro.
 */
static void mv_soc_reset_one_hc(struct mv_host_priv *hpriv,
				void __iomem *mmio)
{
	void __iomem *hc_mmio = mv_hc_base(mmio, 0);

	ZERO(0x00c);
	ZERO(0x010);
	ZERO(0x014);

}
2299
2300#undef ZERO
2301
2302static int mv_soc_reset_hc(struct mv_host_priv *hpriv,
2303 void __iomem *mmio, unsigned int n_hc)
2304{
2305 unsigned int port;
2306
2307 for (port = 0; port < hpriv->n_ports; port++)
2308 mv_soc_reset_hc_port(hpriv, mmio, port);
2309
2310 mv_soc_reset_one_hc(hpriv, mmio);
2311
2312 return 0;
2313}
2314
2315static void mv_soc_reset_flash(struct mv_host_priv *hpriv,
2316 void __iomem *mmio)
2317{
2318 return;
2319}
2320
2321static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio)
2322{
2323 return;
2324}
2325
Mark Lordb67a1062008-03-31 19:35:13 -04002326static void mv_setup_ifctl(void __iomem *port_mmio, int want_gen2i)
2327{
2328 u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG);
2329
2330 ifctl = (ifctl & 0xf7f) | 0x9b1000; /* from chip spec */
2331 if (want_gen2i)
2332 ifctl |= (1 << 7); /* enable gen2i speed */
2333 writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG);
2334}
2335
/*
 * Caller must ensure that EDMA is not active,
 * by first doing mv_stop_edma() where needed.
 *
 * Hard-resets one channel: stops its EDMA engine, asserts ATA_RST,
 * (on Gen II+ chips) re-enables 3.0 Gb/s speed in SATA_INTERFACE_CFG,
 * strobes ATA_RST again, then runs the chip-specific PHY errata hook.
 */
static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
			     unsigned int port_no)
{
	void __iomem *port_mmio = mv_port_base(mmio, port_no);

	mv_stop_edma_engine(port_mmio);
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);

	if (!IS_GEN_I(hpriv)) {
		/* Enable 3.0gb/s link speed */
		mv_setup_ifctl(port_mmio, 1);
	}
	/*
	 * Strobing ATA_RST here causes a hard reset of the SATA transport,
	 * link, and physical layers.  It resets all SATA interface registers
	 * (except for SATA_INTERFACE_CFG), and issues a COMRESET to the dev.
	 */
	writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS);
	udelay(25);	/* allow reset propagation */
	writelfl(0, port_mmio + EDMA_CMD_OFS);

	hpriv->ops->phy_errata(hpriv, mmio, port_no);

	/* Gen I parts need extra settle time after reset */
	if (IS_GEN_I(hpriv))
		mdelay(1);
}
2366
Mark Lorde49856d2008-04-16 14:59:07 -04002367static void mv_pmp_select(struct ata_port *ap, int pmp)
Jeff Garzikc9d39132005-11-13 17:47:51 -05002368{
Mark Lorde49856d2008-04-16 14:59:07 -04002369 if (sata_pmp_supported(ap)) {
2370 void __iomem *port_mmio = mv_ap_base(ap);
2371 u32 reg = readl(port_mmio + SATA_IFCTL_OFS);
2372 int old = reg & 0xf;
Jeff Garzikc9d39132005-11-13 17:47:51 -05002373
Mark Lorde49856d2008-04-16 14:59:07 -04002374 if (old != pmp) {
2375 reg = (reg & ~0xf) | pmp;
2376 writelfl(reg, port_mmio + SATA_IFCTL_OFS);
2377 }
Tejun Heoda3dbb12007-07-16 14:29:40 +09002378 }
Brett Russ20f733e2005-09-01 18:26:17 -04002379}
2380
/* mv_pmp_hardreset - select the PMP port used for SRST, then do the
 * standard SATA hardreset on @link.
 */
static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
			    unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return sata_std_hardreset(link, class, deadline);
}
Jeff Garzik0ea9e172007-07-13 17:06:45 -04002387
/* mv_softreset - select the PMP port used for SRST, then perform a
 * SFF-style software reset on @link.
 */
static int mv_softreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	mv_pmp_select(link->ap, sata_srst_pmp(link));
	return ata_sff_softreset(link, class, deadline);
}
2394
/*
 * mv_hardreset - channel-level hardreset with the FEr SATA#10 retry loop.
 * @link: ATA link to reset
 * @class: out: device class
 * @deadline: jiffies deadline for the operation
 *
 * Resets the channel (which also re-enables gen2i speed on Gen II+),
 * then retries sata_link_hardreset() until SStatus settles to a
 * recognized value.  After 5 failed attempts on Gen II+ with
 * SStatus == 0x121, drops to 1.5 Gb/s and extends the deadline once.
 */
static int mv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct mv_host_priv *hpriv = ap->host->private_data;
	struct mv_port_priv *pp = ap->private_data;
	void __iomem *mmio = hpriv->base;
	int rc, attempts = 0, extra = 0;
	u32 sstatus;
	bool online;

	mv_reset_channel(hpriv, mmio, ap->port_no);
	pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;

	/* Workaround for errata FEr SATA#10 (part 2) */
	do {
		const unsigned long *timing =
				sata_ehc_deb_timing(&link->eh_context);

		rc = sata_link_hardreset(link, timing, deadline + extra,
					 &online, NULL);
		if (rc)
			return rc;
		/* NOTE(review): sata_scr_read()'s return value is not
		 * checked; if the read fails, sstatus is used while
		 * uninitialized -- confirm whether a failure is possible
		 * here and bail out if so.
		 */
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if (!IS_GEN_I(hpriv) && ++attempts >= 5 && sstatus == 0x121) {
			/* Force 1.5gb/s link speed and try again */
			mv_setup_ifctl(mv_ap_base(ap), 0);
			if (time_after(jiffies + HZ, deadline))
				extra = HZ; /* only extend it once, max */
		}
	} while (sstatus != 0x0 && sstatus != 0x113 && sstatus != 0x123);

	return rc;
}
2429
/*
 * mv_eh_freeze - libata EH freeze hook: quiesce the port.
 * @ap: ATA port to freeze
 *
 * Stops EDMA on the port and clears its DONE/ERR bits in the main
 * interrupt mask so the port stops asserting interrupts.
 */
static void mv_eh_freeze(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	u32 main_mask;

	/* FIXME: handle coalescing completion events properly */

	mv_stop_edma(ap);
	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* disable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
2446
/*
 * mv_eh_thaw - libata EH thaw hook: re-arm the port's interrupts.
 * @ap: ATA port to thaw
 *
 * Clears any latched EDMA error and HC-level IRQ-cause bits for this
 * port, then re-enables its DONE/ERR bits in the main interrupt mask
 * (undoing mv_eh_freeze()).
 */
static void mv_eh_thaw(struct ata_port *ap)
{
	struct mv_host_priv *hpriv = ap->host->private_data;
	unsigned int shift, hardport, port = ap->port_no;
	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
	void __iomem *port_mmio = mv_ap_base(ap);
	u32 main_mask, hc_irq_cause;

	/* FIXME: handle coalescing completion events properly */

	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);

	/* clear EDMA errors on this port */
	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* clear pending irq events */
	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);

	/* enable assertion of portN err, done events */
	main_mask = readl(hpriv->main_mask_reg_addr);
	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
	writelfl(main_mask, hpriv->main_mask_reg_addr);
}
2472
/**
 *      mv_port_init - Perform some early initialization on a single port.
 *      @port: libata data structure storing shadow register addresses
 *      @port_mmio: base address of the port
 *
 *      Initialize shadow register mmio addresses, clear outstanding
 *      interrupts on the port, and unmask interrupts for the future
 *      start of the port.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
	void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
	unsigned serr_ofs;

	/* PIO related setup: taskfile shadow registers live in a block of
	 * u32 slots starting at shd_base, one per ATA_REG_* index.
	 */
	port->data_addr = shd_base + (sizeof(u32) * ATA_REG_DATA);
	port->error_addr =
		port->feature_addr = shd_base + (sizeof(u32) * ATA_REG_ERR);
	port->nsect_addr = shd_base + (sizeof(u32) * ATA_REG_NSECT);
	port->lbal_addr = shd_base + (sizeof(u32) * ATA_REG_LBAL);
	port->lbam_addr = shd_base + (sizeof(u32) * ATA_REG_LBAM);
	port->lbah_addr = shd_base + (sizeof(u32) * ATA_REG_LBAH);
	port->device_addr = shd_base + (sizeof(u32) * ATA_REG_DEVICE);
	port->status_addr =
		port->command_addr = shd_base + (sizeof(u32) * ATA_REG_STATUS);
	/* special case: control/altstatus doesn't have ATA_REG_ address */
	port->altstatus_addr = port->ctl_addr = shd_base + SHD_CTL_AST_OFS;

	/* unused: */
	port->cmd_addr = port->bmdma_addr = port->scr_addr = NULL;

	/* Clear any currently outstanding port interrupt conditions */
	serr_ofs = mv_scr_offset(SCR_ERROR);
	writelfl(readl(port_mmio + serr_ofs), port_mmio + serr_ofs);
	writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

	/* unmask all non-transient EDMA error interrupts */
	writelfl(~EDMA_ERR_IRQ_TRANSIENT, port_mmio + EDMA_ERR_IRQ_MASK_OFS);

	VPRINTK("EDMA cfg=0x%08x EDMA IRQ err cause/mask=0x%08x/0x%08x\n",
		readl(port_mmio + EDMA_CFG_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS),
		readl(port_mmio + EDMA_ERR_IRQ_MASK_OFS));
}
2521
/*
 * mv_chip_id - identify the chip generation and set errata/IRQ config.
 * @host: ATA host
 * @board_idx: board table index (chip_5080, chip_604x, ...)
 *
 * Selects the hw-ops vtable, sets generation and per-revision errata
 * flags, and chooses PCI vs PCIe IRQ register offsets.  Returns 0 on
 * success, 1 for an unrecognized board index.
 */
static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	struct mv_host_priv *hpriv = host->private_data;
	u32 hp_flags = hpriv->hp_flags;

	switch (board_idx) {
	case chip_5080:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x1:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 50XXB2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_504x:
	case chip_508x:
		hpriv->ops = &mv5xxx_ops;
		hp_flags |= MV_HP_GEN_I;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_50XXB0;
			break;
		case 0x3:
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_50XXB2;
			break;
		}
		break;

	case chip_604x:
	case chip_608x:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_II;

		switch (pdev->revision) {
		case 0x7:
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		case 0x9:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
				   "Applying B2 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1B2;
			break;
		}
		break;

	case chip_7042:
		hp_flags |= MV_HP_PCIE;
		if (pdev->vendor == PCI_VENDOR_ID_TTI &&
		    (pdev->device == 0x2300 || pdev->device == 0x2310))
		{
			/*
			 * Highpoint RocketRAID PCIe 23xx series cards:
			 *
			 * Unconfigured drives are treated as "Legacy"
			 * by the BIOS, and it overwrites sector 8 with
			 * a "Lgcy" metadata block prior to Linux boot.
			 *
			 * Configured drives (RAID or JBOD) leave sector 8
			 * alone, but instead overwrite a high numbered
			 * sector for the RAID metadata.  This sector can
			 * be determined exactly, by truncating the physical
			 * drive capacity to a nice even GB value.
			 *
			 * RAID metadata is at: (dev->n_sectors & ~0xfffff)
			 *
			 * Warn the user, lest they think we're just buggy.
			 */
			printk(KERN_WARNING DRV_NAME ": Highpoint RocketRAID"
				" BIOS CORRUPTS DATA on all attached drives,"
				" regardless of if/how they are configured."
				" BEWARE!\n");
			printk(KERN_WARNING DRV_NAME ": For data safety, do not"
				" use sectors 8-9 on \"Legacy\" drives,"
				" and avoid the final two gigabytes on"
				" all RocketRAID BIOS initialized drives.\n");
		}
		/* FALLTHRU: chip_7042 shares the chip_6042 (Gen IIE) setup */
	case chip_6042:
		hpriv->ops = &mv6xxx_ops;
		hp_flags |= MV_HP_GEN_IIE;

		switch (pdev->revision) {
		case 0x0:
			hp_flags |= MV_HP_ERRATA_XX42A0;
			break;
		case 0x1:
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		default:
			dev_printk(KERN_WARNING, &pdev->dev,
			   "Applying 60X1C0 workarounds to unknown rev\n");
			hp_flags |= MV_HP_ERRATA_60X1C0;
			break;
		}
		break;
	case chip_soc:
		hpriv->ops = &mv_soc_ops;
		hp_flags |= MV_HP_ERRATA_60X1C0;
		break;

	default:
		dev_printk(KERN_ERR, host->dev,
			   "BUG: invalid board index %u\n", board_idx);
		return 1;
	}

	hpriv->hp_flags = hp_flags;
	/* PCIe chips keep the IRQ cause/mask registers at different offsets */
	if (hp_flags & MV_HP_PCIE) {
		hpriv->irq_cause_ofs = PCIE_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCIE_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCIE_UNMASK_ALL_IRQS;
	} else {
		hpriv->irq_cause_ofs = PCI_IRQ_CAUSE_OFS;
		hpriv->irq_mask_ofs = PCI_IRQ_MASK_OFS;
		hpriv->unmask_all_irqs = PCI_UNMASK_ALL_IRQS;
	}

	return 0;
}
2661
/**
 *      mv_init_host - Perform some early initialization of the host.
 *      @host: ATA host to initialize
 *      @board_idx: controller index
 *
 *      If possible, do an early global reset of the host.  Then do
 *      our port init and clear/unmask all/relevant host interrupts.
 *
 *      Returns 0 on success, nonzero from mv_chip_id()/reset_hc()
 *      on failure.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_init_host(struct ata_host *host, unsigned int board_idx)
{
	int rc = 0, n_hc, port, hc;
	struct mv_host_priv *hpriv = host->private_data;
	void __iomem *mmio = hpriv->base;

	rc = mv_chip_id(host, board_idx);
	if (rc)
		goto done;

	/* main IRQ registers are at chip-dependent offsets (PCI vs SoC) */
	if (HAS_PCI(host)) {
		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_MAIN_IRQ_MASK_OFS;
	} else {
		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
		hpriv->main_mask_reg_addr = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
	}

	/* global interrupt mask: 0 == mask everything */
	writel(0, hpriv->main_mask_reg_addr);

	n_hc = mv_get_hc_count(host->ports[0]->flags);

	for (port = 0; port < host->n_ports; port++)
		hpriv->ops->read_preamp(hpriv, port, mmio);

	rc = hpriv->ops->reset_hc(hpriv, mmio, n_hc);
	if (rc)
		goto done;

	hpriv->ops->reset_flash(hpriv, mmio);
	hpriv->ops->reset_bus(host, mmio);
	hpriv->ops->enable_leds(hpriv, mmio);

	for (port = 0; port < host->n_ports; port++) {
		struct ata_port *ap = host->ports[port];
		void __iomem *port_mmio = mv_port_base(mmio, port);

		mv_port_init(&ap->ioaddr, port_mmio);

#ifdef CONFIG_PCI
		if (HAS_PCI(host)) {
			unsigned int offset = port_mmio - mmio;
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
		}
#endif
	}

	for (hc = 0; hc < n_hc; hc++) {
		void __iomem *hc_mmio = mv_hc_base(mmio, hc);

		VPRINTK("HC%i: HC config=0x%08x HC IRQ cause "
			"(before clear)=0x%08x\n", hc,
			readl(hc_mmio + HC_CFG_OFS),
			readl(hc_mmio + HC_IRQ_CAUSE_OFS));

		/* Clear any currently outstanding hc interrupt conditions */
		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
	}

	if (HAS_PCI(host)) {
		/* Clear any currently outstanding host interrupt conditions */
		writelfl(0, mmio + hpriv->irq_cause_ofs);

		/* and unmask interrupt generation for host regs */
		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
		if (IS_GEN_I(hpriv))
			writelfl(~HC_MAIN_MASKED_IRQS_5,
				 hpriv->main_mask_reg_addr);
		else
			writelfl(~HC_MAIN_MASKED_IRQS,
				 hpriv->main_mask_reg_addr);

		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
			"PCI int cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr),
			readl(mmio + hpriv->irq_cause_ofs),
			readl(mmio + hpriv->irq_mask_ofs));
	} else {
		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
			 hpriv->main_mask_reg_addr);
		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
			readl(hpriv->main_cause_reg_addr),
			readl(hpriv->main_mask_reg_addr));
	}
done:
	return rc;
}
2763
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002764static int mv_create_dma_pools(struct mv_host_priv *hpriv, struct device *dev)
2765{
2766 hpriv->crqb_pool = dmam_pool_create("crqb_q", dev, MV_CRQB_Q_SZ,
2767 MV_CRQB_Q_SZ, 0);
2768 if (!hpriv->crqb_pool)
2769 return -ENOMEM;
2770
2771 hpriv->crpb_pool = dmam_pool_create("crpb_q", dev, MV_CRPB_Q_SZ,
2772 MV_CRPB_Q_SZ, 0);
2773 if (!hpriv->crpb_pool)
2774 return -ENOMEM;
2775
2776 hpriv->sg_tbl_pool = dmam_pool_create("sg_tbl", dev, MV_SG_TBL_SZ,
2777 MV_SG_TBL_SZ, 0);
2778 if (!hpriv->sg_tbl_pool)
2779 return -ENOMEM;
2780
2781 return 0;
2782}
2783
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002784static void mv_conf_mbus_windows(struct mv_host_priv *hpriv,
2785 struct mbus_dram_target_info *dram)
2786{
2787 int i;
2788
2789 for (i = 0; i < 4; i++) {
2790 writel(0, hpriv->base + WINDOW_CTRL(i));
2791 writel(0, hpriv->base + WINDOW_BASE(i));
2792 }
2793
2794 for (i = 0; i < dram->num_cs; i++) {
2795 struct mbus_dram_window *cs = dram->cs + i;
2796
2797 writel(((cs->size - 1) & 0xffff0000) |
2798 (cs->mbus_attr << 8) |
2799 (dram->mbus_dram_target_id << 4) | 1,
2800 hpriv->base + WINDOW_CTRL(i));
2801 writel(cs->base, hpriv->base + WINDOW_BASE(i));
2802 }
2803}
2804
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002805/**
2806 * mv_platform_probe - handle a positive probe of an soc Marvell
2807 * host
2808 * @pdev: platform device found
2809 *
2810 * LOCKING:
2811 * Inherited from caller.
2812 */
2813static int mv_platform_probe(struct platform_device *pdev)
2814{
2815 static int printed_version;
2816 const struct mv_sata_platform_data *mv_platform_data;
2817 const struct ata_port_info *ppi[] =
2818 { &mv_port_info[chip_soc], NULL };
2819 struct ata_host *host;
2820 struct mv_host_priv *hpriv;
2821 struct resource *res;
2822 int n_ports, rc;
2823
2824 if (!printed_version++)
2825 dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
2826
2827 /*
2828 * Simple resource validation ..
2829 */
2830 if (unlikely(pdev->num_resources != 2)) {
2831 dev_err(&pdev->dev, "invalid number of resources\n");
2832 return -EINVAL;
2833 }
2834
2835 /*
2836 * Get the register base first
2837 */
2838 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2839 if (res == NULL)
2840 return -EINVAL;
2841
2842 /* allocate host */
2843 mv_platform_data = pdev->dev.platform_data;
2844 n_ports = mv_platform_data->n_ports;
2845
2846 host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
2847 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
2848
2849 if (!host || !hpriv)
2850 return -ENOMEM;
2851 host->private_data = hpriv;
2852 hpriv->n_ports = n_ports;
2853
2854 host->iomap = NULL;
Saeed Bisharaf1cb0ea2008-02-18 07:42:28 -11002855 hpriv->base = devm_ioremap(&pdev->dev, res->start,
2856 res->end - res->start + 1);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002857 hpriv->base -= MV_SATAHC0_REG_BASE;
2858
Lennert Buytenhek15a32632008-03-27 14:51:39 -04002859 /*
2860 * (Re-)program MBUS remapping windows if we are asked to.
2861 */
2862 if (mv_platform_data->dram != NULL)
2863 mv_conf_mbus_windows(hpriv, mv_platform_data->dram);
2864
Byron Bradleyfbf14e22008-02-10 21:17:30 +00002865 rc = mv_create_dma_pools(hpriv, &pdev->dev);
2866 if (rc)
2867 return rc;
2868
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002869 /* initialize adapter */
2870 rc = mv_init_host(host, chip_soc);
2871 if (rc)
2872 return rc;
2873
2874 dev_printk(KERN_INFO, &pdev->dev,
2875 "slots %u ports %d\n", (unsigned)MV_MAX_Q_DEPTH,
2876 host->n_ports);
2877
2878 return ata_host_activate(host, platform_get_irq(pdev, 0), mv_interrupt,
2879 IRQF_SHARED, &mv6_sht);
2880}
2881
2882/*
2883 *
2884 * mv_platform_remove - unplug a platform interface
2885 * @pdev: platform device
2886 *
2887 * A platform bus SATA device has been unplugged. Perform the needed
2888 * cleanup. Also called on module unload for any active devices.
2889 */
2890static int __devexit mv_platform_remove(struct platform_device *pdev)
2891{
2892 struct device *dev = &pdev->dev;
2893 struct ata_host *host = dev_get_drvdata(dev);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002894
2895 ata_host_detach(host);
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002896 return 0;
2897}
2898
/* Platform-bus (SoC) glue: binds the integrated Marvell SATA core. */
static struct platform_driver mv_platform_driver = {
	.probe			= mv_platform_probe,
	.remove			= __devexit_p(mv_platform_remove),
	.driver			= {
				   .name = DRV_NAME,
				   .owner = THIS_MODULE,
				  },
};
2907
2908
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002909#ifdef CONFIG_PCI
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05002910static int mv_pci_init_one(struct pci_dev *pdev,
2911 const struct pci_device_id *ent);
2912
Saeed Bishara7bb3c522008-01-30 11:50:45 -11002913
/* PCI glue: probe via mv_pci_init_one; libata handles removal. */
static struct pci_driver mv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= mv_pci_tbl,
	.probe			= mv_pci_init_one,
	.remove			= ata_pci_remove_one,
};
2920
2921/*
2922 * module options
2923 */
2924static int msi; /* Use PCI msi; either zero (off, default) or non-zero */
2925
2926
2927/* move to PCI layer or libata core? */
2928static int pci_go_64(struct pci_dev *pdev)
2929{
2930 int rc;
2931
2932 if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
2933 rc = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
2934 if (rc) {
2935 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2936 if (rc) {
2937 dev_printk(KERN_ERR, &pdev->dev,
2938 "64-bit DMA enable failed\n");
2939 return rc;
2940 }
2941 }
2942 } else {
2943 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
2944 if (rc) {
2945 dev_printk(KERN_ERR, &pdev->dev,
2946 "32-bit DMA enable failed\n");
2947 return rc;
2948 }
2949 rc = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
2950 if (rc) {
2951 dev_printk(KERN_ERR, &pdev->dev,
2952 "32-bit consistent DMA enable failed\n");
2953 return rc;
2954 }
2955 }
2956
2957 return rc;
2958}
2959
Brett Russ05b308e2005-10-05 17:08:53 -04002960/**
2961 * mv_print_info - Dump key info to kernel log for perusal.
Tejun Heo4447d352007-04-17 23:44:08 +09002962 * @host: ATA host to print info about
Brett Russ05b308e2005-10-05 17:08:53 -04002963 *
2964 * FIXME: complete this.
2965 *
2966 * LOCKING:
2967 * Inherited from caller.
2968 */
Tejun Heo4447d352007-04-17 23:44:08 +09002969static void mv_print_info(struct ata_host *host)
Brett Russ31961942005-09-30 01:36:00 -04002970{
Tejun Heo4447d352007-04-17 23:44:08 +09002971 struct pci_dev *pdev = to_pci_dev(host->dev);
2972 struct mv_host_priv *hpriv = host->private_data;
Auke Kok44c10132007-06-08 15:46:36 -07002973 u8 scc;
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002974 const char *scc_s, *gen;
Brett Russ31961942005-09-30 01:36:00 -04002975
2976 /* Use this to determine the HW stepping of the chip so we know
2977 * what errata to workaround
2978 */
Brett Russ31961942005-09-30 01:36:00 -04002979 pci_read_config_byte(pdev, PCI_CLASS_DEVICE, &scc);
2980 if (scc == 0)
2981 scc_s = "SCSI";
2982 else if (scc == 0x01)
2983 scc_s = "RAID";
2984 else
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002985 scc_s = "?";
2986
2987 if (IS_GEN_I(hpriv))
2988 gen = "I";
2989 else if (IS_GEN_II(hpriv))
2990 gen = "II";
2991 else if (IS_GEN_IIE(hpriv))
2992 gen = "IIE";
2993 else
2994 gen = "?";
Brett Russ31961942005-09-30 01:36:00 -04002995
Jeff Garzika9524a72005-10-30 14:39:11 -05002996 dev_printk(KERN_INFO, &pdev->dev,
Jeff Garzikc1e4fe72007-07-09 12:29:31 -04002997 "Gen-%s %u slots %u ports %s mode IRQ via %s\n",
2998 gen, (unsigned)MV_MAX_Q_DEPTH, host->n_ports,
Brett Russ31961942005-09-30 01:36:00 -04002999 scc_s, (MV_HP_FLAG_MSI & hpriv->hp_flags) ? "MSI" : "INTx");
3000}
3001
/**
 *      mv_pci_init_one - handle a positive probe of a PCI Marvell host
 *      @pdev: PCI device found
 *      @ent: PCI device ID entry for the matched host
 *
 *      Allocates the ATA host and private data, maps BAR registers,
 *      configures DMA masks, creates DMA pools, initializes the chip
 *      and activates the host.  All resources here are managed
 *      (pcim_*/devm_*), so error paths may return directly without
 *      explicit unwinding.
 *
 *      LOCKING:
 *      Inherited from caller.
 */
static int mv_pci_init_one(struct pci_dev *pdev,
			   const struct pci_device_id *ent)
{
	static int printed_version;
	/* driver_data in the PCI ID table selects the chip variant */
	unsigned int board_idx = (unsigned int)ent->driver_data;
	const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL };
	struct ata_host *host;
	struct mv_host_priv *hpriv;
	int n_ports, rc;

	if (!printed_version++)
		dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");

	/* allocate host */
	n_ports = mv_get_hc_count(ppi[0]->flags) * MV_PORTS_PER_HC;

	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!host || !hpriv)
		return -ENOMEM;
	host->private_data = hpriv;
	hpriv->n_ports = n_ports;

	/* acquire resources */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
	if (rc == -EBUSY)
		/* keep the device enabled so the region owner survives */
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);
	hpriv->base = host->iomap[MV_PRIMARY_BAR];

	/* prefer 64-bit DMA, falling back to 32-bit as needed */
	rc = pci_go_64(pdev);
	if (rc)
		return rc;

	rc = mv_create_dma_pools(hpriv, &pdev->dev);
	if (rc)
		return rc;

	/* initialize adapter */
	rc = mv_init_host(host, board_idx);
	if (rc)
		return rc;

	/* Enable interrupts */
	/* if MSI is requested but cannot be enabled, force legacy INTx on */
	if (msi && pci_enable_msi(pdev))
		pci_intx(pdev, 1);

	mv_dump_pci_cfg(pdev, 0x68);
	mv_print_info(host);

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	return ata_host_activate(host, pdev->irq, mv_interrupt, IRQF_SHARED,
				 IS_GEN_I(hpriv) ? &mv5_sht : &mv6_sht);
}
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003071#endif
Brett Russ20f733e2005-09-01 18:26:17 -04003072
Saeed Bisharaf351b2d2008-02-01 18:08:03 -05003073static int mv_platform_probe(struct platform_device *pdev);
3074static int __devexit mv_platform_remove(struct platform_device *pdev);
3075
/*
 * Module entry point: register the PCI driver (when PCI support is
 * built in) and then the platform driver.  If the platform driver
 * fails to register, the already-registered PCI driver is unwound so
 * the module never loads half-registered.
 */
static int __init mv_init(void)
{
	int rc = -ENODEV;
#ifdef CONFIG_PCI
	rc = pci_register_driver(&mv_pci_driver);
	if (rc < 0)
		return rc;
#endif
	rc = platform_driver_register(&mv_platform_driver);

#ifdef CONFIG_PCI
	/* platform registration failed: undo the PCI registration */
	if (rc < 0)
		pci_unregister_driver(&mv_pci_driver);
#endif
	return rc;
}
3092
/* Module exit: unregister both bus drivers (reverse of mv_init). */
static void __exit mv_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&mv_pci_driver);
#endif
	platform_driver_unregister(&mv_platform_driver);
}
3100
3101MODULE_AUTHOR("Brett Russ");
3102MODULE_DESCRIPTION("SCSI low-level driver for Marvell SATA controllers");
3103MODULE_LICENSE("GPL");
3104MODULE_DEVICE_TABLE(pci, mv_pci_tbl);
3105MODULE_VERSION(DRV_VERSION);
Mark Lord17c5aab2008-04-16 14:56:51 -04003106MODULE_ALIAS("platform:" DRV_NAME);
Brett Russ20f733e2005-09-01 18:26:17 -04003107
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003108#ifdef CONFIG_PCI
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003109module_param(msi, int, 0444);
3110MODULE_PARM_DESC(msi, "Enable use of PCI MSI (0=off, 1=on)");
Saeed Bishara7bb3c522008-01-30 11:50:45 -11003111#endif
Jeff Garzikddef9bb2006-02-02 16:17:06 -05003112
Brett Russ20f733e2005-09-01 18:26:17 -04003113module_init(mv_init);
3114module_exit(mv_exit);