blob: 54bfab15c74a997b0bd55f5739fe7b49e4e929c2 [file] [log] [blame]
Thomas Gleixnerc82ee6d2019-05-19 15:51:48 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * sata_nv.c - NVIDIA nForce SATA
4 *
5 * Copyright 2004 NVIDIA Corp. All rights reserved.
6 * Copyright 2004 Andrew Chew
7 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -04008 * libata documentation is available via 'make {ps|pdf}docs',
Mauro Carvalho Chehab19285f32017-05-14 11:52:56 -03009 * as Documentation/driver-api/libata.rst
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040010 *
11 * No hardware documentation available outside of NVIDIA.
12 * This driver programs the NVIDIA SATA controller in a similar
13 * fashion as with other PCI IDE BMDMA controllers, with a few
14 * NV-specific details such as register offsets, SATA phy location,
15 * hotplug info, etc.
16 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070017 * CK804/MCP04 controllers support an alternate programming interface
18 * similar to the ADMA specification (with some modifications).
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20 * sent through the legacy interface.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 */
22
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/kernel.h>
24#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090025#include <linux/gfp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/pci.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/blkdev.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050030#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070032#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/libata.h>
34
35#define DRV_NAME "sata_nv"
Jeff Garzik2a3103c2007-08-31 04:54:06 -040036#define DRV_VERSION "3.5"
Robert Hancockfbbb2622006-10-27 19:08:41 -070037
38#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
/*
 * Register offsets, bit masks and size constants for the nForce SATA
 * controllers.  Offsets are relative either to PCI config space, to the
 * legacy I/O ranges, or to BAR5 (NV_MMIO_BAR) as noted per group.
 */
enum {
	NV_MMIO_BAR			= 5,	/* BAR holding the MMIO register space */

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	/* mask of interrupts we act on; NV_INT_PM is deliberately excluded */
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA ring/geometry constants */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	/* APRDs that fit in the 1KB-per-command area after the CPB itself */
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	/* plus the 5 APRDs embedded inside the CPB (see struct nv_adma_cpb) */
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags (nv_adma_port_priv.flags) */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS */
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits */
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
Robert Hancockfbbb2622006-10-27 19:08:41 -0700187/* ADMA Physical Region Descriptor - one SG segment */
188struct nv_adma_prd {
189 __le64 addr;
190 __le32 len;
191 u8 flags;
192 u8 packet_len;
193 __le16 reserved;
194};
195
/* Bits encoded into the CPB taskfile words (struct nv_adma_cpb.tf[]) */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
205
/* ADMA Command Parameter Block (NV_ADMA_CPB_SZ == 128 bytes).
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd.  Numbers in the trailing comments are
   byte offsets within the 128-byte block. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0: NV_CPB_RESP_* */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2: NV_CPB_CTL_* */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4: NCQ tag */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31: taskfile, see enum nv_adma_regbits */
	struct nv_adma_prd	aprd[5];       /* 32-111: embedded SG segments */
	__le64			next_aprd;     /* 112-119: DMA addr of overflow APRDs */
	__le64			reserved3;     /* 120-127 */
};
224
225
/* Per-port private state for the ADMA programming interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB ring */
	dma_addr_t		cpb_dma;	/* its DMA mapping */
	struct nv_adma_prd	*aprd;		/* overflow APRD (SG) tables */
	dma_addr_t		aprd_dma;
	void __iomem		*ctl_block;	/* this port's ADMA registers */
	void __iomem		*gen_block;	/* shared ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;	/* was the last issued command NCQ? */
};
238
/* Per-host private data: remembers the board type (enum nv_host_type). */
struct nv_host_priv {
	unsigned long		type;
};
242
/* FIFO of deferred command tags for software NCQ (head/tail circular queue). */
struct defer_queue {
	u32		defer_bits;	/* bitmask of tags currently queued */
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
249
/* Flags recording which FIS types were observed during SWNCQ IRQ analysis. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
256
/* Per-port private state for the software-NCQ (MCP5x) path. */
struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;	/* SActive register */
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;	/* tags of commands in flight */

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis; one bit per tag */
	u32		dhfis_bits;
	u32		dmafis_bits;
	u32		sdbfis_bits;

	unsigned int	ncq_flags;	/* enum ncq_saw_flag_list */
};
277
278
/* Test the per-port ADMA interrupt bit in GCTL: bit 19 for port 0, bit 31 for port 1. */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700280
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400281static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
Bartlomiej Zolnierkiewicz58eb8cd2014-05-07 17:17:44 +0200282#ifdef CONFIG_PM_SLEEP
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600283static int nv_pci_device_resume(struct pci_dev *pdev);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900284#endif
Jeff Garzikcca39742006-08-24 03:19:22 -0400285static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100286static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
287static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
288static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Tejun Heo82ef04f2008-07-31 17:02:40 +0900289static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
290static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291
Tejun Heo7f4774b2009-06-10 16:29:07 +0900292static int nv_hardreset(struct ata_link *link, unsigned int *class,
293 unsigned long deadline);
Tejun Heo39f87582006-06-17 15:49:56 +0900294static void nv_nf2_freeze(struct ata_port *ap);
295static void nv_nf2_thaw(struct ata_port *ap);
296static void nv_ck804_freeze(struct ata_port *ap);
297static void nv_ck804_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700298static int nv_adma_slave_config(struct scsi_device *sdev);
Robert Hancock2dec7552006-11-26 14:20:19 -0600299static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700300static void nv_adma_qc_prep(struct ata_queued_cmd *qc);
301static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
302static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
303static void nv_adma_irq_clear(struct ata_port *ap);
304static int nv_adma_port_start(struct ata_port *ap);
305static void nv_adma_port_stop(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900306#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600307static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
308static int nv_adma_port_resume(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900309#endif
Robert Hancock53014e22007-05-05 15:36:36 -0600310static void nv_adma_freeze(struct ata_port *ap);
311static void nv_adma_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700312static void nv_adma_error_handler(struct ata_port *ap);
313static void nv_adma_host_stop(struct ata_host *host);
Robert Hancockf5ecac22007-02-20 21:49:10 -0600314static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
Robert Hancockf2fb3442007-03-26 21:43:36 -0800315static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
Tejun Heo39f87582006-06-17 15:49:56 +0900316
Kuan Luof140f0f2007-10-15 15:16:53 -0400317static void nv_mcp55_thaw(struct ata_port *ap);
318static void nv_mcp55_freeze(struct ata_port *ap);
319static void nv_swncq_error_handler(struct ata_port *ap);
320static int nv_swncq_slave_config(struct scsi_device *sdev);
321static int nv_swncq_port_start(struct ata_port *ap);
322static void nv_swncq_qc_prep(struct ata_queued_cmd *qc);
323static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
324static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
325static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
326static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
327#ifdef CONFIG_PM
328static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
329static int nv_swncq_port_resume(struct ata_port *ap);
330#endif
331
/* Board types; values double as indices into nv_port_info[]. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};
342
/* PCI IDs handled by this driver; driver_data carries the board type
 * (enum nv_host_type). */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
361
/* PCI driver glue; suspend uses the generic libata helper, resume needs
 * the driver-specific nv_pci_device_resume. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
372
/* SCSI host template for the plain BMDMA (non-NCQ) flavors. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
376
/* SCSI host template for ADMA: NCQ-capable, with ADMA-specific queue
 * depth, SG table size and DMA boundary. */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
384
/* SCSI host template for software NCQ on MCP5x. */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
392
Tejun Heo7f4774b2009-06-10 16:29:07 +0900393/*
394 * NV SATA controllers have various different problems with hardreset
395 * protocol depending on the specific controller and device.
396 *
397 * GENERIC:
398 *
399 * bko11195 reports that link doesn't come online after hardreset on
400 * generic nv's and there have been several other similar reports on
401 * linux-ide.
402 *
403 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
404 * softreset.
405 *
406 * NF2/3:
407 *
408 * bko3352 reports nf2/3 controllers can't determine device signature
409 * reliably after hardreset. The following thread reports detection
410 * failure on cold boot with the standard debouncing timing.
411 *
412 * http://thread.gmane.org/gmane.linux.ide/34098
413 *
414 * bko12176 reports that hardreset fails to bring up the link during
415 * boot on nf2.
416 *
417 * CK804:
418 *
419 * For initial probing after boot and hot plugging, hardreset mostly
420 * works fine on CK804 but curiously, reprobing on the initial port
421 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
422 * FIS in somewhat undeterministic way.
423 *
424 * SWNCQ:
425 *
426 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
427 * hardreset should be used and hardreset can't report proper
428 * signature, which suggests that mcp5x is closer to nf2 as long as
429 * reset quirkiness is concerned.
430 *
431 * bko12703 reports that boot probing fails for intel SSD with
432 * hardreset. Link fails to come online. Softreset works fine.
433 *
434 * The failures are varied but the following patterns seem true for
435 * all flavors.
436 *
437 * - Softreset during boot always works.
438 *
439 * - Hardreset during boot sometimes fails to bring up the link on
 * certain combinations and device signature acquisition is
441 * unreliable.
442 *
443 * - Hardreset is often necessary after hotplug.
444 *
445 * So, preferring softreset for boot probing and error handling (as
446 * hardreset might bring down the link) but using hardreset for
447 * post-boot probing should work around the above issues in most
448 * cases. Define nv_hardreset() which only kicks in for post-boot
449 * probing and use it for all variants.
450 */
/* Base ops shared (via .inherits) by all flavors; see the reset-quirk
 * discussion above for why hardreset is overridden. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};
458
/* nForce2/3: generic ops plus NF2-specific freeze/thaw. */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
464
/* CK804/MCP04: generic ops plus CK804-specific freeze/thaw/host_stop. */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
471
/* ADMA interface: builds on CK804 ops, replacing command issue, IRQ
 * handling and error handling with ADMA-aware variants. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
495
/* Software NCQ (MCP5x): generic ops plus SWNCQ command issue and
 * MCP55-specific freeze/thaw. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
513
/* Per-board private data stored in ata_port_info.private_data: the IRQ
 * handler and SCSI host template to use for that flavor. */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

/* Build an anonymous nv_pi_priv via a compound literal. */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
521
/* Port info per board type; indexed by enum nv_host_type. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
578
579MODULE_AUTHOR("NVIDIA");
580MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
581MODULE_LICENSE("GPL");
582MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
583MODULE_VERSION(DRV_VERSION);
584
/* Module parameter backing variables; presumably registered with
 * module_param() later in the file (not visible in this chunk). */
static bool adma_enabled;		/* ADMA interface: off by default */
static bool swncq_enabled = true;	/* software NCQ: on by default */
static bool msi_enabled;		/* MSI interrupts: off by default */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700588
Robert Hancock2dec7552006-11-26 14:20:19 -0600589static void nv_adma_register_mode(struct ata_port *ap)
590{
Robert Hancock2dec7552006-11-26 14:20:19 -0600591 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600592 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800593 u16 tmp, status;
594 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600595
596 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
597 return;
598
Robert Hancocka2cfe812007-02-05 16:26:03 -0800599 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400600 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800601 ndelay(50);
602 status = readw(mmio + NV_ADMA_STAT);
603 count++;
604 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400605 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700606 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
607 status);
Robert Hancocka2cfe812007-02-05 16:26:03 -0800608
Robert Hancock2dec7552006-11-26 14:20:19 -0600609 tmp = readw(mmio + NV_ADMA_CTL);
610 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
611
Robert Hancocka2cfe812007-02-05 16:26:03 -0800612 count = 0;
613 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400614 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800615 ndelay(50);
616 status = readw(mmio + NV_ADMA_STAT);
617 count++;
618 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400619 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700620 ata_port_warn(ap,
621 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
622 status);
Robert Hancocka2cfe812007-02-05 16:26:03 -0800623
Robert Hancock2dec7552006-11-26 14:20:19 -0600624 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
625}
626
627static void nv_adma_mode(struct ata_port *ap)
628{
Robert Hancock2dec7552006-11-26 14:20:19 -0600629 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600630 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800631 u16 tmp, status;
632 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600633
634 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
635 return;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500636
Robert Hancock2dec7552006-11-26 14:20:19 -0600637 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
638
639 tmp = readw(mmio + NV_ADMA_CTL);
640 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
641
Robert Hancocka2cfe812007-02-05 16:26:03 -0800642 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400643 while (((status & NV_ADMA_STAT_LEGACY) ||
Robert Hancocka2cfe812007-02-05 16:26:03 -0800644 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
645 ndelay(50);
646 status = readw(mmio + NV_ADMA_STAT);
647 count++;
648 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400649 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700650 ata_port_warn(ap,
Robert Hancocka2cfe812007-02-05 16:26:03 -0800651 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
652 status);
653
Robert Hancock2dec7552006-11-26 14:20:19 -0600654 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
655}
656
Robert Hancockfbbb2622006-10-27 19:08:41 -0700657static int nv_adma_slave_config(struct scsi_device *sdev)
658{
659 struct ata_port *ap = ata_shost_to_port(sdev->host);
Robert Hancock2dec7552006-11-26 14:20:19 -0600660 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancock8959d302008-02-04 19:39:02 -0600661 struct nv_adma_port_priv *port0, *port1;
Robert Hancock2dec7552006-11-26 14:20:19 -0600662 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
Robert Hancock8959d302008-02-04 19:39:02 -0600663 unsigned long segment_boundary, flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700664 unsigned short sg_tablesize;
665 int rc;
Robert Hancock2dec7552006-11-26 14:20:19 -0600666 int adma_enable;
667 u32 current_reg, new_reg, config_mask;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700668
669 rc = ata_scsi_slave_config(sdev);
670
671 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
672 /* Not a proper libata device, ignore */
673 return rc;
674
Robert Hancock8959d302008-02-04 19:39:02 -0600675 spin_lock_irqsave(ap->lock, flags);
676
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900677 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700678 /*
679 * NVIDIA reports that ADMA mode does not support ATAPI commands.
680 * Therefore ATAPI commands are sent through the legacy interface.
681 * However, the legacy interface only supports 32-bit DMA.
682 * Restrict DMA parameters as required by the legacy interface
683 * when an ATAPI device is connected.
684 */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700685 segment_boundary = ATA_DMA_BOUNDARY;
686 /* Subtract 1 since an extra entry may be needed for padding, see
687 libata-scsi.c */
688 sg_tablesize = LIBATA_MAX_PRD - 1;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500689
Robert Hancock2dec7552006-11-26 14:20:19 -0600690 /* Since the legacy DMA engine is in use, we need to disable ADMA
691 on the port. */
692 adma_enable = 0;
693 nv_adma_register_mode(ap);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400694 } else {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700695 segment_boundary = NV_ADMA_DMA_BOUNDARY;
696 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
Robert Hancock2dec7552006-11-26 14:20:19 -0600697 adma_enable = 1;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700698 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500699
Robert Hancock2dec7552006-11-26 14:20:19 -0600700 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700701
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400702 if (ap->port_no == 1)
Robert Hancock2dec7552006-11-26 14:20:19 -0600703 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
704 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
705 else
706 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
707 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500708
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400709 if (adma_enable) {
Robert Hancock2dec7552006-11-26 14:20:19 -0600710 new_reg = current_reg | config_mask;
711 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400712 } else {
Robert Hancock2dec7552006-11-26 14:20:19 -0600713 new_reg = current_reg & ~config_mask;
714 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
715 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500716
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400717 if (current_reg != new_reg)
Robert Hancock2dec7552006-11-26 14:20:19 -0600718 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500719
Robert Hancock8959d302008-02-04 19:39:02 -0600720 port0 = ap->host->ports[0]->private_data;
721 port1 = ap->host->ports[1]->private_data;
Robert Hancock8959d302008-02-04 19:39:02 -0600722 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
723 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
Christoph Hellwig258c9fd2018-05-09 16:01:00 +0200724 /*
725 * We have to set the DMA mask to 32-bit if either port is in
726 * ATAPI mode, since they are on the same PCI device which is
727 * used for DMA mapping. If either SCSI device is not allocated
728 * yet, it's OK since that port will discover its correct
729 * setting when it does get allocated.
730 */
731 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
Robert Hancock8959d302008-02-04 19:39:02 -0600732 } else {
Christoph Hellwig258c9fd2018-05-09 16:01:00 +0200733 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
Robert Hancock8959d302008-02-04 19:39:02 -0600734 }
735
Robert Hancockfbbb2622006-10-27 19:08:41 -0700736 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
Martin K. Petersen8a783622010-02-26 00:20:39 -0500737 blk_queue_max_segments(sdev->request_queue, sg_tablesize);
Joe Perchesa9a79df2011-04-15 15:51:59 -0700738 ata_port_info(ap,
739 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
740 (unsigned long long)*ap->host->dev->dma_mask,
741 segment_boundary, sg_tablesize);
Robert Hancock8959d302008-02-04 19:39:02 -0600742
743 spin_unlock_irqrestore(ap->lock, flags);
744
Robert Hancockfbbb2622006-10-27 19:08:41 -0700745 return rc;
746}
747
Robert Hancock2dec7552006-11-26 14:20:19 -0600748static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
749{
750 struct nv_adma_port_priv *pp = qc->ap->private_data;
751 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
752}
753
Robert Hancockf2fb3442007-03-26 21:43:36 -0800754static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
755{
Robert Hancock3f3debd2007-11-25 16:59:36 -0600756 /* Other than when internal or pass-through commands are executed,
757 the only time this function will be called in ADMA mode will be
758 if a command fails. In the failure case we don't care about going
759 into register mode with ADMA commands pending, as the commands will
760 all shortly be aborted anyway. We assume that NCQ commands are not
761 issued via passthrough, which is the only way that switching into
762 ADMA mode could abort outstanding commands. */
Robert Hancockf2fb3442007-03-26 21:43:36 -0800763 nv_adma_register_mode(ap);
764
Tejun Heo9363c382008-04-07 22:47:16 +0900765 ata_sff_tf_read(ap, tf);
Robert Hancockf2fb3442007-03-26 21:43:36 -0800766}
767
Robert Hancock2dec7552006-11-26 14:20:19 -0600768static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700769{
770 unsigned int idx = 0;
771
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400772 if (tf->flags & ATA_TFLAG_ISADDR) {
Robert Hancockac3d6b82007-02-19 19:02:46 -0600773 if (tf->flags & ATA_TFLAG_LBA48) {
774 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
775 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
776 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
777 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
778 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
779 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
780 } else
781 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
Jeff Garzika84471f2007-02-26 05:51:33 -0500782
Robert Hancockac3d6b82007-02-19 19:02:46 -0600783 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
785 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
786 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700787 }
Jeff Garzika84471f2007-02-26 05:51:33 -0500788
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400789 if (tf->flags & ATA_TFLAG_DEVICE)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600790 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700791
792 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
Jeff Garzika84471f2007-02-26 05:51:33 -0500793
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400794 while (idx < 12)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600795 cpb[idx++] = cpu_to_le16(IGN);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700796
797 return idx;
798}
799
Robert Hancock5bd28a42007-02-05 16:26:01 -0800800static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700801{
802 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancock2dec7552006-11-26 14:20:19 -0600803 u8 flags = pp->cpb[cpb_num].resp_flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700804
805 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
806
Robert Hancock5bd28a42007-02-05 16:26:01 -0800807 if (unlikely((force_err ||
808 flags & (NV_CPB_RESP_ATA_ERR |
809 NV_CPB_RESP_CMD_ERR |
810 NV_CPB_RESP_CPB_ERR)))) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900811 struct ata_eh_info *ehi = &ap->link.eh_info;
Robert Hancock5bd28a42007-02-05 16:26:01 -0800812 int freeze = 0;
813
814 ata_ehi_clear_desc(ehi);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400815 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
Robert Hancock5bd28a42007-02-05 16:26:01 -0800816 if (flags & NV_CPB_RESP_ATA_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900817 ata_ehi_push_desc(ehi, "ATA error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800818 ehi->err_mask |= AC_ERR_DEV;
819 } else if (flags & NV_CPB_RESP_CMD_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900820 ata_ehi_push_desc(ehi, "CMD error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800821 ehi->err_mask |= AC_ERR_DEV;
822 } else if (flags & NV_CPB_RESP_CPB_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900823 ata_ehi_push_desc(ehi, "CPB error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800824 ehi->err_mask |= AC_ERR_SYSTEM;
825 freeze = 1;
826 } else {
827 /* notifier error, but no error in CPB flags? */
Tejun Heob64bbc32007-07-16 14:29:39 +0900828 ata_ehi_push_desc(ehi, "unknown");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800829 ehi->err_mask |= AC_ERR_OTHER;
830 freeze = 1;
831 }
832 /* Kill all commands. EH will determine what actually failed. */
833 if (freeze)
834 ata_port_freeze(ap);
835 else
836 ata_port_abort(ap);
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200837 return -1;
Robert Hancock5bd28a42007-02-05 16:26:01 -0800838 }
839
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200840 if (likely(flags & NV_CPB_RESP_DONE))
841 return 1;
Robert Hancock5bd28a42007-02-05 16:26:01 -0800842 return 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700843}
844
Robert Hancock2dec7552006-11-26 14:20:19 -0600845static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
846{
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900847 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
Robert Hancock2dec7552006-11-26 14:20:19 -0600848
849 /* freeze if hotplugged */
850 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
851 ata_port_freeze(ap);
852 return 1;
853 }
854
855 /* bail out if not our interrupt */
856 if (!(irq_stat & NV_INT_DEV))
857 return 0;
858
859 /* DEV interrupt w/ no active qc? */
860 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
Tejun Heo9363c382008-04-07 22:47:16 +0900861 ata_sff_check_status(ap);
Robert Hancock2dec7552006-11-26 14:20:19 -0600862 return 1;
863 }
864
865 /* handle interrupt */
Tejun Heoc3b28892010-05-19 22:10:21 +0200866 return ata_bmdma_port_intr(ap, qc);
Robert Hancock2dec7552006-11-26 14:20:19 -0600867}
868
Robert Hancockfbbb2622006-10-27 19:08:41 -0700869static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
870{
871 struct ata_host *host = dev_instance;
872 int i, handled = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600873 u32 notifier_clears[2];
Robert Hancockfbbb2622006-10-27 19:08:41 -0700874
875 spin_lock(&host->lock);
876
877 for (i = 0; i < host->n_ports; i++) {
878 struct ata_port *ap = host->ports[i];
Tejun Heo3e4ec342010-05-10 21:41:30 +0200879 struct nv_adma_port_priv *pp = ap->private_data;
880 void __iomem *mmio = pp->ctl_block;
881 u16 status;
882 u32 gen_ctl;
883 u32 notifier, notifier_error;
884
Robert Hancock2dec7552006-11-26 14:20:19 -0600885 notifier_clears[i] = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700886
Tejun Heo3e4ec342010-05-10 21:41:30 +0200887 /* if ADMA is disabled, use standard ata interrupt handler */
888 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
889 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
890 >> (NV_INT_PORT_SHIFT * i);
891 handled += nv_host_intr(ap, irq_stat);
892 continue;
893 }
Jeff Garzika617c092007-05-21 20:14:23 -0400894
Tejun Heo3e4ec342010-05-10 21:41:30 +0200895 /* if in ATA register mode, check for standard interrupts */
896 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
897 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
898 >> (NV_INT_PORT_SHIFT * i);
899 if (ata_tag_valid(ap->link.active_tag))
900 /** NV_INT_DEV indication seems unreliable
901 at times at least in ADMA mode. Force it
902 on always when a command is active, to
903 prevent losing interrupts. */
904 irq_stat |= NV_INT_DEV;
905 handled += nv_host_intr(ap, irq_stat);
906 }
Robert Hancockfbbb2622006-10-27 19:08:41 -0700907
Tejun Heo3e4ec342010-05-10 21:41:30 +0200908 notifier = readl(mmio + NV_ADMA_NOTIFIER);
909 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
910 notifier_clears[i] = notifier | notifier_error;
911
912 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
913
914 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
915 !notifier_error)
916 /* Nothing to do */
917 continue;
918
919 status = readw(mmio + NV_ADMA_STAT);
920
921 /*
922 * Clear status. Ensure the controller sees the
923 * clearing before we start looking at any of the CPB
924 * statuses, so that any CPB completions after this
925 * point in the handler will raise another interrupt.
926 */
927 writew(status, mmio + NV_ADMA_STAT);
928 readw(mmio + NV_ADMA_STAT); /* flush posted write */
929 rmb();
930
931 handled++; /* irq handled if we got here */
932
933 /* freeze if hotplugged or controller error */
934 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
935 NV_ADMA_STAT_HOTUNPLUG |
936 NV_ADMA_STAT_TIMEOUT |
937 NV_ADMA_STAT_SERROR))) {
938 struct ata_eh_info *ehi = &ap->link.eh_info;
939
940 ata_ehi_clear_desc(ehi);
941 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
942 if (status & NV_ADMA_STAT_TIMEOUT) {
943 ehi->err_mask |= AC_ERR_SYSTEM;
944 ata_ehi_push_desc(ehi, "timeout");
945 } else if (status & NV_ADMA_STAT_HOTPLUG) {
946 ata_ehi_hotplugged(ehi);
947 ata_ehi_push_desc(ehi, "hotplug");
948 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
949 ata_ehi_hotplugged(ehi);
950 ata_ehi_push_desc(ehi, "hot unplug");
951 } else if (status & NV_ADMA_STAT_SERROR) {
952 /* let EH analyze SError and figure out cause */
953 ata_ehi_push_desc(ehi, "SError");
954 } else
955 ata_ehi_push_desc(ehi, "unknown");
956 ata_port_freeze(ap);
957 continue;
958 }
959
960 if (status & (NV_ADMA_STAT_DONE |
961 NV_ADMA_STAT_CPBERR |
962 NV_ADMA_STAT_CMD_COMPLETE)) {
963 u32 check_commands = notifier_clears[i];
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200964 u32 done_mask = 0;
Tejun Heo752e3862010-06-25 15:02:59 +0200965 int pos, rc;
Tejun Heo3e4ec342010-05-10 21:41:30 +0200966
967 if (status & NV_ADMA_STAT_CPBERR) {
968 /* check all active commands */
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400969 if (ata_tag_valid(ap->link.active_tag))
Tejun Heo3e4ec342010-05-10 21:41:30 +0200970 check_commands = 1 <<
971 ap->link.active_tag;
972 else
973 check_commands = ap->link.sactive;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700974 }
975
Tejun Heo3e4ec342010-05-10 21:41:30 +0200976 /* check CPBs for completed commands */
Tejun Heo752e3862010-06-25 15:02:59 +0200977 while ((pos = ffs(check_commands))) {
Tejun Heo3e4ec342010-05-10 21:41:30 +0200978 pos--;
Tejun Heo752e3862010-06-25 15:02:59 +0200979 rc = nv_adma_check_cpb(ap, pos,
Jeff Garzik5796d1c2007-10-26 00:03:37 -0400980 notifier_error & (1 << pos));
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200981 if (rc > 0)
982 done_mask |= 1 << pos;
983 else if (unlikely(rc < 0))
Tejun Heo752e3862010-06-25 15:02:59 +0200984 check_commands = 0;
Tejun Heo3e4ec342010-05-10 21:41:30 +0200985 check_commands &= ~(1 << pos);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700986 }
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200987 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700988 }
989 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500990
Jeff Garzikb4479162007-10-25 20:47:30 -0400991 if (notifier_clears[0] || notifier_clears[1]) {
Robert Hancock2dec7552006-11-26 14:20:19 -0600992 /* Note: Both notifier clear registers must be written
993 if either is set, even if one is zero, according to NVIDIA. */
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600994 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
995 writel(notifier_clears[0], pp->notifier_clear_block);
996 pp = host->ports[1]->private_data;
997 writel(notifier_clears[1], pp->notifier_clear_block);
Robert Hancock2dec7552006-11-26 14:20:19 -0600998 }
Robert Hancockfbbb2622006-10-27 19:08:41 -0700999
1000 spin_unlock(&host->lock);
1001
1002 return IRQ_RETVAL(handled);
1003}
1004
Robert Hancock53014e22007-05-05 15:36:36 -06001005static void nv_adma_freeze(struct ata_port *ap)
1006{
1007 struct nv_adma_port_priv *pp = ap->private_data;
1008 void __iomem *mmio = pp->ctl_block;
1009 u16 tmp;
1010
1011 nv_ck804_freeze(ap);
1012
1013 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1014 return;
1015
1016 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001017 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001018 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1019
1020 /* Disable interrupt */
1021 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001022 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001023 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001024 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001025}
1026
1027static void nv_adma_thaw(struct ata_port *ap)
1028{
1029 struct nv_adma_port_priv *pp = ap->private_data;
1030 void __iomem *mmio = pp->ctl_block;
1031 u16 tmp;
1032
1033 nv_ck804_thaw(ap);
1034
1035 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1036 return;
1037
1038 /* Enable interrupt */
1039 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001040 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001041 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001042 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001043}
1044
Robert Hancockfbbb2622006-10-27 19:08:41 -07001045static void nv_adma_irq_clear(struct ata_port *ap)
1046{
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001047 struct nv_adma_port_priv *pp = ap->private_data;
1048 void __iomem *mmio = pp->ctl_block;
Robert Hancock53014e22007-05-05 15:36:36 -06001049 u32 notifier_clears[2];
1050
1051 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
Tejun Heo37f65b82010-05-19 22:10:20 +02001052 ata_bmdma_irq_clear(ap);
Robert Hancock53014e22007-05-05 15:36:36 -06001053 return;
1054 }
1055
1056 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001057 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001058 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001059
1060 /* clear ADMA status */
Robert Hancock53014e22007-05-05 15:36:36 -06001061 writew(0xffff, mmio + NV_ADMA_STAT);
Jeff Garzika617c092007-05-21 20:14:23 -04001062
Robert Hancock53014e22007-05-05 15:36:36 -06001063 /* clear notifiers - note both ports need to be written with
1064 something even though we are only clearing on one */
1065 if (ap->port_no == 0) {
1066 notifier_clears[0] = 0xFFFFFFFF;
1067 notifier_clears[1] = 0;
1068 } else {
1069 notifier_clears[0] = 0;
1070 notifier_clears[1] = 0xFFFFFFFF;
1071 }
1072 pp = ap->host->ports[0]->private_data;
1073 writel(notifier_clears[0], pp->notifier_clear_block);
1074 pp = ap->host->ports[1]->private_data;
1075 writel(notifier_clears[1], pp->notifier_clear_block);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001076}
1077
Robert Hancockf5ecac22007-02-20 21:49:10 -06001078static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001079{
Robert Hancockf5ecac22007-02-20 21:49:10 -06001080 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001081
Jeff Garzikb4479162007-10-25 20:47:30 -04001082 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
Tejun Heofe06e5f2010-05-10 21:41:39 +02001083 ata_bmdma_post_internal_cmd(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001084}
1085
1086static int nv_adma_port_start(struct ata_port *ap)
1087{
1088 struct device *dev = ap->host->dev;
1089 struct nv_adma_port_priv *pp;
1090 int rc;
1091 void *mem;
1092 dma_addr_t mem_dma;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001093 void __iomem *mmio;
Robert Hancock8959d302008-02-04 19:39:02 -06001094 struct pci_dev *pdev = to_pci_dev(dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001095 u16 tmp;
1096
1097 VPRINTK("ENTER\n");
1098
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001099 /*
1100 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1101 * pad buffers.
1102 */
1103 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Robert Hancock8959d302008-02-04 19:39:02 -06001104 if (rc)
1105 return rc;
1106
Tejun Heoc7087652010-05-10 21:41:34 +02001107 /* we might fallback to bmdma, allocate bmdma resources */
1108 rc = ata_bmdma_port_start(ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001109 if (rc)
1110 return rc;
1111
Tejun Heo24dc5f32007-01-20 16:00:28 +09001112 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1113 if (!pp)
1114 return -ENOMEM;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001115
Tejun Heo0d5ff562007-02-01 15:06:36 +09001116 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001117 ap->port_no * NV_ADMA_PORT_SIZE;
1118 pp->ctl_block = mmio;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001119 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001120 pp->notifier_clear_block = pp->gen_block +
1121 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1122
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001123 /*
1124 * Now that the legacy PRD and padding buffer are allocated we can
1125 * try to raise the DMA mask to allocate the CPB/APRD table.
1126 */
1127 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1128 if (rc) {
1129 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1130 if (rc)
1131 return rc;
1132 }
Robert Hancock8959d302008-02-04 19:39:02 -06001133 pp->adma_dma_mask = *dev->dma_mask;
1134
Tejun Heo24dc5f32007-01-20 16:00:28 +09001135 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1136 &mem_dma, GFP_KERNEL);
1137 if (!mem)
1138 return -ENOMEM;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001139 memset(mem, 0, NV_ADMA_PORT_PRIV_DMA_SZ);
1140
1141 /*
1142 * First item in chunk of DMA memory:
1143 * 128-byte command parameter block (CPB)
1144 * one for each command tag
1145 */
1146 pp->cpb = mem;
1147 pp->cpb_dma = mem_dma;
1148
1149 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001150 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001151
1152 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1153 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1154
1155 /*
1156 * Second item: block of ADMA_SGTBL_LEN s/g entries
1157 */
1158 pp->aprd = mem;
1159 pp->aprd_dma = mem_dma;
1160
1161 ap->private_data = pp;
1162
1163 /* clear any outstanding interrupt conditions */
1164 writew(0xffff, mmio + NV_ADMA_STAT);
1165
1166 /* initialize port variables */
1167 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1168
1169 /* clear CPB fetch count */
1170 writew(0, mmio + NV_ADMA_CPB_COUNT);
1171
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001172 /* clear GO for register mode, enable interrupt */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001173 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001174 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1175 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001176
1177 tmp = readw(mmio + NV_ADMA_CTL);
1178 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001179 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001180 udelay(1);
1181 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001182 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001183
1184 return 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001185}
1186
1187static void nv_adma_port_stop(struct ata_port *ap)
1188{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001189 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001190 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001191
1192 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001193 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001194}
1195
Tejun Heo438ac6d2007-03-02 17:31:26 +09001196#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001197static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1198{
1199 struct nv_adma_port_priv *pp = ap->private_data;
1200 void __iomem *mmio = pp->ctl_block;
1201
1202 /* Go to register mode - clears GO */
1203 nv_adma_register_mode(ap);
1204
1205 /* clear CPB fetch count */
1206 writew(0, mmio + NV_ADMA_CPB_COUNT);
1207
1208 /* disable interrupt, shut down port */
1209 writew(0, mmio + NV_ADMA_CTL);
1210
1211 return 0;
1212}
1213
1214static int nv_adma_port_resume(struct ata_port *ap)
1215{
1216 struct nv_adma_port_priv *pp = ap->private_data;
1217 void __iomem *mmio = pp->ctl_block;
1218 u16 tmp;
1219
1220 /* set CPB block location */
1221 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001222 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001223
1224 /* clear any outstanding interrupt conditions */
1225 writew(0xffff, mmio + NV_ADMA_STAT);
1226
1227 /* initialize port variables */
1228 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1229
1230 /* clear CPB fetch count */
1231 writew(0, mmio + NV_ADMA_CPB_COUNT);
1232
1233 /* clear GO for register mode, enable interrupt */
1234 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001235 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1236 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001237
1238 tmp = readw(mmio + NV_ADMA_CTL);
1239 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001240 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001241 udelay(1);
1242 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001243 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001244
1245 return 0;
1246}
Tejun Heo438ac6d2007-03-02 17:31:26 +09001247#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001248
Tejun Heo9a829cc2007-04-17 23:44:08 +09001249static void nv_adma_setup_port(struct ata_port *ap)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001250{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001251 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1252 struct ata_ioports *ioport = &ap->ioaddr;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001253
1254 VPRINTK("ENTER\n");
1255
Tejun Heo9a829cc2007-04-17 23:44:08 +09001256 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001257
Tejun Heo0d5ff562007-02-01 15:06:36 +09001258 ioport->cmd_addr = mmio;
1259 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001260 ioport->error_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001261 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1262 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1263 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1264 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1265 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1266 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001267 ioport->status_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001268 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001269 ioport->altstatus_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001270 ioport->ctl_addr = mmio + 0x20;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001271}
1272
Tejun Heo9a829cc2007-04-17 23:44:08 +09001273static int nv_adma_host_init(struct ata_host *host)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001274{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001275 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001276 unsigned int i;
1277 u32 tmp32;
1278
1279 VPRINTK("ENTER\n");
1280
1281 /* enable ADMA on the ports */
1282 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1283 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1284 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1285 NV_MCP_SATA_CFG_20_PORT1_EN |
1286 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1287
1288 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1289
Tejun Heo9a829cc2007-04-17 23:44:08 +09001290 for (i = 0; i < host->n_ports; i++)
1291 nv_adma_setup_port(host->ports[i]);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001292
Robert Hancockfbbb2622006-10-27 19:08:41 -07001293 return 0;
1294}
1295
1296static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1297 struct scatterlist *sg,
1298 int idx,
1299 struct nv_adma_prd *aprd)
1300{
Robert Hancock41949ed2007-02-19 19:02:27 -06001301 u8 flags = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001302 if (qc->tf.flags & ATA_TFLAG_WRITE)
1303 flags |= NV_APRD_WRITE;
1304 if (idx == qc->n_elem - 1)
1305 flags |= NV_APRD_END;
1306 else if (idx != 4)
1307 flags |= NV_APRD_CONT;
1308
1309 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1310 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
Robert Hancock2dec7552006-11-26 14:20:19 -06001311 aprd->flags = flags;
Robert Hancock41949ed2007-02-19 19:02:27 -06001312 aprd->packet_len = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001313}
1314
1315static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1316{
1317 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001318 struct nv_adma_prd *aprd;
1319 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001320 unsigned int si;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001321
1322 VPRINTK("ENTER\n");
1323
Tejun Heoff2aeb12007-12-05 16:43:11 +09001324 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1325 aprd = (si < 5) ? &cpb->aprd[si] :
Jens Axboe4e5b6262018-05-11 12:51:04 -06001326 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001327 nv_adma_fill_aprd(qc, sg, si, aprd);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001328 }
Tejun Heoff2aeb12007-12-05 16:43:11 +09001329 if (si > 5)
Jens Axboe4e5b6262018-05-11 12:51:04 -06001330 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
Robert Hancock41949ed2007-02-19 19:02:27 -06001331 else
1332 cpb->next_aprd = cpu_to_le64(0);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001333}
1334
Robert Hancock382a6652007-02-05 16:26:02 -08001335static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1336{
1337 struct nv_adma_port_priv *pp = qc->ap->private_data;
1338
1339 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001340 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001341 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001342 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001343 return 1;
1344
Jeff Garzikb4479162007-10-25 20:47:30 -04001345 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001346 (qc->tf.protocol == ATA_PROT_NODATA))
1347 return 0;
1348
1349 return 1;
1350}
1351
Robert Hancockfbbb2622006-10-27 19:08:41 -07001352static void nv_adma_qc_prep(struct ata_queued_cmd *qc)
1353{
1354 struct nv_adma_port_priv *pp = qc->ap->private_data;
Jens Axboe4e5b6262018-05-11 12:51:04 -06001355 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
Robert Hancockfbbb2622006-10-27 19:08:41 -07001356 u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
Robert Hancockfbbb2622006-10-27 19:08:41 -07001357 NV_CPB_CTL_IEN;
1358
Robert Hancock382a6652007-02-05 16:26:02 -08001359 if (nv_adma_use_reg_mode(qc)) {
Robert Hancock3f3debd2007-11-25 16:59:36 -06001360 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1361 (qc->flags & ATA_QCFLAG_DMAMAP));
Robert Hancock2dec7552006-11-26 14:20:19 -06001362 nv_adma_register_mode(qc->ap);
Tejun Heof47451c2010-05-10 21:41:40 +02001363 ata_bmdma_qc_prep(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001364 return;
1365 }
1366
Robert Hancock41949ed2007-02-19 19:02:27 -06001367 cpb->resp_flags = NV_CPB_RESP_DONE;
1368 wmb();
1369 cpb->ctl_flags = 0;
1370 wmb();
Robert Hancockfbbb2622006-10-27 19:08:41 -07001371
1372 cpb->len = 3;
Jens Axboe4e5b6262018-05-11 12:51:04 -06001373 cpb->tag = qc->hw_tag;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001374 cpb->next_cpb_idx = 0;
1375
1376 /* turn on NCQ flags for NCQ commands */
1377 if (qc->tf.protocol == ATA_PROT_NCQ)
1378 ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;
1379
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001380 VPRINTK("qc->flags = 0x%lx\n", qc->flags);
1381
Robert Hancockfbbb2622006-10-27 19:08:41 -07001382 nv_adma_tf_to_cpb(&qc->tf, cpb->tf);
1383
Jeff Garzikb4479162007-10-25 20:47:30 -04001384 if (qc->flags & ATA_QCFLAG_DMAMAP) {
Robert Hancock382a6652007-02-05 16:26:02 -08001385 nv_adma_fill_sg(qc, cpb);
1386 ctl_flags |= NV_CPB_CTL_APRD_VALID;
1387 } else
1388 memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001389
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001390 /* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
1391 until we are finished filling in all of the contents */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001392 wmb();
1393 cpb->ctl_flags = ctl_flags;
Robert Hancock41949ed2007-02-19 19:02:27 -06001394 wmb();
1395 cpb->resp_flags = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001396}
1397
1398static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
1399{
Robert Hancock2dec7552006-11-26 14:20:19 -06001400 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001401 void __iomem *mmio = pp->ctl_block;
Robert Hancock5e5c74a2007-02-19 18:42:30 -06001402 int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001403
1404 VPRINTK("ENTER\n");
1405
Robert Hancock3f3debd2007-11-25 16:59:36 -06001406 /* We can't handle result taskfile with NCQ commands, since
1407 retrieving the taskfile switches us out of ADMA mode and would abort
1408 existing commands. */
1409 if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
1410 (qc->flags & ATA_QCFLAG_RESULT_TF))) {
Joe Perchesa9a79df2011-04-15 15:51:59 -07001411 ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
Robert Hancock3f3debd2007-11-25 16:59:36 -06001412 return AC_ERR_SYSTEM;
1413 }
1414
Robert Hancock382a6652007-02-05 16:26:02 -08001415 if (nv_adma_use_reg_mode(qc)) {
Robert Hancockfbbb2622006-10-27 19:08:41 -07001416 /* use ATA register mode */
Robert Hancock382a6652007-02-05 16:26:02 -08001417 VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
Robert Hancock3f3debd2007-11-25 16:59:36 -06001418 BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
1419 (qc->flags & ATA_QCFLAG_DMAMAP));
Robert Hancockfbbb2622006-10-27 19:08:41 -07001420 nv_adma_register_mode(qc->ap);
Tejun Heo360ff782010-05-10 21:41:42 +02001421 return ata_bmdma_qc_issue(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001422 } else
1423 nv_adma_mode(qc->ap);
1424
1425 /* write append register, command tag in lower 8 bits
1426 and (number of cpbs to append -1) in top 8 bits */
1427 wmb();
Robert Hancock5e5c74a2007-02-19 18:42:30 -06001428
Jeff Garzikb4479162007-10-25 20:47:30 -04001429 if (curr_ncq != pp->last_issue_ncq) {
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001430 /* Seems to need some delay before switching between NCQ and
1431 non-NCQ commands, else we get command timeouts and such. */
Robert Hancock5e5c74a2007-02-19 18:42:30 -06001432 udelay(20);
1433 pp->last_issue_ncq = curr_ncq;
1434 }
1435
Jens Axboe4e5b6262018-05-11 12:51:04 -06001436 writew(qc->hw_tag, mmio + NV_ADMA_APPEND);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001437
Jens Axboe4e5b6262018-05-11 12:51:04 -06001438 DPRINTK("Issued tag %u\n", qc->hw_tag);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001439
1440 return 0;
1441}
1442
David Howells7d12e782006-10-05 14:55:46 +01001443static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444{
Jeff Garzikcca39742006-08-24 03:19:22 -04001445 struct ata_host *host = dev_instance;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 unsigned int i;
1447 unsigned int handled = 0;
1448 unsigned long flags;
1449
Jeff Garzikcca39742006-08-24 03:19:22 -04001450 spin_lock_irqsave(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451
Jeff Garzikcca39742006-08-24 03:19:22 -04001452 for (i = 0; i < host->n_ports; i++) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02001453 struct ata_port *ap = host->ports[i];
1454 struct ata_queued_cmd *qc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
Tejun Heo3e4ec342010-05-10 21:41:30 +02001456 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1457 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
Tejun Heoc3b28892010-05-19 22:10:21 +02001458 handled += ata_bmdma_port_intr(ap, qc);
Tejun Heo3e4ec342010-05-10 21:41:30 +02001459 } else {
1460 /*
1461 * No request pending? Clear interrupt status
1462 * anyway, in case there's one pending.
1463 */
1464 ap->ops->sff_check_status(ap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466 }
1467
Jeff Garzikcca39742006-08-24 03:19:22 -04001468 spin_unlock_irqrestore(&host->lock, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469
1470 return IRQ_RETVAL(handled);
1471}
1472
Jeff Garzikcca39742006-08-24 03:19:22 -04001473static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001474{
1475 int i, handled = 0;
1476
Jeff Garzikcca39742006-08-24 03:19:22 -04001477 for (i = 0; i < host->n_ports; i++) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02001478 handled += nv_host_intr(host->ports[i], irq_stat);
Tejun Heoada364e2006-06-17 15:49:56 +09001479 irq_stat >>= NV_INT_PORT_SHIFT;
1480 }
1481
1482 return IRQ_RETVAL(handled);
1483}
1484
David Howells7d12e782006-10-05 14:55:46 +01001485static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001486{
Jeff Garzikcca39742006-08-24 03:19:22 -04001487 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001488 u8 irq_stat;
1489 irqreturn_t ret;
1490
Jeff Garzikcca39742006-08-24 03:19:22 -04001491 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001492 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
Jeff Garzikcca39742006-08-24 03:19:22 -04001493 ret = nv_do_interrupt(host, irq_stat);
1494 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001495
1496 return ret;
1497}
1498
David Howells7d12e782006-10-05 14:55:46 +01001499static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001500{
Jeff Garzikcca39742006-08-24 03:19:22 -04001501 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001502 u8 irq_stat;
1503 irqreturn_t ret;
1504
Jeff Garzikcca39742006-08-24 03:19:22 -04001505 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001506 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Jeff Garzikcca39742006-08-24 03:19:22 -04001507 ret = nv_do_interrupt(host, irq_stat);
1508 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001509
1510 return ret;
1511}
1512
Tejun Heo82ef04f2008-07-31 17:02:40 +09001513static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001515 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001516 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517
Tejun Heo82ef04f2008-07-31 17:02:40 +09001518 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001519 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520}
1521
Tejun Heo82ef04f2008-07-31 17:02:40 +09001522static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001525 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526
Tejun Heo82ef04f2008-07-31 17:02:40 +09001527 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001528 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529}
1530
Tejun Heo7f4774b2009-06-10 16:29:07 +09001531static int nv_hardreset(struct ata_link *link, unsigned int *class,
1532 unsigned long deadline)
Tejun Heoe8caa3c2009-01-25 11:25:22 +09001533{
Tejun Heo7f4774b2009-06-10 16:29:07 +09001534 struct ata_eh_context *ehc = &link->eh_context;
Tejun Heoe8caa3c2009-01-25 11:25:22 +09001535
Tejun Heo7f4774b2009-06-10 16:29:07 +09001536 /* Do hardreset iff it's post-boot probing, please read the
1537 * comment above port ops for details.
1538 */
1539 if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
1540 !ata_dev_enabled(link->device))
1541 sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
1542 NULL, NULL);
Tejun Heo6489e322009-10-14 11:18:28 +09001543 else {
1544 const unsigned long *timing = sata_ehc_deb_timing(ehc);
1545 int rc;
1546
1547 if (!(ehc->i.flags & ATA_EHI_QUIET))
Joe Perchesa9a79df2011-04-15 15:51:59 -07001548 ata_link_info(link,
1549 "nv: skipping hardreset on occupied port\n");
Tejun Heo6489e322009-10-14 11:18:28 +09001550
1551 /* make sure the link is online */
1552 rc = sata_link_resume(link, timing, deadline);
1553 /* whine about phy resume failure but proceed */
1554 if (rc && rc != -EOPNOTSUPP)
Joe Perchesa9a79df2011-04-15 15:51:59 -07001555 ata_link_warn(link, "failed to resume link (errno=%d)\n",
1556 rc);
Tejun Heo6489e322009-10-14 11:18:28 +09001557 }
Tejun Heo7f4774b2009-06-10 16:29:07 +09001558
1559 /* device signature acquisition is unreliable */
1560 return -EAGAIN;
Tejun Heoe8caa3c2009-01-25 11:25:22 +09001561}
1562
Tejun Heo39f87582006-06-17 15:49:56 +09001563static void nv_nf2_freeze(struct ata_port *ap)
1564{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001565 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001566 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1567 u8 mask;
1568
Tejun Heo0d5ff562007-02-01 15:06:36 +09001569 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001570 mask &= ~(NV_INT_ALL << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001571 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001572}
1573
1574static void nv_nf2_thaw(struct ata_port *ap)
1575{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001576 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001577 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1578 u8 mask;
1579
Tejun Heo0d5ff562007-02-01 15:06:36 +09001580 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
Tejun Heo39f87582006-06-17 15:49:56 +09001581
Tejun Heo0d5ff562007-02-01 15:06:36 +09001582 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001583 mask |= (NV_INT_MASK << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001584 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001585}
1586
1587static void nv_ck804_freeze(struct ata_port *ap)
1588{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001589 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001590 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1591 u8 mask;
1592
1593 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1594 mask &= ~(NV_INT_ALL << shift);
1595 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1596}
1597
1598static void nv_ck804_thaw(struct ata_port *ap)
1599{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001600 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001601 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1602 u8 mask;
1603
1604 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1605
1606 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1607 mask |= (NV_INT_MASK << shift);
1608 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1609}
1610
Kuan Luof140f0f2007-10-15 15:16:53 -04001611static void nv_mcp55_freeze(struct ata_port *ap)
1612{
1613 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1614 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1615 u32 mask;
1616
1617 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1618
1619 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1620 mask &= ~(NV_INT_ALL_MCP55 << shift);
1621 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Kuan Luof140f0f2007-10-15 15:16:53 -04001622}
1623
1624static void nv_mcp55_thaw(struct ata_port *ap)
1625{
1626 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1627 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1628 u32 mask;
1629
1630 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1631
1632 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1633 mask |= (NV_INT_MASK_MCP55 << shift);
1634 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Kuan Luof140f0f2007-10-15 15:16:53 -04001635}
1636
Robert Hancockfbbb2622006-10-27 19:08:41 -07001637static void nv_adma_error_handler(struct ata_port *ap)
1638{
1639 struct nv_adma_port_priv *pp = ap->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04001640 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001641 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001642 int i;
1643 u16 tmp;
Jeff Garzika84471f2007-02-26 05:51:33 -05001644
Jeff Garzikb4479162007-10-25 20:47:30 -04001645 if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
Robert Hancock2cb27852007-02-11 18:34:44 -06001646 u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
1647 u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
1648 u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
1649 u32 status = readw(mmio + NV_ADMA_STAT);
Robert Hancock08af7412007-02-19 19:01:59 -06001650 u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
1651 u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);
Robert Hancock2cb27852007-02-11 18:34:44 -06001652
Joe Perchesa9a79df2011-04-15 15:51:59 -07001653 ata_port_err(ap,
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001654 "EH in ADMA mode, notifier 0x%X "
Robert Hancock08af7412007-02-19 19:01:59 -06001655 "notifier_error 0x%X gen_ctl 0x%X status 0x%X "
1656 "next cpb count 0x%X next cpb idx 0x%x\n",
1657 notifier, notifier_error, gen_ctl, status,
1658 cpb_count, next_cpb_idx);
Robert Hancock2cb27852007-02-11 18:34:44 -06001659
Jeff Garzikb4479162007-10-25 20:47:30 -04001660 for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
Robert Hancock2cb27852007-02-11 18:34:44 -06001661 struct nv_adma_cpb *cpb = &pp->cpb[i];
Jeff Garzikb4479162007-10-25 20:47:30 -04001662 if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001663 ap->link.sactive & (1 << i))
Joe Perchesa9a79df2011-04-15 15:51:59 -07001664 ata_port_err(ap,
Robert Hancock2cb27852007-02-11 18:34:44 -06001665 "CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
1666 i, cpb->ctl_flags, cpb->resp_flags);
1667 }
1668 }
Robert Hancockfbbb2622006-10-27 19:08:41 -07001669
Robert Hancockfbbb2622006-10-27 19:08:41 -07001670 /* Push us back into port register mode for error handling. */
1671 nv_adma_register_mode(ap);
1672
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001673 /* Mark all of the CPBs as invalid to prevent them from
1674 being executed */
Jeff Garzikb4479162007-10-25 20:47:30 -04001675 for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001676 pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;
1677
1678 /* clear CPB fetch count */
1679 writew(0, mmio + NV_ADMA_CPB_COUNT);
1680
1681 /* Reset channel */
1682 tmp = readw(mmio + NV_ADMA_CTL);
1683 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzikb4479162007-10-25 20:47:30 -04001684 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001685 udelay(1);
1686 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzikb4479162007-10-25 20:47:30 -04001687 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001688 }
1689
Tejun Heofe06e5f2010-05-10 21:41:39 +02001690 ata_bmdma_error_handler(ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001691}
1692
Kuan Luof140f0f2007-10-15 15:16:53 -04001693static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1694{
1695 struct nv_swncq_port_priv *pp = ap->private_data;
1696 struct defer_queue *dq = &pp->defer_queue;
1697
1698 /* queue is full */
1699 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
Jens Axboe4e5b6262018-05-11 12:51:04 -06001700 dq->defer_bits |= (1 << qc->hw_tag);
1701 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
Kuan Luof140f0f2007-10-15 15:16:53 -04001702}
1703
1704static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1705{
1706 struct nv_swncq_port_priv *pp = ap->private_data;
1707 struct defer_queue *dq = &pp->defer_queue;
1708 unsigned int tag;
1709
1710 if (dq->head == dq->tail) /* null queue */
1711 return NULL;
1712
1713 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1714 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1715 WARN_ON(!(dq->defer_bits & (1 << tag)));
1716 dq->defer_bits &= ~(1 << tag);
1717
1718 return ata_qc_from_tag(ap, tag);
1719}
1720
1721static void nv_swncq_fis_reinit(struct ata_port *ap)
1722{
1723 struct nv_swncq_port_priv *pp = ap->private_data;
1724
1725 pp->dhfis_bits = 0;
1726 pp->dmafis_bits = 0;
1727 pp->sdbfis_bits = 0;
1728 pp->ncq_flags = 0;
1729}
1730
1731static void nv_swncq_pp_reinit(struct ata_port *ap)
1732{
1733 struct nv_swncq_port_priv *pp = ap->private_data;
1734 struct defer_queue *dq = &pp->defer_queue;
1735
1736 dq->head = 0;
1737 dq->tail = 0;
1738 dq->defer_bits = 0;
1739 pp->qc_active = 0;
1740 pp->last_issue_tag = ATA_TAG_POISON;
1741 nv_swncq_fis_reinit(ap);
1742}
1743
1744static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1745{
1746 struct nv_swncq_port_priv *pp = ap->private_data;
1747
1748 writew(fis, pp->irq_block);
1749}
1750
1751static void __ata_bmdma_stop(struct ata_port *ap)
1752{
1753 struct ata_queued_cmd qc;
1754
1755 qc.ap = ap;
1756 ata_bmdma_stop(&qc);
1757}
1758
1759static void nv_swncq_ncq_stop(struct ata_port *ap)
1760{
1761 struct nv_swncq_port_priv *pp = ap->private_data;
1762 unsigned int i;
1763 u32 sactive;
1764 u32 done_mask;
1765
Jens Axboee3ed89392018-05-11 12:51:05 -06001766 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
Joe Perchesa9a79df2011-04-15 15:51:59 -07001767 ap->qc_active, ap->link.sactive);
1768 ata_port_err(ap,
Kuan Luof140f0f2007-10-15 15:16:53 -04001769 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1770 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1771 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1772 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1773
Joe Perchesa9a79df2011-04-15 15:51:59 -07001774 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1775 ap->ops->sff_check_status(ap),
1776 ioread8(ap->ioaddr.error_addr));
Kuan Luof140f0f2007-10-15 15:16:53 -04001777
1778 sactive = readl(pp->sactive_block);
1779 done_mask = pp->qc_active ^ sactive;
1780
Joe Perchesa9a79df2011-04-15 15:51:59 -07001781 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
Kuan Luof140f0f2007-10-15 15:16:53 -04001782 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1783 u8 err = 0;
1784 if (pp->qc_active & (1 << i))
1785 err = 0;
1786 else if (done_mask & (1 << i))
1787 err = 1;
1788 else
1789 continue;
1790
Joe Perchesa9a79df2011-04-15 15:51:59 -07001791 ata_port_err(ap,
1792 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1793 (pp->dhfis_bits >> i) & 0x1,
1794 (pp->dmafis_bits >> i) & 0x1,
1795 (pp->sdbfis_bits >> i) & 0x1,
1796 (sactive >> i) & 0x1,
1797 (err ? "error! tag doesn't exit" : " "));
Kuan Luof140f0f2007-10-15 15:16:53 -04001798 }
1799
1800 nv_swncq_pp_reinit(ap);
Tejun Heo5682ed32008-04-07 22:47:16 +09001801 ap->ops->sff_irq_clear(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001802 __ata_bmdma_stop(ap);
1803 nv_swncq_irq_clear(ap, 0xffff);
1804}
1805
1806static void nv_swncq_error_handler(struct ata_port *ap)
1807{
1808 struct ata_eh_context *ehc = &ap->link.eh_context;
1809
1810 if (ap->link.sactive) {
1811 nv_swncq_ncq_stop(ap);
Tejun Heocf480622008-01-24 00:05:14 +09001812 ehc->i.action |= ATA_EH_RESET;
Kuan Luof140f0f2007-10-15 15:16:53 -04001813 }
1814
Tejun Heofe06e5f2010-05-10 21:41:39 +02001815 ata_bmdma_error_handler(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001816}
1817
1818#ifdef CONFIG_PM
1819static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1820{
1821 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1822 u32 tmp;
1823
1824 /* clear irq */
1825 writel(~0, mmio + NV_INT_STATUS_MCP55);
1826
1827 /* disable irq */
1828 writel(0, mmio + NV_INT_ENABLE_MCP55);
1829
1830 /* disable swncq */
1831 tmp = readl(mmio + NV_CTL_MCP55);
1832 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1833 writel(tmp, mmio + NV_CTL_MCP55);
1834
1835 return 0;
1836}
1837
1838static int nv_swncq_port_resume(struct ata_port *ap)
1839{
1840 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1841 u32 tmp;
1842
1843 /* clear irq */
1844 writel(~0, mmio + NV_INT_STATUS_MCP55);
1845
1846 /* enable irq */
1847 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1848
1849 /* enable swncq */
1850 tmp = readl(mmio + NV_CTL_MCP55);
1851 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1852
1853 return 0;
1854}
1855#endif
1856
1857static void nv_swncq_host_init(struct ata_host *host)
1858{
1859 u32 tmp;
1860 void __iomem *mmio = host->iomap[NV_MMIO_BAR];
1861 struct pci_dev *pdev = to_pci_dev(host->dev);
1862 u8 regval;
1863
1864 /* disable ECO 398 */
1865 pci_read_config_byte(pdev, 0x7f, &regval);
1866 regval &= ~(1 << 7);
1867 pci_write_config_byte(pdev, 0x7f, regval);
1868
1869 /* enable swncq */
1870 tmp = readl(mmio + NV_CTL_MCP55);
1871 VPRINTK("HOST_CTL:0x%X\n", tmp);
1872 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1873
1874 /* enable irq intr */
1875 tmp = readl(mmio + NV_INT_ENABLE_MCP55);
1876 VPRINTK("HOST_ENABLE:0x%X\n", tmp);
1877 writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1878
1879 /* clear port irq */
1880 writel(~0x0, mmio + NV_INT_STATUS_MCP55);
1881}
1882
1883static int nv_swncq_slave_config(struct scsi_device *sdev)
1884{
1885 struct ata_port *ap = ata_shost_to_port(sdev->host);
1886 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
1887 struct ata_device *dev;
1888 int rc;
1889 u8 rev;
1890 u8 check_maxtor = 0;
1891 unsigned char model_num[ATA_ID_PROD_LEN + 1];
1892
1893 rc = ata_scsi_slave_config(sdev);
1894 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
1895 /* Not a proper libata device, ignore */
1896 return rc;
1897
1898 dev = &ap->link.device[sdev->id];
1899 if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
1900 return rc;
1901
1902 /* if MCP51 and Maxtor, then disable ncq */
1903 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
1904 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
1905 check_maxtor = 1;
1906
1907 /* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
1908 if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
1909 pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
1910 pci_read_config_byte(pdev, 0x8, &rev);
1911 if (rev <= 0xa2)
1912 check_maxtor = 1;
1913 }
1914
1915 if (!check_maxtor)
1916 return rc;
1917
1918 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
1919
1920 if (strncmp(model_num, "Maxtor", 6) == 0) {
Christoph Hellwigdb5ed4d2014-11-13 15:08:42 +01001921 ata_scsi_change_queue_depth(sdev, 1);
Joe Perchesa9a79df2011-04-15 15:51:59 -07001922 ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
1923 sdev->queue_depth);
Kuan Luof140f0f2007-10-15 15:16:53 -04001924 }
1925
1926 return rc;
1927}
1928
1929static int nv_swncq_port_start(struct ata_port *ap)
1930{
1931 struct device *dev = ap->host->dev;
1932 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1933 struct nv_swncq_port_priv *pp;
1934 int rc;
1935
Tejun Heoc7087652010-05-10 21:41:34 +02001936 /* we might fallback to bmdma, allocate bmdma resources */
1937 rc = ata_bmdma_port_start(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001938 if (rc)
1939 return rc;
1940
1941 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1942 if (!pp)
1943 return -ENOMEM;
1944
1945 pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
1946 &pp->prd_dma, GFP_KERNEL);
1947 if (!pp->prd)
1948 return -ENOMEM;
1949 memset(pp->prd, 0, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE);
1950
1951 ap->private_data = pp;
1952 pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
1953 pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
1954 pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;
1955
1956 return 0;
1957}
1958
1959static void nv_swncq_qc_prep(struct ata_queued_cmd *qc)
1960{
1961 if (qc->tf.protocol != ATA_PROT_NCQ) {
Tejun Heof47451c2010-05-10 21:41:40 +02001962 ata_bmdma_qc_prep(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04001963 return;
1964 }
1965
1966 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
1967 return;
1968
1969 nv_swncq_fill_sg(qc);
1970}
1971
1972static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
1973{
1974 struct ata_port *ap = qc->ap;
1975 struct scatterlist *sg;
Kuan Luof140f0f2007-10-15 15:16:53 -04001976 struct nv_swncq_port_priv *pp = ap->private_data;
Tejun Heof60d7012010-05-10 21:41:41 +02001977 struct ata_bmdma_prd *prd;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001978 unsigned int si, idx;
Kuan Luof140f0f2007-10-15 15:16:53 -04001979
Jens Axboe4e5b6262018-05-11 12:51:04 -06001980 prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;
Kuan Luof140f0f2007-10-15 15:16:53 -04001981
1982 idx = 0;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001983 for_each_sg(qc->sg, sg, qc->n_elem, si) {
Kuan Luof140f0f2007-10-15 15:16:53 -04001984 u32 addr, offset;
1985 u32 sg_len, len;
1986
1987 addr = (u32)sg_dma_address(sg);
1988 sg_len = sg_dma_len(sg);
1989
1990 while (sg_len) {
1991 offset = addr & 0xffff;
1992 len = sg_len;
1993 if ((offset + sg_len) > 0x10000)
1994 len = 0x10000 - offset;
1995
1996 prd[idx].addr = cpu_to_le32(addr);
1997 prd[idx].flags_len = cpu_to_le32(len & 0xffff);
1998
1999 idx++;
2000 sg_len -= len;
2001 addr += len;
2002 }
2003 }
2004
Tejun Heoff2aeb12007-12-05 16:43:11 +09002005 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
Kuan Luof140f0f2007-10-15 15:16:53 -04002006}
2007
2008static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
2009 struct ata_queued_cmd *qc)
2010{
2011 struct nv_swncq_port_priv *pp = ap->private_data;
2012
2013 if (qc == NULL)
2014 return 0;
2015
2016 DPRINTK("Enter\n");
2017
Jens Axboe4e5b6262018-05-11 12:51:04 -06002018 writel((1 << qc->hw_tag), pp->sactive_block);
2019 pp->last_issue_tag = qc->hw_tag;
2020 pp->dhfis_bits &= ~(1 << qc->hw_tag);
2021 pp->dmafis_bits &= ~(1 << qc->hw_tag);
2022 pp->qc_active |= (0x1 << qc->hw_tag);
Kuan Luof140f0f2007-10-15 15:16:53 -04002023
Tejun Heo5682ed32008-04-07 22:47:16 +09002024 ap->ops->sff_tf_load(ap, &qc->tf); /* load tf registers */
2025 ap->ops->sff_exec_command(ap, &qc->tf);
Kuan Luof140f0f2007-10-15 15:16:53 -04002026
Jens Axboe4e5b6262018-05-11 12:51:04 -06002027 DPRINTK("Issued tag %u\n", qc->hw_tag);
Kuan Luof140f0f2007-10-15 15:16:53 -04002028
2029 return 0;
2030}
2031
2032static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2033{
2034 struct ata_port *ap = qc->ap;
2035 struct nv_swncq_port_priv *pp = ap->private_data;
2036
2037 if (qc->tf.protocol != ATA_PROT_NCQ)
Tejun Heo360ff782010-05-10 21:41:42 +02002038 return ata_bmdma_qc_issue(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04002039
2040 DPRINTK("Enter\n");
2041
2042 if (!pp->qc_active)
2043 nv_swncq_issue_atacmd(ap, qc);
2044 else
2045 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2046
2047 return 0;
2048}
2049
2050static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
2051{
2052 u32 serror;
2053 struct ata_eh_info *ehi = &ap->link.eh_info;
2054
2055 ata_ehi_clear_desc(ehi);
2056
2057 /* AHCI needs SError cleared; otherwise, it might lock up */
2058 sata_scr_read(&ap->link, SCR_ERROR, &serror);
2059 sata_scr_write(&ap->link, SCR_ERROR, serror);
2060
2061 /* analyze @irq_stat */
2062 if (fis & NV_SWNCQ_IRQ_ADDED)
2063 ata_ehi_push_desc(ehi, "hot plug");
2064 else if (fis & NV_SWNCQ_IRQ_REMOVED)
2065 ata_ehi_push_desc(ehi, "hot unplug");
2066
2067 ata_ehi_hotplugged(ehi);
2068
2069 /* okay, let's hand over to EH */
2070 ehi->serror |= serror;
2071
2072 ata_port_freeze(ap);
2073}
2074
2075static int nv_swncq_sdbfis(struct ata_port *ap)
2076{
2077 struct ata_queued_cmd *qc;
2078 struct nv_swncq_port_priv *pp = ap->private_data;
2079 struct ata_eh_info *ehi = &ap->link.eh_info;
2080 u32 sactive;
Kuan Luof140f0f2007-10-15 15:16:53 -04002081 u32 done_mask;
Kuan Luof140f0f2007-10-15 15:16:53 -04002082 u8 host_stat;
2083 u8 lack_dhfis = 0;
2084
2085 host_stat = ap->ops->bmdma_status(ap);
2086 if (unlikely(host_stat & ATA_DMA_ERR)) {
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002087 /* error when transferring data to/from memory */
Kuan Luof140f0f2007-10-15 15:16:53 -04002088 ata_ehi_clear_desc(ehi);
2089 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
2090 ehi->err_mask |= AC_ERR_HOST_BUS;
Tejun Heocf480622008-01-24 00:05:14 +09002091 ehi->action |= ATA_EH_RESET;
Kuan Luof140f0f2007-10-15 15:16:53 -04002092 return -EINVAL;
2093 }
2094
Tejun Heo5682ed32008-04-07 22:47:16 +09002095 ap->ops->sff_irq_clear(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04002096 __ata_bmdma_stop(ap);
2097
2098 sactive = readl(pp->sactive_block);
2099 done_mask = pp->qc_active ^ sactive;
2100
Tejun Heo1aadf5c2010-06-25 15:03:34 +02002101 pp->qc_active &= ~done_mask;
2102 pp->dhfis_bits &= ~done_mask;
2103 pp->dmafis_bits &= ~done_mask;
2104 pp->sdbfis_bits |= done_mask;
2105 ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
Kuan Luof140f0f2007-10-15 15:16:53 -04002106
2107 if (!ap->qc_active) {
2108 DPRINTK("over\n");
2109 nv_swncq_pp_reinit(ap);
Tejun Heo752e3862010-06-25 15:02:59 +02002110 return 0;
Kuan Luof140f0f2007-10-15 15:16:53 -04002111 }
2112
2113 if (pp->qc_active & pp->dhfis_bits)
Tejun Heo752e3862010-06-25 15:02:59 +02002114 return 0;
Kuan Luof140f0f2007-10-15 15:16:53 -04002115
2116 if ((pp->ncq_flags & ncq_saw_backout) ||
2117 (pp->qc_active ^ pp->dhfis_bits))
Tejun Heo752e3862010-06-25 15:02:59 +02002118 /* if the controller can't get a device to host register FIS,
Kuan Luof140f0f2007-10-15 15:16:53 -04002119 * The driver needs to reissue the new command.
2120 */
2121 lack_dhfis = 1;
2122
2123 DPRINTK("id 0x%x QC: qc_active 0x%x,"
2124 "SWNCQ:qc_active 0x%X defer_bits %X "
2125 "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
2126 ap->print_id, ap->qc_active, pp->qc_active,
2127 pp->defer_queue.defer_bits, pp->dhfis_bits,
2128 pp->dmafis_bits, pp->last_issue_tag);
2129
2130 nv_swncq_fis_reinit(ap);
2131
2132 if (lack_dhfis) {
2133 qc = ata_qc_from_tag(ap, pp->last_issue_tag);
2134 nv_swncq_issue_atacmd(ap, qc);
Tejun Heo752e3862010-06-25 15:02:59 +02002135 return 0;
Kuan Luof140f0f2007-10-15 15:16:53 -04002136 }
2137
2138 if (pp->defer_queue.defer_bits) {
2139 /* send deferral queue command */
2140 qc = nv_swncq_qc_from_dq(ap);
2141 WARN_ON(qc == NULL);
2142 nv_swncq_issue_atacmd(ap, qc);
2143 }
2144
Tejun Heo752e3862010-06-25 15:02:59 +02002145 return 0;
Kuan Luof140f0f2007-10-15 15:16:53 -04002146}
2147
2148static inline u32 nv_swncq_tag(struct ata_port *ap)
2149{
2150 struct nv_swncq_port_priv *pp = ap->private_data;
2151 u32 tag;
2152
2153 tag = readb(pp->tag_block) >> 2;
2154 return (tag & 0x1f);
2155}
2156
/* nv_swncq_dmafis - program and start a BMDMA transfer for the tag the
 * controller just sent a DMA Setup FIS for.
 *
 * Called from the SWNCQ interrupt path.  Stops any in-flight BMDMA
 * engine activity, looks up the qc for the hardware-reported tag,
 * points the engine at that tag's PRD table slice, sets the transfer
 * direction and kicks off the DMA.
 */
static void nv_swncq_dmafis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int rw;
	u8 dmactl;
	u32 tag;
	struct nv_swncq_port_priv *pp = ap->private_data;

	/* Quiesce the BMDMA engine before reprogramming it. */
	__ata_bmdma_stop(ap);
	/* The hardware tells us which tag this DMA setup is for. */
	tag = nv_swncq_tag(ap);

	DPRINTK("dma setup tag 0x%x\n", tag);
	qc = ata_qc_from_tag(ap, tag);

	/* Tag no longer maps to an active command (e.g. raced with
	 * completion/error handling) -- nothing to start.
	 */
	if (unlikely(!qc))
		return;

	rw = qc->tf.flags & ATA_TFLAG_WRITE;

	/* load PRD table addr. -- each tag owns its own ATA_PRD_TBL_SZ
	 * slice of the per-port PRD area.
	 */
	iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
		  ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear.
	 * Note ATA_DMA_WR means "DMA engine writes to memory", i.e. a
	 * device-to-host (read) transfer, hence the inverted test.
	 */
	dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~ATA_DMA_WR;
	if (!rw)
		dmactl |= ATA_DMA_WR;

	/* Go: direction bits plus start in a single write. */
	iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}
2188
/* nv_swncq_host_interrupt - per-port SWNCQ interrupt handler.
 * @ap:  port the interrupt is for
 * @fis: per-port slice of the MCP55 interrupt status register
 *
 * Decodes the FIS-receipt bits and advances the software-NCQ state
 * machine: records D2H register FIS and DMA setup FIS arrivals in the
 * port-private bitmasks, completes commands on SDB FIS, issues
 * deferred commands when the drive is ready, and freezes the port on
 * device error or an illegal FIS sequence.  Must be called with the
 * host lock held (from nv_swncq_interrupt).
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	/* Read status (also acks INTRQ -- presumably; confirm against
	 * controller docs) and clear the handled interrupt bits.
	 */
	ata_stat = ap->ops->sff_check_status(ap);
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* EH owns the port; don't touch the state machine. */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	/* No NCQ commands outstanding -- nothing for us to track. */
	if (!pp->qc_active)
		return;

	/* Read-then-write-back clears SError; bail if SCR access fails. */
	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	/* Device reported an error: hand everything to EH via freeze. */
	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		/* Set Device Bits FIS: one or more commands completed. */
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		/* nv_swncq_sdbfis() completes the done commands and may
		 * reissue/continue; a negative return means the port was
		 * frozen and we should record the error context.
		 */
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		/* Seeing SDB or backout together with the D2H ack for a
		 * fresh issue violates the expected FIS ordering.
		 */
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		/* No DMA setup seen yet for this batch: if the drive is
		 * not busy, we may pipeline the next deferred command.
		 */
		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2287
2288static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2289{
2290 struct ata_host *host = dev_instance;
2291 unsigned int i;
2292 unsigned int handled = 0;
2293 unsigned long flags;
2294 u32 irq_stat;
2295
2296 spin_lock_irqsave(&host->lock, flags);
2297
2298 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2299
2300 for (i = 0; i < host->n_ports; i++) {
2301 struct ata_port *ap = host->ports[i];
2302
Tejun Heo3e4ec342010-05-10 21:41:30 +02002303 if (ap->link.sactive) {
2304 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2305 handled = 1;
2306 } else {
2307 if (irq_stat) /* reserve Hotplug */
2308 nv_swncq_irq_clear(ap, 0xfff0);
Kuan Luof140f0f2007-10-15 15:16:53 -04002309
Tejun Heo3e4ec342010-05-10 21:41:30 +02002310 handled += nv_host_intr(ap, (u8)irq_stat);
Kuan Luof140f0f2007-10-15 15:16:53 -04002311 }
2312 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2313 }
2314
2315 spin_unlock_irqrestore(&host->lock, flags);
2316
2317 return IRQ_RETVAL(handled);
2318}
2319
/* nv_init_one - PCI probe entry point.
 * @pdev: controller being probed
 * @ent:  matching entry from the PCI ID table; driver_data carries the
 *        base controller type (GENERIC/NFORCE2/.../CK804/MCP5x)
 *
 * Selects the final operating mode (possibly upgrading CK804 to ADMA
 * or MCP5x to SWNCQ based on the module parameters), allocates and
 * wires up the libata host, maps the MMIO BAR for SCR access, enables
 * the SATA register space on CK804-class parts, performs mode-specific
 * init, and activates the host.  Returns 0 or a negative errno.
 */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { NULL, NULL };
	struct nv_pi_priv *ipriv;
	struct ata_host *host;
	struct nv_host_priv *hpriv;
	int rc;
	u32 bar;
	void __iomem *base;
	unsigned long type = ent->driver_data;

	// Make sure this is a SATA controller by counting the number of bars
	// (NVIDIA SATA controllers will always have six bars).  Otherwise,
	// it's an IDE controller and we ignore it.
	for (bar = 0; bar < 6; bar++)
		if (pci_resource_start(pdev, bar) == 0)
			return -ENODEV;

	ata_print_version_once(&pdev->dev, DRV_VERSION);

	/* Managed enable: device is disabled automatically on failure. */
	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* determine type and allocate host -- the optional modes are
	 * opt-in via the "adma"/"swncq" module parameters.
	 */
	if (type == CK804 && adma_enabled) {
		dev_notice(&pdev->dev, "Using ADMA mode\n");
		type = ADMA;
	} else if (type == MCP5x && swncq_enabled) {
		dev_notice(&pdev->dev, "Using SWNCQ mode\n");
		type = SWNCQ;
	}

	ppi[0] = &nv_port_info[type];
	ipriv = ppi[0]->private_data;
	rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
	if (rc)
		return rc;

	/* devm allocation: freed automatically on detach/probe failure. */
	hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
	if (!hpriv)
		return -ENOMEM;
	hpriv->type = type;
	host->private_data = hpriv;

	/* request and iomap NV_MMIO_BAR */
	rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
	if (rc)
		return rc;

	/* configure SCR access */
	base = host->iomap[NV_MMIO_BAR];
	host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
	host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;

	/* enable SATA space for CK804 -- the >= comparison relies on the
	 * type enum ordering placing CK804-class entries last.
	 */
	if (type >= CK804) {
		u8 regval;

		pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
		regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
		pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
	}

	/* init ADMA */
	if (type == ADMA) {
		rc = nv_adma_host_init(host);
		if (rc)
			return rc;
	} else if (type == SWNCQ)
		nv_swncq_host_init(host);

	/* pci_enable_msi() failure is intentionally ignored: the driver
	 * simply keeps running with legacy interrupts.
	 */
	if (msi_enabled) {
		dev_notice(&pdev->dev, "Using MSI\n");
		pci_enable_msi(pdev);
	}

	pci_set_master(pdev);
	return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
}
2400
#ifdef CONFIG_PM_SLEEP
/* nv_pci_device_resume - restore controller config after suspend.
 *
 * PCI config space written at probe time (SATA space enable, per-port
 * ADMA enables) is lost across a real suspend, so redo it here before
 * resuming the libata host.  The config rewrite is skipped for resume
 * events that did not power down the device.
 */
static int nv_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	struct nv_host_priv *hpriv = host->private_data;
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* Only a full suspend wipes the config space we set up in probe. */
	if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
		/* Re-enable the SATA register space on CK804-class parts. */
		if (hpriv->type >= CK804) {
			u8 regval;

			pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
			regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
			pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
		}
		if (hpriv->type == ADMA) {
			u32 tmp32;
			struct nv_adma_port_priv *pp;
			/* enable/disable ADMA on the ports appropriately:
			 * ports that fell back to legacy mode for ATAPI
			 * must stay out of ADMA after resume.
			 */
			pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);

			pp = host->ports[0]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
					   NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
					  NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
			pp = host->ports[1]->private_data;
			if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
				tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
					   NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
			else
				tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
					  NV_MCP_SATA_CFG_20_PORT1_PWB_EN);

			pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
		}
	}

	ata_host_resume(host);

	return 0;
}
#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002450
Jeff Garzikcca39742006-08-24 03:19:22 -04002451static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002452{
Jeff Garzikcca39742006-08-24 03:19:22 -04002453 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002454 u8 regval;
2455
2456 /* disable SATA space for CK804 */
2457 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2458 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2459 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002460}
2461
Robert Hancockfbbb2622006-10-27 19:08:41 -07002462static void nv_adma_host_stop(struct ata_host *host)
2463{
2464 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002465 u32 tmp32;
2466
Robert Hancockfbbb2622006-10-27 19:08:41 -07002467 /* disable ADMA on the ports */
2468 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2469 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2470 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2471 NV_MCP_SATA_CFG_20_PORT1_EN |
2472 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2473
2474 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2475
2476 nv_ck804_host_stop(host);
2477}
2478
/* Register the PCI driver and expose the mode-selection knobs as
 * read-only (0444) module parameters; they are sampled once at probe
 * time in nv_init_one().
 */
module_pci_driver(nv_pci_driver);

module_param_named(adma, adma_enabled, bool, 0444);
MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
module_param_named(swncq, swncq_enabled, bool, 0444);
MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
module_param_named(msi, msi_enabled, bool, 0444);
MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");