// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  sata_nv.c - NVIDIA nForce SATA
 *
 *  Copyright 2004 NVIDIA Corp.  All rights reserved.
 *  Copyright 2004 Andrew Chew
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/driver-api/libata.rst
 *
 *  No hardware documentation available outside of NVIDIA.
 *  This driver programs the NVIDIA SATA controller in a similar
 *  fashion as with other PCI IDE BMDMA controllers, with a few
 *  NV-specific details such as register offsets, SATA phy location,
 *  hotplug info, etc.
 *
 *  CK804/MCP04 controllers support an alternate programming interface
 *  similar to the ADMA specification (with some modifications).
 *  This allows the use of NCQ. Non-DMA-mapped ATA commands are still
 *  sent through the legacy interface.
 */
22
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/kernel.h>
24#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090025#include <linux/gfp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/pci.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/blkdev.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050030#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070032#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/libata.h>
34
#define DRV_NAME			"sata_nv"
#define DRV_VERSION			"3.5"

/* DMA segment boundary used when the ADMA engine is active (32-bit). */
#define NV_ADMA_DMA_BOUNDARY		0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Jeff Garzik10ad05d2006-03-22 23:50:50 -050040enum {
Tejun Heo0d5ff562007-02-01 15:06:36 +090041 NV_MMIO_BAR = 5,
42
Jeff Garzik10ad05d2006-03-22 23:50:50 -050043 NV_PORTS = 2,
Erik Inge Bolsø14bdef92009-03-14 21:38:24 +010044 NV_PIO_MASK = ATA_PIO4,
45 NV_MWDMA_MASK = ATA_MWDMA2,
46 NV_UDMA_MASK = ATA_UDMA6,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050047 NV_PORT0_SCR_REG_OFFSET = 0x00,
48 NV_PORT1_SCR_REG_OFFSET = 0x40,
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
Tejun Heo27e4b272006-06-17 15:49:55 +090050 /* INT_STATUS/ENABLE */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050051 NV_INT_STATUS = 0x10,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050052 NV_INT_ENABLE = 0x11,
Tejun Heo27e4b272006-06-17 15:49:55 +090053 NV_INT_STATUS_CK804 = 0x440,
Jeff Garzik10ad05d2006-03-22 23:50:50 -050054 NV_INT_ENABLE_CK804 = 0x441,
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
Tejun Heo27e4b272006-06-17 15:49:55 +090056 /* INT_STATUS/ENABLE bits */
57 NV_INT_DEV = 0x01,
58 NV_INT_PM = 0x02,
59 NV_INT_ADDED = 0x04,
60 NV_INT_REMOVED = 0x08,
61
62 NV_INT_PORT_SHIFT = 4, /* each port occupies 4 bits */
63
Tejun Heo39f87582006-06-17 15:49:56 +090064 NV_INT_ALL = 0x0f,
Tejun Heo5a44eff2006-06-17 15:49:56 +090065 NV_INT_MASK = NV_INT_DEV |
66 NV_INT_ADDED | NV_INT_REMOVED,
Tejun Heo39f87582006-06-17 15:49:56 +090067
Tejun Heo27e4b272006-06-17 15:49:55 +090068 /* INT_CONFIG */
Jeff Garzik10ad05d2006-03-22 23:50:50 -050069 NV_INT_CONFIG = 0x12,
70 NV_INT_CONFIG_METHD = 0x01, // 0 = INT, 1 = SMI
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Jeff Garzik10ad05d2006-03-22 23:50:50 -050072 // For PCI config register 20
73 NV_MCP_SATA_CFG_20 = 0x50,
74 NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
Robert Hancockfbbb2622006-10-27 19:08:41 -070075 NV_MCP_SATA_CFG_20_PORT0_EN = (1 << 17),
76 NV_MCP_SATA_CFG_20_PORT1_EN = (1 << 16),
77 NV_MCP_SATA_CFG_20_PORT0_PWB_EN = (1 << 14),
78 NV_MCP_SATA_CFG_20_PORT1_PWB_EN = (1 << 12),
79
80 NV_ADMA_MAX_CPBS = 32,
81 NV_ADMA_CPB_SZ = 128,
82 NV_ADMA_APRD_SZ = 16,
83 NV_ADMA_SGTBL_LEN = (1024 - NV_ADMA_CPB_SZ) /
84 NV_ADMA_APRD_SZ,
85 NV_ADMA_SGTBL_TOTAL_LEN = NV_ADMA_SGTBL_LEN + 5,
86 NV_ADMA_SGTBL_SZ = NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
87 NV_ADMA_PORT_PRIV_DMA_SZ = NV_ADMA_MAX_CPBS *
88 (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),
89
90 /* BAR5 offset to ADMA general registers */
91 NV_ADMA_GEN = 0x400,
92 NV_ADMA_GEN_CTL = 0x00,
93 NV_ADMA_NOTIFIER_CLEAR = 0x30,
94
95 /* BAR5 offset to ADMA ports */
96 NV_ADMA_PORT = 0x480,
97
98 /* size of ADMA port register space */
99 NV_ADMA_PORT_SIZE = 0x100,
100
101 /* ADMA port registers */
102 NV_ADMA_CTL = 0x40,
103 NV_ADMA_CPB_COUNT = 0x42,
104 NV_ADMA_NEXT_CPB_IDX = 0x43,
105 NV_ADMA_STAT = 0x44,
106 NV_ADMA_CPB_BASE_LOW = 0x48,
107 NV_ADMA_CPB_BASE_HIGH = 0x4C,
108 NV_ADMA_APPEND = 0x50,
109 NV_ADMA_NOTIFIER = 0x68,
110 NV_ADMA_NOTIFIER_ERROR = 0x6C,
111
112 /* NV_ADMA_CTL register bits */
113 NV_ADMA_CTL_HOTPLUG_IEN = (1 << 0),
114 NV_ADMA_CTL_CHANNEL_RESET = (1 << 5),
115 NV_ADMA_CTL_GO = (1 << 7),
116 NV_ADMA_CTL_AIEN = (1 << 8),
117 NV_ADMA_CTL_READ_NON_COHERENT = (1 << 11),
118 NV_ADMA_CTL_WRITE_NON_COHERENT = (1 << 12),
119
120 /* CPB response flag bits */
121 NV_CPB_RESP_DONE = (1 << 0),
122 NV_CPB_RESP_ATA_ERR = (1 << 3),
123 NV_CPB_RESP_CMD_ERR = (1 << 4),
124 NV_CPB_RESP_CPB_ERR = (1 << 7),
125
126 /* CPB control flag bits */
127 NV_CPB_CTL_CPB_VALID = (1 << 0),
128 NV_CPB_CTL_QUEUE = (1 << 1),
129 NV_CPB_CTL_APRD_VALID = (1 << 2),
130 NV_CPB_CTL_IEN = (1 << 3),
131 NV_CPB_CTL_FPDMA = (1 << 4),
132
133 /* APRD flags */
134 NV_APRD_WRITE = (1 << 1),
135 NV_APRD_END = (1 << 2),
136 NV_APRD_CONT = (1 << 3),
137
138 /* NV_ADMA_STAT flags */
139 NV_ADMA_STAT_TIMEOUT = (1 << 0),
140 NV_ADMA_STAT_HOTUNPLUG = (1 << 1),
141 NV_ADMA_STAT_HOTPLUG = (1 << 2),
142 NV_ADMA_STAT_CPBERR = (1 << 4),
143 NV_ADMA_STAT_SERROR = (1 << 5),
144 NV_ADMA_STAT_CMD_COMPLETE = (1 << 6),
145 NV_ADMA_STAT_IDLE = (1 << 8),
146 NV_ADMA_STAT_LEGACY = (1 << 9),
147 NV_ADMA_STAT_STOPPED = (1 << 10),
148 NV_ADMA_STAT_DONE = (1 << 12),
149 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR |
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400150 NV_ADMA_STAT_TIMEOUT,
Robert Hancockfbbb2622006-10-27 19:08:41 -0700151
152 /* port flags */
153 NV_ADMA_PORT_REGISTER_MODE = (1 << 0),
Robert Hancock2dec7552006-11-26 14:20:19 -0600154 NV_ADMA_ATAPI_SETUP_COMPLETE = (1 << 1),
Robert Hancockfbbb2622006-10-27 19:08:41 -0700155
Kuan Luof140f0f2007-10-15 15:16:53 -0400156 /* MCP55 reg offset */
157 NV_CTL_MCP55 = 0x400,
158 NV_INT_STATUS_MCP55 = 0x440,
159 NV_INT_ENABLE_MCP55 = 0x444,
160 NV_NCQ_REG_MCP55 = 0x448,
161
162 /* MCP55 */
163 NV_INT_ALL_MCP55 = 0xffff,
164 NV_INT_PORT_SHIFT_MCP55 = 16, /* each port occupies 16 bits */
165 NV_INT_MASK_MCP55 = NV_INT_ALL_MCP55 & 0xfffd,
166
167 /* SWNCQ ENABLE BITS*/
168 NV_CTL_PRI_SWNCQ = 0x02,
169 NV_CTL_SEC_SWNCQ = 0x04,
170
171 /* SW NCQ status bits*/
172 NV_SWNCQ_IRQ_DEV = (1 << 0),
173 NV_SWNCQ_IRQ_PM = (1 << 1),
174 NV_SWNCQ_IRQ_ADDED = (1 << 2),
175 NV_SWNCQ_IRQ_REMOVED = (1 << 3),
176
177 NV_SWNCQ_IRQ_BACKOUT = (1 << 4),
178 NV_SWNCQ_IRQ_SDBFIS = (1 << 5),
179 NV_SWNCQ_IRQ_DHREGFIS = (1 << 6),
180 NV_SWNCQ_IRQ_DMASETUP = (1 << 7),
181
182 NV_SWNCQ_IRQ_HOTPLUG = NV_SWNCQ_IRQ_ADDED |
183 NV_SWNCQ_IRQ_REMOVED,
184
Jeff Garzik10ad05d2006-03-22 23:50:50 -0500185};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* segment bus address */
	__le32			len;		/* segment length in bytes */
	u8			flags;		/* NV_APRD_* flags */
	u8			packet_len;
	__le16			reserved;
};
195
/* Bits encoded into the 16-bit taskfile words of a CPB (nv_adma_cpb.tf). */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
205
/* ADMA Command Parameter Block.
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd.  Numbers in the trailing comments are
   byte offsets within the 128-byte CPB. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0 - NV_CPB_RESP_* */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2 - NV_CPB_CTL_* */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
224
225
/* Per-port private state used while the port runs the ADMA interface. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* CPB array, CPU view */
	dma_addr_t		cpb_dma;	/* CPB array, bus address */
	struct nv_adma_prd	*aprd;		/* external APRD tables, CPU view */
	dma_addr_t		aprd_dma;	/* external APRD tables, bus address */
	void __iomem		*ctl_block;	/* ADMA port register block */
	void __iomem		*gen_block;	/* ADMA general register block */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;
};
238
/* Per-host private data: remembers the nv_host_type the host was probed as. */
struct nv_host_priv {
	unsigned long		type;
};
242
/* Circular FIFO of command tags whose issue has been deferred.
 * NOTE(review): defer_bits presumably carries one bit per deferred tag —
 * confirm against the enqueue/dequeue helpers elsewhere in this file. */
struct defer_queue {
	u32		defer_bits;
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];
};
249
/* ncq_flags bits recording which FIS types have been observed while a
 * software-NCQ command sequence is in flight. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),
	ncq_saw_dmas	= (1U << 1),
	ncq_saw_sdb	= (1U << 2),
	ncq_saw_backout	= (1U << 3),
};
256
/* Per-port private state for the software-NCQ (MCP5x) implementation. */
struct nv_swncq_port_priv {
	struct ata_bmdma_prd	*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */
	void __iomem		*sactive_block;
	void __iomem		*irq_block;
	void __iomem		*tag_block;
	u32			qc_active;	/* bitmap of active commands */

	unsigned int		last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue	defer_queue;

	/* for NCQ interrupt analysis: per-tag bitmaps of seen FIS types */
	u32			dhfis_bits;
	u32			dmafis_bits;
	u32			sdbfis_bits;

	unsigned int		ncq_flags;	/* enum ncq_saw_flag_list */
};
277
278
/* Test the per-port interrupt bit for port PORT in the ADMA general
 * control/status value GCTL (ports are spaced 12 bits apart from bit 19). */
#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700280
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400281static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
Bartlomiej Zolnierkiewicz58eb8cd2014-05-07 17:17:44 +0200282#ifdef CONFIG_PM_SLEEP
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600283static int nv_pci_device_resume(struct pci_dev *pdev);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900284#endif
Jeff Garzikcca39742006-08-24 03:19:22 -0400285static void nv_ck804_host_stop(struct ata_host *host);
David Howells7d12e782006-10-05 14:55:46 +0100286static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
287static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
288static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
Tejun Heo82ef04f2008-07-31 17:02:40 +0900289static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
290static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700291
Tejun Heo7f4774b2009-06-10 16:29:07 +0900292static int nv_hardreset(struct ata_link *link, unsigned int *class,
293 unsigned long deadline);
Tejun Heo39f87582006-06-17 15:49:56 +0900294static void nv_nf2_freeze(struct ata_port *ap);
295static void nv_nf2_thaw(struct ata_port *ap);
296static void nv_ck804_freeze(struct ata_port *ap);
297static void nv_ck804_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700298static int nv_adma_slave_config(struct scsi_device *sdev);
Robert Hancock2dec7552006-11-26 14:20:19 -0600299static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
Jiri Slaby95364f32019-10-31 10:59:45 +0100300static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700301static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
302static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
303static void nv_adma_irq_clear(struct ata_port *ap);
304static int nv_adma_port_start(struct ata_port *ap);
305static void nv_adma_port_stop(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900306#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600307static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
308static int nv_adma_port_resume(struct ata_port *ap);
Tejun Heo438ac6d2007-03-02 17:31:26 +0900309#endif
Robert Hancock53014e22007-05-05 15:36:36 -0600310static void nv_adma_freeze(struct ata_port *ap);
311static void nv_adma_thaw(struct ata_port *ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700312static void nv_adma_error_handler(struct ata_port *ap);
313static void nv_adma_host_stop(struct ata_host *host);
Robert Hancockf5ecac22007-02-20 21:49:10 -0600314static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
Robert Hancockf2fb3442007-03-26 21:43:36 -0800315static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
Tejun Heo39f87582006-06-17 15:49:56 +0900316
Kuan Luof140f0f2007-10-15 15:16:53 -0400317static void nv_mcp55_thaw(struct ata_port *ap);
318static void nv_mcp55_freeze(struct ata_port *ap);
319static void nv_swncq_error_handler(struct ata_port *ap);
320static int nv_swncq_slave_config(struct scsi_device *sdev);
321static int nv_swncq_port_start(struct ata_port *ap);
Jiri Slaby95364f32019-10-31 10:59:45 +0100322static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
Kuan Luof140f0f2007-10-15 15:16:53 -0400323static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
324static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
325static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
326static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
327#ifdef CONFIG_PM
328static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
329static int nv_swncq_port_resume(struct ata_port *ap);
330#endif
331
/* Controller flavor; used as the index into nv_port_info[]. */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};
342
/* PCI IDs handled by this driver, each mapped to its nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ }	/* terminate list */
};
361
/* PCI driver glue; suspend/resume only when the kernel supports PM sleep. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
372
/* Plain BMDMA SCSI host template used by the non-NCQ flavors. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
376
/* SCSI host template for the ADMA (CK804/MCP04) interface. */
static struct scsi_host_template nv_adma_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
};
384
/* SCSI host template for software-NCQ (MCP5x) operation. */
static struct scsi_host_template nv_swncq_sht = {
	ATA_NCQ_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
};
392
/*
 * NV SATA controllers have various different problems with hardreset
 * protocol depending on the specific controller and device.
 *
 * GENERIC:
 *
 * bko11195 reports that link doesn't come online after hardreset on
 * generic nv's and there have been several other similar reports on
 * linux-ide.
 *
 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
 * softreset.
 *
 * NF2/3:
 *
 * bko3352 reports nf2/3 controllers can't determine device signature
 * reliably after hardreset.  The following thread reports detection
 * failure on cold boot with the standard debouncing timing.
 *
 * http://thread.gmane.org/gmane.linux.ide/34098
 *
 * bko12176 reports that hardreset fails to bring up the link during
 * boot on nf2.
 *
 * CK804:
 *
 * For initial probing after boot and hot plugging, hardreset mostly
 * works fine on CK804 but curiously, reprobing on the initial port
 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 * FIS in somewhat undeterministic way.
 *
 * SWNCQ:
 *
 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
 * hardreset should be used and hardreset can't report proper
 * signature, which suggests that mcp5x is closer to nf2 as long as
 * reset quirkiness is concerned.
 *
 * bko12703 reports that boot probing fails for intel SSD with
 * hardreset.  Link fails to come online.  Softreset works fine.
 *
 * The failures are varied but the following patterns seem true for
 * all flavors.
 *
 * - Softreset during boot always works.
 *
 * - Hardreset during boot sometimes fails to bring up the link on
 *   certain combinations and device signature acquisition is
 *   unreliable.
 *
 * - Hardreset is often necessary after hotplug.
 *
 * So, preferring softreset for boot probing and error handling (as
 * hardreset might bring down the link) but using hardreset for
 * post-boot probing should work around the above issues in most
 * cases.  Define nv_hardreset() which only kicks in for post-boot
 * probing and use it for all variants.
 */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};
458
/* nForce2/3: generic ops plus NF2-specific freeze/thaw. */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
464
/* CK804: generic ops plus CK804-specific freeze/thaw and host_stop. */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
471
/* ADMA (CK804/MCP04): inherits CK804 ops, overriding command submission,
 * interrupt handling and port lifecycle for the ADMA engine. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
495
/* Software NCQ (MCP5x): generic ops plus SWNCQ command submission and
 * MCP55-style freeze/thaw. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
513
/* Per-flavor probe data stashed in ata_port_info.private_data: the IRQ
 * handler and SCSI host template to register for that flavor. */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

/* Build a pointer to an anonymous, initialized struct nv_pi_priv. */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
521
/* Per-flavor port configuration, indexed by enum nv_host_type. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
578
MODULE_AUTHOR("NVIDIA");
MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
MODULE_VERSION(DRV_VERSION);

/* Module parameters — presumably registered via module_param() further
 * down the file (not visible here); confirm before relying on the names. */
static bool adma_enabled;		/* ADMA off by default */
static bool swncq_enabled = true;	/* software NCQ on by default */
static bool msi_enabled;		/* MSI off by default */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700588
Robert Hancock2dec7552006-11-26 14:20:19 -0600589static void nv_adma_register_mode(struct ata_port *ap)
590{
Robert Hancock2dec7552006-11-26 14:20:19 -0600591 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600592 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800593 u16 tmp, status;
594 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600595
596 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
597 return;
598
Robert Hancocka2cfe812007-02-05 16:26:03 -0800599 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400600 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800601 ndelay(50);
602 status = readw(mmio + NV_ADMA_STAT);
603 count++;
604 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400605 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700606 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
607 status);
Robert Hancocka2cfe812007-02-05 16:26:03 -0800608
Robert Hancock2dec7552006-11-26 14:20:19 -0600609 tmp = readw(mmio + NV_ADMA_CTL);
610 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
611
Robert Hancocka2cfe812007-02-05 16:26:03 -0800612 count = 0;
613 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400614 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800615 ndelay(50);
616 status = readw(mmio + NV_ADMA_STAT);
617 count++;
618 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400619 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700620 ata_port_warn(ap,
621 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
622 status);
Robert Hancocka2cfe812007-02-05 16:26:03 -0800623
Robert Hancock2dec7552006-11-26 14:20:19 -0600624 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
625}
626
627static void nv_adma_mode(struct ata_port *ap)
628{
Robert Hancock2dec7552006-11-26 14:20:19 -0600629 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600630 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800631 u16 tmp, status;
632 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600633
634 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
635 return;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500636
Robert Hancock2dec7552006-11-26 14:20:19 -0600637 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
638
639 tmp = readw(mmio + NV_ADMA_CTL);
640 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
641
Robert Hancocka2cfe812007-02-05 16:26:03 -0800642 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400643 while (((status & NV_ADMA_STAT_LEGACY) ||
Robert Hancocka2cfe812007-02-05 16:26:03 -0800644 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
645 ndelay(50);
646 status = readw(mmio + NV_ADMA_STAT);
647 count++;
648 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400649 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700650 ata_port_warn(ap,
Robert Hancocka2cfe812007-02-05 16:26:03 -0800651 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
652 status);
653
Robert Hancock2dec7552006-11-26 14:20:19 -0600654 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
655}
656
Robert Hancockfbbb2622006-10-27 19:08:41 -0700657static int nv_adma_slave_config(struct scsi_device *sdev)
658{
659 struct ata_port *ap = ata_shost_to_port(sdev->host);
Robert Hancock2dec7552006-11-26 14:20:19 -0600660 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancock8959d302008-02-04 19:39:02 -0600661 struct nv_adma_port_priv *port0, *port1;
Robert Hancock2dec7552006-11-26 14:20:19 -0600662 struct pci_dev *pdev = to_pci_dev(ap->host->dev);
Robert Hancock8959d302008-02-04 19:39:02 -0600663 unsigned long segment_boundary, flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700664 unsigned short sg_tablesize;
665 int rc;
Robert Hancock2dec7552006-11-26 14:20:19 -0600666 int adma_enable;
667 u32 current_reg, new_reg, config_mask;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700668
669 rc = ata_scsi_slave_config(sdev);
670
671 if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
672 /* Not a proper libata device, ignore */
673 return rc;
674
Robert Hancock8959d302008-02-04 19:39:02 -0600675 spin_lock_irqsave(ap->lock, flags);
676
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900677 if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700678 /*
679 * NVIDIA reports that ADMA mode does not support ATAPI commands.
680 * Therefore ATAPI commands are sent through the legacy interface.
681 * However, the legacy interface only supports 32-bit DMA.
682 * Restrict DMA parameters as required by the legacy interface
683 * when an ATAPI device is connected.
684 */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700685 segment_boundary = ATA_DMA_BOUNDARY;
686 /* Subtract 1 since an extra entry may be needed for padding, see
687 libata-scsi.c */
688 sg_tablesize = LIBATA_MAX_PRD - 1;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500689
Robert Hancock2dec7552006-11-26 14:20:19 -0600690 /* Since the legacy DMA engine is in use, we need to disable ADMA
691 on the port. */
692 adma_enable = 0;
693 nv_adma_register_mode(ap);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400694 } else {
Robert Hancockfbbb2622006-10-27 19:08:41 -0700695 segment_boundary = NV_ADMA_DMA_BOUNDARY;
696 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
Robert Hancock2dec7552006-11-26 14:20:19 -0600697 adma_enable = 1;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700698 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500699
Robert Hancock2dec7552006-11-26 14:20:19 -0600700 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700701
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400702 if (ap->port_no == 1)
Robert Hancock2dec7552006-11-26 14:20:19 -0600703 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
704 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
705 else
706 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
707 NV_MCP_SATA_CFG_20_PORT0_PWB_EN;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500708
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400709 if (adma_enable) {
Robert Hancock2dec7552006-11-26 14:20:19 -0600710 new_reg = current_reg | config_mask;
711 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400712 } else {
Robert Hancock2dec7552006-11-26 14:20:19 -0600713 new_reg = current_reg & ~config_mask;
714 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
715 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500716
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400717 if (current_reg != new_reg)
Robert Hancock2dec7552006-11-26 14:20:19 -0600718 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500719
Robert Hancock8959d302008-02-04 19:39:02 -0600720 port0 = ap->host->ports[0]->private_data;
721 port1 = ap->host->ports[1]->private_data;
Robert Hancock8959d302008-02-04 19:39:02 -0600722 if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
723 (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
Christoph Hellwig258c9fd2018-05-09 16:01:00 +0200724 /*
725 * We have to set the DMA mask to 32-bit if either port is in
726 * ATAPI mode, since they are on the same PCI device which is
727 * used for DMA mapping. If either SCSI device is not allocated
728 * yet, it's OK since that port will discover its correct
729 * setting when it does get allocated.
730 */
731 rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
Robert Hancock8959d302008-02-04 19:39:02 -0600732 } else {
Christoph Hellwig258c9fd2018-05-09 16:01:00 +0200733 rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
Robert Hancock8959d302008-02-04 19:39:02 -0600734 }
735
Robert Hancockfbbb2622006-10-27 19:08:41 -0700736 blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
Martin K. Petersen8a783622010-02-26 00:20:39 -0500737 blk_queue_max_segments(sdev->request_queue, sg_tablesize);
Joe Perchesa9a79df2011-04-15 15:51:59 -0700738 ata_port_info(ap,
739 "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
740 (unsigned long long)*ap->host->dev->dma_mask,
741 segment_boundary, sg_tablesize);
Robert Hancock8959d302008-02-04 19:39:02 -0600742
743 spin_unlock_irqrestore(ap->lock, flags);
744
Robert Hancockfbbb2622006-10-27 19:08:41 -0700745 return rc;
746}
747
Robert Hancock2dec7552006-11-26 14:20:19 -0600748static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
749{
750 struct nv_adma_port_priv *pp = qc->ap->private_data;
751 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
752}
753
Robert Hancockf2fb3442007-03-26 21:43:36 -0800754static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
755{
Robert Hancock3f3debd2007-11-25 16:59:36 -0600756 /* Other than when internal or pass-through commands are executed,
757 the only time this function will be called in ADMA mode will be
758 if a command fails. In the failure case we don't care about going
759 into register mode with ADMA commands pending, as the commands will
760 all shortly be aborted anyway. We assume that NCQ commands are not
761 issued via passthrough, which is the only way that switching into
762 ADMA mode could abort outstanding commands. */
Robert Hancockf2fb3442007-03-26 21:43:36 -0800763 nv_adma_register_mode(ap);
764
Tejun Heo9363c382008-04-07 22:47:16 +0900765 ata_sff_tf_read(ap, tf);
Robert Hancockf2fb3442007-03-26 21:43:36 -0800766}
767
Robert Hancock2dec7552006-11-26 14:20:19 -0600768static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700769{
770 unsigned int idx = 0;
771
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400772 if (tf->flags & ATA_TFLAG_ISADDR) {
Robert Hancockac3d6b82007-02-19 19:02:46 -0600773 if (tf->flags & ATA_TFLAG_LBA48) {
774 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
775 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
776 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
777 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
778 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
779 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
780 } else
781 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);
Jeff Garzika84471f2007-02-26 05:51:33 -0500782
Robert Hancockac3d6b82007-02-19 19:02:46 -0600783 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
784 cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
785 cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
786 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700787 }
Jeff Garzika84471f2007-02-26 05:51:33 -0500788
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400789 if (tf->flags & ATA_TFLAG_DEVICE)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600790 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700791
792 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);
Jeff Garzika84471f2007-02-26 05:51:33 -0500793
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400794 while (idx < 12)
Robert Hancockac3d6b82007-02-19 19:02:46 -0600795 cpb[idx++] = cpu_to_le16(IGN);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700796
797 return idx;
798}
799
Robert Hancock5bd28a42007-02-05 16:26:01 -0800800static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
Robert Hancockfbbb2622006-10-27 19:08:41 -0700801{
802 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancock2dec7552006-11-26 14:20:19 -0600803 u8 flags = pp->cpb[cpb_num].resp_flags;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700804
805 VPRINTK("CPB %d, flags=0x%x\n", cpb_num, flags);
806
Robert Hancock5bd28a42007-02-05 16:26:01 -0800807 if (unlikely((force_err ||
808 flags & (NV_CPB_RESP_ATA_ERR |
809 NV_CPB_RESP_CMD_ERR |
810 NV_CPB_RESP_CPB_ERR)))) {
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900811 struct ata_eh_info *ehi = &ap->link.eh_info;
Robert Hancock5bd28a42007-02-05 16:26:01 -0800812 int freeze = 0;
813
814 ata_ehi_clear_desc(ehi);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400815 __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
Robert Hancock5bd28a42007-02-05 16:26:01 -0800816 if (flags & NV_CPB_RESP_ATA_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900817 ata_ehi_push_desc(ehi, "ATA error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800818 ehi->err_mask |= AC_ERR_DEV;
819 } else if (flags & NV_CPB_RESP_CMD_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900820 ata_ehi_push_desc(ehi, "CMD error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800821 ehi->err_mask |= AC_ERR_DEV;
822 } else if (flags & NV_CPB_RESP_CPB_ERR) {
Tejun Heob64bbc32007-07-16 14:29:39 +0900823 ata_ehi_push_desc(ehi, "CPB error");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800824 ehi->err_mask |= AC_ERR_SYSTEM;
825 freeze = 1;
826 } else {
827 /* notifier error, but no error in CPB flags? */
Tejun Heob64bbc32007-07-16 14:29:39 +0900828 ata_ehi_push_desc(ehi, "unknown");
Robert Hancock5bd28a42007-02-05 16:26:01 -0800829 ehi->err_mask |= AC_ERR_OTHER;
830 freeze = 1;
831 }
832 /* Kill all commands. EH will determine what actually failed. */
833 if (freeze)
834 ata_port_freeze(ap);
835 else
836 ata_port_abort(ap);
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200837 return -1;
Robert Hancock5bd28a42007-02-05 16:26:01 -0800838 }
839
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200840 if (likely(flags & NV_CPB_RESP_DONE))
841 return 1;
Robert Hancock5bd28a42007-02-05 16:26:01 -0800842 return 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700843}
844
Robert Hancock2dec7552006-11-26 14:20:19 -0600845static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
846{
Tejun Heo9af5c9c2007-08-06 18:36:22 +0900847 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
Robert Hancock2dec7552006-11-26 14:20:19 -0600848
849 /* freeze if hotplugged */
850 if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
851 ata_port_freeze(ap);
852 return 1;
853 }
854
855 /* bail out if not our interrupt */
856 if (!(irq_stat & NV_INT_DEV))
857 return 0;
858
859 /* DEV interrupt w/ no active qc? */
860 if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
Tejun Heo9363c382008-04-07 22:47:16 +0900861 ata_sff_check_status(ap);
Robert Hancock2dec7552006-11-26 14:20:19 -0600862 return 1;
863 }
864
865 /* handle interrupt */
Tejun Heoc3b28892010-05-19 22:10:21 +0200866 return ata_bmdma_port_intr(ap, qc);
Robert Hancock2dec7552006-11-26 14:20:19 -0600867}
868
Robert Hancockfbbb2622006-10-27 19:08:41 -0700869static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
870{
871 struct ata_host *host = dev_instance;
872 int i, handled = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600873 u32 notifier_clears[2];
Robert Hancockfbbb2622006-10-27 19:08:41 -0700874
875 spin_lock(&host->lock);
876
877 for (i = 0; i < host->n_ports; i++) {
878 struct ata_port *ap = host->ports[i];
Tejun Heo3e4ec342010-05-10 21:41:30 +0200879 struct nv_adma_port_priv *pp = ap->private_data;
880 void __iomem *mmio = pp->ctl_block;
881 u16 status;
882 u32 gen_ctl;
883 u32 notifier, notifier_error;
884
Robert Hancock2dec7552006-11-26 14:20:19 -0600885 notifier_clears[i] = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700886
Tejun Heo3e4ec342010-05-10 21:41:30 +0200887 /* if ADMA is disabled, use standard ata interrupt handler */
888 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
889 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
890 >> (NV_INT_PORT_SHIFT * i);
891 handled += nv_host_intr(ap, irq_stat);
892 continue;
893 }
Jeff Garzika617c092007-05-21 20:14:23 -0400894
Tejun Heo3e4ec342010-05-10 21:41:30 +0200895 /* if in ATA register mode, check for standard interrupts */
896 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
897 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
898 >> (NV_INT_PORT_SHIFT * i);
899 if (ata_tag_valid(ap->link.active_tag))
900 /** NV_INT_DEV indication seems unreliable
901 at times at least in ADMA mode. Force it
902 on always when a command is active, to
903 prevent losing interrupts. */
904 irq_stat |= NV_INT_DEV;
905 handled += nv_host_intr(ap, irq_stat);
906 }
Robert Hancockfbbb2622006-10-27 19:08:41 -0700907
Tejun Heo3e4ec342010-05-10 21:41:30 +0200908 notifier = readl(mmio + NV_ADMA_NOTIFIER);
909 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
910 notifier_clears[i] = notifier | notifier_error;
911
912 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
913
914 if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
915 !notifier_error)
916 /* Nothing to do */
917 continue;
918
919 status = readw(mmio + NV_ADMA_STAT);
920
921 /*
922 * Clear status. Ensure the controller sees the
923 * clearing before we start looking at any of the CPB
924 * statuses, so that any CPB completions after this
925 * point in the handler will raise another interrupt.
926 */
927 writew(status, mmio + NV_ADMA_STAT);
928 readw(mmio + NV_ADMA_STAT); /* flush posted write */
929 rmb();
930
931 handled++; /* irq handled if we got here */
932
933 /* freeze if hotplugged or controller error */
934 if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
935 NV_ADMA_STAT_HOTUNPLUG |
936 NV_ADMA_STAT_TIMEOUT |
937 NV_ADMA_STAT_SERROR))) {
938 struct ata_eh_info *ehi = &ap->link.eh_info;
939
940 ata_ehi_clear_desc(ehi);
941 __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
942 if (status & NV_ADMA_STAT_TIMEOUT) {
943 ehi->err_mask |= AC_ERR_SYSTEM;
944 ata_ehi_push_desc(ehi, "timeout");
945 } else if (status & NV_ADMA_STAT_HOTPLUG) {
946 ata_ehi_hotplugged(ehi);
947 ata_ehi_push_desc(ehi, "hotplug");
948 } else if (status & NV_ADMA_STAT_HOTUNPLUG) {
949 ata_ehi_hotplugged(ehi);
950 ata_ehi_push_desc(ehi, "hot unplug");
951 } else if (status & NV_ADMA_STAT_SERROR) {
952 /* let EH analyze SError and figure out cause */
953 ata_ehi_push_desc(ehi, "SError");
954 } else
955 ata_ehi_push_desc(ehi, "unknown");
956 ata_port_freeze(ap);
957 continue;
958 }
959
960 if (status & (NV_ADMA_STAT_DONE |
961 NV_ADMA_STAT_CPBERR |
962 NV_ADMA_STAT_CMD_COMPLETE)) {
963 u32 check_commands = notifier_clears[i];
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200964 u32 done_mask = 0;
Tejun Heo752e3862010-06-25 15:02:59 +0200965 int pos, rc;
Tejun Heo3e4ec342010-05-10 21:41:30 +0200966
967 if (status & NV_ADMA_STAT_CPBERR) {
968 /* check all active commands */
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400969 if (ata_tag_valid(ap->link.active_tag))
Tejun Heo3e4ec342010-05-10 21:41:30 +0200970 check_commands = 1 <<
971 ap->link.active_tag;
972 else
973 check_commands = ap->link.sactive;
Robert Hancockfbbb2622006-10-27 19:08:41 -0700974 }
975
Tejun Heo3e4ec342010-05-10 21:41:30 +0200976 /* check CPBs for completed commands */
Tejun Heo752e3862010-06-25 15:02:59 +0200977 while ((pos = ffs(check_commands))) {
Tejun Heo3e4ec342010-05-10 21:41:30 +0200978 pos--;
Tejun Heo752e3862010-06-25 15:02:59 +0200979 rc = nv_adma_check_cpb(ap, pos,
Jeff Garzik5796d1c2007-10-26 00:03:37 -0400980 notifier_error & (1 << pos));
Tejun Heo1aadf5c2010-06-25 15:03:34 +0200981 if (rc > 0)
982 done_mask |= 1 << pos;
983 else if (unlikely(rc < 0))
Tejun Heo752e3862010-06-25 15:02:59 +0200984 check_commands = 0;
Tejun Heo3e4ec342010-05-10 21:41:30 +0200985 check_commands &= ~(1 << pos);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700986 }
Sascha Hauer8385d752019-12-13 09:04:08 +0100987 ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
Robert Hancockfbbb2622006-10-27 19:08:41 -0700988 }
989 }
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500990
Jeff Garzikb4479162007-10-25 20:47:30 -0400991 if (notifier_clears[0] || notifier_clears[1]) {
Robert Hancock2dec7552006-11-26 14:20:19 -0600992 /* Note: Both notifier clear registers must be written
993 if either is set, even if one is zero, according to NVIDIA. */
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600994 struct nv_adma_port_priv *pp = host->ports[0]->private_data;
995 writel(notifier_clears[0], pp->notifier_clear_block);
996 pp = host->ports[1]->private_data;
997 writel(notifier_clears[1], pp->notifier_clear_block);
Robert Hancock2dec7552006-11-26 14:20:19 -0600998 }
Robert Hancockfbbb2622006-10-27 19:08:41 -0700999
1000 spin_unlock(&host->lock);
1001
1002 return IRQ_RETVAL(handled);
1003}
1004
Robert Hancock53014e22007-05-05 15:36:36 -06001005static void nv_adma_freeze(struct ata_port *ap)
1006{
1007 struct nv_adma_port_priv *pp = ap->private_data;
1008 void __iomem *mmio = pp->ctl_block;
1009 u16 tmp;
1010
1011 nv_ck804_freeze(ap);
1012
1013 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1014 return;
1015
1016 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001017 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001018 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
1019
1020 /* Disable interrupt */
1021 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001022 writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001023 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001024 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001025}
1026
1027static void nv_adma_thaw(struct ata_port *ap)
1028{
1029 struct nv_adma_port_priv *pp = ap->private_data;
1030 void __iomem *mmio = pp->ctl_block;
1031 u16 tmp;
1032
1033 nv_ck804_thaw(ap);
1034
1035 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1036 return;
1037
1038 /* Enable interrupt */
1039 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001040 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001041 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001042 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001043}
1044
Robert Hancockfbbb2622006-10-27 19:08:41 -07001045static void nv_adma_irq_clear(struct ata_port *ap)
1046{
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001047 struct nv_adma_port_priv *pp = ap->private_data;
1048 void __iomem *mmio = pp->ctl_block;
Robert Hancock53014e22007-05-05 15:36:36 -06001049 u32 notifier_clears[2];
1050
1051 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
Tejun Heo37f65b82010-05-19 22:10:20 +02001052 ata_bmdma_irq_clear(ap);
Robert Hancock53014e22007-05-05 15:36:36 -06001053 return;
1054 }
1055
1056 /* clear any outstanding CK804 notifications */
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001057 writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
Robert Hancock53014e22007-05-05 15:36:36 -06001058 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001059
1060 /* clear ADMA status */
Robert Hancock53014e22007-05-05 15:36:36 -06001061 writew(0xffff, mmio + NV_ADMA_STAT);
Jeff Garzika617c092007-05-21 20:14:23 -04001062
Robert Hancock53014e22007-05-05 15:36:36 -06001063 /* clear notifiers - note both ports need to be written with
1064 something even though we are only clearing on one */
1065 if (ap->port_no == 0) {
1066 notifier_clears[0] = 0xFFFFFFFF;
1067 notifier_clears[1] = 0;
1068 } else {
1069 notifier_clears[0] = 0;
1070 notifier_clears[1] = 0xFFFFFFFF;
1071 }
1072 pp = ap->host->ports[0]->private_data;
1073 writel(notifier_clears[0], pp->notifier_clear_block);
1074 pp = ap->host->ports[1]->private_data;
1075 writel(notifier_clears[1], pp->notifier_clear_block);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001076}
1077
Robert Hancockf5ecac22007-02-20 21:49:10 -06001078static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001079{
Robert Hancockf5ecac22007-02-20 21:49:10 -06001080 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001081
Jeff Garzikb4479162007-10-25 20:47:30 -04001082 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
Tejun Heofe06e5f2010-05-10 21:41:39 +02001083 ata_bmdma_post_internal_cmd(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001084}
1085
1086static int nv_adma_port_start(struct ata_port *ap)
1087{
1088 struct device *dev = ap->host->dev;
1089 struct nv_adma_port_priv *pp;
1090 int rc;
1091 void *mem;
1092 dma_addr_t mem_dma;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001093 void __iomem *mmio;
Robert Hancock8959d302008-02-04 19:39:02 -06001094 struct pci_dev *pdev = to_pci_dev(dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001095 u16 tmp;
1096
1097 VPRINTK("ENTER\n");
1098
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001099 /*
1100 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1101 * pad buffers.
1102 */
1103 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Robert Hancock8959d302008-02-04 19:39:02 -06001104 if (rc)
1105 return rc;
1106
Tejun Heoc7087652010-05-10 21:41:34 +02001107 /* we might fallback to bmdma, allocate bmdma resources */
1108 rc = ata_bmdma_port_start(ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001109 if (rc)
1110 return rc;
1111
Tejun Heo24dc5f32007-01-20 16:00:28 +09001112 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1113 if (!pp)
1114 return -ENOMEM;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001115
Tejun Heo0d5ff562007-02-01 15:06:36 +09001116 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001117 ap->port_no * NV_ADMA_PORT_SIZE;
1118 pp->ctl_block = mmio;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001119 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001120 pp->notifier_clear_block = pp->gen_block +
1121 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1122
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001123 /*
1124 * Now that the legacy PRD and padding buffer are allocated we can
Christoph Hellwig51872b62019-08-26 12:57:22 +02001125 * raise the DMA mask to allocate the CPB/APRD table.
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001126 */
Christoph Hellwig51872b62019-08-26 12:57:22 +02001127 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1128
Robert Hancock8959d302008-02-04 19:39:02 -06001129 pp->adma_dma_mask = *dev->dma_mask;
1130
Tejun Heo24dc5f32007-01-20 16:00:28 +09001131 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1132 &mem_dma, GFP_KERNEL);
1133 if (!mem)
1134 return -ENOMEM;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001135
1136 /*
1137 * First item in chunk of DMA memory:
1138 * 128-byte command parameter block (CPB)
1139 * one for each command tag
1140 */
1141 pp->cpb = mem;
1142 pp->cpb_dma = mem_dma;
1143
1144 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001145 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001146
1147 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1148 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1149
1150 /*
1151 * Second item: block of ADMA_SGTBL_LEN s/g entries
1152 */
1153 pp->aprd = mem;
1154 pp->aprd_dma = mem_dma;
1155
1156 ap->private_data = pp;
1157
1158 /* clear any outstanding interrupt conditions */
1159 writew(0xffff, mmio + NV_ADMA_STAT);
1160
1161 /* initialize port variables */
1162 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1163
1164 /* clear CPB fetch count */
1165 writew(0, mmio + NV_ADMA_CPB_COUNT);
1166
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001167 /* clear GO for register mode, enable interrupt */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001168 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001169 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1170 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001171
1172 tmp = readw(mmio + NV_ADMA_CTL);
1173 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001174 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001175 udelay(1);
1176 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001177 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001178
1179 return 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001180}
1181
1182static void nv_adma_port_stop(struct ata_port *ap)
1183{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001184 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001185 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001186
1187 VPRINTK("ENTER\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07001188 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001189}
1190
Tejun Heo438ac6d2007-03-02 17:31:26 +09001191#ifdef CONFIG_PM
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001192static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
1193{
1194 struct nv_adma_port_priv *pp = ap->private_data;
1195 void __iomem *mmio = pp->ctl_block;
1196
1197 /* Go to register mode - clears GO */
1198 nv_adma_register_mode(ap);
1199
1200 /* clear CPB fetch count */
1201 writew(0, mmio + NV_ADMA_CPB_COUNT);
1202
1203 /* disable interrupt, shut down port */
1204 writew(0, mmio + NV_ADMA_CTL);
1205
1206 return 0;
1207}
1208
1209static int nv_adma_port_resume(struct ata_port *ap)
1210{
1211 struct nv_adma_port_priv *pp = ap->private_data;
1212 void __iomem *mmio = pp->ctl_block;
1213 u16 tmp;
1214
1215 /* set CPB block location */
1216 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001217 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001218
1219 /* clear any outstanding interrupt conditions */
1220 writew(0xffff, mmio + NV_ADMA_STAT);
1221
1222 /* initialize port variables */
1223 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1224
1225 /* clear CPB fetch count */
1226 writew(0, mmio + NV_ADMA_CPB_COUNT);
1227
1228 /* clear GO for register mode, enable interrupt */
1229 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001230 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1231 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001232
1233 tmp = readw(mmio + NV_ADMA_CTL);
1234 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001235 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001236 udelay(1);
1237 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001238 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001239
1240 return 0;
1241}
Tejun Heo438ac6d2007-03-02 17:31:26 +09001242#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001243
Tejun Heo9a829cc2007-04-17 23:44:08 +09001244static void nv_adma_setup_port(struct ata_port *ap)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001245{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001246 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1247 struct ata_ioports *ioport = &ap->ioaddr;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001248
1249 VPRINTK("ENTER\n");
1250
Tejun Heo9a829cc2007-04-17 23:44:08 +09001251 mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001252
Tejun Heo0d5ff562007-02-01 15:06:36 +09001253 ioport->cmd_addr = mmio;
1254 ioport->data_addr = mmio + (ATA_REG_DATA * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001255 ioport->error_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001256 ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
1257 ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
1258 ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
1259 ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
1260 ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
1261 ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001262 ioport->status_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001263 ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001264 ioport->altstatus_addr =
Tejun Heo0d5ff562007-02-01 15:06:36 +09001265 ioport->ctl_addr = mmio + 0x20;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001266}
1267
Tejun Heo9a829cc2007-04-17 23:44:08 +09001268static int nv_adma_host_init(struct ata_host *host)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001269{
Tejun Heo9a829cc2007-04-17 23:44:08 +09001270 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001271 unsigned int i;
1272 u32 tmp32;
1273
1274 VPRINTK("ENTER\n");
1275
1276 /* enable ADMA on the ports */
1277 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
1278 tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
1279 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
1280 NV_MCP_SATA_CFG_20_PORT1_EN |
1281 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
1282
1283 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
1284
Tejun Heo9a829cc2007-04-17 23:44:08 +09001285 for (i = 0; i < host->n_ports; i++)
1286 nv_adma_setup_port(host->ports[i]);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001287
Robert Hancockfbbb2622006-10-27 19:08:41 -07001288 return 0;
1289}
1290
1291static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
1292 struct scatterlist *sg,
1293 int idx,
1294 struct nv_adma_prd *aprd)
1295{
Robert Hancock41949ed2007-02-19 19:02:27 -06001296 u8 flags = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001297 if (qc->tf.flags & ATA_TFLAG_WRITE)
1298 flags |= NV_APRD_WRITE;
1299 if (idx == qc->n_elem - 1)
1300 flags |= NV_APRD_END;
1301 else if (idx != 4)
1302 flags |= NV_APRD_CONT;
1303
1304 aprd->addr = cpu_to_le64(((u64)sg_dma_address(sg)));
1305 aprd->len = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
Robert Hancock2dec7552006-11-26 14:20:19 -06001306 aprd->flags = flags;
Robert Hancock41949ed2007-02-19 19:02:27 -06001307 aprd->packet_len = 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001308}
1309
1310static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
1311{
1312 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001313 struct nv_adma_prd *aprd;
1314 struct scatterlist *sg;
Tejun Heoff2aeb12007-12-05 16:43:11 +09001315 unsigned int si;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001316
1317 VPRINTK("ENTER\n");
1318
Tejun Heoff2aeb12007-12-05 16:43:11 +09001319 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1320 aprd = (si < 5) ? &cpb->aprd[si] :
Jens Axboe4e5b6262018-05-11 12:51:04 -06001321 &pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
Tejun Heoff2aeb12007-12-05 16:43:11 +09001322 nv_adma_fill_aprd(qc, sg, si, aprd);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001323 }
Tejun Heoff2aeb12007-12-05 16:43:11 +09001324 if (si > 5)
Jens Axboe4e5b6262018-05-11 12:51:04 -06001325 cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
Robert Hancock41949ed2007-02-19 19:02:27 -06001326 else
1327 cpb->next_aprd = cpu_to_le64(0);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001328}
1329
Robert Hancock382a6652007-02-05 16:26:02 -08001330static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1331{
1332 struct nv_adma_port_priv *pp = qc->ap->private_data;
1333
1334 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001335 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001336 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001337 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001338 return 1;
1339
Jeff Garzikb4479162007-10-25 20:47:30 -04001340 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001341 (qc->tf.protocol == ATA_PROT_NODATA))
1342 return 0;
1343
1344 return 1;
1345}
1346
/*
 * Build the Command Parameter Block (CPB) for an ADMA command, or fall
 * back to the plain BMDMA prep path for commands that must run in
 * register mode.  The CPB is published to the controller only after all
 * of its fields are filled in (see the wmb() barriers below).
 */
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* register mode must never be entered with a DMA-mapped
		   non-ATAPI command */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	/* invalidate the CPB before rewriting it: mark it "done" and clear
	   the valid bit, with barriers so the HW never sees a half-built CPB */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->hw_tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	VPRINTK("qc->flags = 0x%lx\n", qc->flags);

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;

	return AC_ERR_OK;
}
1394
/*
 * Issue a prepared command: register-mode commands go through the stock
 * BMDMA issue path, ADMA commands are kicked off by writing the tag to
 * the APPEND register.  Returns 0 or an AC_ERR_* value.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	VPRINTK("ENTER\n");

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		VPRINTK("using ATA register mode: 0x%lx\n", qc->flags);
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}
1439
/*
 * Shared IRQ handler for the generic (nForce2/3-era) flavor: walk every
 * port under the host lock and hand the active non-polling command to
 * the BMDMA interrupt helper.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1469
Jeff Garzikcca39742006-08-24 03:19:22 -04001470static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001471{
1472 int i, handled = 0;
1473
Jeff Garzikcca39742006-08-24 03:19:22 -04001474 for (i = 0; i < host->n_ports; i++) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02001475 handled += nv_host_intr(host->ports[i], irq_stat);
Tejun Heoada364e2006-06-17 15:49:56 +09001476 irq_stat >>= NV_INT_PORT_SHIFT;
1477 }
1478
1479 return IRQ_RETVAL(handled);
1480}
1481
/*
 * nForce2/3 IRQ handler: the combined interrupt status lives in I/O
 * space next to port 0's SCR registers.
 */
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1495
/*
 * CK804/MCP04 IRQ handler: same dispatch as nForce2/3 but the status
 * byte is read from the MMIO BAR instead of I/O space.
 */
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	u8 irq_stat;
	irqreturn_t ret;

	spin_lock(&host->lock);
	irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
	ret = nv_do_interrupt(host, irq_stat);
	spin_unlock(&host->lock);

	return ret;
}
1509
Tejun Heo82ef04f2008-07-31 17:02:40 +09001510static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001513 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
Tejun Heo82ef04f2008-07-31 17:02:40 +09001515 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001516 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517}
1518
Tejun Heo82ef04f2008-07-31 17:02:40 +09001519static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001522 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523
Tejun Heo82ef04f2008-07-31 17:02:40 +09001524 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001525 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526}
1527
/*
 * Controller-specific hardreset policy: only hardreset unoccupied ports
 * during post-boot probing; on an occupied port just resume the phy.
 * Always returns -EAGAIN because signature acquisition is unreliable on
 * this hardware, forcing libata to follow up with a softreset.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}
1559
/*
 * Freeze an nForce2/3 port: mask this port's bits in the shared
 * interrupt-enable register (read-modify-write).
 */
static void nv_nf2_freeze(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask &= ~(NV_INT_ALL << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1570
/*
 * Thaw an nForce2/3 port: ack any latched interrupts for this port,
 * then re-enable its interrupt bits.
 */
static void nv_nf2_thaw(struct ata_port *ap)
{
	void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear pending status before unmasking */
	iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

	mask = ioread8(scr_addr + NV_INT_ENABLE);
	mask |= (NV_INT_MASK << shift);
	iowrite8(mask, scr_addr + NV_INT_ENABLE);
}
1583
/*
 * Freeze a CK804/MCP04 port: mask this port's bits in the MMIO
 * interrupt-enable register.
 */
static void nv_ck804_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask &= ~(NV_INT_ALL << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1594
/*
 * Thaw a CK804/MCP04 port: ack latched interrupts for this port, then
 * re-enable its interrupt bits.
 */
static void nv_ck804_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT;
	u8 mask;

	/* clear pending status before unmasking */
	writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);

	mask = readb(mmio_base + NV_INT_ENABLE_CK804);
	mask |= (NV_INT_MASK << shift);
	writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
}
1607
/*
 * Freeze an MCP55 port: ack any pending interrupts, then mask this
 * port's bits in the 32-bit interrupt-enable register.
 */
static void nv_mcp55_freeze(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask &= ~(NV_INT_ALL_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}
1620
/*
 * Thaw an MCP55 port: ack pending interrupts, then re-enable this
 * port's interrupt bits.
 */
static void nv_mcp55_thaw(struct ata_port *ap)
{
	void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
	int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
	u32 mask;

	writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);

	mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
	mask |= (NV_INT_MASK_MCP55 << shift);
	writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
}
1633
/*
 * ADMA error handler.  If the port is still in ADMA mode: dump the
 * controller/CPB state for any active commands, drop back to register
 * mode, invalidate all CPBs, and pulse the channel-reset bit before
 * delegating to the stock BMDMA error handler.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* dump every CPB that belongs to an active command */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel: assert, flush, short delay, deassert, flush */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}
1689
/*
 * Append a queued command to the SWNCQ defer queue (ring buffer indexed
 * by free-running head/tail counters) and record its tag in defer_bits.
 */
static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	/* queue is full */
	WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
	dq->defer_bits |= (1 << qc->hw_tag);
	dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
}
1700
/*
 * Pop the oldest deferred command from the SWNCQ defer queue, or return
 * NULL when the queue is empty.  The popped slot is poisoned and its
 * defer_bits entry cleared.
 */
static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;
	unsigned int tag;

	if (dq->head == dq->tail)	/* null queue */
		return NULL;

	tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
	dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
	/* defer_bits must agree with the ring contents */
	WARN_ON(!(dq->defer_bits & (1 << tag)));
	dq->defer_bits &= ~(1 << tag);

	return ata_qc_from_tag(ap, tag);
}
1717
/* Reset the per-port FIS tracking bitmaps and NCQ flags. */
static void nv_swncq_fis_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	pp->dhfis_bits = 0;
	pp->dmafis_bits = 0;
	pp->sdbfis_bits = 0;
	pp->ncq_flags = 0;
}
1727
/*
 * Reset all SWNCQ per-port bookkeeping: empty the defer queue, clear
 * the active-command bitmap, and reinitialize the FIS tracking state.
 */
static void nv_swncq_pp_reinit(struct ata_port *ap)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct defer_queue *dq = &pp->defer_queue;

	dq->head = 0;
	dq->tail = 0;
	dq->defer_bits = 0;
	pp->qc_active = 0;
	pp->last_issue_tag = ATA_TAG_POISON;
	nv_swncq_fis_reinit(ap);
}
1740
/* Acknowledge the given FIS interrupt bits in this port's IRQ block. */
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	writew(fis, pp->irq_block);
}
1747
/*
 * Stop BMDMA without a real qc in hand: ata_bmdma_stop() only uses
 * qc->ap, so a stack-allocated dummy qc is sufficient here.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1755
1756static void nv_swncq_ncq_stop(struct ata_port *ap)
1757{
1758 struct nv_swncq_port_priv *pp = ap->private_data;
1759 unsigned int i;
1760 u32 sactive;
1761 u32 done_mask;
1762
Jens Axboee3ed89392018-05-11 12:51:05 -06001763 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
Joe Perchesa9a79df2011-04-15 15:51:59 -07001764 ap->qc_active, ap->link.sactive);
1765 ata_port_err(ap,
Kuan Luof140f0f2007-10-15 15:16:53 -04001766 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1767 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1768 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1769 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1770
Joe Perchesa9a79df2011-04-15 15:51:59 -07001771 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1772 ap->ops->sff_check_status(ap),
1773 ioread8(ap->ioaddr.error_addr));
Kuan Luof140f0f2007-10-15 15:16:53 -04001774
1775 sactive = readl(pp->sactive_block);
1776 done_mask = pp->qc_active ^ sactive;
1777
Joe Perchesa9a79df2011-04-15 15:51:59 -07001778 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
Kuan Luof140f0f2007-10-15 15:16:53 -04001779 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1780 u8 err = 0;
1781 if (pp->qc_active & (1 << i))
1782 err = 0;
1783 else if (done_mask & (1 << i))
1784 err = 1;
1785 else
1786 continue;
1787
Joe Perchesa9a79df2011-04-15 15:51:59 -07001788 ata_port_err(ap,
1789 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1790 (pp->dhfis_bits >> i) & 0x1,
1791 (pp->dmafis_bits >> i) & 0x1,
1792 (pp->sdbfis_bits >> i) & 0x1,
1793 (sactive >> i) & 0x1,
1794 (err ? "error! tag doesn't exit" : " "));
Kuan Luof140f0f2007-10-15 15:16:53 -04001795 }
1796
1797 nv_swncq_pp_reinit(ap);
Tejun Heo5682ed32008-04-07 22:47:16 +09001798 ap->ops->sff_irq_clear(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001799 __ata_bmdma_stop(ap);
1800 nv_swncq_irq_clear(ap, 0xffff);
1801}
1802
/*
 * SWNCQ error handler: if NCQ commands were outstanding, stop the
 * SWNCQ engine and force a reset, then run the stock BMDMA handler.
 */
static void nv_swncq_error_handler(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->link.eh_context;

	if (ap->link.sactive) {
		nv_swncq_ncq_stop(ap);
		ehc->i.action |= ATA_EH_RESET;
	}

	ata_bmdma_error_handler(ap);
}
1814
1815#ifdef CONFIG_PM
/* Suspend: ack and disable all port interrupts, then turn SWNCQ off. */
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* disable irq */
	writel(0, mmio + NV_INT_ENABLE_MCP55);

	/* disable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
	writel(tmp, mmio + NV_CTL_MCP55);

	return 0;
}
1834
/* Resume: ack stale interrupts, re-enable the IRQ mask, re-enable SWNCQ. */
static int nv_swncq_port_resume(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	u32 tmp;

	/* clear irq */
	writel(~0, mmio + NV_INT_STATUS_MCP55);

	/* enable irq */
	writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	return 0;
}
1852#endif
1853
/*
 * One-time MCP55 host setup for SWNCQ: disable the ECO 398 workaround
 * bit in PCI config space, enable SWNCQ on both channels, unmask the
 * interrupt sources, and clear any stale port interrupts.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable  ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	VPRINTK("HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	VPRINTK("HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1879
/*
 * Per-device SCSI slave configuration hook: after the generic libata
 * setup, disable NCQ (queue depth 1) for Maxtor drives on MCP51, and on
 * MCP55 revisions <= 0xa2, where the combination is known problematic.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
		pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}
1925
/*
 * Per-port SWNCQ setup: allocate the devm-managed private data and a
 * per-tag PRD table pool, and record the MMIO sub-blocks this port uses.
 * Resources are device-managed, so error paths need no manual cleanup.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per possible tag */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1954
Jiri Slaby95364f32019-10-31 10:59:45 +01001955static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
Kuan Luof140f0f2007-10-15 15:16:53 -04001956{
1957 if (qc->tf.protocol != ATA_PROT_NCQ) {
Tejun Heof47451c2010-05-10 21:41:40 +02001958 ata_bmdma_qc_prep(qc);
Jiri Slaby95364f32019-10-31 10:59:45 +01001959 return AC_ERR_OK;
Kuan Luof140f0f2007-10-15 15:16:53 -04001960 }
1961
1962 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Jiri Slaby95364f32019-10-31 10:59:45 +01001963 return AC_ERR_OK;
Kuan Luof140f0f2007-10-15 15:16:53 -04001964
1965 nv_swncq_fill_sg(qc);
Jiri Slaby95364f32019-10-31 10:59:45 +01001966
1967 return AC_ERR_OK;
Kuan Luof140f0f2007-10-15 15:16:53 -04001968}
1969
/*
 * Build the per-tag BMDMA PRD table from the command's scatterlist,
 * splitting any segment that would cross a 64KiB boundary (a PRD entry
 * cannot span one), and mark the final entry with ATA_PRD_EOT.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp each chunk so it stays within 64KiB */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
2005
/*
 * Issue one NCQ command: set its tag bit in the SActive shadow block,
 * update the per-tag tracking bitmaps, then load the taskfile and fire
 * the command.  A NULL qc is tolerated and treated as a no-op.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	DPRINTK("Enter\n");

	writel((1 << qc->hw_tag), pp->sactive_block);
	pp->last_issue_tag = qc->hw_tag;
	/* no D2H or DMA-setup FIS seen yet for this tag */
	pp->dhfis_bits &= ~(1 << qc->hw_tag);
	pp->dmafis_bits &= ~(1 << qc->hw_tag);
	pp->qc_active |= (0x1 << qc->hw_tag);

	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	ap->ops->sff_exec_command(ap, &qc->tf);

	DPRINTK("Issued tag %u\n", qc->hw_tag);

	return 0;
}
2029
2030static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2031{
2032 struct ata_port *ap = qc->ap;
2033 struct nv_swncq_port_priv *pp = ap->private_data;
2034
2035 if (qc->tf.protocol != ATA_PROT_NCQ)
Tejun Heo360ff782010-05-10 21:41:42 +02002036 return ata_bmdma_qc_issue(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04002037
2038 DPRINTK("Enter\n");
2039
2040 if (!pp->qc_active)
2041 nv_swncq_issue_atacmd(ap, qc);
2042 else
2043 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2044
2045 return 0;
2046}
2047
/*
 * Handle a hotplug interrupt: clear SError, record plug/unplug in the
 * EH descriptor, and freeze the port so error handling takes over.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2072
/*
 * Handle a Set Device Bits FIS: complete the commands whose SActive
 * bits the device cleared, then either finish up, wait for outstanding
 * D2H FISes, reissue the last command when its D2H FIS never arrived,
 * or pull the next deferred command.  Returns 0 on success, -EINVAL on
 * a BMDMA transfer error (EH reset is scheduled in that case).
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	sactive = readl(pp->sactive_block);
	/* tags the device has retired since we last looked */
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);

	if (!ap->qc_active) {
		DPRINTK("over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	/* some active command is still awaiting its D2H FIS; nothing to do */
	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	DPRINTK("id 0x%x QC: qc_active 0x%x,"
		"SWNCQ:qc_active 0x%X defer_bits %X "
		"dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		ap->print_id, ap->qc_active, pp->qc_active,
		pp->defer_queue.defer_bits, pp->dhfis_bits,
		pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* retry the most recently issued command */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}
2145
2146static inline u32 nv_swncq_tag(struct ata_port *ap)
2147{
2148 struct nv_swncq_port_priv *pp = ap->private_data;
2149 u32 tag;
2150
2151 tag = readb(pp->tag_block) >> 2;
2152 return (tag & 0x1f);
2153}
2154
Tejun Heo752e3862010-06-25 15:02:59 +02002155static void nv_swncq_dmafis(struct ata_port *ap)
Kuan Luof140f0f2007-10-15 15:16:53 -04002156{
2157 struct ata_queued_cmd *qc;
2158 unsigned int rw;
2159 u8 dmactl;
2160 u32 tag;
2161 struct nv_swncq_port_priv *pp = ap->private_data;
2162
2163 __ata_bmdma_stop(ap);
2164 tag = nv_swncq_tag(ap);
2165
2166 DPRINTK("dma setup tag 0x%x\n", tag);
2167 qc = ata_qc_from_tag(ap, tag);
2168
2169 if (unlikely(!qc))
Tejun Heo752e3862010-06-25 15:02:59 +02002170 return;
Kuan Luof140f0f2007-10-15 15:16:53 -04002171
2172 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2173
2174 /* load PRD table addr. */
Jens Axboe4e5b6262018-05-11 12:51:04 -06002175 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
Kuan Luof140f0f2007-10-15 15:16:53 -04002176 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2177
2178 /* specify data direction, triple-check start bit is clear */
2179 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2180 dmactl &= ~ATA_DMA_WR;
2181 if (!rw)
2182 dmactl |= ATA_DMA_WR;
2183
2184 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
Kuan Luof140f0f2007-10-15 15:16:53 -04002185}
2186
/* nv_swncq_host_interrupt - per-port SWNCQ interrupt state machine.
 * @ap:  port the interrupt is for
 * @fis: per-port slice of the MCP55 interrupt status register
 *
 * Decodes the NV_SWNCQ_IRQ_* bits and advances the software-NCQ
 * bookkeeping (dhfis/dmafis/sdbfis bit masks in port private data),
 * completing finished commands, issuing deferred ones and kicking off
 * BMDMA as the device requests.  Any protocol violation or device
 * error freezes the port for EH.  Runs under the host lock from
 * nv_swncq_interrupt().
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	/* ack the interrupt bits before acting on them */
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* port frozen: EH owns it, ignore everything but the ack above */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	/* no commands outstanding — nothing below applies */
	if (!pp->qc_active)
		return;

	/* read-and-clear SError; accumulated into EH info on device error */
	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		DPRINTK("id 0x%x SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			ap->print_id, pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		/* complete finished tags; < 0 means the port must be frozen */
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* D2H register FIS after SDB/backout violates the
			 * expected SWNCQ sequence — hand off to EH.
			 */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			/* drive accepted the command and no DMA pending:
			 * safe to push the next deferred command out
			 */
			if (pp->defer_queue.defer_bits) {
				DPRINTK("send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2285
2286static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2287{
2288 struct ata_host *host = dev_instance;
2289 unsigned int i;
2290 unsigned int handled = 0;
2291 unsigned long flags;
2292 u32 irq_stat;
2293
2294 spin_lock_irqsave(&host->lock, flags);
2295
2296 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2297
2298 for (i = 0; i < host->n_ports; i++) {
2299 struct ata_port *ap = host->ports[i];
2300
Tejun Heo3e4ec342010-05-10 21:41:30 +02002301 if (ap->link.sactive) {
2302 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2303 handled = 1;
2304 } else {
2305 if (irq_stat) /* reserve Hotplug */
2306 nv_swncq_irq_clear(ap, 0xfff0);
Kuan Luof140f0f2007-10-15 15:16:53 -04002307
Tejun Heo3e4ec342010-05-10 21:41:30 +02002308 handled += nv_host_intr(ap, (u8)irq_stat);
Kuan Luof140f0f2007-10-15 15:16:53 -04002309 }
2310 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2311 }
2312
2313 spin_unlock_irqrestore(&host->lock, flags);
2314
2315 return IRQ_RETVAL(handled);
2316}
2317
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002318static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319{
Tejun Heo1626aeb2007-05-04 12:43:58 +02002320 const struct ata_port_info *ppi[] = { NULL, NULL };
Tejun Heo95947192008-03-25 12:22:49 +09002321 struct nv_pi_priv *ipriv;
Tejun Heo9a829cc2007-04-17 23:44:08 +09002322 struct ata_host *host;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002323 struct nv_host_priv *hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 int rc;
2325 u32 bar;
Tejun Heo0d5ff562007-02-01 15:06:36 +09002326 void __iomem *base;
Robert Hancockfbbb2622006-10-27 19:08:41 -07002327 unsigned long type = ent->driver_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328
2329 // Make sure this is a SATA controller by counting the number of bars
2330 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2331 // it's an IDE controller and we ignore it.
Denis Efremovc9c13ba2019-09-28 02:43:08 +03002332 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002333 if (pci_resource_start(pdev, bar) == 0)
2334 return -ENODEV;
2335
Joe Perches06296a12011-04-15 15:52:00 -07002336 ata_print_version_once(&pdev->dev, DRV_VERSION);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002337
Tejun Heo24dc5f32007-01-20 16:00:28 +09002338 rc = pcim_enable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002339 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002340 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002341
Tejun Heo9a829cc2007-04-17 23:44:08 +09002342 /* determine type and allocate host */
Kuan Luof140f0f2007-10-15 15:16:53 -04002343 if (type == CK804 && adma_enabled) {
Joe Perchesa44fec12011-04-15 15:51:58 -07002344 dev_notice(&pdev->dev, "Using ADMA mode\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07002345 type = ADMA;
Tejun Heo2d775702009-01-25 11:29:38 +09002346 } else if (type == MCP5x && swncq_enabled) {
Joe Perchesa44fec12011-04-15 15:51:58 -07002347 dev_notice(&pdev->dev, "Using SWNCQ mode\n");
Tejun Heo2d775702009-01-25 11:29:38 +09002348 type = SWNCQ;
Jeff Garzik360737a2007-10-29 06:49:24 -04002349 }
2350
Tejun Heo1626aeb2007-05-04 12:43:58 +02002351 ppi[0] = &nv_port_info[type];
Tejun Heo95947192008-03-25 12:22:49 +09002352 ipriv = ppi[0]->private_data;
Tejun Heo1c5afdf2010-05-19 22:10:22 +02002353 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
Tejun Heo9a829cc2007-04-17 23:44:08 +09002354 if (rc)
2355 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002356
Tejun Heo24dc5f32007-01-20 16:00:28 +09002357 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002358 if (!hpriv)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002359 return -ENOMEM;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002360 hpriv->type = type;
Tejun Heo9a829cc2007-04-17 23:44:08 +09002361 host->private_data = hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362
Tejun Heo9a829cc2007-04-17 23:44:08 +09002363 /* request and iomap NV_MMIO_BAR */
2364 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2365 if (rc)
2366 return rc;
2367
2368 /* configure SCR access */
2369 base = host->iomap[NV_MMIO_BAR];
2370 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2371 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
Jeff Garzik02cbd922006-03-22 23:59:46 -05002372
Tejun Heoada364e2006-06-17 15:49:56 +09002373 /* enable SATA space for CK804 */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002374 if (type >= CK804) {
Tejun Heoada364e2006-06-17 15:49:56 +09002375 u8 regval;
2376
2377 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2378 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2379 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2380 }
2381
Tejun Heo9a829cc2007-04-17 23:44:08 +09002382 /* init ADMA */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002383 if (type == ADMA) {
Tejun Heo9a829cc2007-04-17 23:44:08 +09002384 rc = nv_adma_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002385 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002386 return rc;
Jeff Garzik360737a2007-10-29 06:49:24 -04002387 } else if (type == SWNCQ)
Kuan Luof140f0f2007-10-15 15:16:53 -04002388 nv_swncq_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002389
Tony Vroon51c89492009-08-06 00:50:09 +01002390 if (msi_enabled) {
Joe Perchesa44fec12011-04-15 15:51:58 -07002391 dev_notice(&pdev->dev, "Using MSI\n");
Tony Vroon51c89492009-08-06 00:50:09 +01002392 pci_enable_msi(pdev);
2393 }
2394
Tejun Heo9a829cc2007-04-17 23:44:08 +09002395 pci_set_master(pdev);
Tejun Heo95cc2c72010-05-14 11:48:50 +02002396 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397}
2398
Bartlomiej Zolnierkiewicz58eb8cd2014-05-07 17:17:44 +02002399#ifdef CONFIG_PM_SLEEP
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002400static int nv_pci_device_resume(struct pci_dev *pdev)
2401{
Jingoo Han0a86e1c2013-06-03 14:05:36 +09002402 struct ata_host *host = pci_get_drvdata(pdev);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002403 struct nv_host_priv *hpriv = host->private_data;
Robert Hancockce053fa2007-02-05 16:26:04 -08002404 int rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002405
Robert Hancockce053fa2007-02-05 16:26:04 -08002406 rc = ata_pci_device_do_resume(pdev);
Jeff Garzikb4479162007-10-25 20:47:30 -04002407 if (rc)
Robert Hancockce053fa2007-02-05 16:26:04 -08002408 return rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002409
2410 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
Jeff Garzikb4479162007-10-25 20:47:30 -04002411 if (hpriv->type >= CK804) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002412 u8 regval;
2413
2414 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2415 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2416 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2417 }
Jeff Garzikb4479162007-10-25 20:47:30 -04002418 if (hpriv->type == ADMA) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002419 u32 tmp32;
2420 struct nv_adma_port_priv *pp;
2421 /* enable/disable ADMA on the ports appropriately */
2422 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2423
2424 pp = host->ports[0]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002425 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002426 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002427 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002428 else
2429 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002430 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002431 pp = host->ports[1]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002432 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002433 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002434 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002435 else
2436 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002437 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002438
2439 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2440 }
2441 }
2442
2443 ata_host_resume(host);
2444
2445 return 0;
2446}
Tejun Heo438ac6d2007-03-02 17:31:26 +09002447#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002448
Jeff Garzikcca39742006-08-24 03:19:22 -04002449static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002450{
Jeff Garzikcca39742006-08-24 03:19:22 -04002451 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002452 u8 regval;
2453
2454 /* disable SATA space for CK804 */
2455 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2456 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2457 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002458}
2459
Robert Hancockfbbb2622006-10-27 19:08:41 -07002460static void nv_adma_host_stop(struct ata_host *host)
2461{
2462 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002463 u32 tmp32;
2464
Robert Hancockfbbb2622006-10-27 19:08:41 -07002465 /* disable ADMA on the ports */
2466 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2467 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2468 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2469 NV_MCP_SATA_CFG_20_PORT1_EN |
2470 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2471
2472 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2473
2474 nv_ck804_host_stop(host);
2475}
2476
Axel Lin2fc75da2012-04-19 13:43:05 +08002477module_pci_driver(nv_pci_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002478
Robert Hancockfbbb2622006-10-27 19:08:41 -07002479module_param_named(adma, adma_enabled, bool, 0444);
Brandon Ehle55f784c2009-03-01 00:02:49 -08002480MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
Kuan Luof140f0f2007-10-15 15:16:53 -04002481module_param_named(swncq, swncq_enabled, bool, 0444);
Zoltan Boszormenyid21279f2008-03-28 14:33:46 -07002482MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
Tony Vroon51c89492009-08-06 00:50:09 +01002483module_param_named(msi, msi_enabled, bool, 0444);
2484MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");