blob: 7f14d0d3105724778982a53cbca56cdd04976a9a [file] [log] [blame]
Thomas Gleixnerc82ee6d2019-05-19 15:51:48 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * sata_nv.c - NVIDIA nForce SATA
4 *
5 * Copyright 2004 NVIDIA Corp. All rights reserved.
6 * Copyright 2004 Andrew Chew
7 *
Jeff Garzikaf36d7f2005-08-28 20:18:39 -04008 * libata documentation is available via 'make {ps|pdf}docs',
Mauro Carvalho Chehab19285f32017-05-14 11:52:56 -03009 * as Documentation/driver-api/libata.rst
Jeff Garzikaf36d7f2005-08-28 20:18:39 -040010 *
11 * No hardware documentation available outside of NVIDIA.
12 * This driver programs the NVIDIA SATA controller in a similar
13 * fashion as with other PCI IDE BMDMA controllers, with a few
14 * NV-specific details such as register offsets, SATA phy location,
15 * hotplug info, etc.
16 *
Robert Hancockfbbb2622006-10-27 19:08:41 -070017 * CK804/MCP04 controllers support an alternate programming interface
18 * similar to the ADMA specification (with some modifications).
19 * This allows the use of NCQ. Non-DMA-mapped ATA commands are still
20 * sent through the legacy interface.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 */
22
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/kernel.h>
24#include <linux/module.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090025#include <linux/gfp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/pci.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/blkdev.h>
28#include <linux/delay.h>
29#include <linux/interrupt.h>
Jeff Garzika9524a72005-10-30 14:39:11 -050030#include <linux/device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <scsi/scsi_host.h>
Robert Hancockfbbb2622006-10-27 19:08:41 -070032#include <scsi/scsi_device.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/libata.h>
Hannes Reineckec206a382021-12-21 08:20:31 +010034#include <trace/events/libata.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
36#define DRV_NAME "sata_nv"
Jeff Garzik2a3103c2007-08-31 04:54:06 -040037#define DRV_VERSION "3.5"
Robert Hancockfbbb2622006-10-27 19:08:41 -070038
39#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL
Linus Torvalds1da177e2005-04-16 15:20:36 -070040
/* Register offsets, PCI config bits and ADMA/SWNCQ constants for the
 * various nForce SATA flavors.  Values are hardware-defined; do not
 * change them. */
enum {
	NV_MMIO_BAR			= 5,

	NV_PORTS			= 2,
	NV_PIO_MASK			= ATA_PIO4,
	NV_MWDMA_MASK			= ATA_MWDMA2,
	NV_UDMA_MASK			= ATA_UDMA6,
	NV_PORT0_SCR_REG_OFFSET		= 0x00,
	NV_PORT1_SCR_REG_OFFSET		= 0x40,

	/* INT_STATUS/ENABLE */
	NV_INT_STATUS			= 0x10,
	NV_INT_ENABLE			= 0x11,
	NV_INT_STATUS_CK804		= 0x440,
	NV_INT_ENABLE_CK804		= 0x441,

	/* INT_STATUS/ENABLE bits */
	NV_INT_DEV			= 0x01,
	NV_INT_PM			= 0x02,
	NV_INT_ADDED			= 0x04,
	NV_INT_REMOVED			= 0x08,

	NV_INT_PORT_SHIFT		= 4,	/* each port occupies 4 bits */

	NV_INT_ALL			= 0x0f,
	NV_INT_MASK			= NV_INT_DEV |
					  NV_INT_ADDED | NV_INT_REMOVED,

	/* INT_CONFIG */
	NV_INT_CONFIG			= 0x12,
	NV_INT_CONFIG_METHD		= 0x01, // 0 = INT, 1 = SMI

	// For PCI config register 20
	NV_MCP_SATA_CFG_20		= 0x50,
	NV_MCP_SATA_CFG_20_SATA_SPACE_EN = 0x04,
	NV_MCP_SATA_CFG_20_PORT0_EN	= (1 << 17),
	NV_MCP_SATA_CFG_20_PORT1_EN	= (1 << 16),
	NV_MCP_SATA_CFG_20_PORT0_PWB_EN	= (1 << 14),
	NV_MCP_SATA_CFG_20_PORT1_PWB_EN	= (1 << 12),

	/* ADMA queue/descriptor geometry */
	NV_ADMA_MAX_CPBS		= 32,
	NV_ADMA_CPB_SZ			= 128,
	NV_ADMA_APRD_SZ			= 16,
	NV_ADMA_SGTBL_LEN		= (1024 - NV_ADMA_CPB_SZ) /
					   NV_ADMA_APRD_SZ,
	NV_ADMA_SGTBL_TOTAL_LEN		= NV_ADMA_SGTBL_LEN + 5,
	NV_ADMA_SGTBL_SZ		= NV_ADMA_SGTBL_LEN * NV_ADMA_APRD_SZ,
	NV_ADMA_PORT_PRIV_DMA_SZ	= NV_ADMA_MAX_CPBS *
					  (NV_ADMA_CPB_SZ + NV_ADMA_SGTBL_SZ),

	/* BAR5 offset to ADMA general registers */
	NV_ADMA_GEN			= 0x400,
	NV_ADMA_GEN_CTL			= 0x00,
	NV_ADMA_NOTIFIER_CLEAR		= 0x30,

	/* BAR5 offset to ADMA ports */
	NV_ADMA_PORT			= 0x480,

	/* size of ADMA port register space  */
	NV_ADMA_PORT_SIZE		= 0x100,

	/* ADMA port registers */
	NV_ADMA_CTL			= 0x40,
	NV_ADMA_CPB_COUNT		= 0x42,
	NV_ADMA_NEXT_CPB_IDX		= 0x43,
	NV_ADMA_STAT			= 0x44,
	NV_ADMA_CPB_BASE_LOW		= 0x48,
	NV_ADMA_CPB_BASE_HIGH		= 0x4C,
	NV_ADMA_APPEND			= 0x50,
	NV_ADMA_NOTIFIER		= 0x68,
	NV_ADMA_NOTIFIER_ERROR		= 0x6C,

	/* NV_ADMA_CTL register bits */
	NV_ADMA_CTL_HOTPLUG_IEN		= (1 << 0),
	NV_ADMA_CTL_CHANNEL_RESET	= (1 << 5),
	NV_ADMA_CTL_GO			= (1 << 7),
	NV_ADMA_CTL_AIEN		= (1 << 8),
	NV_ADMA_CTL_READ_NON_COHERENT	= (1 << 11),
	NV_ADMA_CTL_WRITE_NON_COHERENT	= (1 << 12),

	/* CPB response flag bits */
	NV_CPB_RESP_DONE		= (1 << 0),
	NV_CPB_RESP_ATA_ERR		= (1 << 3),
	NV_CPB_RESP_CMD_ERR		= (1 << 4),
	NV_CPB_RESP_CPB_ERR		= (1 << 7),

	/* CPB control flag bits */
	NV_CPB_CTL_CPB_VALID		= (1 << 0),
	NV_CPB_CTL_QUEUE		= (1 << 1),
	NV_CPB_CTL_APRD_VALID		= (1 << 2),
	NV_CPB_CTL_IEN			= (1 << 3),
	NV_CPB_CTL_FPDMA		= (1 << 4),

	/* APRD flags */
	NV_APRD_WRITE			= (1 << 1),
	NV_APRD_END			= (1 << 2),
	NV_APRD_CONT			= (1 << 3),

	/* NV_ADMA_STAT flags */
	NV_ADMA_STAT_TIMEOUT		= (1 << 0),
	NV_ADMA_STAT_HOTUNPLUG		= (1 << 1),
	NV_ADMA_STAT_HOTPLUG		= (1 << 2),
	NV_ADMA_STAT_CPBERR		= (1 << 4),
	NV_ADMA_STAT_SERROR		= (1 << 5),
	NV_ADMA_STAT_CMD_COMPLETE	= (1 << 6),
	NV_ADMA_STAT_IDLE		= (1 << 8),
	NV_ADMA_STAT_LEGACY		= (1 << 9),
	NV_ADMA_STAT_STOPPED		= (1 << 10),
	NV_ADMA_STAT_DONE		= (1 << 12),
	NV_ADMA_STAT_ERR		= NV_ADMA_STAT_CPBERR |
					  NV_ADMA_STAT_TIMEOUT,

	/* port flags */
	NV_ADMA_PORT_REGISTER_MODE	= (1 << 0),
	NV_ADMA_ATAPI_SETUP_COMPLETE	= (1 << 1),

	/* MCP55 reg offset */
	NV_CTL_MCP55			= 0x400,
	NV_INT_STATUS_MCP55		= 0x440,
	NV_INT_ENABLE_MCP55		= 0x444,
	NV_NCQ_REG_MCP55		= 0x448,

	/* MCP55 */
	NV_INT_ALL_MCP55		= 0xffff,
	NV_INT_PORT_SHIFT_MCP55		= 16,	/* each port occupies 16 bits */
	NV_INT_MASK_MCP55		= NV_INT_ALL_MCP55 & 0xfffd,

	/* SWNCQ ENABLE BITS*/
	NV_CTL_PRI_SWNCQ		= 0x02,
	NV_CTL_SEC_SWNCQ		= 0x04,

	/* SW NCQ status bits*/
	NV_SWNCQ_IRQ_DEV		= (1 << 0),
	NV_SWNCQ_IRQ_PM			= (1 << 1),
	NV_SWNCQ_IRQ_ADDED		= (1 << 2),
	NV_SWNCQ_IRQ_REMOVED		= (1 << 3),

	NV_SWNCQ_IRQ_BACKOUT		= (1 << 4),
	NV_SWNCQ_IRQ_SDBFIS		= (1 << 5),
	NV_SWNCQ_IRQ_DHREGFIS		= (1 << 6),
	NV_SWNCQ_IRQ_DMASETUP		= (1 << 7),

	NV_SWNCQ_IRQ_HOTPLUG		= NV_SWNCQ_IRQ_ADDED |
					  NV_SWNCQ_IRQ_REMOVED,

};
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187
/* ADMA Physical Region Descriptor - one SG segment */
struct nv_adma_prd {
	__le64			addr;		/* segment address (little-endian) */
	__le32			len;		/* segment length */
	u8			flags;		/* NV_APRD_* flags */
	u8			packet_len;
	__le16			reserved;
};
196
/* Control bits carried alongside taskfile register values
 * (NOTE(review): presumably in the CPB tf[] words — confirm against
 * the CPB fill code later in this file). */
enum nv_adma_regbits {
	CMDEND	= (1 << 15),		/* end of command list */
	WNB	= (1 << 14),		/* wait-not-BSY */
	IGN	= (1 << 13),		/* ignore this entry */
	CS1n	= (1 << (4 + 8)),	/* std. PATA signals follow... */
	DA2	= (1 << (2 + 8)),
	DA1	= (1 << (1 + 8)),
	DA0	= (1 << (0 + 8)),
};
206
/* ADMA Command Parameter Block
   The first 5 SG segments are stored inside the Command Parameter Block itself.
   If there are more than 5 segments the remainder are stored in a separate
   memory area indicated by next_aprd.
   Layout is hardware-defined (128 bytes, see NV_ADMA_CPB_SZ); the trailing
   offsets in the per-field comments are byte offsets within the block. */
struct nv_adma_cpb {
	u8			resp_flags;    /* 0, NV_CPB_RESP_* bits */
	u8			reserved1;     /* 1 */
	u8			ctl_flags;     /* 2, NV_CPB_CTL_* bits */
	/* len is length of taskfile in 64 bit words */
	u8			len;		/* 3  */
	u8			tag;           /* 4 */
	u8			next_cpb_idx;  /* 5 */
	__le16			reserved2;     /* 6-7 */
	__le16			tf[12];        /* 8-31 */
	struct nv_adma_prd	aprd[5];       /* 32-111 */
	__le64			next_aprd;     /* 112-119 */
	__le64			reserved3;     /* 120-127 */
};
225
226
/* Per-port private state for the ADMA flavor. */
struct nv_adma_port_priv {
	struct nv_adma_cpb	*cpb;		/* command parameter blocks */
	dma_addr_t		cpb_dma;	/* and their DMA mapping */
	struct nv_adma_prd	*aprd;		/* overflow SG descriptors */
	dma_addr_t		aprd_dma;	/* and their DMA mapping */
	void __iomem		*ctl_block;	/* port ADMA registers (NV_ADMA_CTL etc.) */
	void __iomem		*gen_block;	/* ADMA general registers */
	void __iomem		*notifier_clear_block;
	u64			adma_dma_mask;	/* DMA mask used while in ADMA mode */
	u8			flags;		/* NV_ADMA_PORT_REGISTER_MODE etc. */
	int			last_issue_ncq;	/* presumably: last issued cmd was NCQ — see qc_issue */
};
239
/* Per-host private state; type is presumably one of enum nv_host_type
 * (NOTE(review): confirm against nv_init_one, not visible here). */
struct nv_host_priv {
	unsigned long		type;
};
243
/* FIFO of deferred command tags for SWNCQ (see nv_swncq_port_priv). */
struct defer_queue {
	u32		defer_bits;		/* bitmap of queued tags */
	unsigned int	head;
	unsigned int	tail;
	unsigned int	tag[ATA_MAX_QUEUE];	/* circular tag storage */
};
250
/* Which FIS types have been observed during SWNCQ interrupt analysis. */
enum ncq_saw_flag_list {
	ncq_saw_d2h	= (1U << 0),	/* D2H register FIS seen */
	ncq_saw_dmas	= (1U << 1),	/* DMA setup FIS seen */
	ncq_saw_sdb	= (1U << 2),	/* set device bits FIS seen */
	ncq_saw_backout	= (1U << 3),
};
257
/* Per-port private state for the software-NCQ (MCP5x) flavor. */
struct nv_swncq_port_priv {
	struct ata_bmdma_prd *prd;	 /* our SG list */
	dma_addr_t	prd_dma; /* and its DMA mapping */
	void __iomem	*sactive_block;
	void __iomem	*irq_block;
	void __iomem	*tag_block;
	u32		qc_active;	/* bitmap of active commands */

	unsigned int	last_issue_tag;

	/* fifo circular queue to store deferral command */
	struct defer_queue defer_queue;

	/* for NCQ interrupt analysis */
	u32		dhfis_bits;	/* tags that received a D2H register FIS */
	u32		dmafis_bits;	/* tags that received a DMA setup FIS */
	u32		sdbfis_bits;

	unsigned int	ncq_flags;	/* enum ncq_saw_flag_list bits */
};
278
279
Jeff Garzik5796d1c2007-10-26 00:03:37 -0400280#define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & (1 << (19 + (12 * (PORT)))))
Robert Hancockfbbb2622006-10-27 19:08:41 -0700281
/* --- forward declarations --- */

/* probe / PM / common interrupt and SCR access */
static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
#ifdef CONFIG_PM_SLEEP
static int nv_pci_device_resume(struct pci_dev *pdev);
#endif
static void nv_ck804_host_stop(struct ata_host *host);
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance);
static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance);
static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);

/* reset, freeze/thaw and the ADMA (CK804/MCP04) flavor */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline);
static void nv_nf2_freeze(struct ata_port *ap);
static void nv_nf2_thaw(struct ata_port *ap);
static void nv_ck804_freeze(struct ata_port *ap);
static void nv_ck804_thaw(struct ata_port *ap);
static int nv_adma_slave_config(struct scsi_device *sdev);
static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc);
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc);
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc);
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance);
static void nv_adma_irq_clear(struct ata_port *ap);
static int nv_adma_port_start(struct ata_port *ap);
static void nv_adma_port_stop(struct ata_port *ap);
#ifdef CONFIG_PM
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_adma_port_resume(struct ata_port *ap);
#endif
static void nv_adma_freeze(struct ata_port *ap);
static void nv_adma_thaw(struct ata_port *ap);
static void nv_adma_error_handler(struct ata_port *ap);
static void nv_adma_host_stop(struct ata_host *host);
static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc);
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf);

/* SWNCQ (MCP5x) flavor */
static void nv_mcp55_thaw(struct ata_port *ap);
static void nv_mcp55_freeze(struct ata_port *ap);
static void nv_swncq_error_handler(struct ata_port *ap);
static int nv_swncq_slave_config(struct scsi_device *sdev);
static int nv_swncq_port_start(struct ata_port *ap);
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc);
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc);
static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc);
static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis);
static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance);
#ifdef CONFIG_PM
static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg);
static int nv_swncq_port_resume(struct ata_port *ap);
#endif
332
/* Controller flavors; used as driver_data in nv_pci_tbl and as index
 * into nv_port_info[] below (the two must stay in the same order). */
enum nv_host_type
{
	GENERIC,
	NFORCE2,
	NFORCE3 = NFORCE2,	/* NF2 == NF3 as far as sata_nv is concerned */
	CK804,
	ADMA,
	MCP5x,
	SWNCQ,
};
343
/* PCI IDs handled by this driver; driver_data is the nv_host_type. */
static const struct pci_device_id nv_pci_tbl[] = {
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA), NFORCE2 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2), NFORCE3 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2), CK804 },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2), MCP5x },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2), GENERIC },
	{ PCI_VDEVICE(NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3), GENERIC },

	{ } /* terminate list */
};
362
/* PCI driver glue; suspend/resume only when CONFIG_PM_SLEEP is set. */
static struct pci_driver nv_pci_driver = {
	.name			= DRV_NAME,
	.id_table		= nv_pci_tbl,
	.probe			= nv_init_one,
#ifdef CONFIG_PM_SLEEP
	.suspend		= ata_pci_device_suspend,
	.resume			= nv_pci_device_resume,
#endif
	.remove			= ata_pci_remove_one,
};
373
/* Stock BMDMA SCSI host template for the non-NCQ flavors. */
static struct scsi_host_template nv_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
};
377
/* Host template for the ADMA flavor: NCQ-capable, with ADMA-specific
 * queue depth, SG table size and DMA boundary. */
static struct scsi_host_template nv_adma_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= NV_ADMA_MAX_CPBS,
	.sg_tablesize		= NV_ADMA_SGTBL_TOTAL_LEN,
	.dma_boundary		= NV_ADMA_DMA_BOUNDARY,
	.slave_configure	= nv_adma_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth     = ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};
388
/* Host template for the software-NCQ (MCP5x) flavor. */
static struct scsi_host_template nv_swncq_sht = {
	__ATA_BASE_SHT(DRV_NAME),
	.can_queue		= ATA_MAX_QUEUE - 1,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.dma_boundary		= ATA_DMA_BOUNDARY,
	.slave_configure	= nv_swncq_slave_config,
	.sdev_groups		= ata_ncq_sdev_groups,
	.change_queue_depth     = ata_scsi_change_queue_depth,
	.tag_alloc_policy	= BLK_TAG_ALLOC_RR,
};
399
Tejun Heo7f4774b2009-06-10 16:29:07 +0900400/*
401 * NV SATA controllers have various different problems with hardreset
402 * protocol depending on the specific controller and device.
403 *
404 * GENERIC:
405 *
406 * bko11195 reports that link doesn't come online after hardreset on
407 * generic nv's and there have been several other similar reports on
408 * linux-ide.
409 *
410 * bko12351#c23 reports that warmplug on MCP61 doesn't work with
411 * softreset.
412 *
413 * NF2/3:
414 *
415 * bko3352 reports nf2/3 controllers can't determine device signature
416 * reliably after hardreset. The following thread reports detection
417 * failure on cold boot with the standard debouncing timing.
418 *
419 * http://thread.gmane.org/gmane.linux.ide/34098
420 *
421 * bko12176 reports that hardreset fails to bring up the link during
422 * boot on nf2.
423 *
424 * CK804:
425 *
426 * For initial probing after boot and hot plugging, hardreset mostly
427 * works fine on CK804 but curiously, reprobing on the initial port
428 * by rescanning or rmmod/insmod fails to acquire the initial D2H Reg
 * FIS in a somewhat nondeterministic way.
430 *
431 * SWNCQ:
432 *
433 * bko12351 reports that when SWNCQ is enabled, for hotplug to work,
434 * hardreset should be used and hardreset can't report proper
435 * signature, which suggests that mcp5x is closer to nf2 as long as
436 * reset quirkiness is concerned.
437 *
438 * bko12703 reports that boot probing fails for intel SSD with
439 * hardreset. Link fails to come online. Softreset works fine.
440 *
441 * The failures are varied but the following patterns seem true for
442 * all flavors.
443 *
444 * - Softreset during boot always works.
445 *
446 * - Hardreset during boot sometimes fails to bring up the link on
 * certain combinations and device signature acquisition is
448 * unreliable.
449 *
450 * - Hardreset is often necessary after hotplug.
451 *
452 * So, preferring softreset for boot probing and error handling (as
453 * hardreset might bring down the link) but using hardreset for
454 * post-boot probing should work around the above issues in most
455 * cases. Define nv_hardreset() which only kicks in for post-boot
456 * probing and use it for all variants.
457 */
/* Base ops for all flavors: stock BMDMA plus NV SCR access and the
 * post-boot-only hardreset described in the comment block above. */
static struct ata_port_operations nv_generic_ops = {
	.inherits		= &ata_bmdma_port_ops,
	.lost_interrupt		= ATA_OP_NULL,
	.scr_read		= nv_scr_read,
	.scr_write		= nv_scr_write,
	.hardreset		= nv_hardreset,
};
465
/* nForce2/3: generic ops with NF2-specific freeze/thaw. */
static struct ata_port_operations nv_nf2_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_nf2_freeze,
	.thaw			= nv_nf2_thaw,
};
471
/* CK804: generic ops with CK804-specific freeze/thaw and host_stop. */
static struct ata_port_operations nv_ck804_ops = {
	.inherits		= &nv_generic_ops,
	.freeze			= nv_ck804_freeze,
	.thaw			= nv_ck804_thaw,
	.host_stop		= nv_ck804_host_stop,
};
478
/* ADMA (CK804/MCP04 alternate interface): builds on the CK804 ops and
 * overrides command issue, interrupt handling, EH and port lifetime. */
static struct ata_port_operations nv_adma_ops = {
	.inherits		= &nv_ck804_ops,

	.check_atapi_dma	= nv_adma_check_atapi_dma,
	.sff_tf_read		= nv_adma_tf_read,
	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_adma_qc_prep,
	.qc_issue		= nv_adma_qc_issue,
	.sff_irq_clear		= nv_adma_irq_clear,

	.freeze			= nv_adma_freeze,
	.thaw			= nv_adma_thaw,
	.error_handler		= nv_adma_error_handler,
	.post_internal_cmd	= nv_adma_post_internal_cmd,

	.port_start		= nv_adma_port_start,
	.port_stop		= nv_adma_port_stop,
#ifdef CONFIG_PM
	.port_suspend		= nv_adma_port_suspend,
	.port_resume		= nv_adma_port_resume,
#endif
	.host_stop		= nv_adma_host_stop,
};
502
/* SWNCQ (MCP5x): generic ops plus software-NCQ command issue, MCP55
 * freeze/thaw and SWNCQ-specific EH and port setup. */
static struct ata_port_operations nv_swncq_ops = {
	.inherits		= &nv_generic_ops,

	.qc_defer		= ata_std_qc_defer,
	.qc_prep		= nv_swncq_qc_prep,
	.qc_issue		= nv_swncq_qc_issue,

	.freeze			= nv_mcp55_freeze,
	.thaw			= nv_mcp55_thaw,
	.error_handler		= nv_swncq_error_handler,

#ifdef CONFIG_PM
	.port_suspend		= nv_swncq_port_suspend,
	.port_resume		= nv_swncq_port_resume,
#endif
	.port_start		= nv_swncq_port_start,
};
520
/* Per-flavor interrupt handler and SCSI host template, stashed in
 * ata_port_info.private_data (see nv_port_info[] below). */
struct nv_pi_priv {
	irq_handler_t			irq_handler;
	struct scsi_host_template	*sht;
};

/* Build an anonymous nv_pi_priv for use in a static initializer. */
#define NV_PI_PRIV(_irq_handler, _sht) \
	&(struct nv_pi_priv){ .irq_handler = _irq_handler, .sht = _sht }
528
/* Per-flavor port info, indexed by enum nv_host_type. */
static const struct ata_port_info nv_port_info[] = {
	/* generic */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* nforce2/3 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_nf2_ops,
		.private_data	= NV_PI_PRIV(nv_nf2_interrupt, &nv_sht),
	},
	/* ck804 */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_ck804_ops,
		.private_data	= NV_PI_PRIV(nv_ck804_interrupt, &nv_sht),
	},
	/* ADMA */
	{
		.flags		= ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_adma_ops,
		.private_data	= NV_PI_PRIV(nv_adma_interrupt, &nv_adma_sht),
	},
	/* MCP5x */
	{
		.flags		= ATA_FLAG_SATA,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_generic_ops,
		.private_data	= NV_PI_PRIV(nv_generic_interrupt, &nv_sht),
	},
	/* SWNCQ */
	{
		.flags	        = ATA_FLAG_SATA | ATA_FLAG_NCQ,
		.pio_mask	= NV_PIO_MASK,
		.mwdma_mask	= NV_MWDMA_MASK,
		.udma_mask	= NV_UDMA_MASK,
		.port_ops	= &nv_swncq_ops,
		.private_data	= NV_PI_PRIV(nv_swncq_interrupt, &nv_swncq_sht),
	},
};
585
586MODULE_AUTHOR("NVIDIA");
587MODULE_DESCRIPTION("low-level driver for NVIDIA nForce SATA controller");
588MODULE_LICENSE("GPL");
589MODULE_DEVICE_TABLE(pci, nv_pci_tbl);
590MODULE_VERSION(DRV_VERSION);
591
/* Feature switches (presumably exposed as module parameters further
 * down the file — NOTE(review): confirm the module_param() calls). */
static bool adma_enabled;		/* ADMA off by default */
static bool swncq_enabled = true;	/* software NCQ on by default */
static bool msi_enabled;		/* MSI off by default */
Robert Hancockfbbb2622006-10-27 19:08:41 -0700595
Robert Hancock2dec7552006-11-26 14:20:19 -0600596static void nv_adma_register_mode(struct ata_port *ap)
597{
Robert Hancock2dec7552006-11-26 14:20:19 -0600598 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600599 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800600 u16 tmp, status;
601 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600602
603 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
604 return;
605
Robert Hancocka2cfe812007-02-05 16:26:03 -0800606 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400607 while (!(status & NV_ADMA_STAT_IDLE) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800608 ndelay(50);
609 status = readw(mmio + NV_ADMA_STAT);
610 count++;
611 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400612 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700613 ata_port_warn(ap, "timeout waiting for ADMA IDLE, stat=0x%hx\n",
614 status);
Robert Hancocka2cfe812007-02-05 16:26:03 -0800615
Robert Hancock2dec7552006-11-26 14:20:19 -0600616 tmp = readw(mmio + NV_ADMA_CTL);
617 writew(tmp & ~NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
618
Robert Hancocka2cfe812007-02-05 16:26:03 -0800619 count = 0;
620 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400621 while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) {
Robert Hancocka2cfe812007-02-05 16:26:03 -0800622 ndelay(50);
623 status = readw(mmio + NV_ADMA_STAT);
624 count++;
625 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400626 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700627 ata_port_warn(ap,
628 "timeout waiting for ADMA LEGACY, stat=0x%hx\n",
629 status);
Robert Hancocka2cfe812007-02-05 16:26:03 -0800630
Robert Hancock2dec7552006-11-26 14:20:19 -0600631 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
632}
633
634static void nv_adma_mode(struct ata_port *ap)
635{
Robert Hancock2dec7552006-11-26 14:20:19 -0600636 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -0600637 void __iomem *mmio = pp->ctl_block;
Robert Hancocka2cfe812007-02-05 16:26:03 -0800638 u16 tmp, status;
639 int count = 0;
Robert Hancock2dec7552006-11-26 14:20:19 -0600640
641 if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE))
642 return;
Jeff Garzikf20b16f2006-12-11 11:14:06 -0500643
Robert Hancock2dec7552006-11-26 14:20:19 -0600644 WARN_ON(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
645
646 tmp = readw(mmio + NV_ADMA_CTL);
647 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL);
648
Robert Hancocka2cfe812007-02-05 16:26:03 -0800649 status = readw(mmio + NV_ADMA_STAT);
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400650 while (((status & NV_ADMA_STAT_LEGACY) ||
Robert Hancocka2cfe812007-02-05 16:26:03 -0800651 !(status & NV_ADMA_STAT_IDLE)) && count < 20) {
652 ndelay(50);
653 status = readw(mmio + NV_ADMA_STAT);
654 count++;
655 }
Jeff Garzik2dcb4072007-10-19 06:42:56 -0400656 if (count == 20)
Joe Perchesa9a79df2011-04-15 15:51:59 -0700657 ata_port_warn(ap,
Robert Hancocka2cfe812007-02-05 16:26:03 -0800658 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n",
659 status);
660
Robert Hancock2dec7552006-11-26 14:20:19 -0600661 pp->flags &= ~NV_ADMA_PORT_REGISTER_MODE;
662}
663
/*
 * nv_adma_slave_config - per-device SCSI slave configuration hook.
 *
 * Chooses between the ADMA engine and the legacy BMDMA interface for the
 * device being configured.  ATAPI devices cannot use ADMA, so for them the
 * port is dropped back to register mode, ADMA is disabled in PCI config
 * space, and the DMA limits are tightened to what the legacy engine
 * supports (32-bit addressing, smaller s/g table).
 *
 * Returns the result of ata_scsi_slave_config() (possibly overridden by a
 * dma_set_mask() failure code).
 */
static int nv_adma_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct nv_adma_port_priv *pp = ap->private_data;
	struct nv_adma_port_priv *port0, *port1;
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	unsigned long segment_boundary, flags;
	unsigned short sg_tablesize;
	int rc;
	int adma_enable;
	u32 current_reg, new_reg, config_mask;

	rc = ata_scsi_slave_config(sdev);

	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	/* Hold the port lock across the mode decision, the PCI config
	   update and the DMA-mask change so they stay consistent. */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->link.device[sdev->id].class == ATA_DEV_ATAPI) {
		/*
		 * NVIDIA reports that ADMA mode does not support ATAPI commands.
		 * Therefore ATAPI commands are sent through the legacy interface.
		 * However, the legacy interface only supports 32-bit DMA.
		 * Restrict DMA parameters as required by the legacy interface
		 * when an ATAPI device is connected.
		 */
		segment_boundary = ATA_DMA_BOUNDARY;
		/* Subtract 1 since an extra entry may be needed for padding, see
		   libata-scsi.c */
		sg_tablesize = LIBATA_MAX_PRD - 1;

		/* Since the legacy DMA engine is in use, we need to disable ADMA
		   on the port. */
		adma_enable = 0;
		nv_adma_register_mode(ap);
	} else {
		segment_boundary = NV_ADMA_DMA_BOUNDARY;
		sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN;
		adma_enable = 1;
	}

	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg);

	/* Each port has its own enable / posted-write-buffer enable bits
	   in the shared CFG_20 register. */
	if (ap->port_no == 1)
		config_mask = NV_MCP_SATA_CFG_20_PORT1_EN |
			      NV_MCP_SATA_CFG_20_PORT1_PWB_EN;
	else
		config_mask = NV_MCP_SATA_CFG_20_PORT0_EN |
			      NV_MCP_SATA_CFG_20_PORT0_PWB_EN;

	if (adma_enable) {
		new_reg = current_reg | config_mask;
		pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE;
	} else {
		new_reg = current_reg & ~config_mask;
		pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE;
	}

	/* avoid a config write if nothing changed */
	if (current_reg != new_reg)
		pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg);

	port0 = ap->host->ports[0]->private_data;
	port1 = ap->host->ports[1]->private_data;
	if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
	    (port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
		/*
		 * We have to set the DMA mask to 32-bit if either port is in
		 * ATAPI mode, since they are on the same PCI device which is
		 * used for DMA mapping. If either SCSI device is not allocated
		 * yet, it's OK since that port will discover its correct
		 * setting when it does get allocated.
		 */
		rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	} else {
		rc = dma_set_mask(&pdev->dev, pp->adma_dma_mask);
	}

	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
	ata_port_info(ap,
		      "DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
		      (unsigned long long)*ap->host->dev->dma_mask,
		      segment_boundary, sg_tablesize);

	spin_unlock_irqrestore(ap->lock, flags);

	return rc;
}
754
Robert Hancock2dec7552006-11-26 14:20:19 -0600755static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc)
756{
757 struct nv_adma_port_priv *pp = qc->ap->private_data;
758 return !(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE);
759}
760
/*
 * nv_adma_tf_read - read the taskfile registers.
 *
 * Drops the port into register mode first: the taskfile registers are
 * only meaningful through the legacy interface.
 */
static void nv_adma_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	/* Other than when internal or pass-through commands are executed,
	   the only time this function will be called in ADMA mode will be
	   if a command fails. In the failure case we don't care about going
	   into register mode with ADMA commands pending, as the commands will
	   all shortly be aborted anyway. We assume that NCQ commands are not
	   issued via passthrough, which is the only way that switching into
	   ADMA mode could abort outstanding commands. */
	nv_adma_register_mode(ap);

	ata_sff_tf_read(ap, tf);
}
774
/*
 * nv_adma_tf_to_cpb - encode an ATA taskfile into a CPB register list.
 * @tf:  taskfile to encode
 * @cpb: destination array of little-endian (register << 8 | value) words
 *
 * Each entry packs a shadow-register index in the high byte and its value
 * in the low byte; WNB/CMDEND/IGN are controller flag bits.  The HOB
 * (LBA48) values are written before the low-order values so the device
 * latches them first.  Unused slots are padded with IGN up to the fixed
 * 12-entry list.
 *
 * Returns the number of entries written (always 12 including padding).
 */
static unsigned int nv_adma_tf_to_cpb(struct ata_taskfile *tf, __le16 *cpb)
{
	unsigned int idx = 0;

	if (tf->flags & ATA_TFLAG_ISADDR) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB);
			cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->hob_lbal);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->hob_lbam);
			cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->hob_lbah);
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature);
		} else
			cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->feature | WNB);

		cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->nsect);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAL << 8) | tf->lbal);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAM << 8) | tf->lbam);
		cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE)
		cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device);

	/* command register entry terminates the list */
	cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND);

	while (idx < 12)
		cpb[idx++] = cpu_to_le16(IGN);

	return idx;
}
806
/*
 * nv_adma_check_cpb - inspect one CPB's response flags for completion/error.
 * @ap:        port the CPB belongs to
 * @cpb_num:   tag / CPB index to check
 * @force_err: nonzero if the notifier-error register flagged this CPB
 *
 * Returns 1 if the command completed successfully, 0 if it is still
 * outstanding, and -1 if an error was detected (in which case all
 * commands on the port have been aborted or the port frozen, so the
 * caller must stop scanning CPBs).
 */
static int nv_adma_check_cpb(struct ata_port *ap, int cpb_num, int force_err)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	u8 flags = pp->cpb[cpb_num].resp_flags;

	ata_port_dbg(ap, "CPB %d, flags=0x%x\n", cpb_num, flags);

	if (unlikely((force_err ||
		     flags & (NV_CPB_RESP_ATA_ERR |
			      NV_CPB_RESP_CMD_ERR |
			      NV_CPB_RESP_CPB_ERR)))) {
		struct ata_eh_info *ehi = &ap->link.eh_info;
		int freeze = 0;

		ata_ehi_clear_desc(ehi);
		__ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags);
		if (flags & NV_CPB_RESP_ATA_ERR) {
			ata_ehi_push_desc(ehi, "ATA error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CMD_ERR) {
			ata_ehi_push_desc(ehi, "CMD error");
			ehi->err_mask |= AC_ERR_DEV;
		} else if (flags & NV_CPB_RESP_CPB_ERR) {
			/* controller-side CPB error: freeze, not just abort */
			ata_ehi_push_desc(ehi, "CPB error");
			ehi->err_mask |= AC_ERR_SYSTEM;
			freeze = 1;
		} else {
			/* notifier error, but no error in CPB flags? */
			ata_ehi_push_desc(ehi, "unknown");
			ehi->err_mask |= AC_ERR_OTHER;
			freeze = 1;
		}
		/* Kill all commands. EH will determine what actually failed. */
		if (freeze)
			ata_port_freeze(ap);
		else
			ata_port_abort(ap);
		return -1;
	}

	if (likely(flags & NV_CPB_RESP_DONE))
		return 1;
	return 0;
}
851
/*
 * nv_host_intr - handle a legacy (non-ADMA) interrupt for one port.
 * @ap:       port to service
 * @irq_stat: per-port slice of the CK804 interrupt status register
 *
 * Returns nonzero if the interrupt was handled (including the hotplug
 * and spurious-interrupt cases), 0 if it was not ours.
 */
static int nv_host_intr(struct ata_port *ap, u8 irq_stat)
{
	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);

	/* freeze if hotplugged */
	if (unlikely(irq_stat & (NV_INT_ADDED | NV_INT_REMOVED))) {
		ata_port_freeze(ap);
		return 1;
	}

	/* bail out if not our interrupt */
	if (!(irq_stat & NV_INT_DEV))
		return 0;

	/* DEV interrupt w/ no active qc? */
	if (unlikely(!qc || (qc->tf.flags & ATA_TFLAG_POLLING))) {
		/* clear the interrupt by reading status */
		ata_sff_check_status(ap);
		return 1;
	}

	/* handle interrupt */
	return ata_bmdma_port_intr(ap, qc);
}
875
/*
 * nv_adma_interrupt - top-level IRQ handler for ADMA-capable controllers.
 *
 * For each port: dispatch to the legacy handler when ADMA is disabled or
 * the port is in register mode, otherwise read and acknowledge the ADMA
 * notifier/status registers, handle hotplug/controller errors by freezing
 * the port, and complete all commands whose CPBs report done.
 *
 * Runs under host->lock.  Returns IRQ_HANDLED if any port had work.
 */
static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	int i, handled = 0;
	u32 notifier_clears[2];

	spin_lock(&host->lock);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct nv_adma_port_priv *pp = ap->private_data;
		void __iomem *mmio = pp->ctl_block;
		u16 status;
		u32 gen_ctl;
		u32 notifier, notifier_error;

		notifier_clears[i] = 0;

		/* if ADMA is disabled, use standard ata interrupt handler */
		if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			handled += nv_host_intr(ap, irq_stat);
			continue;
		}

		/* if in ATA register mode, check for standard interrupts */
		if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
			u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
				>> (NV_INT_PORT_SHIFT * i);
			if (ata_tag_valid(ap->link.active_tag))
				/** NV_INT_DEV indication seems unreliable
				    at times at least in ADMA mode. Force it
				    on always when a command is active, to
				    prevent losing interrupts. */
				irq_stat |= NV_INT_DEV;
			handled += nv_host_intr(ap, irq_stat);
		}

		notifier = readl(mmio + NV_ADMA_NOTIFIER);
		notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
		notifier_clears[i] = notifier | notifier_error;

		gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);

		if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier &&
		    !notifier_error)
			/* Nothing to do */
			continue;

		status = readw(mmio + NV_ADMA_STAT);

		/*
		 * Clear status. Ensure the controller sees the
		 * clearing before we start looking at any of the CPB
		 * statuses, so that any CPB completions after this
		 * point in the handler will raise another interrupt.
		 */
		writew(status, mmio + NV_ADMA_STAT);
		readw(mmio + NV_ADMA_STAT); /* flush posted write */
		rmb();

		handled++; /* irq handled if we got here */

		/* freeze if hotplugged or controller error */
		if (unlikely(status & (NV_ADMA_STAT_HOTPLUG |
				       NV_ADMA_STAT_HOTUNPLUG |
				       NV_ADMA_STAT_TIMEOUT |
				       NV_ADMA_STAT_SERROR))) {
			struct ata_eh_info *ehi = &ap->link.eh_info;

			ata_ehi_clear_desc(ehi);
			__ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status);
			if (status & NV_ADMA_STAT_TIMEOUT) {
				ehi->err_mask |= AC_ERR_SYSTEM;
				ata_ehi_push_desc(ehi, "timeout");
			} else if (status & NV_ADMA_STAT_HOTPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hotplug");
			} else if (status & NV_ADMA_STAT_HOTUNPLUG) {
				ata_ehi_hotplugged(ehi);
				ata_ehi_push_desc(ehi, "hot unplug");
			} else if (status & NV_ADMA_STAT_SERROR) {
				/* let EH analyze SError and figure out cause */
				ata_ehi_push_desc(ehi, "SError");
			} else
				ata_ehi_push_desc(ehi, "unknown");
			ata_port_freeze(ap);
			continue;
		}

		if (status & (NV_ADMA_STAT_DONE |
			      NV_ADMA_STAT_CPBERR |
			      NV_ADMA_STAT_CMD_COMPLETE)) {
			u32 check_commands = notifier_clears[i];
			u32 done_mask = 0;
			int pos, rc;

			if (status & NV_ADMA_STAT_CPBERR) {
				/* check all active commands */
				if (ata_tag_valid(ap->link.active_tag))
					check_commands = 1 <<
						ap->link.active_tag;
				else
					check_commands = ap->link.sactive;
			}

			/* check CPBs for completed commands */
			while ((pos = ffs(check_commands))) {
				pos--;
				rc = nv_adma_check_cpb(ap, pos,
						notifier_error & (1 << pos));
				if (rc > 0)
					done_mask |= 1 << pos;
				else if (unlikely(rc < 0))
					/* error: stop scanning, port was
					   aborted/frozen by check_cpb */
					check_commands = 0;
				check_commands &= ~(1 << pos);
			}
			ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
		}
	}

	if (notifier_clears[0] || notifier_clears[1]) {
		/* Note: Both notifier clear registers must be written
		   if either is set, even if one is zero, according to NVIDIA. */
		struct nv_adma_port_priv *pp = host->ports[0]->private_data;
		writel(notifier_clears[0], pp->notifier_clear_block);
		pp = host->ports[1]->private_data;
		writel(notifier_clears[1], pp->notifier_clear_block);
	}

	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}
1011
/*
 * nv_adma_freeze - mask port interrupts for error handling.
 *
 * Performs the CK804 freeze, then (unless the port runs the legacy
 * interface) also acks pending CK804 notifications and masks the ADMA
 * interrupt enables.
 */
static void nv_adma_freeze(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u16 tmp;

	nv_ck804_freeze(ap);

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
		return;

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* Disable interrupt */
	tmp = readw(mmio + NV_ADMA_CTL);
	writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
		mmio + NV_ADMA_CTL);
	readw(mmio + NV_ADMA_CTL);	/* flush posted write */
}
1033
1034static void nv_adma_thaw(struct ata_port *ap)
1035{
1036 struct nv_adma_port_priv *pp = ap->private_data;
1037 void __iomem *mmio = pp->ctl_block;
1038 u16 tmp;
1039
1040 nv_ck804_thaw(ap);
1041
1042 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
1043 return;
1044
1045 /* Enable interrupt */
1046 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik2dcb4072007-10-19 06:42:56 -04001047 writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN),
Robert Hancock53014e22007-05-05 15:36:36 -06001048 mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001049 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancock53014e22007-05-05 15:36:36 -06001050}
1051
/*
 * nv_adma_irq_clear - clear all pending interrupt/notifier state.
 *
 * In legacy mode this is just the BMDMA irq clear.  In ADMA mode it acks
 * the CK804 notifications, clears the ADMA status register, and clears
 * the notifier registers — note the hardware requires BOTH ports'
 * notifier-clear registers to be written even when clearing only one.
 */
static void nv_adma_irq_clear(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	u32 notifier_clears[2];

	if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) {
		ata_bmdma_irq_clear(ap);
		return;
	}

	/* clear any outstanding CK804 notifications */
	writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT),
		ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);

	/* clear ADMA status */
	writew(0xffff, mmio + NV_ADMA_STAT);

	/* clear notifiers - note both ports need to be written with
	   something even though we are only clearing on one */
	if (ap->port_no == 0) {
		notifier_clears[0] = 0xFFFFFFFF;
		notifier_clears[1] = 0;
	} else {
		notifier_clears[0] = 0;
		notifier_clears[1] = 0xFFFFFFFF;
	}
	pp = ap->host->ports[0]->private_data;
	writel(notifier_clears[0], pp->notifier_clear_block);
	pp = ap->host->ports[1]->private_data;
	writel(notifier_clears[1], pp->notifier_clear_block);
}
1084
Robert Hancockf5ecac22007-02-20 21:49:10 -06001085static void nv_adma_post_internal_cmd(struct ata_queued_cmd *qc)
Robert Hancockfbbb2622006-10-27 19:08:41 -07001086{
Robert Hancockf5ecac22007-02-20 21:49:10 -06001087 struct nv_adma_port_priv *pp = qc->ap->private_data;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001088
Jeff Garzikb4479162007-10-25 20:47:30 -04001089 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE)
Tejun Heofe06e5f2010-05-10 21:41:39 +02001090 ata_bmdma_post_internal_cmd(qc);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001091}
1092
1093static int nv_adma_port_start(struct ata_port *ap)
1094{
1095 struct device *dev = ap->host->dev;
1096 struct nv_adma_port_priv *pp;
1097 int rc;
1098 void *mem;
1099 dma_addr_t mem_dma;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001100 void __iomem *mmio;
Robert Hancock8959d302008-02-04 19:39:02 -06001101 struct pci_dev *pdev = to_pci_dev(dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001102 u16 tmp;
1103
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001104 /*
1105 * Ensure DMA mask is set to 32-bit before allocating legacy PRD and
1106 * pad buffers.
1107 */
1108 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
Robert Hancock8959d302008-02-04 19:39:02 -06001109 if (rc)
1110 return rc;
1111
Tejun Heoc7087652010-05-10 21:41:34 +02001112 /* we might fallback to bmdma, allocate bmdma resources */
1113 rc = ata_bmdma_port_start(ap);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001114 if (rc)
1115 return rc;
1116
Tejun Heo24dc5f32007-01-20 16:00:28 +09001117 pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
1118 if (!pp)
1119 return -ENOMEM;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001120
Tejun Heo0d5ff562007-02-01 15:06:36 +09001121 mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001122 ap->port_no * NV_ADMA_PORT_SIZE;
1123 pp->ctl_block = mmio;
Tejun Heo0d5ff562007-02-01 15:06:36 +09001124 pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001125 pp->notifier_clear_block = pp->gen_block +
1126 NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);
1127
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001128 /*
1129 * Now that the legacy PRD and padding buffer are allocated we can
Christoph Hellwig51872b62019-08-26 12:57:22 +02001130 * raise the DMA mask to allocate the CPB/APRD table.
Christoph Hellwig258c9fd2018-05-09 16:01:00 +02001131 */
Christoph Hellwig51872b62019-08-26 12:57:22 +02001132 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1133
Robert Hancock8959d302008-02-04 19:39:02 -06001134 pp->adma_dma_mask = *dev->dma_mask;
1135
Tejun Heo24dc5f32007-01-20 16:00:28 +09001136 mem = dmam_alloc_coherent(dev, NV_ADMA_PORT_PRIV_DMA_SZ,
1137 &mem_dma, GFP_KERNEL);
1138 if (!mem)
1139 return -ENOMEM;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001140
1141 /*
1142 * First item in chunk of DMA memory:
1143 * 128-byte command parameter block (CPB)
1144 * one for each command tag
1145 */
1146 pp->cpb = mem;
1147 pp->cpb_dma = mem_dma;
1148
1149 writel(mem_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001150 writel((mem_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001151
1152 mem += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1153 mem_dma += NV_ADMA_MAX_CPBS * NV_ADMA_CPB_SZ;
1154
1155 /*
1156 * Second item: block of ADMA_SGTBL_LEN s/g entries
1157 */
1158 pp->aprd = mem;
1159 pp->aprd_dma = mem_dma;
1160
1161 ap->private_data = pp;
1162
1163 /* clear any outstanding interrupt conditions */
1164 writew(0xffff, mmio + NV_ADMA_STAT);
1165
1166 /* initialize port variables */
1167 pp->flags = NV_ADMA_PORT_REGISTER_MODE;
1168
1169 /* clear CPB fetch count */
1170 writew(0, mmio + NV_ADMA_CPB_COUNT);
1171
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001172 /* clear GO for register mode, enable interrupt */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001173 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001174 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1175 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001176
1177 tmp = readw(mmio + NV_ADMA_CTL);
1178 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001179 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001180 udelay(1);
1181 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001182 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockfbbb2622006-10-27 19:08:41 -07001183
1184 return 0;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001185}
1186
1187static void nv_adma_port_stop(struct ata_port *ap)
1188{
Robert Hancockfbbb2622006-10-27 19:08:41 -07001189 struct nv_adma_port_priv *pp = ap->private_data;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001190 void __iomem *mmio = pp->ctl_block;
Robert Hancockfbbb2622006-10-27 19:08:41 -07001191
Robert Hancockfbbb2622006-10-27 19:08:41 -07001192 writew(0, mmio + NV_ADMA_CTL);
Robert Hancockfbbb2622006-10-27 19:08:41 -07001193}
1194
Tejun Heo438ac6d2007-03-02 17:31:26 +09001195#ifdef CONFIG_PM
/*
 * nv_adma_port_suspend - quiesce the ADMA engine for system suspend.
 *
 * Switches to register mode (which clears GO), zeroes the CPB fetch
 * count, and disables the channel/interrupts.  Always returns 0.
 */
static int nv_adma_port_suspend(struct ata_port *ap, pm_message_t mesg)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	void __iomem *mmio = pp->ctl_block;

	/* Go to register mode - clears GO */
	nv_adma_register_mode(ap);

	/* clear CPB fetch count */
	writew(0, mmio + NV_ADMA_CPB_COUNT);

	/* disable interrupt, shut down port */
	writew(0, mmio + NV_ADMA_CTL);

	return 0;
}
1212
1213static int nv_adma_port_resume(struct ata_port *ap)
1214{
1215 struct nv_adma_port_priv *pp = ap->private_data;
1216 void __iomem *mmio = pp->ctl_block;
1217 u16 tmp;
1218
1219 /* set CPB block location */
1220 writel(pp->cpb_dma & 0xFFFFFFFF, mmio + NV_ADMA_CPB_BASE_LOW);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001221 writel((pp->cpb_dma >> 16) >> 16, mmio + NV_ADMA_CPB_BASE_HIGH);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001222
1223 /* clear any outstanding interrupt conditions */
1224 writew(0xffff, mmio + NV_ADMA_STAT);
1225
1226 /* initialize port variables */
1227 pp->flags |= NV_ADMA_PORT_REGISTER_MODE;
1228
1229 /* clear CPB fetch count */
1230 writew(0, mmio + NV_ADMA_CPB_COUNT);
1231
1232 /* clear GO for register mode, enable interrupt */
1233 tmp = readw(mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001234 writew((tmp & ~NV_ADMA_CTL_GO) | NV_ADMA_CTL_AIEN |
1235 NV_ADMA_CTL_HOTPLUG_IEN, mmio + NV_ADMA_CTL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001236
1237 tmp = readw(mmio + NV_ADMA_CTL);
1238 writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001239 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001240 udelay(1);
1241 writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
Jeff Garzik5796d1c2007-10-26 00:03:37 -04001242 readw(mmio + NV_ADMA_CTL); /* flush posted write */
Robert Hancockcdf56bc2007-01-03 18:13:57 -06001243
1244 return 0;
1245}
Tejun Heo438ac6d2007-03-02 17:31:26 +09001246#endif
Robert Hancockfbbb2622006-10-27 19:08:41 -07001247
/*
 * nv_adma_setup_port - point the SFF taskfile I/O addresses at the
 * port's ADMA MMIO block.
 *
 * In ADMA mode the shadow registers live in MMIO at 4-byte strides from
 * the port's ADMA base, with the control/altstatus register at offset
 * 0x20.
 */
static void nv_adma_setup_port(struct ata_port *ap)
{
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct ata_ioports *ioport = &ap->ioaddr;

	mmio += NV_ADMA_PORT + ap->port_no * NV_ADMA_PORT_SIZE;

	ioport->cmd_addr	= mmio;
	ioport->data_addr	= mmio + (ATA_REG_DATA * 4);
	ioport->error_addr	=
	ioport->feature_addr	= mmio + (ATA_REG_ERR * 4);
	ioport->nsect_addr	= mmio + (ATA_REG_NSECT * 4);
	ioport->lbal_addr	= mmio + (ATA_REG_LBAL * 4);
	ioport->lbam_addr	= mmio + (ATA_REG_LBAM * 4);
	ioport->lbah_addr	= mmio + (ATA_REG_LBAH * 4);
	ioport->device_addr	= mmio + (ATA_REG_DEVICE * 4);
	ioport->status_addr	=
	ioport->command_addr	= mmio + (ATA_REG_STATUS * 4);
	ioport->altstatus_addr	=
	ioport->ctl_addr	= mmio + 0x20;
}
1269
/*
 * nv_adma_host_init - host-wide ADMA initialization.
 *
 * Enables ADMA (and the posted-write buffers) for both ports in the
 * MCP SATA config register, then sets up each port's MMIO taskfile
 * addresses.  Always returns 0.
 */
static int nv_adma_host_init(struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);
	unsigned int i;
	u32 tmp32;

	/* enable ADMA on the ports */
	pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
	tmp32 |= NV_MCP_SATA_CFG_20_PORT0_EN |
		 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
		 NV_MCP_SATA_CFG_20_PORT1_EN |
		 NV_MCP_SATA_CFG_20_PORT1_PWB_EN;

	pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);

	for (i = 0; i < host->n_ports; i++)
		nv_adma_setup_port(host->ports[i]);

	return 0;
}
1290
/*
 * nv_adma_fill_aprd - fill one ADMA PRD (s/g) entry from a scatterlist
 * element.
 * @qc:   command the entry belongs to (direction, element count)
 * @sg:   scatterlist element to translate
 * @idx:  index of this element within the command
 * @aprd: entry to fill
 *
 * The last element gets NV_APRD_END; every other element gets
 * NV_APRD_CONT except index 4, which is the final inline entry in the
 * CPB before the chain continues in the external APRD table.
 */
static void nv_adma_fill_aprd(struct ata_queued_cmd *qc,
			      struct scatterlist *sg,
			      int idx,
			      struct nv_adma_prd *aprd)
{
	u8 flags = 0;
	if (qc->tf.flags & ATA_TFLAG_WRITE)
		flags |= NV_APRD_WRITE;
	if (idx == qc->n_elem - 1)
		flags |= NV_APRD_END;
	else if (idx != 4)
		flags |= NV_APRD_CONT;

	aprd->addr  = cpu_to_le64(((u64)sg_dma_address(sg)));
	aprd->len   = cpu_to_le32(((u32)sg_dma_len(sg))); /* len in bytes */
	aprd->flags = flags;
	aprd->packet_len = 0;
}
1309
/*
 * nv_adma_fill_sg - build the ADMA s/g list for a command.
 *
 * The first five PRD entries live inline in the CPB; any further
 * entries go into the per-tag slice of the external APRD table, whose
 * DMA address is linked in via cpb->next_aprd (or 0 if unused).
 */
static void nv_adma_fill_sg(struct ata_queued_cmd *qc, struct nv_adma_cpb *cpb)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_prd *aprd;
	struct scatterlist *sg;
	unsigned int si;

	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		aprd = (si < 5) ? &cpb->aprd[si] :
			&pp->aprd[NV_ADMA_SGTBL_LEN * qc->hw_tag + (si-5)];
		nv_adma_fill_aprd(qc, sg, si, aprd);
	}
	if (si > 5)
		cpb->next_aprd = cpu_to_le64(((u64)(pp->aprd_dma + NV_ADMA_SGTBL_SZ * qc->hw_tag)));
	else
		cpb->next_aprd = cpu_to_le64(0);
}
1327
Robert Hancock382a6652007-02-05 16:26:02 -08001328static int nv_adma_use_reg_mode(struct ata_queued_cmd *qc)
1329{
1330 struct nv_adma_port_priv *pp = qc->ap->private_data;
1331
1332 /* ADMA engine can only be used for non-ATAPI DMA commands,
Robert Hancock3f3debd2007-11-25 16:59:36 -06001333 or interrupt-driven no-data commands. */
Jeff Garzikb4479162007-10-25 20:47:30 -04001334 if ((pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
Robert Hancock3f3debd2007-11-25 16:59:36 -06001335 (qc->tf.flags & ATA_TFLAG_POLLING))
Robert Hancock382a6652007-02-05 16:26:02 -08001336 return 1;
1337
Jeff Garzikb4479162007-10-25 20:47:30 -04001338 if ((qc->flags & ATA_QCFLAG_DMAMAP) ||
Robert Hancock382a6652007-02-05 16:26:02 -08001339 (qc->tf.protocol == ATA_PROT_NODATA))
1340 return 0;
1341
1342 return 1;
1343}
1344
/*
 * Prepare a command for issue: either fall back to the BMDMA path for
 * register-mode commands, or build the ADMA Command Parameter Block.
 *
 * The CPB is filled while marked invalid; the wmb() barriers enforce
 * the ordering the hardware requires so it never observes a
 * half-constructed CPB with NV_CPB_CTL_CPB_VALID set.
 */
static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag];
	u8 ctl_flags = NV_CPB_CTL_CPB_VALID |
		       NV_CPB_CTL_IEN;

	if (nv_adma_use_reg_mode(qc)) {
		/* DMA-mapped commands may only take this path once the
		   ATAPI setup is complete */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		ata_bmdma_qc_prep(qc);
		return AC_ERR_OK;
	}

	/* invalidate the CPB before rewriting it: mark it done and clear
	   the valid bit, with barriers so the device sees that order */
	cpb->resp_flags = NV_CPB_RESP_DONE;
	wmb();
	cpb->ctl_flags = 0;
	wmb();

	cpb->len		= 3;
	cpb->tag		= qc->hw_tag;
	cpb->next_cpb_idx	= 0;

	/* turn on NCQ flags for NCQ commands */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ctl_flags |= NV_CPB_CTL_QUEUE | NV_CPB_CTL_FPDMA;

	nv_adma_tf_to_cpb(&qc->tf, cpb->tf);

	if (qc->flags & ATA_QCFLAG_DMAMAP) {
		nv_adma_fill_sg(qc, cpb);
		ctl_flags |= NV_CPB_CTL_APRD_VALID;
	} else
		/* no data: clear the inline APRD slots */
		memset(&cpb->aprd[0], 0, sizeof(struct nv_adma_prd) * 5);

	/* Be paranoid and don't let the device see NV_CPB_CTL_CPB_VALID
	   until we are finished filling in all of the contents */
	wmb();
	cpb->ctl_flags = ctl_flags;
	wmb();
	cpb->resp_flags = 0;

	return AC_ERR_OK;
}
1390
/*
 * Issue a prepared command, switching the port between register and
 * ADMA mode as required, then kick the controller via the APPEND
 * register.  Returns 0 on success or an AC_ERR_* code.
 */
static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc)
{
	struct nv_adma_port_priv *pp = qc->ap->private_data;
	void __iomem *mmio = pp->ctl_block;
	int curr_ncq = (qc->tf.protocol == ATA_PROT_NCQ);

	/* We can't handle result taskfile with NCQ commands, since
	   retrieving the taskfile switches us out of ADMA mode and would abort
	   existing commands. */
	if (unlikely(qc->tf.protocol == ATA_PROT_NCQ &&
		     (qc->flags & ATA_QCFLAG_RESULT_TF))) {
		ata_dev_err(qc->dev, "NCQ w/ RESULT_TF not allowed\n");
		return AC_ERR_SYSTEM;
	}

	if (nv_adma_use_reg_mode(qc)) {
		/* use ATA register mode */
		BUG_ON(!(pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) &&
			(qc->flags & ATA_QCFLAG_DMAMAP));
		nv_adma_register_mode(qc->ap);
		return ata_bmdma_qc_issue(qc);
	} else
		nv_adma_mode(qc->ap);

	/* write append register, command tag in lower 8 bits
	   and (number of cpbs to append -1) in top 8 bits */
	wmb();

	if (curr_ncq != pp->last_issue_ncq) {
		/* Seems to need some delay before switching between NCQ and
		   non-NCQ commands, else we get command timeouts and such. */
		udelay(20);
		pp->last_issue_ncq = curr_ncq;
	}

	writew(qc->hw_tag, mmio + NV_ADMA_APPEND);

	return 0;
}
1430
/*
 * Shared interrupt handler for the generic (non-ADMA) flavors.
 *
 * Walks every port under the host lock; ports with an active
 * non-polled command are serviced through the BMDMA interrupt path,
 * idle ports just get their status register read to ack any stray
 * interrupt.
 */
static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_queued_cmd *qc;

		qc = ata_qc_from_tag(ap, ap->link.active_tag);
		if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING))) {
			handled += ata_bmdma_port_intr(ap, qc);
		} else {
			/*
			 * No request pending?  Clear interrupt status
			 * anyway, in case there's one pending.
			 */
			ap->ops->sff_check_status(ap);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
1460
Jeff Garzikcca39742006-08-24 03:19:22 -04001461static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat)
Tejun Heoada364e2006-06-17 15:49:56 +09001462{
1463 int i, handled = 0;
1464
Jeff Garzikcca39742006-08-24 03:19:22 -04001465 for (i = 0; i < host->n_ports; i++) {
Tejun Heo3e4ec342010-05-10 21:41:30 +02001466 handled += nv_host_intr(host->ports[i], irq_stat);
Tejun Heoada364e2006-06-17 15:49:56 +09001467 irq_stat >>= NV_INT_PORT_SHIFT;
1468 }
1469
1470 return IRQ_RETVAL(handled);
1471}
1472
David Howells7d12e782006-10-05 14:55:46 +01001473static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001474{
Jeff Garzikcca39742006-08-24 03:19:22 -04001475 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001476 u8 irq_stat;
1477 irqreturn_t ret;
1478
Jeff Garzikcca39742006-08-24 03:19:22 -04001479 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001480 irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
Jeff Garzikcca39742006-08-24 03:19:22 -04001481 ret = nv_do_interrupt(host, irq_stat);
1482 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001483
1484 return ret;
1485}
1486
David Howells7d12e782006-10-05 14:55:46 +01001487static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
Tejun Heoada364e2006-06-17 15:49:56 +09001488{
Jeff Garzikcca39742006-08-24 03:19:22 -04001489 struct ata_host *host = dev_instance;
Tejun Heoada364e2006-06-17 15:49:56 +09001490 u8 irq_stat;
1491 irqreturn_t ret;
1492
Jeff Garzikcca39742006-08-24 03:19:22 -04001493 spin_lock(&host->lock);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001494 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
Jeff Garzikcca39742006-08-24 03:19:22 -04001495 ret = nv_do_interrupt(host, irq_stat);
1496 spin_unlock(&host->lock);
Tejun Heoada364e2006-06-17 15:49:56 +09001497
1498 return ret;
1499}
1500
Tejun Heo82ef04f2008-07-31 17:02:40 +09001501static int nv_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001504 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
Tejun Heo82ef04f2008-07-31 17:02:40 +09001506 *val = ioread32(link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001507 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508}
1509
Tejun Heo82ef04f2008-07-31 17:02:40 +09001510static int nv_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 if (sc_reg > SCR_CONTROL)
Tejun Heoda3dbb12007-07-16 14:29:40 +09001513 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
Tejun Heo82ef04f2008-07-31 17:02:40 +09001515 iowrite32(val, link->ap->ioaddr.scr_addr + (sc_reg * 4));
Tejun Heoda3dbb12007-07-16 14:29:40 +09001516 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517}
1518
/*
 * Hardreset policy for nv controllers: only hardreset during post-boot
 * probing of an empty port; an occupied port just gets a link resume.
 * Always returns -EAGAIN because signature acquisition is unreliable
 * on this hardware, forcing libata to follow up with a softreset.
 */
static int nv_hardreset(struct ata_link *link, unsigned int *class,
			unsigned long deadline)
{
	struct ata_eh_context *ehc = &link->eh_context;

	/* Do hardreset iff it's post-boot probing, please read the
	 * comment above port ops for details.
	 */
	if (!(link->ap->pflags & ATA_PFLAG_LOADING) &&
	    !ata_dev_enabled(link->device))
		sata_link_hardreset(link, sata_deb_timing_hotplug, deadline,
				    NULL, NULL);
	else {
		const unsigned long *timing = sata_ehc_deb_timing(ehc);
		int rc;

		if (!(ehc->i.flags & ATA_EHI_QUIET))
			ata_link_info(link,
				      "nv: skipping hardreset on occupied port\n");

		/* make sure the link is online */
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link, "failed to resume link (errno=%d)\n",
				      rc);
	}

	/* device signature acquisition is unreliable */
	return -EAGAIN;
}
1550
Tejun Heo39f87582006-06-17 15:49:56 +09001551static void nv_nf2_freeze(struct ata_port *ap)
1552{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001553 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001554 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1555 u8 mask;
1556
Tejun Heo0d5ff562007-02-01 15:06:36 +09001557 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001558 mask &= ~(NV_INT_ALL << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001559 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001560}
1561
1562static void nv_nf2_thaw(struct ata_port *ap)
1563{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001564 void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
Tejun Heo39f87582006-06-17 15:49:56 +09001565 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1566 u8 mask;
1567
Tejun Heo0d5ff562007-02-01 15:06:36 +09001568 iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
Tejun Heo39f87582006-06-17 15:49:56 +09001569
Tejun Heo0d5ff562007-02-01 15:06:36 +09001570 mask = ioread8(scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001571 mask |= (NV_INT_MASK << shift);
Tejun Heo0d5ff562007-02-01 15:06:36 +09001572 iowrite8(mask, scr_addr + NV_INT_ENABLE);
Tejun Heo39f87582006-06-17 15:49:56 +09001573}
1574
1575static void nv_ck804_freeze(struct ata_port *ap)
1576{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001577 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001578 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1579 u8 mask;
1580
1581 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1582 mask &= ~(NV_INT_ALL << shift);
1583 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1584}
1585
1586static void nv_ck804_thaw(struct ata_port *ap)
1587{
Tejun Heo0d5ff562007-02-01 15:06:36 +09001588 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
Tejun Heo39f87582006-06-17 15:49:56 +09001589 int shift = ap->port_no * NV_INT_PORT_SHIFT;
1590 u8 mask;
1591
1592 writeb(NV_INT_ALL << shift, mmio_base + NV_INT_STATUS_CK804);
1593
1594 mask = readb(mmio_base + NV_INT_ENABLE_CK804);
1595 mask |= (NV_INT_MASK << shift);
1596 writeb(mask, mmio_base + NV_INT_ENABLE_CK804);
1597}
1598
Kuan Luof140f0f2007-10-15 15:16:53 -04001599static void nv_mcp55_freeze(struct ata_port *ap)
1600{
1601 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1602 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1603 u32 mask;
1604
1605 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1606
1607 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1608 mask &= ~(NV_INT_ALL_MCP55 << shift);
1609 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Kuan Luof140f0f2007-10-15 15:16:53 -04001610}
1611
1612static void nv_mcp55_thaw(struct ata_port *ap)
1613{
1614 void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
1615 int shift = ap->port_no * NV_INT_PORT_SHIFT_MCP55;
1616 u32 mask;
1617
1618 writel(NV_INT_ALL_MCP55 << shift, mmio_base + NV_INT_STATUS_MCP55);
1619
1620 mask = readl(mmio_base + NV_INT_ENABLE_MCP55);
1621 mask |= (NV_INT_MASK_MCP55 << shift);
1622 writel(mask, mmio_base + NV_INT_ENABLE_MCP55);
Kuan Luof140f0f2007-10-15 15:16:53 -04001623}
1624
/*
 * ADMA error handler.  If the port is still in ADMA mode, dump the
 * engine state for diagnostics, drop back to register mode,
 * invalidate all CPBs and reset the ADMA channel before handing off
 * to the generic BMDMA error handler.
 */
static void nv_adma_error_handler(struct ata_port *ap)
{
	struct nv_adma_port_priv *pp = ap->private_data;
	if (!(pp->flags & NV_ADMA_PORT_REGISTER_MODE)) {
		void __iomem *mmio = pp->ctl_block;
		int i;
		u16 tmp;

		if (ata_tag_valid(ap->link.active_tag) || ap->link.sactive) {
			/* snapshot engine registers for the error report */
			u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
			u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
			u32 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL);
			u32 status = readw(mmio + NV_ADMA_STAT);
			u8 cpb_count = readb(mmio + NV_ADMA_CPB_COUNT);
			u8 next_cpb_idx = readb(mmio + NV_ADMA_NEXT_CPB_IDX);

			ata_port_err(ap,
				"EH in ADMA mode, notifier 0x%X "
				"notifier_error 0x%X gen_ctl 0x%X status 0x%X "
				"next cpb count 0x%X next cpb idx 0x%x\n",
				notifier, notifier_error, gen_ctl, status,
				cpb_count, next_cpb_idx);

			/* log the CPB state of every outstanding command */
			for (i = 0; i < NV_ADMA_MAX_CPBS; i++) {
				struct nv_adma_cpb *cpb = &pp->cpb[i];
				if ((ata_tag_valid(ap->link.active_tag) && i == ap->link.active_tag) ||
				    ap->link.sactive & (1 << i))
					ata_port_err(ap,
						"CPB %d: ctl_flags 0x%x, resp_flags 0x%x\n",
						i, cpb->ctl_flags, cpb->resp_flags);
			}
		}

		/* Push us back into port register mode for error handling. */
		nv_adma_register_mode(ap);

		/* Mark all of the CPBs as invalid to prevent them from
		   being executed */
		for (i = 0; i < NV_ADMA_MAX_CPBS; i++)
			pp->cpb[i].ctl_flags &= ~NV_CPB_CTL_CPB_VALID;

		/* clear CPB fetch count */
		writew(0, mmio + NV_ADMA_CPB_COUNT);

		/* Reset channel */
		tmp = readw(mmio + NV_ADMA_CTL);
		writew(tmp | NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
		udelay(1);
		writew(tmp & ~NV_ADMA_CTL_CHANNEL_RESET, mmio + NV_ADMA_CTL);
		readw(mmio + NV_ADMA_CTL);	/* flush posted write */
	}

	ata_bmdma_error_handler(ap);
}
1680
Kuan Luof140f0f2007-10-15 15:16:53 -04001681static void nv_swncq_qc_to_dq(struct ata_port *ap, struct ata_queued_cmd *qc)
1682{
1683 struct nv_swncq_port_priv *pp = ap->private_data;
1684 struct defer_queue *dq = &pp->defer_queue;
1685
1686 /* queue is full */
1687 WARN_ON(dq->tail - dq->head == ATA_MAX_QUEUE);
Jens Axboe4e5b6262018-05-11 12:51:04 -06001688 dq->defer_bits |= (1 << qc->hw_tag);
1689 dq->tag[dq->tail++ & (ATA_MAX_QUEUE - 1)] = qc->hw_tag;
Kuan Luof140f0f2007-10-15 15:16:53 -04001690}
1691
1692static struct ata_queued_cmd *nv_swncq_qc_from_dq(struct ata_port *ap)
1693{
1694 struct nv_swncq_port_priv *pp = ap->private_data;
1695 struct defer_queue *dq = &pp->defer_queue;
1696 unsigned int tag;
1697
1698 if (dq->head == dq->tail) /* null queue */
1699 return NULL;
1700
1701 tag = dq->tag[dq->head & (ATA_MAX_QUEUE - 1)];
1702 dq->tag[dq->head++ & (ATA_MAX_QUEUE - 1)] = ATA_TAG_POISON;
1703 WARN_ON(!(dq->defer_bits & (1 << tag)));
1704 dq->defer_bits &= ~(1 << tag);
1705
1706 return ata_qc_from_tag(ap, tag);
1707}
1708
1709static void nv_swncq_fis_reinit(struct ata_port *ap)
1710{
1711 struct nv_swncq_port_priv *pp = ap->private_data;
1712
1713 pp->dhfis_bits = 0;
1714 pp->dmafis_bits = 0;
1715 pp->sdbfis_bits = 0;
1716 pp->ncq_flags = 0;
1717}
1718
1719static void nv_swncq_pp_reinit(struct ata_port *ap)
1720{
1721 struct nv_swncq_port_priv *pp = ap->private_data;
1722 struct defer_queue *dq = &pp->defer_queue;
1723
1724 dq->head = 0;
1725 dq->tail = 0;
1726 dq->defer_bits = 0;
1727 pp->qc_active = 0;
1728 pp->last_issue_tag = ATA_TAG_POISON;
1729 nv_swncq_fis_reinit(ap);
1730}
1731
1732static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis)
1733{
1734 struct nv_swncq_port_priv *pp = ap->private_data;
1735
1736 writew(fis, pp->irq_block);
1737}
1738
/*
 * Stop the BMDMA engine without a real queued command in hand.
 *
 * NOTE(review): this builds a stack qc and initializes only qc.ap —
 * it relies on ata_bmdma_stop() touching nothing else in the qc; the
 * remaining fields are intentionally left uninitialized.
 */
static void __ata_bmdma_stop(struct ata_port *ap)
{
	struct ata_queued_cmd qc;

	qc.ap = ap;
	ata_bmdma_stop(&qc);
}
1746
1747static void nv_swncq_ncq_stop(struct ata_port *ap)
1748{
1749 struct nv_swncq_port_priv *pp = ap->private_data;
1750 unsigned int i;
1751 u32 sactive;
1752 u32 done_mask;
1753
Jens Axboee3ed89392018-05-11 12:51:05 -06001754 ata_port_err(ap, "EH in SWNCQ mode,QC:qc_active 0x%llX sactive 0x%X\n",
Joe Perchesa9a79df2011-04-15 15:51:59 -07001755 ap->qc_active, ap->link.sactive);
1756 ata_port_err(ap,
Kuan Luof140f0f2007-10-15 15:16:53 -04001757 "SWNCQ:qc_active 0x%X defer_bits 0x%X last_issue_tag 0x%x\n "
1758 "dhfis 0x%X dmafis 0x%X sdbfis 0x%X\n",
1759 pp->qc_active, pp->defer_queue.defer_bits, pp->last_issue_tag,
1760 pp->dhfis_bits, pp->dmafis_bits, pp->sdbfis_bits);
1761
Joe Perchesa9a79df2011-04-15 15:51:59 -07001762 ata_port_err(ap, "ATA_REG 0x%X ERR_REG 0x%X\n",
1763 ap->ops->sff_check_status(ap),
1764 ioread8(ap->ioaddr.error_addr));
Kuan Luof140f0f2007-10-15 15:16:53 -04001765
1766 sactive = readl(pp->sactive_block);
1767 done_mask = pp->qc_active ^ sactive;
1768
Joe Perchesa9a79df2011-04-15 15:51:59 -07001769 ata_port_err(ap, "tag : dhfis dmafis sdbfis sactive\n");
Kuan Luof140f0f2007-10-15 15:16:53 -04001770 for (i = 0; i < ATA_MAX_QUEUE; i++) {
1771 u8 err = 0;
1772 if (pp->qc_active & (1 << i))
1773 err = 0;
1774 else if (done_mask & (1 << i))
1775 err = 1;
1776 else
1777 continue;
1778
Joe Perchesa9a79df2011-04-15 15:51:59 -07001779 ata_port_err(ap,
1780 "tag 0x%x: %01x %01x %01x %01x %s\n", i,
1781 (pp->dhfis_bits >> i) & 0x1,
1782 (pp->dmafis_bits >> i) & 0x1,
1783 (pp->sdbfis_bits >> i) & 0x1,
1784 (sactive >> i) & 0x1,
1785 (err ? "error! tag doesn't exit" : " "));
Kuan Luof140f0f2007-10-15 15:16:53 -04001786 }
1787
1788 nv_swncq_pp_reinit(ap);
Tejun Heo5682ed32008-04-07 22:47:16 +09001789 ap->ops->sff_irq_clear(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001790 __ata_bmdma_stop(ap);
1791 nv_swncq_irq_clear(ap, 0xffff);
1792}
1793
1794static void nv_swncq_error_handler(struct ata_port *ap)
1795{
1796 struct ata_eh_context *ehc = &ap->link.eh_context;
1797
1798 if (ap->link.sactive) {
1799 nv_swncq_ncq_stop(ap);
Tejun Heocf480622008-01-24 00:05:14 +09001800 ehc->i.action |= ATA_EH_RESET;
Kuan Luof140f0f2007-10-15 15:16:53 -04001801 }
1802
Tejun Heofe06e5f2010-05-10 21:41:39 +02001803 ata_bmdma_error_handler(ap);
Kuan Luof140f0f2007-10-15 15:16:53 -04001804}
1805
1806#ifdef CONFIG_PM
1807static int nv_swncq_port_suspend(struct ata_port *ap, pm_message_t mesg)
1808{
1809 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1810 u32 tmp;
1811
1812 /* clear irq */
1813 writel(~0, mmio + NV_INT_STATUS_MCP55);
1814
1815 /* disable irq */
1816 writel(0, mmio + NV_INT_ENABLE_MCP55);
1817
1818 /* disable swncq */
1819 tmp = readl(mmio + NV_CTL_MCP55);
1820 tmp &= ~(NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ);
1821 writel(tmp, mmio + NV_CTL_MCP55);
1822
1823 return 0;
1824}
1825
1826static int nv_swncq_port_resume(struct ata_port *ap)
1827{
1828 void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
1829 u32 tmp;
1830
1831 /* clear irq */
1832 writel(~0, mmio + NV_INT_STATUS_MCP55);
1833
1834 /* enable irq */
1835 writel(0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);
1836
1837 /* enable swncq */
1838 tmp = readl(mmio + NV_CTL_MCP55);
1839 writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);
1840
1841 return 0;
1842}
1843#endif
1844
/*
 * One-time SWNCQ host initialization: disable the ECO 398 workaround
 * bit in PCI config space, enable SWNCQ on both channels, unmask the
 * interrupt sources and clear any stale port interrupts.
 */
static void nv_swncq_host_init(struct ata_host *host)
{
	u32 tmp;
	void __iomem *mmio = host->iomap[NV_MMIO_BAR];
	struct pci_dev *pdev = to_pci_dev(host->dev);
	u8 regval;

	/* disable ECO 398 */
	pci_read_config_byte(pdev, 0x7f, &regval);
	regval &= ~(1 << 7);
	pci_write_config_byte(pdev, 0x7f, regval);

	/* enable swncq */
	tmp = readl(mmio + NV_CTL_MCP55);
	dev_dbg(&pdev->dev, "HOST_CTL:0x%X\n", tmp);
	writel(tmp | NV_CTL_PRI_SWNCQ | NV_CTL_SEC_SWNCQ, mmio + NV_CTL_MCP55);

	/* enable irq intr */
	tmp = readl(mmio + NV_INT_ENABLE_MCP55);
	dev_dbg(&pdev->dev, "HOST_ENABLE:0x%X\n", tmp);
	writel(tmp | 0x00fd00fd, mmio + NV_INT_ENABLE_MCP55);

	/* clear port irq */
	writel(~0x0, mmio + NV_INT_STATUS_MCP55);
}
1870
/*
 * SCSI slave-config hook with a device quirk: Maxtor drives misbehave
 * with SWNCQ on MCP51, and on MCP55 up to revision a2, so queue depth
 * is forced to 1 (effectively disabling NCQ) for those combinations.
 */
static int nv_swncq_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	struct ata_device *dev;
	int rc;
	u8 rev;
	u8 check_maxtor = 0;
	unsigned char model_num[ATA_ID_PROD_LEN + 1];

	rc = ata_scsi_slave_config(sdev);
	if (sdev->id >= ATA_MAX_DEVICES || sdev->channel || sdev->lun)
		/* Not a proper libata device, ignore */
		return rc;

	dev = &ap->link.device[sdev->id];
	/* quirk only matters when NCQ is in use on an ATA (non-ATAPI) device */
	if (!(ap->flags & ATA_FLAG_NCQ) || dev->class == ATA_DEV_ATAPI)
		return rc;

	/* if MCP51 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_SATA2)
		check_maxtor = 1;

	/* if MCP55 and rev <= a2 and Maxtor, then disable ncq */
	if (pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA ||
	    pdev->device == PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2) {
		pci_read_config_byte(pdev, 0x8, &rev);
		if (rev <= 0xa2)
			check_maxtor = 1;
	}

	if (!check_maxtor)
		return rc;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));

	if (strncmp(model_num, "Maxtor", 6) == 0) {
		/* depth 1 disables command queueing for this device */
		ata_scsi_change_queue_depth(sdev, 1);
		ata_dev_notice(dev, "Disabling SWNCQ mode (depth %x)\n",
			       sdev->queue_depth);
	}

	return rc;
}
1916
/*
 * Per-port SWNCQ setup: allocate the private state and a PRD table
 * large enough for one entry set per NCQ tag, and record the MMIO
 * addresses of the per-port SActive, IRQ and tag blocks.
 * Returns 0 on success or a negative errno.
 */
static int nv_swncq_port_start(struct ata_port *ap)
{
	struct device *dev = ap->host->dev;
	void __iomem *mmio = ap->host->iomap[NV_MMIO_BAR];
	struct nv_swncq_port_priv *pp;
	int rc;

	/* we might fallback to bmdma, allocate bmdma resources */
	rc = ata_bmdma_port_start(ap);
	if (rc)
		return rc;

	pp = devm_kzalloc(dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	/* one PRD table per possible tag; freed automatically (devm) */
	pp->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ * ATA_MAX_QUEUE,
				      &pp->prd_dma, GFP_KERNEL);
	if (!pp->prd)
		return -ENOMEM;

	ap->private_data = pp;
	pp->sactive_block = ap->ioaddr.scr_addr + 4 * SCR_ACTIVE;
	pp->irq_block = mmio + NV_INT_STATUS_MCP55 + ap->port_no * 2;
	pp->tag_block = mmio + NV_NCQ_REG_MCP55 + ap->port_no * 2;

	return 0;
}
1945
Jiri Slaby95364f32019-10-31 10:59:45 +01001946static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc)
Kuan Luof140f0f2007-10-15 15:16:53 -04001947{
1948 if (qc->tf.protocol != ATA_PROT_NCQ) {
Tejun Heof47451c2010-05-10 21:41:40 +02001949 ata_bmdma_qc_prep(qc);
Jiri Slaby95364f32019-10-31 10:59:45 +01001950 return AC_ERR_OK;
Kuan Luof140f0f2007-10-15 15:16:53 -04001951 }
1952
1953 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
Jiri Slaby95364f32019-10-31 10:59:45 +01001954 return AC_ERR_OK;
Kuan Luof140f0f2007-10-15 15:16:53 -04001955
1956 nv_swncq_fill_sg(qc);
Jiri Slaby95364f32019-10-31 10:59:45 +01001957
1958 return AC_ERR_OK;
Kuan Luof140f0f2007-10-15 15:16:53 -04001959}
1960
/*
 * Build the PRD (physical region descriptor) table for an NCQ command
 * in the per-tag slice of pp->prd.  Each scatterlist segment is split
 * so that no PRD entry crosses a 64KiB boundary, as the BMDMA-style
 * engine requires; the final entry is marked with ATA_PRD_EOT.
 */
static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_bmdma_prd *prd;
	unsigned int si, idx;

	/* this command's private PRD table, selected by hardware tag */
	prd = pp->prd + ATA_MAX_PRD * qc->hw_tag;

	idx = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		addr = (u32)sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* clamp so no entry spans a 64KiB boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			prd[idx].addr = cpu_to_le32(addr);
			prd[idx].flags_len = cpu_to_le32(len & 0xffff);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark end of table */
	prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
1996
/*
 * Issue one NCQ command to the device: mark its tag active in the
 * SActive register and in the driver's bookkeeping, then load the
 * taskfile and send the command.  A NULL @qc is a no-op.
 * Always returns 0.
 */
static unsigned int nv_swncq_issue_atacmd(struct ata_port *ap,
					  struct ata_queued_cmd *qc)
{
	struct nv_swncq_port_priv *pp = ap->private_data;

	if (qc == NULL)
		return 0;

	/* tell the device this tag is in flight */
	writel((1 << qc->hw_tag), pp->sactive_block);
	pp->last_issue_tag = qc->hw_tag;
	/* fresh command: no D2H or DMA-setup FIS seen for this tag yet */
	pp->dhfis_bits &= ~(1 << qc->hw_tag);
	pp->dmafis_bits &= ~(1 << qc->hw_tag);
	pp->qc_active |= (0x1 << qc->hw_tag);

	trace_ata_tf_load(ap, &qc->tf);
	ap->ops->sff_tf_load(ap, &qc->tf);	 /* load tf registers */
	trace_ata_exec_command(ap, &qc->tf, qc->hw_tag);
	ap->ops->sff_exec_command(ap, &qc->tf);

	return 0;
}
2018
2019static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc)
2020{
2021 struct ata_port *ap = qc->ap;
2022 struct nv_swncq_port_priv *pp = ap->private_data;
2023
2024 if (qc->tf.protocol != ATA_PROT_NCQ)
Tejun Heo360ff782010-05-10 21:41:42 +02002025 return ata_bmdma_qc_issue(qc);
Kuan Luof140f0f2007-10-15 15:16:53 -04002026
Kuan Luof140f0f2007-10-15 15:16:53 -04002027 if (!pp->qc_active)
2028 nv_swncq_issue_atacmd(ap, qc);
2029 else
2030 nv_swncq_qc_to_dq(ap, qc); /* add qc to defer queue */
2031
2032 return 0;
2033}
2034
/*
 * Handle a hotplug/unplug interrupt FIS: clear SError, record a
 * hotplug event description and freeze the port so error handling
 * can reprobe the link.
 */
static void nv_swncq_hotplug(struct ata_port *ap, u32 fis)
{
	u32 serror;
	struct ata_eh_info *ehi = &ap->link.eh_info;

	ata_ehi_clear_desc(ehi);

	/* AHCI needs SError cleared; otherwise, it might lock up */
	sata_scr_read(&ap->link, SCR_ERROR, &serror);
	sata_scr_write(&ap->link, SCR_ERROR, serror);

	/* analyze @irq_stat */
	if (fis & NV_SWNCQ_IRQ_ADDED)
		ata_ehi_push_desc(ehi, "hot plug");
	else if (fis & NV_SWNCQ_IRQ_REMOVED)
		ata_ehi_push_desc(ehi, "hot unplug");

	ata_ehi_hotplugged(ehi);

	/* okay, let's hand over to EH */
	ehi->serror |= serror;

	ata_port_freeze(ap);
}
2059
/*
 * Handle a Set Device Bits FIS: complete the tags the device has
 * retired, then decide what to issue next — reissue the last command
 * if its D2H FIS never arrived, or pull the next command off the
 * defer queue.  Returns 0 on success, -EINVAL on a BMDMA error
 * (error state is recorded in the EH info for the caller).
 */
static int nv_swncq_sdbfis(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 sactive;
	u32 done_mask;
	u8 host_stat;
	u8 lack_dhfis = 0;

	host_stat = ap->ops->bmdma_status(ap);
	trace_ata_bmdma_status(ap, host_stat);
	if (unlikely(host_stat & ATA_DMA_ERR)) {
		/* error when transferring data to/from memory */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
		ehi->err_mask |= AC_ERR_HOST_BUS;
		ehi->action |= ATA_EH_RESET;
		return -EINVAL;
	}

	ap->ops->sff_irq_clear(ap);
	__ata_bmdma_stop(ap);

	/* tags that were active but no longer appear in SActive are done */
	sactive = readl(pp->sactive_block);
	done_mask = pp->qc_active ^ sactive;

	pp->qc_active &= ~done_mask;
	pp->dhfis_bits &= ~done_mask;
	pp->dmafis_bits &= ~done_mask;
	pp->sdbfis_bits |= done_mask;
	ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

	if (!ap->qc_active) {
		/* all outstanding commands retired: reset port state */
		ata_port_dbg(ap, "over\n");
		nv_swncq_pp_reinit(ap);
		return 0;
	}

	if (pp->qc_active & pp->dhfis_bits)
		return 0;

	if ((pp->ncq_flags & ncq_saw_backout) ||
	    (pp->qc_active ^ pp->dhfis_bits))
		/* if the controller can't get a device to host register FIS,
		 * The driver needs to reissue the new command.
		 */
		lack_dhfis = 1;

	ata_port_dbg(ap, "QC: qc_active 0x%llx,"
		     "SWNCQ:qc_active 0x%X defer_bits %X "
		     "dhfis 0x%X dmafis 0x%X last_issue_tag %x\n",
		     ap->qc_active, pp->qc_active,
		     pp->defer_queue.defer_bits, pp->dhfis_bits,
		     pp->dmafis_bits, pp->last_issue_tag);

	nv_swncq_fis_reinit(ap);

	if (lack_dhfis) {
		/* reissue the command whose D2H FIS went missing */
		qc = ata_qc_from_tag(ap, pp->last_issue_tag);
		nv_swncq_issue_atacmd(ap, qc);
		return 0;
	}

	if (pp->defer_queue.defer_bits) {
		/* send deferral queue command */
		qc = nv_swncq_qc_from_dq(ap);
		WARN_ON(qc == NULL);
		nv_swncq_issue_atacmd(ap, qc);
	}

	return 0;
}
2133
2134static inline u32 nv_swncq_tag(struct ata_port *ap)
2135{
2136 struct nv_swncq_port_priv *pp = ap->private_data;
2137 u32 tag;
2138
2139 tag = readb(pp->tag_block) >> 2;
2140 return (tag & 0x1f);
2141}
2142
Tejun Heo752e3862010-06-25 15:02:59 +02002143static void nv_swncq_dmafis(struct ata_port *ap)
Kuan Luof140f0f2007-10-15 15:16:53 -04002144{
2145 struct ata_queued_cmd *qc;
2146 unsigned int rw;
2147 u8 dmactl;
2148 u32 tag;
2149 struct nv_swncq_port_priv *pp = ap->private_data;
2150
2151 __ata_bmdma_stop(ap);
2152 tag = nv_swncq_tag(ap);
2153
Hannes Reinecke47013c52021-12-21 08:20:56 +01002154 ata_port_dbg(ap, "dma setup tag 0x%x\n", tag);
Kuan Luof140f0f2007-10-15 15:16:53 -04002155 qc = ata_qc_from_tag(ap, tag);
2156
2157 if (unlikely(!qc))
Tejun Heo752e3862010-06-25 15:02:59 +02002158 return;
Kuan Luof140f0f2007-10-15 15:16:53 -04002159
2160 rw = qc->tf.flags & ATA_TFLAG_WRITE;
2161
2162 /* load PRD table addr. */
Jens Axboe4e5b6262018-05-11 12:51:04 -06002163 iowrite32(pp->prd_dma + ATA_PRD_TBL_SZ * qc->hw_tag,
Kuan Luof140f0f2007-10-15 15:16:53 -04002164 ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
2165
2166 /* specify data direction, triple-check start bit is clear */
2167 dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
2168 dmactl &= ~ATA_DMA_WR;
2169 if (!rw)
2170 dmactl |= ATA_DMA_WR;
2171
2172 iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
Kuan Luof140f0f2007-10-15 15:16:53 -04002173}
2174
/*
 * nv_swncq_host_interrupt - per-port SWNCQ interrupt state machine
 * @ap: interrupting ATA port
 * @fis: per-port NV_SWNCQ_IRQ_* status bits extracted from the
 *       controller's interrupt status register
 *
 * Dispatches each interrupt cause in turn: hotplug events, device
 * errors, backed-out commands, SDB FIS completions, D2H register FIS
 * acknowledgements and DMA Setup FIS requests.  On protocol or device
 * errors the port is frozen so EH can recover it.
 */
static void nv_swncq_host_interrupt(struct ata_port *ap, u16 fis)
{
	struct nv_swncq_port_priv *pp = ap->private_data;
	struct ata_queued_cmd *qc;
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u32 serror;
	u8 ata_stat;

	ata_stat = ap->ops->sff_check_status(ap);
	/* ack the interrupt bits before acting on them */
	nv_swncq_irq_clear(ap, fis);
	if (!fis)
		return;

	/* EH owns a frozen port; don't touch it from interrupt context */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	if (fis & NV_SWNCQ_IRQ_HOTPLUG) {
		nv_swncq_hotplug(ap, fis);
		return;
	}

	if (!pp->qc_active)
		return;

	/* read and clear SError; an unreadable SCR means no link access */
	if (ap->ops->scr_read(&ap->link, SCR_ERROR, &serror))
		return;
	ap->ops->scr_write(&ap->link, SCR_ERROR, serror);

	if (ata_stat & ATA_ERR) {
		/* device reported an error: hand everything to EH */
		ata_ehi_clear_desc(ehi);
		ata_ehi_push_desc(ehi, "Ata error. fis:0x%X", fis);
		ehi->err_mask |= AC_ERR_DEV;
		ehi->serror |= serror;
		ehi->action |= ATA_EH_RESET;
		ata_port_freeze(ap);
		return;
	}

	if (fis & NV_SWNCQ_IRQ_BACKOUT) {
		/* If the IRQ is backout, driver must issue
		 * the new command again some time later.
		 */
		pp->ncq_flags |= ncq_saw_backout;
	}

	if (fis & NV_SWNCQ_IRQ_SDBFIS) {
		pp->ncq_flags |= ncq_saw_sdb;
		ata_port_dbg(ap, "SWNCQ: qc_active 0x%X "
			"dhfis 0x%X dmafis 0x%X sactive 0x%X\n",
			pp->qc_active, pp->dhfis_bits,
			pp->dmafis_bits, readl(pp->sactive_block));
		/* complete finished tags; negative return = BMDMA error */
		if (nv_swncq_sdbfis(ap) < 0)
			goto irq_error;
	}

	if (fis & NV_SWNCQ_IRQ_DHREGFIS) {
		/* The interrupt indicates the new command
		 * was transmitted correctly to the drive.
		 */
		pp->dhfis_bits |= (0x1 << pp->last_issue_tag);
		pp->ncq_flags |= ncq_saw_d2h;
		if (pp->ncq_flags & (ncq_saw_sdb | ncq_saw_backout)) {
			/* D2H after SDB/backout violates the expected
			 * FIS ordering — treat as an HSM error
			 */
			ata_ehi_push_desc(ehi, "illegal fis transaction");
			ehi->err_mask |= AC_ERR_HSM;
			ehi->action |= ATA_EH_RESET;
			goto irq_error;
		}

		if (!(fis & NV_SWNCQ_IRQ_DMASETUP) &&
		    !(pp->ncq_flags & ncq_saw_dmas)) {
			/* device accepted the command but isn't moving
			 * data yet; issue the next deferred command if
			 * the drive is not busy
			 */
			ata_stat = ap->ops->sff_check_status(ap);
			if (ata_stat & ATA_BUSY)
				goto irq_exit;

			if (pp->defer_queue.defer_bits) {
				ata_port_dbg(ap, "send next command\n");
				qc = nv_swncq_qc_from_dq(ap);
				nv_swncq_issue_atacmd(ap, qc);
			}
		}
	}

	if (fis & NV_SWNCQ_IRQ_DMASETUP) {
		/* program the dma controller with appropriate PRD buffers
		 * and start the DMA transfer for requested command.
		 */
		pp->dmafis_bits |= (0x1 << nv_swncq_tag(ap));
		pp->ncq_flags |= ncq_saw_dmas;
		nv_swncq_dmafis(ap);
	}

irq_exit:
	return;
irq_error:
	ata_ehi_push_desc(ehi, "fis:0x%x", fis);
	ata_port_freeze(ap);
	return;
}
2273
2274static irqreturn_t nv_swncq_interrupt(int irq, void *dev_instance)
2275{
2276 struct ata_host *host = dev_instance;
2277 unsigned int i;
2278 unsigned int handled = 0;
2279 unsigned long flags;
2280 u32 irq_stat;
2281
2282 spin_lock_irqsave(&host->lock, flags);
2283
2284 irq_stat = readl(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_MCP55);
2285
2286 for (i = 0; i < host->n_ports; i++) {
2287 struct ata_port *ap = host->ports[i];
2288
Tejun Heo3e4ec342010-05-10 21:41:30 +02002289 if (ap->link.sactive) {
2290 nv_swncq_host_interrupt(ap, (u16)irq_stat);
2291 handled = 1;
2292 } else {
2293 if (irq_stat) /* reserve Hotplug */
2294 nv_swncq_irq_clear(ap, 0xfff0);
Kuan Luof140f0f2007-10-15 15:16:53 -04002295
Tejun Heo3e4ec342010-05-10 21:41:30 +02002296 handled += nv_host_intr(ap, (u8)irq_stat);
Kuan Luof140f0f2007-10-15 15:16:53 -04002297 }
2298 irq_stat >>= NV_INT_PORT_SHIFT_MCP55;
2299 }
2300
2301 spin_unlock_irqrestore(&host->lock, flags);
2302
2303 return IRQ_RETVAL(handled);
2304}
2305
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002306static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307{
Tejun Heo1626aeb2007-05-04 12:43:58 +02002308 const struct ata_port_info *ppi[] = { NULL, NULL };
Tejun Heo95947192008-03-25 12:22:49 +09002309 struct nv_pi_priv *ipriv;
Tejun Heo9a829cc2007-04-17 23:44:08 +09002310 struct ata_host *host;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002311 struct nv_host_priv *hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 int rc;
2313 u32 bar;
Tejun Heo0d5ff562007-02-01 15:06:36 +09002314 void __iomem *base;
Robert Hancockfbbb2622006-10-27 19:08:41 -07002315 unsigned long type = ent->driver_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316
2317 // Make sure this is a SATA controller by counting the number of bars
2318 // (NVIDIA SATA controllers will always have six bars). Otherwise,
2319 // it's an IDE controller and we ignore it.
Denis Efremovc9c13ba2019-09-28 02:43:08 +03002320 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 if (pci_resource_start(pdev, bar) == 0)
2322 return -ENODEV;
2323
Joe Perches06296a12011-04-15 15:52:00 -07002324 ata_print_version_once(&pdev->dev, DRV_VERSION);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325
Tejun Heo24dc5f32007-01-20 16:00:28 +09002326 rc = pcim_enable_device(pdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002328 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002329
Tejun Heo9a829cc2007-04-17 23:44:08 +09002330 /* determine type and allocate host */
Kuan Luof140f0f2007-10-15 15:16:53 -04002331 if (type == CK804 && adma_enabled) {
Joe Perchesa44fec12011-04-15 15:51:58 -07002332 dev_notice(&pdev->dev, "Using ADMA mode\n");
Robert Hancockfbbb2622006-10-27 19:08:41 -07002333 type = ADMA;
Tejun Heo2d775702009-01-25 11:29:38 +09002334 } else if (type == MCP5x && swncq_enabled) {
Joe Perchesa44fec12011-04-15 15:51:58 -07002335 dev_notice(&pdev->dev, "Using SWNCQ mode\n");
Tejun Heo2d775702009-01-25 11:29:38 +09002336 type = SWNCQ;
Jeff Garzik360737a2007-10-29 06:49:24 -04002337 }
2338
Tejun Heo1626aeb2007-05-04 12:43:58 +02002339 ppi[0] = &nv_port_info[type];
Tejun Heo95947192008-03-25 12:22:49 +09002340 ipriv = ppi[0]->private_data;
Tejun Heo1c5afdf2010-05-19 22:10:22 +02002341 rc = ata_pci_bmdma_prepare_host(pdev, ppi, &host);
Tejun Heo9a829cc2007-04-17 23:44:08 +09002342 if (rc)
2343 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002344
Tejun Heo24dc5f32007-01-20 16:00:28 +09002345 hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002346 if (!hpriv)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002347 return -ENOMEM;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002348 hpriv->type = type;
Tejun Heo9a829cc2007-04-17 23:44:08 +09002349 host->private_data = hpriv;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350
Tejun Heo9a829cc2007-04-17 23:44:08 +09002351 /* request and iomap NV_MMIO_BAR */
2352 rc = pcim_iomap_regions(pdev, 1 << NV_MMIO_BAR, DRV_NAME);
2353 if (rc)
2354 return rc;
2355
2356 /* configure SCR access */
2357 base = host->iomap[NV_MMIO_BAR];
2358 host->ports[0]->ioaddr.scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
2359 host->ports[1]->ioaddr.scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
Jeff Garzik02cbd922006-03-22 23:59:46 -05002360
Tejun Heoada364e2006-06-17 15:49:56 +09002361 /* enable SATA space for CK804 */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002362 if (type >= CK804) {
Tejun Heoada364e2006-06-17 15:49:56 +09002363 u8 regval;
2364
2365 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2366 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2367 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2368 }
2369
Tejun Heo9a829cc2007-04-17 23:44:08 +09002370 /* init ADMA */
Robert Hancockfbbb2622006-10-27 19:08:41 -07002371 if (type == ADMA) {
Tejun Heo9a829cc2007-04-17 23:44:08 +09002372 rc = nv_adma_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002373 if (rc)
Tejun Heo24dc5f32007-01-20 16:00:28 +09002374 return rc;
Jeff Garzik360737a2007-10-29 06:49:24 -04002375 } else if (type == SWNCQ)
Kuan Luof140f0f2007-10-15 15:16:53 -04002376 nv_swncq_host_init(host);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002377
Tony Vroon51c89492009-08-06 00:50:09 +01002378 if (msi_enabled) {
Joe Perchesa44fec12011-04-15 15:51:58 -07002379 dev_notice(&pdev->dev, "Using MSI\n");
Tony Vroon51c89492009-08-06 00:50:09 +01002380 pci_enable_msi(pdev);
2381 }
2382
Tejun Heo9a829cc2007-04-17 23:44:08 +09002383 pci_set_master(pdev);
Tejun Heo95cc2c72010-05-14 11:48:50 +02002384 return ata_pci_sff_activate_host(host, ipriv->irq_handler, ipriv->sht);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385}
2386
Bartlomiej Zolnierkiewicz58eb8cd2014-05-07 17:17:44 +02002387#ifdef CONFIG_PM_SLEEP
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002388static int nv_pci_device_resume(struct pci_dev *pdev)
2389{
Jingoo Han0a86e1c2013-06-03 14:05:36 +09002390 struct ata_host *host = pci_get_drvdata(pdev);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002391 struct nv_host_priv *hpriv = host->private_data;
Robert Hancockce053fa2007-02-05 16:26:04 -08002392 int rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002393
Robert Hancockce053fa2007-02-05 16:26:04 -08002394 rc = ata_pci_device_do_resume(pdev);
Jeff Garzikb4479162007-10-25 20:47:30 -04002395 if (rc)
Robert Hancockce053fa2007-02-05 16:26:04 -08002396 return rc;
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002397
2398 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
Jeff Garzikb4479162007-10-25 20:47:30 -04002399 if (hpriv->type >= CK804) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002400 u8 regval;
2401
2402 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2403 regval |= NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2404 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
2405 }
Jeff Garzikb4479162007-10-25 20:47:30 -04002406 if (hpriv->type == ADMA) {
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002407 u32 tmp32;
2408 struct nv_adma_port_priv *pp;
2409 /* enable/disable ADMA on the ports appropriately */
2410 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2411
2412 pp = host->ports[0]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002413 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002414 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002415 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002416 else
2417 tmp32 |= (NV_MCP_SATA_CFG_20_PORT0_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002418 NV_MCP_SATA_CFG_20_PORT0_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002419 pp = host->ports[1]->private_data;
Jeff Garzikb4479162007-10-25 20:47:30 -04002420 if (pp->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002421 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002422 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002423 else
2424 tmp32 |= (NV_MCP_SATA_CFG_20_PORT1_EN |
Jeff Garzik5796d1c2007-10-26 00:03:37 -04002425 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002426
2427 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2428 }
2429 }
2430
2431 ata_host_resume(host);
2432
2433 return 0;
2434}
Tejun Heo438ac6d2007-03-02 17:31:26 +09002435#endif
Robert Hancockcdf56bc2007-01-03 18:13:57 -06002436
Jeff Garzikcca39742006-08-24 03:19:22 -04002437static void nv_ck804_host_stop(struct ata_host *host)
Tejun Heoada364e2006-06-17 15:49:56 +09002438{
Jeff Garzikcca39742006-08-24 03:19:22 -04002439 struct pci_dev *pdev = to_pci_dev(host->dev);
Tejun Heoada364e2006-06-17 15:49:56 +09002440 u8 regval;
2441
2442 /* disable SATA space for CK804 */
2443 pci_read_config_byte(pdev, NV_MCP_SATA_CFG_20, &regval);
2444 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN;
2445 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval);
Tejun Heoada364e2006-06-17 15:49:56 +09002446}
2447
Robert Hancockfbbb2622006-10-27 19:08:41 -07002448static void nv_adma_host_stop(struct ata_host *host)
2449{
2450 struct pci_dev *pdev = to_pci_dev(host->dev);
Robert Hancockfbbb2622006-10-27 19:08:41 -07002451 u32 tmp32;
2452
Robert Hancockfbbb2622006-10-27 19:08:41 -07002453 /* disable ADMA on the ports */
2454 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &tmp32);
2455 tmp32 &= ~(NV_MCP_SATA_CFG_20_PORT0_EN |
2456 NV_MCP_SATA_CFG_20_PORT0_PWB_EN |
2457 NV_MCP_SATA_CFG_20_PORT1_EN |
2458 NV_MCP_SATA_CFG_20_PORT1_PWB_EN);
2459
2460 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, tmp32);
2461
2462 nv_ck804_host_stop(host);
2463}
2464
Axel Lin2fc75da2012-04-19 13:43:05 +08002465module_pci_driver(nv_pci_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466
Robert Hancockfbbb2622006-10-27 19:08:41 -07002467module_param_named(adma, adma_enabled, bool, 0444);
Brandon Ehle55f784c2009-03-01 00:02:49 -08002468MODULE_PARM_DESC(adma, "Enable use of ADMA (Default: false)");
Kuan Luof140f0f2007-10-15 15:16:53 -04002469module_param_named(swncq, swncq_enabled, bool, 0444);
Zoltan Boszormenyid21279f2008-03-28 14:33:46 -07002470MODULE_PARM_DESC(swncq, "Enable use of SWNCQ (Default: true)");
Tony Vroon51c89492009-08-06 00:50:09 +01002471module_param_named(msi, msi_enabled, bool, 0444);
2472MODULE_PARM_DESC(msi, "Enable use of MSI (Default: false)");