/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */
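
/*
 * Illustrative sketch (not part of this driver): consumers reach this engine
 * through the generic dmaengine API rather than any ioat-specific call.
 * Assuming dst, src and len are already DMA-mapped by the caller, an
 * offloaded copy looks roughly like:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *	...
 *	dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 */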

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/sizes.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

static char *chanerr_str[] = {
	"DMA Transfer Source Address Error",
	"DMA Transfer Destination Address Error",
	"Next Descriptor Address Error",
	"Descriptor Error",
	"Chan Address Value Error",
	"CHANCMD Error",
	"Chipset Uncorrectable Data Integrity Error",
	"DMA Uncorrectable Data Integrity Error",
	"Read Data Error",
	"Write Data Error",
	"Descriptor Control Error",
	"Descriptor Transfer Size Error",
	"Completion Address Error",
	"Interrupt Configuration Error",
	"Super extended descriptor Address Error",
	"Unaffiliated Error",
	"CRC or XOR P Error",
	"XOR Q Error",
	"Descriptor Count Error",
	"DIF All F detect Error",
	"Guard Tag verification Error",
	"Application Tag verification Error",
	"Reference Tag verification Error",
	"Bundle Bit Error",
	"Result DIF All F detect Error",
	"Result Guard Tag verification Error",
	"Result Application Tag verification Error",
	"Result Reference Tag verification Error",
};

static void ioat_eh(struct ioatdma_chan *ioat_chan);

static void ioat_print_chanerrs(struct ioatdma_chan *ioat_chan, u32 chanerr)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(chanerr_str); i++) {
		if ((chanerr >> i) & 1) {
			dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
				i, chanerr_str[i]);
		}
	}
}

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt id
 * @data: interrupt data
 */
irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_cleanup_event((unsigned long)&ioat_chan->dma_chan);
}

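/*
 * Kick the hardware: advance the software "issued" mark to "head" and write
 * the updated descriptor count to the per-channel DMACOUNT register so the
 * engine fetches the newly appended descriptors.
 */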
static void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held.
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state))
		__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

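/*
 * Re-arm a channel after a reset or error: point the hardware chain at the
 * first unfinished descriptor (tail) and re-issue everything that was still
 * pending, or fall back to a null descriptor if the ring is empty.
 */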
static void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

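/*
 * Suspend the channel and poll CHANSTS until it leaves the ACTIVE/IDLE
 * states or the timeout expires.  A timeout of 0 means wait indefinitely.
 */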
static int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u32 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

static int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

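/*
 * Producer-side submit callback.  The matching ioat_check_space_lock() left
 * prep_lock held with ioat_chan->produce descriptors reserved; this assigns
 * the cookie, publishes the descriptors to the hardware by advancing head,
 * and finally drops prep_lock.
 */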
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
	__releases(&ioat_chan->prep_lock)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, int idx, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(chan);
	int chunk;
	dma_addr_t phys;
	u8 *pos;
	off_t offs;

	chunk = idx / IOAT_DESCS_PER_2M;
	idx &= (IOAT_DESCS_PER_2M - 1);
	offs = idx * IOAT_DESC_SZ;
	pos = (u8 *)ioat_chan->descs[chunk].virt + offs;
	phys = ioat_chan->descs[chunk].hw + offs;
	hw = (struct ioat_dma_descriptor *)pos;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	kmem_cache_free(ioat_cache, desc);
}

struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent **ring;
	int total_descs = 1 << order;
	int i, chunks;

	/* allocate the array to hold the software ring */
	ring = kcalloc(total_descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;

	ioat_chan->desc_chunks = chunks = (total_descs * IOAT_DESC_SZ) / SZ_2M;

	for (i = 0; i < chunks; i++) {
		struct ioat_descs *descs = &ioat_chan->descs[i];

		descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
						 SZ_2M, &descs->hw, flags);
		if (!descs->virt) {
			int idx;

			/* unwind the chunks that did allocate */
			for (idx = 0; idx < i; idx++) {
				descs = &ioat_chan->descs[idx];
				dma_free_coherent(to_dev(ioat_chan), SZ_2M,
						  descs->virt, descs->hw);
				descs->virt = NULL;
				descs->hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
	}

	for (i = 0; i < total_descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, i, flags);
		if (!ring[i]) {
			int idx;

			while (i--)
				ioat_free_ring_ent(ring[i], c);

			for (idx = 0; idx < ioat_chan->desc_chunks; idx++) {
				dma_free_coherent(to_dev(ioat_chan),
						  SZ_2M,
						  ioat_chan->descs[idx].virt,
						  ioat_chan->descs[idx].hw);
				ioat_chan->descs[idx].virt = NULL;
				ioat_chan->descs[idx].hw = 0;
			}

			ioat_chan->desc_chunks = 0;
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < total_descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	/* setup descriptor pre-fetching for v3.4 */
	if (ioat_dma->cap & IOAT_CAP_DPS) {
		u16 drsctl = IOAT_CHAN_DRSZ_2MB | IOAT_CHAN_DRS_EN;

		if (chunks == 1)
			drsctl |= IOAT_CHAN_DRS_AUTOWRAP;

		writew(drsctl, ioat_chan->reg_base + IOAT_CHAN_DRSCTL_OFFSET);
	}

	return ring;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
	__acquires(&ioat_chan->prep_lock)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	spin_unlock_bh(&ioat_chan->prep_lock);

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case; we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_timer_event(&ioat_chan->timer);
	}

	return -ENOMEM;
}
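
/*
 * Rough usage sketch (assumed, based on this driver's prep path): a prep
 * routine reserves ring space, fills its descriptors, and leaves prep_lock
 * held until the client submits the returned descriptor:
 *
 *	if (ioat_check_space_lock(ioat_chan, num_descs) == 0)
 *		idx = ioat_chan->head;
 *	else
 *		return NULL;
 *	for (i = 0; i < num_descs; i++) {
 *		desc = ioat_get_ring_ent(ioat_chan, idx + i);
 *		... fill desc->hw ...
 *	}
 *	return &desc->txd;	prep_lock is released in tx_submit
 */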

static bool desc_has_ext(struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	if (hw->ctl_f.op == IOAT_OP_XOR ||
	    hw->ctl_f.op == IOAT_OP_XOR_VAL) {
		struct ioat_xor_descriptor *xor = desc->xor;

		if (src_cnt_to_sw(xor->ctl_f.src_cnt) > 5)
			return true;
	} else if (hw->ctl_f.op == IOAT_OP_PQ ||
		   hw->ctl_f.op == IOAT_OP_PQ_VAL) {
		struct ioat_pq_descriptor *pq = desc->pq;

		if (src_cnt_to_sw(pq->ctl_f.src_cnt) > 3)
			return true;
	}

	return false;
}

static void
ioat_free_sed(struct ioatdma_device *ioat_dma, struct ioat_sed_ent *sed)
{
	if (!sed)
		return;

	dma_pool_free(ioat_dma->sed_hw_pool[sed->hw_pool], sed->hw, sed->dma);
	kmem_cache_free(ioat_sed_cache, sed);
}

static u64 ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	return phys_complete;
}

static bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
				  u64 *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

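/*
 * For P+Q validate operations the hardware writes its error status back into
 * the descriptor (DWBES).  Fold any P/Q check failures into the result word
 * the client is polling.
 */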
static void
desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc)
{
	struct ioat_dma_descriptor *hw = desc->hw;

	switch (hw->ctl_f.op) {
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
	{
		struct ioat_pq_descriptor *pq = desc->pq;

		/* check if an error status was written back */
		if (!pq->dwbes_f.wbes)
			return;

		/* need to set a chanerr var for checking to clear later */

		if (pq->dwbes_f.p_val_err)
			*desc->result |= SUM_CHECK_P_RESULT;

		if (pq->dwbes_f.q_val_err)
			*desc->result |= SUM_CHECK_Q_RESULT;

		return;
	}
	default:
		return;
	}
}

/**
 * __cleanup - reclaim used descriptors
 * @ioat_chan: channel (ring) to clean
 * @phys_complete: hardware-reported completion address
 */
static void __cleanup(struct ioatdma_chan *ioat_chan, dma_addr_t phys_complete)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	int idx = ioat_chan->tail, i;
	u16 active;

	dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);

	/*
	 * At restart of the channel, the completion address and the
	 * channel status will be 0 due to starting a new chain. Since it
	 * is a new chain and the first descriptor "fails", there is
	 * nothing to clean up. We do not want to reap the entire submitted
	 * chain due to this 0 address value and then BUG.
	 */
	if (!phys_complete)
		return;

	active = ioat_ring_active(ioat_chan);
	for (i = 0; i < active && !seen_current; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		dump_desc_dbg(ioat_chan, desc);

		/* set err stat if we are using dwbes */
		if (ioat_dma->cap & IOAT_CAP_DWBES)
			desc_get_errstat(ioat_chan, desc);

		tx = &desc->txd;
		if (tx->cookie) {
			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			dmaengine_desc_get_callback_invoke(tx, NULL);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		if (tx->phys == phys_complete)
			seen_current = true;

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			BUG_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	/* finish all descriptor reads before incrementing tail */
	smp_mb();
	ioat_chan->tail = idx + i;
	/* no active descs have written a completion? */
	BUG_ON(active && !seen_current);
	ioat_chan->last_completion = phys_complete;

	if (active - i == 0) {
		dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
			__func__);
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
	}

	/* apply the per-pending-descriptor microsecond delay set via sysfs */
	if (ioat_chan->intr_coalesce != ioat_chan->prev_intr_coalesce) {
		writew(min((ioat_chan->intr_coalesce * (active - i)),
		       IOAT_INTRDELAY_MASK),
		       ioat_chan->ioat_dma->reg_base + IOAT_INTRDELAY_OFFSET);
		ioat_chan->prev_intr_coalesce = ioat_chan->intr_coalesce;
	}
}

static void ioat_cleanup(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	spin_lock_bh(&ioat_chan->cleanup_lock);

	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	if (is_ioat_halted(*ioat_chan->completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		if (chanerr &
		    (IOAT_CHANERR_HANDLE_MASK | IOAT_CHANERR_RECOVER_MASK)) {
			mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
			ioat_eh(ioat_chan);
		}
	}

	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

void ioat_cleanup_event(unsigned long data)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan((void *)data);

	ioat_cleanup(ioat_chan);
	if (!test_bit(IOAT_RUN, &ioat_chan->state))
		return;
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);
}

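/*
 * Bring a halted channel back to life: re-program the completion address
 * registers, quiesce the engine, reclaim whatever did complete, and then
 * restart the chain from the current tail.
 */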
static void ioat_restart_channel(struct ioatdma_chan *ioat_chan)
{
	u64 phys_complete;

	/* set the completion address register again */
	writel(lower_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(upper_32_bits(ioat_chan->completion_dma),
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	ioat_quiesce(ioat_chan, 0);
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	__ioat_restart_chan(ioat_chan);
}

static void ioat_abort_descs(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	u16 active;
	int idx = ioat_chan->tail, i;

	/*
	 * We assume that the failed descriptor has already been processed.
	 * Here we just complete all the remaining submitted descriptors
	 * with an aborted status.
	 */
	active = ioat_ring_active(ioat_chan);

	/* we skip the failed descriptor that tail points to */
	for (i = 1; i < active; i++) {
		struct dma_async_tx_descriptor *tx;

		prefetch(ioat_get_ring_ent(ioat_chan, idx + i + 1));
		desc = ioat_get_ring_ent(ioat_chan, idx + i);

		tx = &desc->txd;
		if (tx->cookie) {
			struct dmaengine_result res;

			dma_cookie_complete(tx);
			dma_descriptor_unmap(tx);
			res.result = DMA_TRANS_ABORTED;
			dmaengine_desc_get_callback_invoke(tx, &res);
			tx->callback = NULL;
			tx->callback_result = NULL;
		}

		/* skip extended descriptors */
		if (desc_has_ext(desc)) {
			WARN_ON(i + 1 >= active);
			i++;
		}

		/* cleanup super extended descriptors */
		if (desc->sed) {
			ioat_free_sed(ioat_dma, desc->sed);
			desc->sed = NULL;
		}
	}

	smp_mb(); /* finish all descriptor reads before incrementing tail */
	ioat_chan->tail = idx + active;

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	ioat_chan->last_completion = *ioat_chan->completion = desc->txd.phys;
}

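/*
 * Error handler for a halted channel: decode CHANERR, fix up the result of
 * the faulting descriptor where the error is recoverable, abort the rest of
 * the ring (and reset the hardware) for transfer errors, then clear the
 * error registers and restart the channel.
 */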
static void ioat_eh(struct ioatdma_chan *ioat_chan)
{
	struct pci_dev *pdev = to_pdev(ioat_chan);
	struct ioat_dma_descriptor *hw;
	struct dma_async_tx_descriptor *tx;
	u64 phys_complete;
	struct ioat_ring_ent *desc;
	u32 err_handled = 0;
	u32 chanerr_int;
	u32 chanerr;
	bool abort = false;
	struct dmaengine_result res;

	/* cleanup so tail points to descriptor that caused the error */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_read_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, &chanerr_int);

	dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
		__func__, chanerr, chanerr_int);

	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
	hw = desc->hw;
	dump_desc_dbg(ioat_chan, desc);

	switch (hw->ctl_f.op) {
	case IOAT_OP_XOR_VAL:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		break;
	case IOAT_OP_PQ_VAL:
	case IOAT_OP_PQ_VAL_16S:
		if (chanerr & IOAT_CHANERR_XOR_P_OR_CRC_ERR) {
			*desc->result |= SUM_CHECK_P_RESULT;
			err_handled |= IOAT_CHANERR_XOR_P_OR_CRC_ERR;
		}
		if (chanerr & IOAT_CHANERR_XOR_Q_ERR) {
			*desc->result |= SUM_CHECK_Q_RESULT;
			err_handled |= IOAT_CHANERR_XOR_Q_ERR;
		}
		break;
	}

	if (chanerr & IOAT_CHANERR_RECOVER_MASK) {
		if (chanerr & IOAT_CHANERR_READ_DATA_ERR) {
			res.result = DMA_TRANS_READ_FAILED;
			err_handled |= IOAT_CHANERR_READ_DATA_ERR;
		} else if (chanerr & IOAT_CHANERR_WRITE_DATA_ERR) {
			res.result = DMA_TRANS_WRITE_FAILED;
			err_handled |= IOAT_CHANERR_WRITE_DATA_ERR;
		}

		abort = true;
	} else
		res.result = DMA_TRANS_NOERROR;

	/* fault on unhandled error or spurious halt */
	if (chanerr ^ err_handled || chanerr == 0) {
		dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
			__func__, chanerr, err_handled);
		dev_err(to_dev(ioat_chan), "Errors handled:\n");
		ioat_print_chanerrs(ioat_chan, err_handled);
		dev_err(to_dev(ioat_chan), "Errors not handled:\n");
		ioat_print_chanerrs(ioat_chan, (chanerr & ~err_handled));

		BUG();
	}

	/* cleanup the faulty descriptor since we are continuing */
	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		dmaengine_desc_get_callback_invoke(tx, &res);
		tx->callback = NULL;
		tx->callback_result = NULL;
	}

	/* mark faulting descriptor as complete */
	*ioat_chan->completion = desc->txd.phys;

	spin_lock_bh(&ioat_chan->prep_lock);
	/* we need to abort all descriptors */
	if (abort) {
		ioat_abort_descs(ioat_chan);
		/* clean up the channel, we could be in a weird state */
		ioat_reset_hw(ioat_chan);
	}

	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	pci_write_config_dword(pdev, IOAT_PCI_CHANERR_INT_OFFSET, chanerr_int);

	ioat_restart_channel(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

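/*
 * Ring has drained (or is about to): re-arm the completion timer if work is
 * still outstanding, otherwise drop back to the slower idle timeout.
 */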
static void check_active(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_active(ioat_chan)) {
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		return;
	}

	if (test_and_clear_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + IDLE_TIMEOUT);
}

void ioat_timer_event(struct timer_list *t)
{
	struct ioatdma_chan *ioat_chan = from_timer(ioat_chan, t, timer);
	dma_addr_t phys_complete;
	u64 status;

	status = ioat_chansts(ioat_chan);

	/* when halted due to errors check for channel
	 * programming errors before advancing the completion state
	 */
	if (is_ioat_halted(status)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
			__func__, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		if (test_bit(IOAT_RUN, &ioat_chan->state)) {
			spin_lock_bh(&ioat_chan->cleanup_lock);
			spin_lock_bh(&ioat_chan->prep_lock);
			set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);

			ioat_abort_descs(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Reset channel...\n");
			ioat_reset_hw(ioat_chan);
			dev_warn(to_dev(ioat_chan), "Restart channel...\n");
			ioat_restart_channel(ioat_chan);

			spin_lock_bh(&ioat_chan->prep_lock);
			clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
			spin_unlock_bh(&ioat_chan->prep_lock);
			spin_unlock_bh(&ioat_chan->cleanup_lock);
		}

		return;
	}

	spin_lock_bh(&ioat_chan->cleanup_lock);

	/* handle the no-actives case */
	if (!ioat_ring_active(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		check_active(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	}

	/* if we haven't made progress and we have already
	 * acknowledged a pending completion once, then be more
	 * forceful with a restart
	 */
	if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
		__cleanup(ioat_chan, phys_complete);
	else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
		u32 chanerr;

		chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
		dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
			status, chanerr);
		dev_err(to_dev(ioat_chan), "Errors:\n");
		ioat_print_chanerrs(ioat_chan, chanerr);

		dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
			ioat_ring_active(ioat_chan));

		spin_lock_bh(&ioat_chan->prep_lock);
		set_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);

		ioat_abort_descs(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
		ioat_reset_hw(ioat_chan);
		dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
		ioat_restart_channel(ioat_chan);

		spin_lock_bh(&ioat_chan->prep_lock);
		clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state);
		spin_unlock_bh(&ioat_chan->prep_lock);
		spin_unlock_bh(&ioat_chan->cleanup_lock);
		return;
	} else
		set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);

	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
	spin_unlock_bh(&ioat_chan->cleanup_lock);
}

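/*
 * dmaengine ->device_tx_status() hook: report the state of a cookie, running
 * a cleanup pass first if the descriptor has not been reaped yet.
 */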
enum dma_status
ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_cleanup(ioat_chan);

	return dma_cookie_status(c, cookie, txstate);
}

int ioat_reset_hw(struct ioatdma_chan *ioat_chan)
{
	/* throw away whatever the channel was doing and get it
	 * initialized, with ioat3 specific workarounds
	 */
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	u32 chanerr;
	u16 dev_id;
	int err;

	ioat_quiesce(ioat_chan, msecs_to_jiffies(100));

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	if (ioat_dma->version < IOAT_VER_3_3) {
		/* clear any pending errors */
		err = pci_read_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, &chanerr);
		if (err) {
			dev_err(&pdev->dev,
				"channel error register unreachable\n");
			return err;
		}
		pci_write_config_dword(pdev,
				IOAT_PCI_CHANERR_INT_OFFSET, chanerr);

		/* Clear DMAUNCERRSTS Cfg-Reg Parity Error status bit
		 * (workaround for spurious config parity error after restart)
		 */
		pci_read_config_word(pdev, IOAT_PCI_DEVICE_ID_OFFSET, &dev_id);
		if (dev_id == PCI_DEVICE_ID_INTEL_IOAT_TBG0) {
			pci_write_config_dword(pdev,
					IOAT_PCI_DMAUNCERRSTS_OFFSET,
					0x10);
		}
	}

	if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
		ioat_dma->msixtba0 = readq(ioat_dma->reg_base + 0x1000);
		ioat_dma->msixdata0 = readq(ioat_dma->reg_base + 0x1008);
		ioat_dma->msixpba = readq(ioat_dma->reg_base + 0x1800);
	}

	err = ioat_reset_sync(ioat_chan, msecs_to_jiffies(200));
	if (!err) {
		if (is_bwd_ioat(pdev) && (ioat_dma->irq_mode == IOAT_MSIX)) {
			writeq(ioat_dma->msixtba0, ioat_dma->reg_base + 0x1000);
			writeq(ioat_dma->msixdata0, ioat_dma->reg_base + 0x1008);
			writeq(ioat_dma->msixpba, ioat_dma->reg_base + 0x1800);
		}
	}

	if (err)
		dev_err(&pdev->dev, "Failed to reset: %d\n", err);

	return err;
}