Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ |
| 3 | #include <linux/init.h> |
| 4 | #include <linux/kernel.h> |
| 5 | #include <linux/module.h> |
| 6 | #include <linux/pci.h> |
| 7 | #include <linux/io-64-nonatomic-lo-hi.h> |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 8 | #include <linux/dmaengine.h> |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 9 | #include <uapi/linux/idxd.h> |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 10 | #include "../dmaengine.h" |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 11 | #include "idxd.h" |
| 12 | #include "registers.h" |
| 13 | |
| 14 | void idxd_device_wqs_clear_state(struct idxd_device *idxd) |
| 15 | { |
| 16 | int i; |
| 17 | |
| 18 | lockdep_assert_held(&idxd->dev_lock); |
| 19 | for (i = 0; i < idxd->max_wqs; i++) { |
| 20 | struct idxd_wq *wq = &idxd->wqs[i]; |
| 21 | |
| 22 | wq->state = IDXD_WQ_DISABLED; |
| 23 | } |
| 24 | } |
| 25 | |
Dave Jiang | 0d5c10b | 2020-06-26 11:11:18 -0700 | [diff] [blame] | 26 | static void idxd_device_reinit(struct work_struct *work) |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 27 | { |
Dave Jiang | 0d5c10b | 2020-06-26 11:11:18 -0700 | [diff] [blame] | 28 | struct idxd_device *idxd = container_of(work, struct idxd_device, work); |
| 29 | struct device *dev = &idxd->pdev->dev; |
| 30 | int rc, i; |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 31 | |
Dave Jiang | 0d5c10b | 2020-06-26 11:11:18 -0700 | [diff] [blame] | 32 | idxd_device_reset(idxd); |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 33 | rc = idxd_device_config(idxd); |
| 34 | if (rc < 0) |
| 35 | goto out; |
| 36 | |
| 37 | rc = idxd_device_enable(idxd); |
| 38 | if (rc < 0) |
| 39 | goto out; |
| 40 | |
| 41 | for (i = 0; i < idxd->max_wqs; i++) { |
| 42 | struct idxd_wq *wq = &idxd->wqs[i]; |
| 43 | |
| 44 | if (wq->state == IDXD_WQ_ENABLED) { |
| 45 | rc = idxd_wq_enable(wq); |
| 46 | if (rc < 0) { |
Dave Jiang | 0d5c10b | 2020-06-26 11:11:18 -0700 | [diff] [blame] | 47 | dev_warn(dev, "Unable to re-enable wq %s\n", |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 48 | dev_name(&wq->conf_dev)); |
| 49 | } |
| 50 | } |
| 51 | } |
| 52 | |
Dave Jiang | 0d5c10b | 2020-06-26 11:11:18 -0700 | [diff] [blame] | 53 | return; |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 54 | |
| 55 | out: |
| 56 | idxd_device_wqs_clear_state(idxd); |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 57 | } |
| 58 | |
| 59 | irqreturn_t idxd_irq_handler(int vec, void *data) |
| 60 | { |
| 61 | struct idxd_irq_entry *irq_entry = data; |
| 62 | struct idxd_device *idxd = irq_entry->idxd; |
| 63 | |
| 64 | idxd_mask_msix_vector(idxd, irq_entry->id); |
| 65 | return IRQ_WAKE_THREAD; |
| 66 | } |
| 67 | |
/*
 * Threaded handler for the device's miscellaneous interrupt vector.
 * Reads INTCAUSE, services each cause bit it understands (software
 * error, command completion, occupancy, perfmon overflow), acks the
 * cause register, and on an error additionally checks GENSTS for a
 * device halt, scheduling a reinit or clearing wq state as needed.
 * The vector is unmasked again on exit (it was masked in
 * idxd_irq_handler).
 */
irqreturn_t idxd_misc_thread(int vec, void *data)
{
	struct idxd_irq_entry *irq_entry = data;
	struct idxd_device *idxd = irq_entry->idxd;
	struct device *dev = &idxd->pdev->dev;
	union gensts_reg gensts;
	u32 cause, val = 0;
	int i;
	bool err = false;

	cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);

	if (cause & IDXD_INTC_ERR) {
		/* Snapshot the 4-word SWERR record under dev_lock, then ack it. */
		spin_lock_bh(&idxd->dev_lock);
		for (i = 0; i < 4; i++)
			idxd->sw_err.bits[i] = ioread64(idxd->reg_base +
					IDXD_SWERR_OFFSET + i * sizeof(u64));
		iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET);

		/*
		 * Wake any user-space (cdev) waiters: just the wq named in the
		 * error record when it identifies one, otherwise all user wqs.
		 */
		if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) {
			int id = idxd->sw_err.wq_idx;
			struct idxd_wq *wq = &idxd->wqs[id];

			if (wq->type == IDXD_WQT_USER)
				wake_up_interruptible(&wq->idxd_cdev.err_queue);
		} else {
			int i;

			for (i = 0; i < idxd->max_wqs; i++) {
				struct idxd_wq *wq = &idxd->wqs[i];

				if (wq->type == IDXD_WQT_USER)
					wake_up_interruptible(&wq->idxd_cdev.err_queue);
			}
		}

		spin_unlock_bh(&idxd->dev_lock);
		val |= IDXD_INTC_ERR;

		for (i = 0; i < 4; i++)
			dev_warn(dev, "err[%d]: %#16.16llx\n",
				 i, idxd->sw_err.bits[i]);
		err = true;
	}

	if (cause & IDXD_INTC_CMD) {
		/* A device command finished; wake the submitter. */
		val |= IDXD_INTC_CMD;
		complete(idxd->cmd_done);
	}

	if (cause & IDXD_INTC_OCCUPY) {
		/* Driver does not utilize occupancy interrupt */
		val |= IDXD_INTC_OCCUPY;
	}

	if (cause & IDXD_INTC_PERFMON_OVFL) {
		/*
		 * Driver does not utilize perfmon counter overflow interrupt
		 * yet.
		 */
		val |= IDXD_INTC_PERFMON_OVFL;
	}

	/* Any cause bits we did not handle above remain set after the XOR. */
	val ^= cause;
	if (val)
		dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n",
			      val);

	/* Ack all cause bits, handled or not, before the halt check. */
	iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
	if (!err)
		goto out;

	gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
	if (gensts.state == IDXD_DEVICE_STATE_HALT) {
		idxd->state = IDXD_DEV_HALTED;
		if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) {
			/*
			 * If we need a software reset, we will throw the work
			 * on a system workqueue in order to allow interrupts
			 * for the device command completions.
			 */
			INIT_WORK(&idxd->work, idxd_device_reinit);
			queue_work(idxd->wq, &idxd->work);
		} else {
			/* FLR/system reset needed: driver can only report it. */
			spin_lock_bh(&idxd->dev_lock);
			idxd_device_wqs_clear_state(idxd);
			dev_err(&idxd->pdev->dev,
				"idxd halted, need %s.\n",
				gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
				"FLR" : "system reset");
			spin_unlock_bh(&idxd->dev_lock);
		}
	}

 out:
	idxd_unmask_msix_vector(idxd, irq_entry->id);
	return IRQ_HANDLED;
}
| 166 | |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 167 | static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, |
| 168 | int *processed) |
| 169 | { |
| 170 | struct idxd_desc *desc, *t; |
| 171 | struct llist_node *head; |
| 172 | int queued = 0; |
| 173 | |
Dave Jiang | 4f30264 | 2020-05-01 08:21:18 -0700 | [diff] [blame] | 174 | *processed = 0; |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 175 | head = llist_del_all(&irq_entry->pending_llist); |
| 176 | if (!head) |
| 177 | return 0; |
| 178 | |
| 179 | llist_for_each_entry_safe(desc, t, head, llnode) { |
| 180 | if (desc->completion->status) { |
| 181 | idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL); |
| 182 | idxd_free_desc(desc->wq, desc); |
| 183 | (*processed)++; |
| 184 | } else { |
| 185 | list_add_tail(&desc->list, &irq_entry->work_list); |
| 186 | queued++; |
| 187 | } |
| 188 | } |
| 189 | |
| 190 | return queued; |
| 191 | } |
| 192 | |
| 193 | static int irq_process_work_list(struct idxd_irq_entry *irq_entry, |
| 194 | int *processed) |
| 195 | { |
| 196 | struct list_head *node, *next; |
| 197 | int queued = 0; |
| 198 | |
Dave Jiang | 4f30264 | 2020-05-01 08:21:18 -0700 | [diff] [blame] | 199 | *processed = 0; |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 200 | if (list_empty(&irq_entry->work_list)) |
| 201 | return 0; |
| 202 | |
| 203 | list_for_each_safe(node, next, &irq_entry->work_list) { |
| 204 | struct idxd_desc *desc = |
| 205 | container_of(node, struct idxd_desc, list); |
| 206 | |
| 207 | if (desc->completion->status) { |
| 208 | list_del(&desc->list); |
| 209 | /* process and callback */ |
| 210 | idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL); |
| 211 | idxd_free_desc(desc->wq, desc); |
| 212 | (*processed)++; |
| 213 | } else { |
| 214 | queued++; |
| 215 | } |
| 216 | } |
| 217 | |
| 218 | return queued; |
| 219 | } |
| 220 | |
Dave Jiang | 4f30264 | 2020-05-01 08:21:18 -0700 | [diff] [blame] | 221 | static int idxd_desc_process(struct idxd_irq_entry *irq_entry) |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 222 | { |
Dave Jiang | 4f30264 | 2020-05-01 08:21:18 -0700 | [diff] [blame] | 223 | int rc, processed, total = 0; |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 224 | |
| 225 | /* |
| 226 | * There are two lists we are processing. The pending_llist is where |
| 227 | * submmiter adds all the submitted descriptor after sending it to |
| 228 | * the workqueue. It's a lockless singly linked list. The work_list |
| 229 | * is the common linux double linked list. We are in a scenario of |
| 230 | * multiple producers and a single consumer. The producers are all |
| 231 | * the kernel submitters of descriptors, and the consumer is the |
| 232 | * kernel irq handler thread for the msix vector when using threaded |
| 233 | * irq. To work with the restrictions of llist to remain lockless, |
| 234 | * we are doing the following steps: |
| 235 | * 1. Iterate through the work_list and process any completed |
| 236 | * descriptor. Delete the completed entries during iteration. |
| 237 | * 2. llist_del_all() from the pending list. |
| 238 | * 3. Iterate through the llist that was deleted from the pending list |
| 239 | * and process the completed entries. |
| 240 | * 4. If the entry is still waiting on hardware, list_add_tail() to |
| 241 | * the work_list. |
| 242 | * 5. Repeat until no more descriptors. |
| 243 | */ |
| 244 | do { |
| 245 | rc = irq_process_work_list(irq_entry, &processed); |
Dave Jiang | 4f30264 | 2020-05-01 08:21:18 -0700 | [diff] [blame] | 246 | total += processed; |
| 247 | if (rc != 0) |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 248 | continue; |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 249 | |
| 250 | rc = irq_process_pending_llist(irq_entry, &processed); |
Dave Jiang | 4f30264 | 2020-05-01 08:21:18 -0700 | [diff] [blame] | 251 | total += processed; |
| 252 | } while (rc != 0); |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 253 | |
Dave Jiang | 4f30264 | 2020-05-01 08:21:18 -0700 | [diff] [blame] | 254 | return total; |
| 255 | } |
| 256 | |
| 257 | irqreturn_t idxd_wq_thread(int irq, void *data) |
| 258 | { |
| 259 | struct idxd_irq_entry *irq_entry = data; |
| 260 | int processed; |
| 261 | |
| 262 | processed = idxd_desc_process(irq_entry); |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 263 | idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id); |
| 264 | |
Dave Jiang | 8f47d1a | 2020-01-21 16:44:23 -0700 | [diff] [blame] | 265 | if (processed == 0) |
| 266 | return IRQ_NONE; |
| 267 | |
Dave Jiang | bfe1d56 | 2020-01-21 16:43:59 -0700 | [diff] [blame] | 268 | return IRQ_HANDLED; |
| 269 | } |