// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "pci.h"
#include "cxl.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI-exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even when they are not CXL enabled. While this driver
 * is focused on the PCI-specific aspects of a CXL device, it binds to the
 * specific CXL memory device class code, and therefore the implementation of
 * cxl_pci is focused on CXL memory devices.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register it on the CXL bus.
 *  - Enumerate the device's register interface and map it.
 *  - Register an nvdimm bridge device with cxl_core.
 *  - Register a CXL mailbox with cxl_core.
 */

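/*
 * A set doorbell means the device still owns the mailbox: the doorbell is
 * set by the caller when a command is submitted and cleared by the device
 * once that command completes (CXL 2.0 8.2.8.4).
 */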
#define cxl_doorbell_busy(cxlm)                                                \
	(readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/*
 * CXL 2.0 - 8.2.8.4
 * Note: despite the _MS suffix this value is in jiffies; 2 * HZ is two
 * seconds' worth of jiffies, which matches the jiffies-based time_after()
 * check in cxl_pci_mbox_wait_for_doorbell().
 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

static int cxl_pci_mbox_wait_for_doorbell(struct cxl_mem *cxlm)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlm)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlm))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlm->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

static void cxl_pci_mbox_timeout(struct cxl_mem *cxlm,
				 struct cxl_mbox_cmd *mbox_cmd)
{
	struct device *dev = cxlm->dev;

	dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
		mbox_cmd->opcode, mbox_cmd->size_in);
}

/**
 * __cxl_pci_mbox_send_cmd() - Execute a mailbox command
 * @cxlm: The CXL memory device to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command that uses only the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices, may have further
 * information available upon error conditions. Driver facilities wishing to
 * send mailbox commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the
 * primary mailbox to be OS controlled and the secondary mailbox to be used by
 * system firmware. This allows the OS and firmware to communicate with the
 * device without needing to coordinate with each other. The driver only uses
 * the primary mailbox.
 */
static int __cxl_pci_mbox_send_cmd(struct cxl_mem *cxlm,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlm->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlm->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlm)) {
		dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
	if (rc == -ETIMEDOUT) {
		cxl_pci_mbox_timeout(cxlm, mbox_cmd);
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != 0) {
		dev_dbg(dev, "Mailbox operation had an error\n");
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

/**
 * cxl_pci_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired.
 */
static int cxl_pci_mbox_get(struct cxl_mem *cxlm)
{
	struct device *dev = cxlm->dev;
	u64 md_status;
	int rc;

	mutex_lock_io(&cxlm->mbox_mutex);

	/*
	 * XXX: There is some amount of ambiguity in the 2.0 version of the spec
	 * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
	 * bit is to allow firmware running on the device to notify the driver
	 * that it's ready to receive commands. It is unclear if the bit needs
	 * to be read for each transaction, i.e. whether the firmware can
	 * switch it on and off as needed. Second, there is no defined timeout
	 * for mailbox ready, like there is for the doorbell interface.
	 *
	 * Assumptions:
	 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
	 *    it for every command.
	 *
	 * 2. If the doorbell is clear, the firmware should have first set the
	 *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
	 *    to be ready is sufficient.
	 */
	rc = cxl_pci_mbox_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}

	md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!((md_status & CXLMDEV_MBOX_IF_READY) && CXLMDEV_READY(md_status))) {
		dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Hardware shouldn't allow a ready status but also have failure bits
	 * set. Spit out an error; this should be reported as a bug.
	 */
	rc = -EFAULT;
	if (md_status & CXLMDEV_DEV_FATAL) {
		dev_err(dev, "mbox: reported ready, but fatal\n");
		goto out;
	}
	if (md_status & CXLMDEV_FW_HALT) {
		dev_err(dev, "mbox: reported ready, but halted\n");
		goto out;
	}
	if (CXLMDEV_RESET_NEEDED(md_status)) {
		dev_err(dev, "mbox: reported ready, but reset needed\n");
		goto out;
	}

	/* with lock held */
	return 0;

out:
	mutex_unlock(&cxlm->mbox_mutex);
	return rc;
}

/**
 * cxl_pci_mbox_put() - Release exclusive access to the mailbox.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 */
static void cxl_pci_mbox_put(struct cxl_mem *cxlm)
{
	mutex_unlock(&cxlm->mbox_mutex);
}

static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	int rc;

	rc = cxl_pci_mbox_get(cxlm);
	if (rc)
		return rc;

	rc = __cxl_pci_mbox_send_cmd(cxlm, cmd);
	cxl_pci_mbox_put(cxlm);

	return rc;
}
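
/*
 * Example (sketch): a cxl_core facility would issue a command through the
 * ->mbox_send() op roughly as below. The opcode and output structure are
 * illustrative; real callers go through cxl_mem_mbox_send_cmd(), which
 * validates the command first.
 *
 *	struct cxl_mbox_cmd cmd = {
 *		.opcode = CXL_MBOX_OP_IDENTIFY,
 *		.payload_out = &id,
 *		.size_out = sizeof(id),
 *	};
 *
 *	rc = cxlm->mbox_send(cxlm, &cmd);
 */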

static int cxl_pci_setup_mailbox(struct cxl_mem *cxlm)
{
	const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);

	cxlm->mbox_send = cxl_pci_mbox_send;
	cxlm->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);
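	/*
	 * The Payload Size field is an exponent: for example, a field value
	 * of 8 decodes to 1 << 8 = 256 bytes (the spec minimum) and a value
	 * of 20 decodes to 1 MB (the spec maximum, enforced by the soft
	 * limit below).
	 */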

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
	if (cxlm->payload_size < 256) {
		dev_err(cxlm->dev, "Mailbox is too small (%zub)",
			cxlm->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlm->dev, "Mailbox payload sized %zu",
		cxlm->payload_size);

	return 0;
}

static void __iomem *cxl_pci_map_regblock(struct cxl_mem *cxlm,
					  u8 bar, u64 offset)
{
	void __iomem *addr;
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Basic sanity check that BAR is big enough */
	if (pci_resource_len(pdev, bar) < offset) {
		dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
			&pdev->resource[bar], (unsigned long long)offset);
		/*
		 * Return NULL, not IOMEM_ERR_PTR(), so that both failure
		 * modes look the same to the caller, which only tests for
		 * NULL.
		 */
		return NULL;
	}

	addr = pci_iomap(pdev, bar, 0);
	if (!addr) {
		dev_err(dev, "failed to map registers\n");
		return addr;
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n",
		bar, offset);

	return addr;
}

static void cxl_pci_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base)
{
	pci_iounmap(to_pci_dev(cxlm->dev), base);
}

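/*
 * Walk the PCIe extended capability list for a Designated Vendor-Specific
 * Extended Capability (DVSEC) whose vendor is the CXL consortium and whose
 * DVSEC ID matches @dvsec; returns its config space offset, or 0 if none is
 * found.
 */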
static int cxl_pci_dvsec(struct pci_dev *pdev, int dvsec)
{
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 vendor, id;

		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}

static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
			  struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = cxlm->dev;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up device registers\n");
		break;
	default:
		break;
	}

	return 0;
}

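/*
 * cxl_probe_regs() above validates capabilities in a temporarily mapped
 * register block; cxl_map_regs() below establishes the long-lived mappings
 * that the driver uses at runtime.
 */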
static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
{
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		cxl_map_component_regs(pdev, &cxlm->regs.component, map);
		dev_dbg(dev, "Mapping component registers...\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
		dev_dbg(dev, "Mapping device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

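/*
 * Register Locator DVSEC, CXL 2.0 8.1.9: each 8-byte register block entry
 * packs the BAR indicator and the block type (Register Block Identifier)
 * into the low dword alongside the low bits of the offset; the high dword
 * holds the upper 32 bits of the offset.
 */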
static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi,
				      u8 *bar, u64 *offset, u8 *reg_type)
{
	*offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
	*bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
	*reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
}

/**
 * cxl_pci_setup_regs() - Set up necessary MMIO.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Return: 0 if all necessary registers mapped.
 *
 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * registers.
 */
static int cxl_pci_setup_regs(struct cxl_mem *cxlm)
{
	void __iomem *base;
	u32 regloc_size, regblocks;
	int regloc, i, n_maps, ret = 0;
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];

	regloc = cxl_pci_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
	if (!regloc) {
		dev_err(dev, "register location dvsec not found\n");
		return -ENXIO;
	}

	if (pci_request_mem_regions(pdev, pci_name(pdev)))
		return -ENODEV;

	/* Get the size of the Register Locator DVSEC */
	pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
	regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);

	regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
	regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;
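	/*
	 * For instance, assuming the register block array starts at offset
	 * 0xC within the DVSEC, a DVSEC length of 0x24 would yield
	 * (0x24 - 0xc) / 8 = 3 register blocks.
	 */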

	for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
		u32 reg_lo, reg_hi;
		u8 reg_type;
		u64 offset;
		u8 bar;

		pci_read_config_dword(pdev, regloc, &reg_lo);
		pci_read_config_dword(pdev, regloc + 4, &reg_hi);

		cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset,
					  &reg_type);

		dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
			bar, offset, reg_type);

		/* Ignore unknown register block types */
		if (reg_type > CXL_REGLOC_RBI_MEMDEV)
			continue;

		base = cxl_pci_map_regblock(cxlm, bar, offset);
		if (!base)
			return -ENOMEM;

		map = &maps[n_maps];
		map->barno = bar;
		map->block_offset = offset;
		map->reg_type = reg_type;

		ret = cxl_probe_regs(cxlm, base + offset, map);

		/* Always unmap the regblock regardless of probe success */
		cxl_pci_unmap_regblock(cxlm, base);

		if (ret)
			return ret;

		n_maps++;
	}

	pci_release_mem_regions(pdev);

	for (i = 0; i < n_maps; i++) {
		ret = cxl_map_regs(cxlm, &maps[i]);
		if (ret)
			break;
	}

	return ret;
}

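/*
 * Probe flow: enable the PCI device, create the cxl_mem context, map and
 * probe the register blocks, size the mailbox, enumerate supported commands,
 * identify the device and build its range info, then register the memX
 * device (plus an nvdimm bridge when a pmem range is present).
 */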
static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	int rc;

	/*
	 * Double check the anonymous union trickery in struct cxl_regs
	 * FIXME switch to struct_group()
	 */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlm = cxl_mem_create(&pdev->dev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	rc = cxl_pci_setup_regs(cxlm);
	if (rc)
		return rc;

	rc = cxl_pci_setup_mailbox(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlm);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cxl_mem_pci_tbl,
	.probe = cxl_pci_probe,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);