Dan Williams | 4cdadfd | 2021-02-16 20:09:50 -0800 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* Copyright(c) 2020 Intel Corporation. All rights reserved. */ |
Dan Williams | 4faf31b | 2021-09-08 22:12:32 -0700 | [diff] [blame^] | 3 | #include <linux/io-64-nonatomic-lo-hi.h> |
Dan Williams | 4cdadfd | 2021-02-16 20:09:50 -0800 | [diff] [blame] | 4 | #include <linux/module.h> |
Dan Williams | fae8817 | 2021-04-16 17:43:30 -0700 | [diff] [blame] | 5 | #include <linux/sizes.h> |
Dan Williams | b39cb10 | 2021-02-16 20:09:52 -0800 | [diff] [blame] | 6 | #include <linux/mutex.h> |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 7 | #include <linux/list.h> |
Dan Williams | 4cdadfd | 2021-02-16 20:09:50 -0800 | [diff] [blame] | 8 | #include <linux/pci.h> |
| 9 | #include <linux/io.h> |
Ben Widawsky | 5161a55 | 2021-08-02 10:29:38 -0700 | [diff] [blame] | 10 | #include "cxlmem.h" |
Dan Williams | 4cdadfd | 2021-02-16 20:09:50 -0800 | [diff] [blame] | 11 | #include "pci.h" |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 12 | #include "cxl.h" |
| 13 | |
| 14 | /** |
Ben Widawsky | 21e9f76 | 2021-05-26 10:44:13 -0700 | [diff] [blame] | 15 | * DOC: cxl pci |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 16 | * |
Ben Widawsky | 21e9f76 | 2021-05-26 10:44:13 -0700 | [diff] [blame] | 17 | * This implements the PCI exclusive functionality for a CXL device as it is |
| 18 | * defined by the Compute Express Link specification. CXL devices may surface |
| 19 | * certain functionality even if it isn't CXL enabled. |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 20 | * |
| 21 | * The driver has several responsibilities, mainly: |
| 22 | * - Create the memX device and register on the CXL bus. |
| 23 | * - Enumerate device's register interface and map them. |
| 24 | * - Probe the device attributes to establish sysfs interface. |
| 25 | * - Provide an IOCTL interface to userspace to communicate with the device for |
| 26 | * things like firmware update. |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 27 | */ |
| 28 | |
/*
 * Non-zero while the device still owns the mailbox, i.e. the doorbell is
 * set and a new command must not be written.
 */
#define cxl_doorbell_busy(cxlm)                                                \
	(readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                  \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
/* NOTE(review): despite the _MS suffix this is a jiffies count (2 * HZ == 2s) */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)
| 35 | |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 36 | static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm) |
| 37 | { |
| 38 | const unsigned long start = jiffies; |
| 39 | unsigned long end = start; |
| 40 | |
| 41 | while (cxl_doorbell_busy(cxlm)) { |
| 42 | end = jiffies; |
| 43 | |
| 44 | if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) { |
| 45 | /* Check again in case preempted before timeout test */ |
| 46 | if (!cxl_doorbell_busy(cxlm)) |
| 47 | break; |
| 48 | return -ETIMEDOUT; |
| 49 | } |
| 50 | cpu_relax(); |
| 51 | } |
| 52 | |
Dan Williams | 99e222a | 2021-09-08 22:12:09 -0700 | [diff] [blame] | 53 | dev_dbg(cxlm->dev, "Doorbell wait took %dms", |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 54 | jiffies_to_msecs(end) - jiffies_to_msecs(start)); |
| 55 | return 0; |
| 56 | } |
| 57 | |
| 58 | static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm, |
Dan Williams | b64955a | 2021-09-08 22:12:21 -0700 | [diff] [blame] | 59 | struct cxl_mbox_cmd *mbox_cmd) |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 60 | { |
Dan Williams | 99e222a | 2021-09-08 22:12:09 -0700 | [diff] [blame] | 61 | struct device *dev = cxlm->dev; |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 62 | |
| 63 | dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n", |
| 64 | mbox_cmd->opcode, mbox_cmd->size_in); |
| 65 | } |
| 66 | |
/**
 * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
 * @cxlm: The CXL memory device to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command thus only using the
 * registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4. Memory
 * devices, and perhaps other types of CXL devices may have further information
 * available upon error conditions. Driver facilities wishing to send mailbox
 * commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the primary
 * mailbox to be OS controlled and the secondary mailbox to be used by system
 * firmware. This allows the OS and firmware to communicate with the device and
 * not need to coordinate with each other. The driver only uses the primary
 * mailbox.
 */
static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlm->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlm->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 * 1. Caller reads MB Control Register to verify doorbell is clear
	 * 2. Caller writes Command Register
	 * 3. Caller writes Command Payload Registers if input payload is non-empty
	 * 4. Caller writes MB Control Register to set doorbell
	 * 5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 * 6. Caller reads MB Status Register to fetch Return code
	 * 7. If command successful, Caller reads Command Register to get Payload Length
	 * 8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlm)) {
		/* Should be impossible while holding mbox_mutex after _mbox_get() */
		dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		/* A non-zero input size with no buffer is a caller bug */
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc == -ETIMEDOUT) {
		cxl_mem_mbox_timeout(cxlm, mbox_cmd);
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != 0) {
		dev_dbg(dev, "Mailbox operation had an error\n");
		/*
		 * The transport succeeded; a command-level failure is
		 * reported to the caller via mbox_cmd->return_code.
		 */
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}
| 182 | |
/**
 * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired. On success the mutex is held
 *         and must be dropped via cxl_mem_mbox_put(); on failure the mutex
 *         has already been released.
 */
static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
{
	struct device *dev = cxlm->dev;
	u64 md_status;
	int rc;

	mutex_lock_io(&cxlm->mbox_mutex);

	/*
	 * XXX: There is some amount of ambiguity in the 2.0 version of the spec
	 * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
	 * bit is to allow firmware running on the device to notify the driver
	 * that it's ready to receive commands. It is unclear if the bit needs
	 * to be read for each transaction mailbox, ie. the firmware can switch
	 * it on and off as needed. Second, there is no defined timeout for
	 * mailbox ready, like there is for the doorbell interface.
	 *
	 * Assumptions:
	 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
	 *    it for every command.
	 *
	 * 2. If the doorbell is clear, the firmware should have first set the
	 *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
	 *    to be ready is sufficient.
	 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}

	md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
	/*
	 * Relies on '&' binding tighter than '&&': tests the MBOX_IF_READY
	 * bit and the overall device-ready state from the same status read.
	 */
	if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
		dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Hardware shouldn't allow a ready status but also have failure bits
	 * set. Spit out an error, this should be a bug report
	 */
	rc = -EFAULT;
	if (md_status & CXLMDEV_DEV_FATAL) {
		dev_err(dev, "mbox: reported ready, but fatal\n");
		goto out;
	}
	if (md_status & CXLMDEV_FW_HALT) {
		dev_err(dev, "mbox: reported ready, but halted\n");
		goto out;
	}
	if (CXLMDEV_RESET_NEEDED(md_status)) {
		dev_err(dev, "mbox: reported ready, but reset needed\n");
		goto out;
	}

	/* with lock held */
	return 0;

out:
	mutex_unlock(&cxlm->mbox_mutex);
	return rc;
}
| 253 | |
/**
 * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 */
static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
{
	/* Pairs with the mutex_lock_io() taken in cxl_mem_mbox_get() */
	mutex_unlock(&cxlm->mbox_mutex);
}
| 264 | |
/*
 * Mailbox transport op installed on the cxl_mem: serialize access to the
 * primary mailbox, execute @cmd, then drop exclusive access.
 */
static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	int rc = cxl_mem_mbox_get(cxlm);

	if (rc == 0) {
		rc = __cxl_mem_mbox_send_cmd(cxlm, cmd);
		cxl_mem_mbox_put(cxlm);
	}

	return rc;
}
| 278 | |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 279 | static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm) |
| 280 | { |
Dan Williams | 8ac75dd | 2021-05-13 22:21:54 -0700 | [diff] [blame] | 281 | const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 282 | |
Dan Williams | b64955a | 2021-09-08 22:12:21 -0700 | [diff] [blame] | 283 | cxlm->mbox_send = cxl_pci_mbox_send; |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 284 | cxlm->payload_size = |
| 285 | 1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap); |
| 286 | |
| 287 | /* |
| 288 | * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register |
| 289 | * |
| 290 | * If the size is too small, mandatory commands will not work and so |
| 291 | * there's no point in going forward. If the size is too large, there's |
| 292 | * no harm is soft limiting it. |
| 293 | */ |
| 294 | cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M); |
| 295 | if (cxlm->payload_size < 256) { |
Dan Williams | 99e222a | 2021-09-08 22:12:09 -0700 | [diff] [blame] | 296 | dev_err(cxlm->dev, "Mailbox is too small (%zub)", |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 297 | cxlm->payload_size); |
| 298 | return -ENXIO; |
| 299 | } |
| 300 | |
Dan Williams | 99e222a | 2021-09-08 22:12:09 -0700 | [diff] [blame] | 301 | dev_dbg(cxlm->dev, "Mailbox payload sized %zu", |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 302 | cxlm->payload_size); |
| 303 | |
| 304 | return 0; |
| 305 | } |
| 306 | |
Ira Weiny | 07d62ea | 2021-05-27 17:49:18 -0700 | [diff] [blame] | 307 | static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm, |
| 308 | u8 bar, u64 offset) |
Ben Widawsky | 1b0a1a2 | 2021-04-07 15:26:20 -0700 | [diff] [blame] | 309 | { |
Ira Weiny | f8a7e8c | 2021-05-27 17:49:19 -0700 | [diff] [blame] | 310 | void __iomem *addr; |
Dan Williams | 99e222a | 2021-09-08 22:12:09 -0700 | [diff] [blame] | 311 | struct device *dev = cxlm->dev; |
| 312 | struct pci_dev *pdev = to_pci_dev(dev); |
Ben Widawsky | 1b0a1a2 | 2021-04-07 15:26:20 -0700 | [diff] [blame] | 313 | |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 314 | /* Basic sanity check that BAR is big enough */ |
| 315 | if (pci_resource_len(pdev, bar) < offset) { |
| 316 | dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar, |
| 317 | &pdev->resource[bar], (unsigned long long)offset); |
Ben Widawsky | 6630d31 | 2021-05-20 14:29:53 -0700 | [diff] [blame] | 318 | return IOMEM_ERR_PTR(-ENXIO); |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 319 | } |
| 320 | |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 321 | addr = pci_iomap(pdev, bar, 0); |
Ira Weiny | f8a7e8c | 2021-05-27 17:49:19 -0700 | [diff] [blame] | 322 | if (!addr) { |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 323 | dev_err(dev, "failed to map registers\n"); |
Ira Weiny | f8a7e8c | 2021-05-27 17:49:19 -0700 | [diff] [blame] | 324 | return addr; |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 325 | } |
Ben Widawsky | 8adaf74 | 2021-02-16 20:09:51 -0800 | [diff] [blame] | 326 | |
Ira Weiny | f8a7e8c | 2021-05-27 17:49:19 -0700 | [diff] [blame] | 327 | dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n", |
| 328 | bar, offset); |
Ben Widawsky | 6630d31 | 2021-05-20 14:29:53 -0700 | [diff] [blame] | 329 | |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 330 | return addr; |
| 331 | } |
| 332 | |
/* Undo a successful cxl_mem_map_regblock() mapping */
static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base)
{
	pci_iounmap(to_pci_dev(cxlm->dev), base);
}
Dan Williams | 4cdadfd | 2021-02-16 20:09:50 -0800 | [diff] [blame] | 337 | |
| 338 | static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec) |
| 339 | { |
| 340 | int pos; |
| 341 | |
| 342 | pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC); |
| 343 | if (!pos) |
| 344 | return 0; |
| 345 | |
| 346 | while (pos) { |
| 347 | u16 vendor, id; |
| 348 | |
| 349 | pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor); |
| 350 | pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id); |
| 351 | if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id) |
| 352 | return pos; |
| 353 | |
| 354 | pos = pci_find_next_ext_capability(pdev, pos, |
| 355 | PCI_EXT_CAP_ID_DVSEC); |
| 356 | } |
| 357 | |
| 358 | return 0; |
| 359 | } |
| 360 | |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 361 | static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base, |
| 362 | struct cxl_register_map *map) |
| 363 | { |
Ben Widawsky | 0842237 | 2021-05-27 17:49:22 -0700 | [diff] [blame] | 364 | struct cxl_component_reg_map *comp_map; |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 365 | struct cxl_device_reg_map *dev_map; |
Dan Williams | 99e222a | 2021-09-08 22:12:09 -0700 | [diff] [blame] | 366 | struct device *dev = cxlm->dev; |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 367 | |
| 368 | switch (map->reg_type) { |
Ben Widawsky | 0842237 | 2021-05-27 17:49:22 -0700 | [diff] [blame] | 369 | case CXL_REGLOC_RBI_COMPONENT: |
| 370 | comp_map = &map->component_map; |
| 371 | cxl_probe_component_regs(dev, base, comp_map); |
| 372 | if (!comp_map->hdm_decoder.valid) { |
| 373 | dev_err(dev, "HDM decoder registers not found\n"); |
| 374 | return -ENXIO; |
| 375 | } |
| 376 | |
| 377 | dev_dbg(dev, "Set up component registers\n"); |
| 378 | break; |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 379 | case CXL_REGLOC_RBI_MEMDEV: |
| 380 | dev_map = &map->device_map; |
| 381 | cxl_probe_device_regs(dev, base, dev_map); |
| 382 | if (!dev_map->status.valid || !dev_map->mbox.valid || |
| 383 | !dev_map->memdev.valid) { |
| 384 | dev_err(dev, "registers not found: %s%s%s\n", |
| 385 | !dev_map->status.valid ? "status " : "", |
Li Qiang (Johnny Li) | da582aa | 2021-09-03 19:20:50 -0700 | [diff] [blame] | 386 | !dev_map->mbox.valid ? "mbox " : "", |
| 387 | !dev_map->memdev.valid ? "memdev " : ""); |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 388 | return -ENXIO; |
| 389 | } |
| 390 | |
| 391 | dev_dbg(dev, "Probing device registers...\n"); |
| 392 | break; |
| 393 | default: |
| 394 | break; |
| 395 | } |
| 396 | |
| 397 | return 0; |
| 398 | } |
| 399 | |
| 400 | static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map) |
| 401 | { |
Dan Williams | 99e222a | 2021-09-08 22:12:09 -0700 | [diff] [blame] | 402 | struct device *dev = cxlm->dev; |
| 403 | struct pci_dev *pdev = to_pci_dev(dev); |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 404 | |
| 405 | switch (map->reg_type) { |
Ben Widawsky | 0842237 | 2021-05-27 17:49:22 -0700 | [diff] [blame] | 406 | case CXL_REGLOC_RBI_COMPONENT: |
| 407 | cxl_map_component_regs(pdev, &cxlm->regs.component, map); |
| 408 | dev_dbg(dev, "Mapping component registers...\n"); |
| 409 | break; |
Ira Weiny | 30af972 | 2021-06-03 17:50:36 -0700 | [diff] [blame] | 410 | case CXL_REGLOC_RBI_MEMDEV: |
| 411 | cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map); |
| 412 | dev_dbg(dev, "Probing device registers...\n"); |
| 413 | break; |
| 414 | default: |
| 415 | break; |
| 416 | } |
| 417 | |
| 418 | return 0; |
| 419 | } |
| 420 | |
Ira Weiny | 07d62ea | 2021-05-27 17:49:18 -0700 | [diff] [blame] | 421 | static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi, |
| 422 | u8 *bar, u64 *offset, u8 *reg_type) |
| 423 | { |
| 424 | *offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK); |
| 425 | *bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo); |
| 426 | *reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo); |
| 427 | } |
| 428 | |
/**
 * cxl_mem_setup_regs() - Setup necessary MMIO.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Return: 0 if all necessary registers mapped.
 *
 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * registers.
 */
static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
{
	void __iomem *base;
	u32 regloc_size, regblocks;
	int regloc, i, n_maps, ret = 0;
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];

	regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
	if (!regloc) {
		dev_err(dev, "register location dvsec not found\n");
		return -ENXIO;
	}

	/*
	 * NOTE(review): error returns below skip pci_release_mem_regions();
	 * presumably pcim_enable_device() in the probe path makes these
	 * regions devres-managed so failed probe still releases them — confirm.
	 */
	if (pci_request_mem_regions(pdev, pci_name(pdev)))
		return -ENODEV;

	/* Get the size of the Register Locator DVSEC */
	pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
	regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);

	regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
	regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;

	/* Each register block entry is two dwords: low (BIR/type) and high */
	for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
		u32 reg_lo, reg_hi;
		u8 reg_type;
		u64 offset;
		u8 bar;

		pci_read_config_dword(pdev, regloc, &reg_lo);
		pci_read_config_dword(pdev, regloc + 4, &reg_hi);

		cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset,
					  &reg_type);

		dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
			bar, offset, reg_type);

		/* Ignore unknown register block types */
		if (reg_type > CXL_REGLOC_RBI_MEMDEV)
			continue;

		/*
		 * NOTE(review): cxl_mem_map_regblock() can return
		 * IOMEM_ERR_PTR() for an undersized BAR, which this NULL test
		 * does not catch — verify all its error paths return NULL.
		 */
		base = cxl_mem_map_regblock(cxlm, bar, offset);
		if (!base)
			return -ENOMEM;

		map = &maps[n_maps];
		map->barno = bar;
		map->block_offset = offset;
		map->reg_type = reg_type;

		ret = cxl_probe_regs(cxlm, base + offset, map);

		/* Always unmap the regblock regardless of probe success */
		cxl_mem_unmap_regblock(cxlm, base);

		if (ret)
			return ret;

		n_maps++;
	}

	/* Drop the regions so cxl_map_regs() can take managed mappings */
	pci_release_mem_regions(pdev);

	for (i = 0; i < n_maps; i++) {
		ret = cxl_map_regs(cxlm, &maps[i]);
		if (ret)
			break;
	}

	return ret;
}
| 513 | |
/*
 * PCI probe: create the cxl_mem, map its registers, bring up the mailbox,
 * enumerate commands, and register the memdev (and nvdimm bridge, if the
 * device advertises a pmem range) on the CXL bus. All teardown is devm-based,
 * so error paths simply return.
 */
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlm = cxl_mem_create(&pdev->dev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	rc = cxl_mem_setup_regs(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_setup_mailbox(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlm);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	/* rc is 0 here; only overwritten if an nvdimm registration is attempted */
	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}
| 557 | |
/* Bind by class code so any spec-compliant CXL.mem device matches */
static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_mem_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_mem_probe,
	.driver	= {
		/* Probe may poll the mailbox doorbell; don't block boot on it */
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};
| 573 | |
Dan Williams | b39cb10 | 2021-02-16 20:09:52 -0800 | [diff] [blame] | 574 | static __init int cxl_mem_init(void) |
| 575 | { |
Dan Williams | b39cb10 | 2021-02-16 20:09:52 -0800 | [diff] [blame] | 576 | int rc; |
| 577 | |
Dan Williams | 8ac75dd | 2021-05-13 22:21:54 -0700 | [diff] [blame] | 578 | /* Double check the anonymous union trickery in struct cxl_regs */ |
| 579 | BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) != |
| 580 | offsetof(struct cxl_regs, device_regs.memdev)); |
| 581 | |
Ben Widawsky | 3d135db | 2021-08-02 10:30:05 -0700 | [diff] [blame] | 582 | rc = pci_register_driver(&cxl_mem_driver); |
Dan Williams | b39cb10 | 2021-02-16 20:09:52 -0800 | [diff] [blame] | 583 | if (rc) |
| 584 | return rc; |
| 585 | |
Dan Williams | b39cb10 | 2021-02-16 20:09:52 -0800 | [diff] [blame] | 586 | return 0; |
| 587 | } |
| 588 | |
/* Module exit: unregister the PCI driver; devm handles per-device teardown */
static __exit void cxl_mem_exit(void)
{
	pci_unregister_driver(&cxl_mem_driver);
}
| 593 | |
Dan Williams | 4cdadfd | 2021-02-16 20:09:50 -0800 | [diff] [blame] | 594 | MODULE_LICENSE("GPL v2"); |
Dan Williams | b39cb10 | 2021-02-16 20:09:52 -0800 | [diff] [blame] | 595 | module_init(cxl_mem_init); |
| 596 | module_exit(cxl_mem_exit); |
| 597 | MODULE_IMPORT_NS(CXL); |