// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/io.h>
#include "cxlmem.h"
#include "pci.h"
#include "cxl.h"

/**
 * DOC: cxl pci
 *
 * This implements the PCI exclusive functionality for a CXL device as it is
 * defined by the Compute Express Link specification. CXL devices may surface
 * certain functionality even when CXL operation is not enabled.
 *
 * The driver has several responsibilities, mainly:
 *  - Create the memX device and register on the CXL bus.
 *  - Enumerate the device's register interface and map the register blocks.
 *  - Probe the device attributes to establish the sysfs interface.
 *  - Provide an IOCTL interface to userspace to communicate with the device
 *    for things like firmware update.
 */

#define cxl_doorbell_busy(cxlm)                                                \
	(readl((cxlm)->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET) &                 \
	 CXLDEV_MBOX_CTRL_DOORBELL)

/* CXL 2.0 - 8.2.8.4 */
#define CXL_MAILBOX_TIMEOUT_MS (2 * HZ)

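/*
 * Poll the mailbox doorbell until the device clears it, giving up once the
 * CXL_MAILBOX_TIMEOUT_MS timeout has elapsed.
 */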
static int cxl_mem_wait_for_doorbell(struct cxl_mem *cxlm)
{
	const unsigned long start = jiffies;
	unsigned long end = start;

	while (cxl_doorbell_busy(cxlm)) {
		end = jiffies;

		if (time_after(end, start + CXL_MAILBOX_TIMEOUT_MS)) {
			/* Check again in case preempted before timeout test */
			if (!cxl_doorbell_busy(cxlm))
				break;
			return -ETIMEDOUT;
		}
		cpu_relax();
	}

	dev_dbg(cxlm->dev, "Doorbell wait took %dms",
		jiffies_to_msecs(end) - jiffies_to_msecs(start));
	return 0;
}

static void cxl_mem_mbox_timeout(struct cxl_mem *cxlm,
				 struct cxl_mbox_cmd *mbox_cmd)
{
	struct device *dev = cxlm->dev;

	dev_dbg(dev, "Mailbox command (opcode: %#x size: %zub) timed out\n",
		mbox_cmd->opcode, mbox_cmd->size_in);
}

/**
 * __cxl_mem_mbox_send_cmd() - Execute a mailbox command
 * @cxlm: The CXL memory device to communicate with.
 * @mbox_cmd: Command to send to the memory device.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 * Return: -ETIMEDOUT if timeout occurred waiting for completion. 0 on success.
 *         Caller should check the return code in @mbox_cmd to make sure it
 *         succeeded.
 *
 * This is a generic form of the CXL mailbox send command and thus only uses
 * the registers defined by the mailbox capability ID - CXL 2.0 8.2.8.4.
 * Memory devices, and perhaps other types of CXL devices, may have further
 * information available upon error conditions. Driver facilities wishing to
 * send mailbox commands should use the wrapper command.
 *
 * The CXL spec allows for up to two mailboxes. The intention is for the
 * primary mailbox to be OS controlled and the secondary mailbox to be used by
 * system firmware. This allows the OS and firmware to communicate with the
 * device without needing to coordinate with each other. The driver only uses
 * the primary mailbox.
 */
static int __cxl_mem_mbox_send_cmd(struct cxl_mem *cxlm,
				   struct cxl_mbox_cmd *mbox_cmd)
{
	void __iomem *payload = cxlm->regs.mbox + CXLDEV_MBOX_PAYLOAD_OFFSET;
	struct device *dev = cxlm->dev;
	u64 cmd_reg, status_reg;
	size_t out_len;
	int rc;

	lockdep_assert_held(&cxlm->mbox_mutex);

	/*
	 * Here are the steps from 8.2.8.4 of the CXL 2.0 spec.
	 *   1. Caller reads MB Control Register to verify doorbell is clear
	 *   2. Caller writes Command Register
	 *   3. Caller writes Command Payload Registers if input payload is non-empty
	 *   4. Caller writes MB Control Register to set doorbell
	 *   5. Caller either polls for doorbell to be clear or waits for interrupt if configured
	 *   6. Caller reads MB Status Register to fetch Return code
	 *   7. If command successful, Caller reads Command Register to get Payload Length
	 *   8. If output payload is non-empty, host reads Command Payload Registers
	 *
	 * Hardware is free to do whatever it wants before the doorbell is rung,
	 * and isn't allowed to change anything after it clears the doorbell. As
	 * such, steps 2 and 3 can happen in any order, and steps 6, 7, 8 can
	 * also happen in any order (though some orders might not make sense).
	 */

	/* #1 */
	if (cxl_doorbell_busy(cxlm)) {
		dev_err_ratelimited(dev, "Mailbox re-busy after acquiring\n");
		return -EBUSY;
	}

	cmd_reg = FIELD_PREP(CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK,
			     mbox_cmd->opcode);
	if (mbox_cmd->size_in) {
		if (WARN_ON(!mbox_cmd->payload_in))
			return -EINVAL;

		cmd_reg |= FIELD_PREP(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK,
				      mbox_cmd->size_in);
		memcpy_toio(payload, mbox_cmd->payload_in, mbox_cmd->size_in);
	}

	/* #2, #3 */
	writeq(cmd_reg, cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);

	/* #4 */
	dev_dbg(dev, "Sending command\n");
	writel(CXLDEV_MBOX_CTRL_DOORBELL,
	       cxlm->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);

	/* #5 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc == -ETIMEDOUT) {
		cxl_mem_mbox_timeout(cxlm, mbox_cmd);
		return rc;
	}

	/* #6 */
	status_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_STATUS_OFFSET);
	mbox_cmd->return_code =
		FIELD_GET(CXLDEV_MBOX_STATUS_RET_CODE_MASK, status_reg);

	if (mbox_cmd->return_code != 0) {
		dev_dbg(dev, "Mailbox operation had an error\n");
		return 0;
	}

	/* #7 */
	cmd_reg = readq(cxlm->regs.mbox + CXLDEV_MBOX_CMD_OFFSET);
	out_len = FIELD_GET(CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK, cmd_reg);

	/* #8 */
	if (out_len && mbox_cmd->payload_out) {
		/*
		 * Sanitize the copy. If hardware misbehaves, out_len per the
		 * spec can actually be greater than the max allowed size (21
		 * bits available but spec defined 1M max). The caller also may
		 * have requested less data than the hardware supplied even
		 * within spec.
		 */
		size_t n = min3(mbox_cmd->size_out, cxlm->payload_size, out_len);

		memcpy_fromio(mbox_cmd->payload_out, payload, n);
		mbox_cmd->size_out = n;
	} else {
		mbox_cmd->size_out = 0;
	}

	return 0;
}

/**
 * cxl_mem_mbox_get() - Acquire exclusive access to the mailbox.
 * @cxlm: The memory device to gain access to.
 *
 * Context: Any context. Takes the mbox_mutex.
 * Return: 0 if exclusive access was acquired.
 */
static int cxl_mem_mbox_get(struct cxl_mem *cxlm)
{
	struct device *dev = cxlm->dev;
	u64 md_status;
	int rc;

	mutex_lock_io(&cxlm->mbox_mutex);

	/*
	 * XXX: There is some amount of ambiguity in the 2.0 version of the spec
	 * around the mailbox interface ready (8.2.8.5.1.1). The purpose of the
	 * bit is to allow firmware running on the device to notify the driver
	 * that it's ready to receive commands. It is unclear if the bit needs
	 * to be read for each mailbox transaction, i.e. whether the firmware
	 * can switch it on and off as needed. Second, there is no defined
	 * timeout for mailbox ready, like there is for the doorbell interface.
	 *
	 * Assumptions:
	 * 1. The firmware might toggle the Mailbox Interface Ready bit, check
	 *    it for every command.
	 *
	 * 2. If the doorbell is clear, the firmware should have first set the
	 *    Mailbox Interface Ready bit. Therefore, waiting for the doorbell
	 *    to be ready is sufficient.
	 */
	rc = cxl_mem_wait_for_doorbell(cxlm);
	if (rc) {
		dev_warn(dev, "Mailbox interface not ready\n");
		goto out;
	}

	md_status = readq(cxlm->regs.memdev + CXLMDEV_STATUS_OFFSET);
	if (!(md_status & CXLMDEV_MBOX_IF_READY && CXLMDEV_READY(md_status))) {
		dev_err(dev, "mbox: reported doorbell ready, but not mbox ready\n");
		rc = -EBUSY;
		goto out;
	}

	/*
	 * Hardware shouldn't allow a ready status but also have failure bits
	 * set. Spit out an error; this should be reported as a bug.
	 */
	rc = -EFAULT;
	if (md_status & CXLMDEV_DEV_FATAL) {
		dev_err(dev, "mbox: reported ready, but fatal\n");
		goto out;
	}
	if (md_status & CXLMDEV_FW_HALT) {
		dev_err(dev, "mbox: reported ready, but halted\n");
		goto out;
	}
	if (CXLMDEV_RESET_NEEDED(md_status)) {
		dev_err(dev, "mbox: reported ready, but reset needed\n");
		goto out;
	}

	/* with lock held */
	return 0;

out:
	mutex_unlock(&cxlm->mbox_mutex);
	return rc;
}

/**
 * cxl_mem_mbox_put() - Release exclusive access to the mailbox.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Context: Any context. Expects mbox_mutex to be held.
 */
static void cxl_mem_mbox_put(struct cxl_mem *cxlm)
{
	mutex_unlock(&cxlm->mbox_mutex);
}

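/*
 * PCI-specific mbox_send operation: serialize mailbox users by acquiring
 * exclusive access before issuing the command and releasing it afterwards.
 */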
static int cxl_pci_mbox_send(struct cxl_mem *cxlm, struct cxl_mbox_cmd *cmd)
{
	int rc;

	rc = cxl_mem_mbox_get(cxlm);
	if (rc)
		return rc;

	rc = __cxl_mem_mbox_send_cmd(cxlm, cmd);
	cxl_mem_mbox_put(cxlm);

	return rc;
}

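/*
 * Install the PCI mailbox transport and size the payload area from the
 * Mailbox Capabilities Register, soft-limiting it to the 1M spec maximum.
 */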
static int cxl_mem_setup_mailbox(struct cxl_mem *cxlm)
{
	const int cap = readl(cxlm->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);

	cxlm->mbox_send = cxl_pci_mbox_send;
	cxlm->payload_size =
		1 << FIELD_GET(CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK, cap);

	/*
	 * CXL 2.0 8.2.8.4.3 Mailbox Capabilities Register
	 *
	 * If the size is too small, mandatory commands will not work and so
	 * there's no point in going forward. If the size is too large, there's
	 * no harm in soft limiting it.
	 */
	cxlm->payload_size = min_t(size_t, cxlm->payload_size, SZ_1M);
	if (cxlm->payload_size < 256) {
		dev_err(cxlm->dev, "Mailbox is too small (%zub)",
			cxlm->payload_size);
		return -ENXIO;
	}

	dev_dbg(cxlm->dev, "Mailbox payload sized %zu",
		cxlm->payload_size);

	return 0;
}

static void __iomem *cxl_mem_map_regblock(struct cxl_mem *cxlm,
					  u8 bar, u64 offset)
{
	void __iomem *addr;
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Basic sanity check that BAR is big enough */
	if (pci_resource_len(pdev, bar) < offset) {
		dev_err(dev, "BAR%d: %pr: too small (offset: %#llx)\n", bar,
			&pdev->resource[bar], (unsigned long long)offset);
		return IOMEM_ERR_PTR(-ENXIO);
	}

	addr = pci_iomap(pdev, bar, 0);
	if (!addr) {
		dev_err(dev, "failed to map registers\n");
		return IOMEM_ERR_PTR(-ENOMEM);
	}

	dev_dbg(dev, "Mapped CXL Memory Device resource bar %u @ %#llx\n",
		bar, offset);

	return addr;
}

static void cxl_mem_unmap_regblock(struct cxl_mem *cxlm, void __iomem *base)
{
	pci_iounmap(to_pci_dev(cxlm->dev), base);
}

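/*
 * Walk the PCIe extended capability list looking for a CXL Designated Vendor
 * Specific Extended Capability (DVSEC) with the requested DVSEC id. Returns
 * the config space offset of the matching capability, or 0 if not present.
 */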
static int cxl_mem_dvsec(struct pci_dev *pdev, int dvsec)
{
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DVSEC);
	if (!pos)
		return 0;

	while (pos) {
		u16 vendor, id;

		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER1, &vendor);
		pci_read_config_word(pdev, pos + PCI_DVSEC_HEADER2, &id);
		if (vendor == PCI_DVSEC_VENDOR_ID_CXL && dvsec == id)
			return pos;

		pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_DVSEC);
	}

	return 0;
}

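/*
 * Verify that a mapped register block advertises the capabilities the driver
 * depends on: an HDM decoder for component blocks; status, mailbox, and
 * memdev registers for memory device blocks.
 */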
static int cxl_probe_regs(struct cxl_mem *cxlm, void __iomem *base,
			  struct cxl_register_map *map)
{
	struct cxl_component_reg_map *comp_map;
	struct cxl_device_reg_map *dev_map;
	struct device *dev = cxlm->dev;

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		comp_map = &map->component_map;
		cxl_probe_component_regs(dev, base, comp_map);
		if (!comp_map->hdm_decoder.valid) {
			dev_err(dev, "HDM decoder registers not found\n");
			return -ENXIO;
		}

		dev_dbg(dev, "Set up component registers\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		dev_map = &map->device_map;
		cxl_probe_device_regs(dev, base, dev_map);
		if (!dev_map->status.valid || !dev_map->mbox.valid ||
		    !dev_map->memdev.valid) {
			dev_err(dev, "registers not found: %s%s%s\n",
				!dev_map->status.valid ? "status " : "",
				!dev_map->mbox.valid ? "mbox " : "",
				!dev_map->memdev.valid ? "memdev " : "");
			return -ENXIO;
		}

		dev_dbg(dev, "Probing device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

static int cxl_map_regs(struct cxl_mem *cxlm, struct cxl_register_map *map)
{
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);

	switch (map->reg_type) {
	case CXL_REGLOC_RBI_COMPONENT:
		cxl_map_component_regs(pdev, &cxlm->regs.component, map);
		dev_dbg(dev, "Mapping component registers...\n");
		break;
	case CXL_REGLOC_RBI_MEMDEV:
		cxl_map_device_regs(pdev, &cxlm->regs.device_regs, map);
		dev_dbg(dev, "Mapping device registers...\n");
		break;
	default:
		break;
	}

	return 0;
}

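/*
 * Decode one Register Locator DVSEC entry into its BAR number, the offset of
 * the register block within that BAR, and the Register Block Identifier
 * (type).
 */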
static void cxl_decode_register_block(u32 reg_lo, u32 reg_hi,
				      u8 *bar, u64 *offset, u8 *reg_type)
{
	*offset = ((u64)reg_hi << 32) | (reg_lo & CXL_REGLOC_ADDR_MASK);
	*bar = FIELD_GET(CXL_REGLOC_BIR_MASK, reg_lo);
	*reg_type = FIELD_GET(CXL_REGLOC_RBI_MASK, reg_lo);
}

/**
 * cxl_mem_setup_regs() - Setup necessary MMIO.
 * @cxlm: The CXL memory device to communicate with.
 *
 * Return: 0 if all necessary registers mapped.
 *
 * A memory device is required by spec to implement a certain set of MMIO
 * regions. The purpose of this function is to enumerate and map those
 * registers.
 */
static int cxl_mem_setup_regs(struct cxl_mem *cxlm)
{
	void __iomem *base;
	u32 regloc_size, regblocks;
	int regloc, i, n_maps, ret = 0;
	struct device *dev = cxlm->dev;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct cxl_register_map *map, maps[CXL_REGLOC_RBI_TYPES];

	regloc = cxl_mem_dvsec(pdev, PCI_DVSEC_ID_CXL_REGLOC_DVSEC_ID);
	if (!regloc) {
		dev_err(dev, "register location dvsec not found\n");
		return -ENXIO;
	}

	if (pci_request_mem_regions(pdev, pci_name(pdev)))
		return -ENODEV;

	/* Get the size of the Register Locator DVSEC */
	pci_read_config_dword(pdev, regloc + PCI_DVSEC_HEADER1, &regloc_size);
	regloc_size = FIELD_GET(PCI_DVSEC_HEADER1_LENGTH_MASK, regloc_size);

	regloc += PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET;
	regblocks = (regloc_size - PCI_DVSEC_ID_CXL_REGLOC_BLOCK1_OFFSET) / 8;

	for (i = 0, n_maps = 0; i < regblocks; i++, regloc += 8) {
		u32 reg_lo, reg_hi;
		u8 reg_type;
		u64 offset;
		u8 bar;

		pci_read_config_dword(pdev, regloc, &reg_lo);
		pci_read_config_dword(pdev, regloc + 4, &reg_hi);

		cxl_decode_register_block(reg_lo, reg_hi, &bar, &offset,
					  &reg_type);

		dev_dbg(dev, "Found register block in bar %u @ 0x%llx of type %u\n",
			bar, offset, reg_type);

		/* Ignore unknown register block types */
		if (reg_type > CXL_REGLOC_RBI_MEMDEV)
			continue;

		base = cxl_mem_map_regblock(cxlm, bar, offset);
		if (IS_ERR(base))
			return PTR_ERR(base);

		map = &maps[n_maps];
		map->barno = bar;
		map->block_offset = offset;
		map->reg_type = reg_type;

		ret = cxl_probe_regs(cxlm, base + offset, map);

		/* Always unmap the regblock regardless of probe success */
		cxl_mem_unmap_regblock(cxlm, base);

		if (ret)
			return ret;

		n_maps++;
	}

	pci_release_mem_regions(pdev);

	for (i = 0; i < n_maps; i++) {
		ret = cxl_map_regs(cxlm, &maps[i]);
		if (ret)
			break;
	}

	return ret;
}

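/*
 * Bind to a CXL memory expander: enable the PCI device, map and probe its
 * register blocks, size the mailbox, enumerate supported commands, read the
 * identity and partition info, and register the resulting memdev. A CXL
 * nvdimm is also registered when persistent capacity is present and
 * CONFIG_CXL_PMEM is enabled.
 */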
static int cxl_mem_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct cxl_memdev *cxlmd;
	struct cxl_mem *cxlm;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	cxlm = cxl_mem_create(&pdev->dev);
	if (IS_ERR(cxlm))
		return PTR_ERR(cxlm);

	rc = cxl_mem_setup_regs(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_setup_mailbox(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_enumerate_cmds(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_identify(cxlm);
	if (rc)
		return rc;

	rc = cxl_mem_create_range_info(cxlm);
	if (rc)
		return rc;

	cxlmd = devm_cxl_add_memdev(cxlm);
	if (IS_ERR(cxlmd))
		return PTR_ERR(cxlmd);

	if (range_len(&cxlm->pmem_range) && IS_ENABLED(CONFIG_CXL_PMEM))
		rc = devm_cxl_add_nvdimm(&pdev->dev, cxlmd);

	return rc;
}

static const struct pci_device_id cxl_mem_pci_tbl[] = {
	/* PCI class code for CXL.mem Type-3 Devices */
	{ PCI_DEVICE_CLASS((PCI_CLASS_MEMORY_CXL << 8 | CXL_MEMORY_PROGIF), ~0)},
	{ /* terminate list */ },
};
MODULE_DEVICE_TABLE(pci, cxl_mem_pci_tbl);

static struct pci_driver cxl_mem_driver = {
	.name			= KBUILD_MODNAME,
	.id_table		= cxl_mem_pci_tbl,
	.probe			= cxl_mem_probe,
	.driver = {
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};

static __init int cxl_mem_init(void)
{
	int rc;

	/* Double check the anonymous union trickery in struct cxl_regs */
	BUILD_BUG_ON(offsetof(struct cxl_regs, memdev) !=
		     offsetof(struct cxl_regs, device_regs.memdev));

	rc = pci_register_driver(&cxl_mem_driver);
	if (rc)
		return rc;

	return 0;
}

static __exit void cxl_mem_exit(void)
{
	pci_unregister_driver(&cxl_mem_driver);
}

MODULE_LICENSE("GPL v2");
module_init(cxl_mem_init);
module_exit(cxl_mem_exit);
MODULE_IMPORT_NS(CXL);