blob: a5a0be3f088be1031e4c7ea9891b6933250d9a27 [file] [log] [blame]
Ben Widawsky8adaf742021-02-16 20:09:51 -08001/* SPDX-License-Identifier: GPL-2.0-only */
2/* Copyright(c) 2020 Intel Corporation. */
3
4#ifndef __CXL_H__
5#define __CXL_H__
6
Dan Williams8fdcb172021-06-15 16:18:17 -07007#include <linux/libnvdimm.h>
Ben Widawsky8adaf742021-02-16 20:09:51 -08008#include <linux/bitfield.h>
9#include <linux/bitops.h>
10#include <linux/io.h>
11
Dan Williams4812be92021-06-09 09:01:35 -070012/**
13 * DOC: cxl objects
14 *
15 * The CXL core objects like ports, decoders, and regions are shared
16 * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers
17 * (port-driver, region-driver, nvdimm object-drivers... etc).
18 */
19
/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
21#define CXL_CM_OFFSET 0x1000
22#define CXL_CM_CAP_HDR_OFFSET 0x0
23#define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0)
24#define CM_CAP_HDR_CAP_ID 1
25#define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16)
26#define CM_CAP_HDR_CAP_VERSION 1
27#define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20)
28#define CM_CAP_HDR_CACHE_MEM_VERSION 1
29#define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
30#define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)
31
32#define CXL_CM_CAP_CAP_ID_HDM 0x5
33#define CXL_CM_CAP_CAP_HDM_VERSION 1
34
35/* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
36#define CXL_HDM_DECODER_CAP_OFFSET 0x0
37#define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0)
38#define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
39#define CXL_HDM_DECODER0_BASE_LOW_OFFSET 0x10
40#define CXL_HDM_DECODER0_BASE_HIGH_OFFSET 0x14
41#define CXL_HDM_DECODER0_SIZE_LOW_OFFSET 0x18
42#define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET 0x1c
43#define CXL_HDM_DECODER0_CTRL_OFFSET 0x20
44
Ben Widawsky64230352021-06-11 12:01:11 -070045static inline int cxl_hdm_decoder_count(u32 cap_hdr)
46{
47 int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr);
48
49 return val ? val * 2 : 1;
50}
51
Ben Widawsky8adaf742021-02-16 20:09:51 -080052/* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
53#define CXLDEV_CAP_ARRAY_OFFSET 0x0
54#define CXLDEV_CAP_ARRAY_CAP_ID 0
55#define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
56#define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)
57/* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
58#define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)
59/* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
60#define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
61#define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
62#define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
63#define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000
64
65/* CXL 2.0 8.2.8.4 Mailbox Registers */
66#define CXLDEV_MBOX_CAPS_OFFSET 0x00
67#define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
68#define CXLDEV_MBOX_CTRL_OFFSET 0x04
69#define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
70#define CXLDEV_MBOX_CMD_OFFSET 0x08
71#define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
72#define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
73#define CXLDEV_MBOX_STATUS_OFFSET 0x10
74#define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
75#define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
76#define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
77
/*
 * Using struct_group() allows for per register-block-type helper routines,
 * without requiring block-type agnostic code to include the prefix.
 * NOTE(review): each tagged group is also addressable as a member of this
 * struct (".component" / ".device_regs") — presumably so helpers can take
 * a struct cxl_component_regs * or struct cxl_device_regs * directly.
 */
struct cxl_regs {
	/*
	 * Common set of CXL Component register block base pointers
	 * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
	 */
	struct_group_tagged(cxl_component_regs, component,
		void __iomem *hdm_decoder;
	);
	/*
	 * Common set of CXL Device register block base pointers
	 * @status: CXL 2.0 8.2.8.3 Device Status Registers
	 * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
	 * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
	 */
	struct_group_tagged(cxl_device_regs, device_regs,
		void __iomem *status, *mbox, *memdev;
	);
};
100
/**
 * struct cxl_reg_map - discovered location of one register sub-block
 * @valid: whether this sub-block was located (filled in by the
 *	   cxl_probe_component_regs() / cxl_probe_device_regs() helpers)
 * @offset: offset of the sub-block, in bytes
 * @size: length of the sub-block, in bytes
 */
struct cxl_reg_map {
	bool valid;
	unsigned long offset;
	unsigned long size;
};
106
/* Component register block layout; currently only the HDM decoder block */
struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
};
110
/* Device register block layout: status, mailbox, and memdev sub-blocks */
struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};
116
/**
 * struct cxl_register_map - DVSEC harvested register block mapping parameters
 * @base: virtual base of the register-block-BAR + @block_offset
 * @block_offset: offset to start of register block in @barno
 * @reg_type: see enum cxl_regloc_type
 * @barno: PCI BAR number containing the register block
 * @component_map: cxl_reg_map for component registers
 * @device_map: cxl_reg_map for device registers
 */
struct cxl_register_map {
	void __iomem *base;
	u64 block_offset;
	u8 reg_type;
	u8 barno;
	/* NOTE(review): which union member is live presumably follows from
	 * @reg_type — confirm against the users of this struct. */
	union {
		struct cxl_component_reg_map component_map;
		struct cxl_device_reg_map device_map;
	};
};
136
Ben Widawsky08422372021-05-27 17:49:22 -0700137void cxl_probe_component_regs(struct device *dev, void __iomem *base,
138 struct cxl_component_reg_map *map);
Ira Weiny30af9722021-06-03 17:50:36 -0700139void cxl_probe_device_regs(struct device *dev, void __iomem *base,
140 struct cxl_device_reg_map *map);
Ben Widawsky08422372021-05-27 17:49:22 -0700141int cxl_map_component_regs(struct pci_dev *pdev,
142 struct cxl_component_regs *regs,
143 struct cxl_register_map *map);
Ira Weiny30af9722021-06-03 17:50:36 -0700144int cxl_map_device_regs(struct pci_dev *pdev,
145 struct cxl_device_regs *regs,
146 struct cxl_register_map *map);
Dan Williams399d34e2021-05-13 22:22:05 -0700147
Dan Williams4812be92021-06-09 09:01:35 -0700148#define CXL_RESOURCE_NONE ((resource_size_t) -1)
Dan Williams7d4b5ca2021-06-09 09:01:46 -0700149#define CXL_TARGET_STRLEN 20
Dan Williams4812be92021-06-09 09:01:35 -0700150
Dan Williams40ba17a2021-06-09 09:43:29 -0700151/*
152 * cxl_decoder flags that define the type of memory / devices this
153 * decoder supports as well as configuration lock status See "CXL 2.0
154 * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
155 */
156#define CXL_DECODER_F_RAM BIT(0)
157#define CXL_DECODER_F_PMEM BIT(1)
158#define CXL_DECODER_F_TYPE2 BIT(2)
159#define CXL_DECODER_F_TYPE3 BIT(3)
160#define CXL_DECODER_F_LOCK BIT(4)
161#define CXL_DECODER_F_MASK GENMASK(4, 0)
162
/*
 * Whether a decoder targets accelerator (CXL type-2) or memory-expander
 * (CXL type-3) devices; see also CXL_DECODER_F_TYPE2/F_TYPE3 above.
 * NOTE(review): the numeric values 2/3 presumably match a spec-defined
 * register encoding — confirm against CXL 2.0 8.2.5.12.7.
 */
enum cxl_decoder_type {
	CXL_DECODER_ACCELERATOR = 2,
	CXL_DECODER_EXPANDER = 3,
};
167
Dan Williamsa5c25802021-09-08 22:13:10 -0700168/*
169 * Current specification goes up to 8, double that seems a reasonable
170 * software max for the foreseeable future
171 */
172#define CXL_DECODER_MAX_INTERLEAVE 16
173
/**
 * struct cxl_decoder - CXL address range decode configuration
 * @dev: this decoder's device
 * @id: kernel device name id
 * @range: address range considered by this decoder
 * @interleave_ways: number of cxl_dports in this decode
 * @interleave_granularity: data stride per dport
 * @target_type: accelerator vs expander (type2 vs type3) selector
 * @flags: memory type capabilities and locking (CXL_DECODER_F_* flags)
 * @nr_targets: number of elements in @target
 * @target: active ordered target list in current decoder configuration
 *
 * @target is a flexible array; its capacity is fixed at allocation time
 * (see cxl_decoder_alloc()) and recorded in @nr_targets.
 */
struct cxl_decoder {
	struct device dev;
	int id;
	struct range range;
	int interleave_ways;
	int interleave_granularity;
	enum cxl_decoder_type target_type;
	unsigned long flags;
	int nr_targets;
	struct cxl_dport *target[];
};
197
Dan Williams8fdcb172021-06-15 16:18:17 -0700198
/**
 * enum cxl_nvdimm_brige_state - state machine for managing bus rescans
 * @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
 * @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
 * @CXL_NVB_ONLINE: Target state after successful ->probe()
 * @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe()
 *
 * NOTE(review): "brige" (sic) is the spelling of the enum tag itself, so
 * the kernel-doc name above must keep the typo; fixing it requires
 * renaming the type at every use site.
 */
enum cxl_nvdimm_brige_state {
	CXL_NVB_NEW,
	CXL_NVB_DEAD,
	CXL_NVB_ONLINE,
	CXL_NVB_OFFLINE,
};
212
/*
 * Anchor object connecting a cxl_port to the LIBNVDIMM subsystem: owns an
 * nvdimm_bus and a work item that drives the cxl_nvdimm_brige_state
 * machine (bus on/offline transitions happen asynchronously via
 * @state_work).
 */
struct cxl_nvdimm_bridge {
	int id;			/* id for device-name — TODO confirm at registration site */
	struct device dev;
	struct cxl_port *port;	/* CXL port this bridge is anchored to */
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
	struct work_struct state_work;
	enum cxl_nvdimm_brige_state state;
};
222
/* Proxy object pairing a CXL memory device with its LIBNVDIMM nvdimm */
struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;	/* backing CXL memory device */
	struct nvdimm *nvdimm;		/* associated LIBNVDIMM object */
};
228
/*
 * Context carried while walking a PCI hierarchy under @root on behalf of
 * @port. NOTE(review): the exact semantics of @error and @count are set
 * by the walk callbacks, which are not visible in this header — they
 * appear to accumulate a failure code and a visit/addition count.
 */
struct cxl_walk_context {
	struct device *dev;
	struct pci_bus *root;
	struct cxl_port *port;
	int error;
	int count;
};
236
/**
 * struct cxl_port - logical collection of upstream port devices and
 *		     downstream port devices to construct a CXL memory
 *		     decode hierarchy.
 * @dev: this port's device
 * @uport: PCI or platform device implementing the upstream port capability
 * @id: id for port device-name
 * @dports: cxl_dport instances referenced by decoders (linked through
 *	    cxl_dport.list, populated by cxl_add_dport())
 * @decoder_ida: allocator for decoder ids
 * @component_reg_phys: component register capability base address (optional)
 */
struct cxl_port {
	struct device dev;
	struct device *uport;
	int id;
	struct list_head dports;
	struct ida decoder_ida;
	resource_size_t component_reg_phys;
};
256
/**
 * struct cxl_dport - CXL downstream port
 * @dport: PCI bridge or firmware device representing the downstream link
 * @port_id: unique hardware identifier for dport in decoder target list
 * @component_reg_phys: downstream port component registers
 * @port: reference to cxl_port that contains this downstream port
 * @list: node for a cxl_port's list of cxl_dport instances
 *
 * Instances are attached to a port's @dports list via cxl_add_dport().
 */
struct cxl_dport {
	struct device *dport;
	int port_id;
	resource_size_t component_reg_phys;
	struct cxl_port *port;
	struct list_head list;
};
272
Dan Williams4812be92021-06-09 09:01:35 -0700273struct cxl_port *to_cxl_port(struct device *dev);
274struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
275 resource_size_t component_reg_phys,
276 struct cxl_port *parent_port);
277
Dan Williams7d4b5ca2021-06-09 09:01:46 -0700278int cxl_add_dport(struct cxl_port *port, struct device *dport, int port_id,
279 resource_size_t component_reg_phys);
Dan Williams40ba17a2021-06-09 09:43:29 -0700280
281struct cxl_decoder *to_cxl_decoder(struct device *dev);
Dan Williams8fdcb172021-06-15 16:18:17 -0700282bool is_root_decoder(struct device *dev);
Dan Williams48667f62021-09-21 12:22:16 -0700283struct cxl_decoder *cxl_decoder_alloc(struct cxl_port *port, int nr_targets);
284int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
285int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
Dan Williams40ba17a2021-06-09 09:43:29 -0700286
Dan Williamsb39cb102021-02-16 20:09:52 -0800287extern struct bus_type cxl_bus_type;
Dan Williams6af71392021-06-15 16:18:11 -0700288
/**
 * struct cxl_driver - driver registered against the cxl bus type
 * @name: driver name
 * @probe: bind callback, invoked with the matched device
 * @remove: unbind callback
 * @drv: embedded generic device_driver (recover with to_cxl_drv())
 * @id: driver id; presumably matched against a device's CXL_DEVICE_*
 *	type (cf. CXL_MODALIAS_FMT "cxl:t%d") — confirm in bus match code
 */
struct cxl_driver {
	const char *name;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	struct device_driver drv;
	int id;
};
296
/* Convert the embedded generic device_driver back to its cxl_driver */
static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
{
	return container_of(drv, struct cxl_driver, drv);
}
301
302int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
303 const char *modname);
304#define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME)
305void cxl_driver_unregister(struct cxl_driver *cxl_drv);
306
Dan Williams21083f52021-06-15 16:36:31 -0700307#define CXL_DEVICE_NVDIMM_BRIDGE 1
308#define CXL_DEVICE_NVDIMM 2
Dan Williams8fdcb172021-06-15 16:18:17 -0700309
Dan Williams6af71392021-06-15 16:18:11 -0700310#define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
311#define CXL_MODALIAS_FMT "cxl:t%d"
312
Dan Williams8fdcb172021-06-15 16:18:17 -0700313struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
314struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
315 struct cxl_port *port);
Dan Williams21083f52021-06-15 16:36:31 -0700316struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
317bool is_cxl_nvdimm(struct device *dev);
Dan Williams53989fa2021-11-11 10:19:05 -0800318bool is_cxl_nvdimm_bridge(struct device *dev);
Dan Williams21083f52021-06-15 16:36:31 -0700319int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
Dan Williams7d3eb232021-09-08 22:13:21 -0700320struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_nvdimm *cxl_nvd);
Dan Williams67dcdd42021-09-14 12:14:22 -0700321
322/*
323 * Unit test builds overrides this to __weak, find the 'strong' version
324 * of these symbols in tools/testing/cxl/.
325 */
326#ifndef __mock
327#define __mock static
328#endif
Ben Widawsky8adaf742021-02-16 20:09:51 -0800329#endif /* __CXL_H__ */