  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /* Copyright(c) 2020 Intel Corporation. */
  3. #ifndef __CXL_H__
  4. #define __CXL_H__
  5. #include <linux/libnvdimm.h>
  6. #include <linux/bitfield.h>
  7. #include <linux/bitops.h>
  8. #include <linux/log2.h>
  9. #include <linux/io.h>
  10. /**
  11. * DOC: cxl objects
  12. *
  13. * The CXL core objects like ports, decoders, and regions are shared
  14. * between the subsystem drivers cxl_acpi, cxl_pci, and core drivers
  15. * (port-driver, region-driver, nvdimm object-drivers... etc).
  16. */
  17. /* CXL 2.0 8.2.4 CXL Component Register Layout and Definition */
  18. #define CXL_COMPONENT_REG_BLOCK_SIZE SZ_64K
/* CXL 2.0 8.2.5 CXL.cache and CXL.mem Registers */
  20. #define CXL_CM_OFFSET 0x1000
  21. #define CXL_CM_CAP_HDR_OFFSET 0x0
  22. #define CXL_CM_CAP_HDR_ID_MASK GENMASK(15, 0)
  23. #define CM_CAP_HDR_CAP_ID 1
  24. #define CXL_CM_CAP_HDR_VERSION_MASK GENMASK(19, 16)
  25. #define CM_CAP_HDR_CAP_VERSION 1
  26. #define CXL_CM_CAP_HDR_CACHE_MEM_VERSION_MASK GENMASK(23, 20)
  27. #define CM_CAP_HDR_CACHE_MEM_VERSION 1
  28. #define CXL_CM_CAP_HDR_ARRAY_SIZE_MASK GENMASK(31, 24)
  29. #define CXL_CM_CAP_PTR_MASK GENMASK(31, 20)
  30. #define CXL_CM_CAP_CAP_ID_HDM 0x5
  31. #define CXL_CM_CAP_CAP_HDM_VERSION 1
  32. /* HDM decoders CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure */
  33. #define CXL_HDM_DECODER_CAP_OFFSET 0x0
  34. #define CXL_HDM_DECODER_COUNT_MASK GENMASK(3, 0)
  35. #define CXL_HDM_DECODER_TARGET_COUNT_MASK GENMASK(7, 4)
  36. #define CXL_HDM_DECODER_INTERLEAVE_11_8 BIT(8)
  37. #define CXL_HDM_DECODER_INTERLEAVE_14_12 BIT(9)
  38. #define CXL_HDM_DECODER_CTRL_OFFSET 0x4
  39. #define CXL_HDM_DECODER_ENABLE BIT(1)
  40. #define CXL_HDM_DECODER0_BASE_LOW_OFFSET(i) (0x20 * (i) + 0x10)
  41. #define CXL_HDM_DECODER0_BASE_HIGH_OFFSET(i) (0x20 * (i) + 0x14)
  42. #define CXL_HDM_DECODER0_SIZE_LOW_OFFSET(i) (0x20 * (i) + 0x18)
  43. #define CXL_HDM_DECODER0_SIZE_HIGH_OFFSET(i) (0x20 * (i) + 0x1c)
  44. #define CXL_HDM_DECODER0_CTRL_OFFSET(i) (0x20 * (i) + 0x20)
  45. #define CXL_HDM_DECODER0_CTRL_IG_MASK GENMASK(3, 0)
  46. #define CXL_HDM_DECODER0_CTRL_IW_MASK GENMASK(7, 4)
  47. #define CXL_HDM_DECODER0_CTRL_LOCK BIT(8)
  48. #define CXL_HDM_DECODER0_CTRL_COMMIT BIT(9)
  49. #define CXL_HDM_DECODER0_CTRL_COMMITTED BIT(10)
  50. #define CXL_HDM_DECODER0_CTRL_COMMIT_ERROR BIT(11)
  51. #define CXL_HDM_DECODER0_CTRL_TYPE BIT(12)
  52. #define CXL_HDM_DECODER0_TL_LOW(i) (0x20 * (i) + 0x24)
  53. #define CXL_HDM_DECODER0_TL_HIGH(i) (0x20 * (i) + 0x28)
  54. #define CXL_HDM_DECODER0_SKIP_LOW(i) CXL_HDM_DECODER0_TL_LOW(i)
  55. #define CXL_HDM_DECODER0_SKIP_HIGH(i) CXL_HDM_DECODER0_TL_HIGH(i)
  56. static inline int cxl_hdm_decoder_count(u32 cap_hdr)
  57. {
  58. int val = FIELD_GET(CXL_HDM_DECODER_COUNT_MASK, cap_hdr);
  59. return val ? val * 2 : 1;
  60. }
  61. /* Encode defined in CXL 2.0 8.2.5.12.7 HDM Decoder Control Register */
  62. static inline int cxl_to_granularity(u16 ig, unsigned int *val)
  63. {
  64. if (ig > 6)
  65. return -EINVAL;
  66. *val = 256 << ig;
  67. return 0;
  68. }
  69. /* Encode defined in CXL ECN "3, 6, 12 and 16-way memory Interleaving" */
  70. static inline int cxl_to_ways(u8 eniw, unsigned int *val)
  71. {
  72. switch (eniw) {
  73. case 0 ... 4:
  74. *val = 1 << eniw;
  75. break;
  76. case 8 ... 10:
  77. *val = 3 << (eniw - 8);
  78. break;
  79. default:
  80. return -EINVAL;
  81. }
  82. return 0;
  83. }
  84. static inline int granularity_to_cxl(int g, u16 *ig)
  85. {
  86. if (g > SZ_16K || g < 256 || !is_power_of_2(g))
  87. return -EINVAL;
  88. *ig = ilog2(g) - 8;
  89. return 0;
  90. }
  91. static inline int ways_to_cxl(unsigned int ways, u8 *iw)
  92. {
  93. if (ways > 16)
  94. return -EINVAL;
  95. if (is_power_of_2(ways)) {
  96. *iw = ilog2(ways);
  97. return 0;
  98. }
  99. if (ways % 3)
  100. return -EINVAL;
  101. ways /= 3;
  102. if (!is_power_of_2(ways))
  103. return -EINVAL;
  104. *iw = ilog2(ways) + 8;
  105. return 0;
  106. }
  107. /* CXL 2.0 8.2.8.1 Device Capabilities Array Register */
  108. #define CXLDEV_CAP_ARRAY_OFFSET 0x0
  109. #define CXLDEV_CAP_ARRAY_CAP_ID 0
  110. #define CXLDEV_CAP_ARRAY_ID_MASK GENMASK_ULL(15, 0)
  111. #define CXLDEV_CAP_ARRAY_COUNT_MASK GENMASK_ULL(47, 32)
  112. /* CXL 2.0 8.2.8.2 CXL Device Capability Header Register */
  113. #define CXLDEV_CAP_HDR_CAP_ID_MASK GENMASK(15, 0)
  114. /* CXL 2.0 8.2.8.2.1 CXL Device Capabilities */
  115. #define CXLDEV_CAP_CAP_ID_DEVICE_STATUS 0x1
  116. #define CXLDEV_CAP_CAP_ID_PRIMARY_MAILBOX 0x2
  117. #define CXLDEV_CAP_CAP_ID_SECONDARY_MAILBOX 0x3
  118. #define CXLDEV_CAP_CAP_ID_MEMDEV 0x4000
  119. /* CXL 2.0 8.2.8.4 Mailbox Registers */
  120. #define CXLDEV_MBOX_CAPS_OFFSET 0x00
  121. #define CXLDEV_MBOX_CAP_PAYLOAD_SIZE_MASK GENMASK(4, 0)
  122. #define CXLDEV_MBOX_CTRL_OFFSET 0x04
  123. #define CXLDEV_MBOX_CTRL_DOORBELL BIT(0)
  124. #define CXLDEV_MBOX_CMD_OFFSET 0x08
  125. #define CXLDEV_MBOX_CMD_COMMAND_OPCODE_MASK GENMASK_ULL(15, 0)
  126. #define CXLDEV_MBOX_CMD_PAYLOAD_LENGTH_MASK GENMASK_ULL(36, 16)
  127. #define CXLDEV_MBOX_STATUS_OFFSET 0x10
  128. #define CXLDEV_MBOX_STATUS_RET_CODE_MASK GENMASK_ULL(47, 32)
  129. #define CXLDEV_MBOX_BG_CMD_STATUS_OFFSET 0x18
  130. #define CXLDEV_MBOX_PAYLOAD_OFFSET 0x20
/*
 * Using struct_group() allows for per register-block-type helper routines,
 * without requiring block-type agnostic code to include the prefix. The
 * tagged groups (cxl_component_regs, cxl_device_regs) can be passed to
 * helpers independently of the containing struct cxl_regs.
 */
struct cxl_regs {
	/*
	 * Common set of CXL Component register block base pointers
	 * @hdm_decoder: CXL 2.0 8.2.5.12 CXL HDM Decoder Capability Structure
	 */
	struct_group_tagged(cxl_component_regs, component,
		void __iomem *hdm_decoder;
	);
	/*
	 * Common set of CXL Device register block base pointers
	 * @status: CXL 2.0 8.2.8.3 Device Status Registers
	 * @mbox: CXL 2.0 8.2.8.4 Mailbox Registers
	 * @memdev: CXL 2.0 8.2.8.5 Memory Device Registers
	 */
	struct_group_tagged(cxl_device_regs, device_regs,
		void __iomem *status, *mbox, *memdev;
	);
};
/**
 * struct cxl_reg_map - discovery state for a single register block
 * @valid: true if the block was found during register probing
 * @offset: offset of the register block (NOTE(review): base of the offset
 *          — BAR vs enclosing block — should be confirmed against the
 *          cxl_probe_*_regs() implementations)
 * @size: length of the register block in bytes
 */
struct cxl_reg_map {
	bool valid;
	unsigned long offset;
	unsigned long size;
};
/* Register blocks discovered within a CXL Component register block */
struct cxl_component_reg_map {
	struct cxl_reg_map hdm_decoder;
};
/* Register blocks discovered within a CXL Device register block */
struct cxl_device_reg_map {
	struct cxl_reg_map status;
	struct cxl_reg_map mbox;
	struct cxl_reg_map memdev;
};
/**
 * struct cxl_register_map - DVSEC harvested register block mapping parameters
 * @base: virtual base of the register-block-BAR + @block_offset
 * @block_offset: offset to start of register block in @barno
 * @reg_type: see enum cxl_regloc_type
 * @barno: PCI BAR number containing the register block
 * @component_map: cxl_reg_map for component registers
 * @device_map: cxl_reg_maps for device registers
 */
struct cxl_register_map {
	void __iomem *base;
	u64 block_offset;
	u8 reg_type;
	u8 barno;
	/* which union member is valid presumably follows @reg_type — confirm */
	union {
		struct cxl_component_reg_map component_map;
		struct cxl_device_reg_map device_map;
	};
};
  185. void cxl_probe_component_regs(struct device *dev, void __iomem *base,
  186. struct cxl_component_reg_map *map);
  187. void cxl_probe_device_regs(struct device *dev, void __iomem *base,
  188. struct cxl_device_reg_map *map);
  189. int cxl_map_component_regs(struct pci_dev *pdev,
  190. struct cxl_component_regs *regs,
  191. struct cxl_register_map *map);
  192. int cxl_map_device_regs(struct pci_dev *pdev,
  193. struct cxl_device_regs *regs,
  194. struct cxl_register_map *map);
  195. enum cxl_regloc_type;
  196. int cxl_find_regblock(struct pci_dev *pdev, enum cxl_regloc_type type,
  197. struct cxl_register_map *map);
  198. void __iomem *devm_cxl_iomap_block(struct device *dev, resource_size_t addr,
  199. resource_size_t length);
  200. #define CXL_RESOURCE_NONE ((resource_size_t) -1)
  201. #define CXL_TARGET_STRLEN 20
  202. /*
  203. * cxl_decoder flags that define the type of memory / devices this
  204. * decoder supports as well as configuration lock status See "CXL 2.0
  205. * 8.2.5.12.7 CXL HDM Decoder 0 Control Register" for details.
  206. */
  207. #define CXL_DECODER_F_RAM BIT(0)
  208. #define CXL_DECODER_F_PMEM BIT(1)
  209. #define CXL_DECODER_F_TYPE2 BIT(2)
  210. #define CXL_DECODER_F_TYPE3 BIT(3)
  211. #define CXL_DECODER_F_LOCK BIT(4)
  212. #define CXL_DECODER_F_ENABLE BIT(5)
  213. #define CXL_DECODER_F_MASK GENMASK(5, 0)
/*
 * Decoder target device types; values mirror the CXL type-2 (accelerator)
 * and type-3 (memory expander) device classes (cf. CXL_DECODER_F_TYPE2/3).
 */
enum cxl_decoder_type {
	CXL_DECODER_ACCELERATOR = 2,
	CXL_DECODER_EXPANDER = 3,
};
  218. /*
  219. * Current specification goes up to 8, double that seems a reasonable
  220. * software max for the foreseeable future
  221. */
  222. #define CXL_DECODER_MAX_INTERLEAVE 16
  223. #define CXL_DECODER_MIN_GRANULARITY 256
/**
 * struct cxl_decoder - Common CXL HDM Decoder Attributes
 * @dev: this decoder's device
 * @id: kernel device name id
 * @hpa_range: Host physical address range mapped by this decoder
 * @interleave_ways: number of cxl_dports in this decode
 * @interleave_granularity: data stride per dport
 * @target_type: accelerator vs expander (type2 vs type3) selector
 * @region: currently assigned region for this decoder
 * @flags: memory type capabilities and locking (CXL_DECODER_F_* flags)
 * @commit: device/decoder-type specific callback to commit settings to hw
 * @reset: device/decoder-type specific callback to reset hw settings
 */
struct cxl_decoder {
	struct device dev;
	int id;
	struct range hpa_range;
	int interleave_ways;
	int interleave_granularity;
	enum cxl_decoder_type target_type;
	struct cxl_region *region;
	unsigned long flags;
	int (*commit)(struct cxl_decoder *cxld);
	int (*reset)(struct cxl_decoder *cxld);
};
/*
 * CXL_DECODER_DEAD prevents endpoints from being reattached to regions
 * while cxld_unregister() is running
 */
enum cxl_decoder_mode {
	CXL_DECODER_NONE,	/* no mode / capacity assigned */
	CXL_DECODER_RAM,	/* volatile memory partition */
	CXL_DECODER_PMEM,	/* persistent memory partition */
	CXL_DECODER_MIXED,	/* spans both partitions — presumably not provisionable; confirm */
	CXL_DECODER_DEAD,	/* decoder unregistering, see comment above */
};
/**
 * struct cxl_endpoint_decoder - Endpoint / SPA to DPA decoder
 * @cxld: base cxl_decoder_object
 * @dpa_res: actively claimed DPA span of this decoder
 * @skip: offset into @dpa_res where @cxld.hpa_range maps
 * @mode: which memory type / access-mode-partition this decoder targets
 *        (see enum cxl_decoder_mode)
 * @pos: interleave position in @cxld.region
 */
struct cxl_endpoint_decoder {
	struct cxl_decoder cxld;
	struct resource *dpa_res;
	resource_size_t skip;
	enum cxl_decoder_mode mode;
	int pos;
};
/**
 * struct cxl_switch_decoder - Switch specific CXL HDM Decoder
 * @cxld: base cxl_decoder object
 * @target_lock: coordinate coherent reads of the target list
 * @nr_targets: number of elements in @target
 * @target: active ordered target list in current decoder configuration
 *          (flexible array, sized by @nr_targets)
 *
 * The 'switch' decoder type represents the decoder instances of cxl_port's that
 * route from the root of a CXL memory decode topology to the endpoints. They
 * come in two flavors, root-level decoders, statically defined by platform
 * firmware, and mid-level decoders, where interleave-granularity,
 * interleave-width, and the target list are mutable.
 */
struct cxl_switch_decoder {
	struct cxl_decoder cxld;
	seqlock_t target_lock;
	int nr_targets;
	struct cxl_dport *target[];
};
/**
 * struct cxl_root_decoder - Static platform CXL address decoder
 * @res: host / parent resource for region allocations
 * @region_id: region id for next region provisioning event
 * @calc_hb: which host bridge covers the n'th position by granularity
 * @cxlsd: base cxl switch decoder
 *
 * @cxlsd must remain the last member: it ends in a flexible array
 * (cxl_switch_decoder.target[]).
 */
struct cxl_root_decoder {
	struct resource *res;
	atomic_t region_id;
	struct cxl_dport *(*calc_hb)(struct cxl_root_decoder *cxlrd, int pos);
	struct cxl_switch_decoder cxlsd;
};
/*
 * enum cxl_config_state - State machine for region configuration
 * @CXL_CONFIG_IDLE: Any sysfs attribute can be written freely
 * @CXL_CONFIG_INTERLEAVE_ACTIVE: region size has been set, no more
 * changes to interleave_ways or interleave_granularity
 * @CXL_CONFIG_ACTIVE: All targets have been added the region is now
 * active
 * @CXL_CONFIG_RESET_PENDING: see commit_store()
 * @CXL_CONFIG_COMMIT: Soft-config has been committed to hardware
 */
enum cxl_config_state {
	CXL_CONFIG_IDLE,
	CXL_CONFIG_INTERLEAVE_ACTIVE,
	CXL_CONFIG_ACTIVE,
	CXL_CONFIG_RESET_PENDING,
	CXL_CONFIG_COMMIT,
};
/**
 * struct cxl_region_params - region settings
 * @state: allow the driver to lockdown further parameter changes
 * @uuid: unique id for persistent regions
 * @interleave_ways: number of endpoints in the region
 * @interleave_granularity: capacity each endpoint contributes to a stripe
 * @res: allocated iomem capacity for this region
 * @targets: active ordered targets in current decoder configuration
 *           (up to CXL_DECODER_MAX_INTERLEAVE entries)
 * @nr_targets: number of targets
 *
 * State transitions are protected by the cxl_region_rwsem
 */
struct cxl_region_params {
	enum cxl_config_state state;
	uuid_t uuid;
	int interleave_ways;
	int interleave_granularity;
	struct resource *res;
	struct cxl_endpoint_decoder *targets[CXL_DECODER_MAX_INTERLEAVE];
	int nr_targets;
};
/**
 * struct cxl_region - CXL region
 * @dev: This region's device
 * @id: This region's id. Id is globally unique across all regions
 * @mode: Endpoint decoder allocation / access mode
 * @type: Endpoint decoder target type
 * @params: active + config params for the region
 */
struct cxl_region {
	struct device dev;
	int id;
	enum cxl_decoder_mode mode;
	enum cxl_decoder_type type;
	struct cxl_region_params params;
};
/**
 * enum cxl_nvdimm_brige_state - state machine for managing bus rescans
 * @CXL_NVB_NEW: Set at bridge create and after cxl_pmem_wq is destroyed
 * @CXL_NVB_DEAD: Set at bridge unregistration to preclude async probing
 * @CXL_NVB_ONLINE: Target state after successful ->probe()
 * @CXL_NVB_OFFLINE: Target state after ->remove() or failed ->probe()
 *
 * Note: the enum name carries a historical "brige" spelling; it is kept
 * as-is because the identifier is referenced by users of this header.
 */
enum cxl_nvdimm_brige_state {
	CXL_NVB_NEW,
	CXL_NVB_DEAD,
	CXL_NVB_ONLINE,
	CXL_NVB_OFFLINE,
};
/**
 * struct cxl_nvdimm_bridge - anchor for instantiating an nvdimm_bus for CXL
 * @id: kernel device name id
 * @dev: this bridge's device
 * @port: CXL port associated with this bridge
 * @nvdimm_bus: LIBNVDIMM bus registered on behalf of this bridge
 * @nd_desc: LIBNVDIMM bus descriptor backing @nvdimm_bus
 * @state_work: work item, presumably driving @state transitions — confirm
 *              against the cxl_pmem driver
 * @state: see enum cxl_nvdimm_brige_state
 */
struct cxl_nvdimm_bridge {
	int id;
	struct device dev;
	struct cxl_port *port;
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_bus_descriptor nd_desc;
	struct work_struct state_work;
	enum cxl_nvdimm_brige_state state;
};
/**
 * struct cxl_nvdimm - CXL memdev representation on the LIBNVDIMM bus
 * @dev: this nvdimm's device
 * @cxlmd: the CXL memory device this nvdimm fronts
 * @bridge: bridge hosting the nvdimm_bus this device is registered on
 * @pmem_regions: pmem regions referencing this nvdimm
 *                (NOTE(review): xarray keying scheme not visible here)
 */
struct cxl_nvdimm {
	struct device dev;
	struct cxl_memdev *cxlmd;
	struct cxl_nvdimm_bridge *bridge;
	struct xarray pmem_regions;
};
/**
 * struct cxl_pmem_region_mapping - one memdev's contribution to a pmem region
 * @cxlmd: memdev contributing capacity
 * @cxl_nvd: nvdimm proxy for @cxlmd
 * @start: start of the mapped span (assumed device-relative address —
 *         confirm against users of this struct)
 * @size: size of the mapped span in bytes
 * @position: interleave position of this mapping within the region
 */
struct cxl_pmem_region_mapping {
	struct cxl_memdev *cxlmd;
	struct cxl_nvdimm *cxl_nvd;
	u64 start;
	u64 size;
	int position;
};
/**
 * struct cxl_pmem_region - persistent memory flavor of a cxl_region
 * @dev: this pmem region's device
 * @cxlr: parent CXL region
 * @nd_region: LIBNVDIMM region instantiated for this pmem region
 * @bridge: bridge hosting the nvdimm_bus for @nd_region
 * @hpa_range: host physical address range covered by this region
 * @nr_mappings: number of entries in @mapping
 * @mapping: per-memdev contributions (see struct cxl_pmem_region_mapping)
 */
struct cxl_pmem_region {
	struct device dev;
	struct cxl_region *cxlr;
	struct nd_region *nd_region;
	struct cxl_nvdimm_bridge *bridge;
	struct range hpa_range;
	int nr_mappings;
	struct cxl_pmem_region_mapping mapping[];
};
/**
 * struct cxl_port - logical collection of upstream port devices and
 * downstream port devices to construct a CXL memory
 * decode hierarchy.
 * @dev: this port's device
 * @uport: PCI or platform device implementing the upstream port capability
 * @host_bridge: Shortcut to the platform attach point for this port
 * @id: id for port device-name
 * @dports: cxl_dport instances referenced by decoders
 * @endpoints: cxl_ep instances, endpoints that are a descendant of this port
 * @regions: cxl_region_ref instances, regions mapped by this port
 * @parent_dport: dport that points to this port in the parent
 * @decoder_ida: allocator for decoder ids
 * @nr_dports: number of entries in @dports
 * @hdm_end: track last allocated HDM decoder instance for allocation ordering
 * @commit_end: cursor to track highest committed decoder for commit ordering
 * @component_reg_phys: component register capability base address (optional)
 * @dead: last ep has been removed, force port re-creation
 * @depth: How deep this port is relative to the root. depth 0 is the root.
 * @cdat: Cached CDAT data
 * @cdat_available: Should a CDAT attribute be available in sysfs
 */
struct cxl_port {
	struct device dev;
	struct device *uport;
	struct device *host_bridge;
	int id;
	struct xarray dports;
	struct xarray endpoints;
	struct xarray regions;
	struct cxl_dport *parent_dport;
	struct ida decoder_ida;
	int nr_dports;
	int hdm_end;
	int commit_end;
	resource_size_t component_reg_phys;
	bool dead;
	unsigned int depth;
	struct cxl_cdat {
		void *table;	/* cached CDAT data buffer */
		size_t length;	/* length of @table in bytes */
	} cdat;
	bool cdat_available;
};
  448. static inline struct cxl_dport *
  449. cxl_find_dport_by_dev(struct cxl_port *port, const struct device *dport_dev)
  450. {
  451. return xa_load(&port->dports, (unsigned long)dport_dev);
  452. }
/**
 * struct cxl_dport - CXL downstream port
 * @dport: PCI bridge or firmware device representing the downstream link
 * @port_id: unique hardware identifier for dport in decoder target list
 * @component_reg_phys: downstream port component registers (physical base)
 * @port: reference to cxl_port that contains this downstream port
 */
struct cxl_dport {
	struct device *dport;
	int port_id;
	resource_size_t component_reg_phys;
	struct cxl_port *port;
};
/**
 * struct cxl_ep - track an endpoint's interest in a port
 * @ep: device that hosts a generic CXL endpoint (expander or accelerator)
 * @dport: which dport routes to this endpoint on @port
 * @next: cxl switch port across the link attached to @dport, or NULL if
 *        attached directly to an endpoint
 */
struct cxl_ep {
	struct device *ep;
	struct cxl_dport *dport;
	struct cxl_port *next;
};
/**
 * struct cxl_region_ref - track a region's interest in a port
 * @port: point in topology to install this reference
 * @decoder: decoder assigned for @region in @port
 * @region: region for this reference
 * @endpoints: cxl_ep references for region members beneath @port
 * @nr_targets_set: track how many targets have been programmed during setup
 * @nr_eps: number of endpoints beneath @port
 * @nr_targets: number of distinct targets needed to reach @nr_eps
 *
 * Instances are stored in the owning port's @regions xarray (see
 * struct cxl_port).
 */
struct cxl_region_ref {
	struct cxl_port *port;
	struct cxl_decoder *decoder;
	struct cxl_region *region;
	struct xarray endpoints;
	int nr_targets_set;
	int nr_eps;
	int nr_targets;
};
  497. /*
  498. * The platform firmware device hosting the root is also the top of the
  499. * CXL port topology. All other CXL ports have another CXL port as their
  500. * parent and their ->uport / host device is out-of-line of the port
  501. * ancestry.
  502. */
  503. static inline bool is_cxl_root(struct cxl_port *port)
  504. {
  505. return port->uport == port->dev.parent;
  506. }
  507. bool is_cxl_port(struct device *dev);
  508. struct cxl_port *to_cxl_port(struct device *dev);
  509. struct pci_bus;
  510. int devm_cxl_register_pci_bus(struct device *host, struct device *uport,
  511. struct pci_bus *bus);
  512. struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port);
  513. struct cxl_port *devm_cxl_add_port(struct device *host, struct device *uport,
  514. resource_size_t component_reg_phys,
  515. struct cxl_dport *parent_dport);
  516. struct cxl_port *find_cxl_root(struct device *dev);
  517. int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
  518. int cxl_bus_rescan(void);
  519. struct cxl_port *cxl_mem_find_port(struct cxl_memdev *cxlmd,
  520. struct cxl_dport **dport);
  521. bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd);
  522. struct cxl_dport *devm_cxl_add_dport(struct cxl_port *port,
  523. struct device *dport, int port_id,
  524. resource_size_t component_reg_phys);
  525. struct cxl_decoder *to_cxl_decoder(struct device *dev);
  526. struct cxl_root_decoder *to_cxl_root_decoder(struct device *dev);
  527. struct cxl_switch_decoder *to_cxl_switch_decoder(struct device *dev);
  528. struct cxl_endpoint_decoder *to_cxl_endpoint_decoder(struct device *dev);
  529. bool is_root_decoder(struct device *dev);
  530. bool is_switch_decoder(struct device *dev);
  531. bool is_endpoint_decoder(struct device *dev);
  532. struct cxl_root_decoder *cxl_root_decoder_alloc(struct cxl_port *port,
  533. unsigned int nr_targets);
  534. struct cxl_switch_decoder *cxl_switch_decoder_alloc(struct cxl_port *port,
  535. unsigned int nr_targets);
  536. int cxl_decoder_add(struct cxl_decoder *cxld, int *target_map);
  537. struct cxl_endpoint_decoder *cxl_endpoint_decoder_alloc(struct cxl_port *port);
  538. int cxl_decoder_add_locked(struct cxl_decoder *cxld, int *target_map);
  539. int cxl_decoder_autoremove(struct device *host, struct cxl_decoder *cxld);
  540. int cxl_endpoint_autoremove(struct cxl_memdev *cxlmd, struct cxl_port *endpoint);
  541. struct cxl_hdm;
  542. struct cxl_hdm *devm_cxl_setup_hdm(struct cxl_port *port);
  543. int devm_cxl_enumerate_decoders(struct cxl_hdm *cxlhdm);
  544. int devm_cxl_add_passthrough_decoder(struct cxl_port *port);
  545. bool is_cxl_region(struct device *dev);
  546. extern struct bus_type cxl_bus_type;
/**
 * struct cxl_driver - driver registration for devices on the cxl bus
 * @name: driver name
 * @probe: bind callback for a matching device
 * @remove: unbind callback
 * @drv: embedded generic device_driver
 * @id: device-class id this driver services — presumably one of the
 *      CXL_DEVICE_* ids below; confirm against the bus match logic
 */
struct cxl_driver {
	const char *name;
	int (*probe)(struct device *dev);
	void (*remove)(struct device *dev);
	struct device_driver drv;
	int id;
};
  554. static inline struct cxl_driver *to_cxl_drv(struct device_driver *drv)
  555. {
  556. return container_of(drv, struct cxl_driver, drv);
  557. }
  558. int __cxl_driver_register(struct cxl_driver *cxl_drv, struct module *owner,
  559. const char *modname);
  560. #define cxl_driver_register(x) __cxl_driver_register(x, THIS_MODULE, KBUILD_MODNAME)
  561. void cxl_driver_unregister(struct cxl_driver *cxl_drv);
  562. #define module_cxl_driver(__cxl_driver) \
  563. module_driver(__cxl_driver, cxl_driver_register, cxl_driver_unregister)
  564. #define CXL_DEVICE_NVDIMM_BRIDGE 1
  565. #define CXL_DEVICE_NVDIMM 2
  566. #define CXL_DEVICE_PORT 3
  567. #define CXL_DEVICE_ROOT 4
  568. #define CXL_DEVICE_MEMORY_EXPANDER 5
  569. #define CXL_DEVICE_REGION 6
  570. #define CXL_DEVICE_PMEM_REGION 7
  571. #define MODULE_ALIAS_CXL(type) MODULE_ALIAS("cxl:t" __stringify(type) "*")
  572. #define CXL_MODALIAS_FMT "cxl:t%d"
  573. struct cxl_nvdimm_bridge *to_cxl_nvdimm_bridge(struct device *dev);
  574. struct cxl_nvdimm_bridge *devm_cxl_add_nvdimm_bridge(struct device *host,
  575. struct cxl_port *port);
  576. struct cxl_nvdimm *to_cxl_nvdimm(struct device *dev);
  577. bool is_cxl_nvdimm(struct device *dev);
  578. bool is_cxl_nvdimm_bridge(struct device *dev);
  579. int devm_cxl_add_nvdimm(struct device *host, struct cxl_memdev *cxlmd);
  580. struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct device *dev);
#ifdef CONFIG_CXL_REGION
bool is_cxl_pmem_region(struct device *dev);
struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev);
#else
/* Stubs: with CONFIG_CXL_REGION=n no device is ever a pmem region */
static inline bool is_cxl_pmem_region(struct device *dev)
{
	return false;
}
static inline struct cxl_pmem_region *to_cxl_pmem_region(struct device *dev)
{
	return NULL;
}
#endif
  594. /*
  595. * Unit test builds overrides this to __weak, find the 'strong' version
  596. * of these symbols in tools/testing/cxl/.
  597. */
  598. #ifndef __mock
  599. #define __mock static
  600. #endif
  601. #endif /* __CXL_H__ */