// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation. All rights reserved. */
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "cxlmem.h"
#include "cxlpci.h"
/**
 * DOC: cxl mem
 *
 * CXL memory endpoint devices and switches are CXL capable devices that are
 * participating in CXL.mem protocol. Their functionality builds on top of the
 * CXL.io protocol that allows enumerating and configuring components via
 * standard PCI mechanisms.
 *
 * The cxl_mem driver owns kicking off the enumeration of this CXL.mem
 * capability. With the detection of a CXL capable endpoint, the driver will
 * walk up to find the platform specific port it is connected to, and determine
 * if there are intervening switches in the path. If there are switches, a
 * secondary action is to enumerate those (implemented in cxl_core). Finally the
 * cxl_mem driver adds the device it is bound to as a CXL endpoint-port for use
 * in higher level operations.
 */
  25. static void enable_suspend(void *data)
  26. {
  27. cxl_mem_active_dec();
  28. }
  29. static void remove_debugfs(void *dentry)
  30. {
  31. debugfs_remove_recursive(dentry);
  32. }
  33. static int cxl_mem_dpa_show(struct seq_file *file, void *data)
  34. {
  35. struct device *dev = file->private;
  36. struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  37. cxl_dpa_debug(file, cxlmd->cxlds);
  38. return 0;
  39. }
  40. static int devm_cxl_add_endpoint(struct cxl_memdev *cxlmd,
  41. struct cxl_dport *parent_dport)
  42. {
  43. struct cxl_port *parent_port = parent_dport->port;
  44. struct cxl_dev_state *cxlds = cxlmd->cxlds;
  45. struct cxl_port *endpoint, *iter, *down;
  46. int rc;
  47. /*
  48. * Now that the path to the root is established record all the
  49. * intervening ports in the chain.
  50. */
  51. for (iter = parent_port, down = NULL; !is_cxl_root(iter);
  52. down = iter, iter = to_cxl_port(iter->dev.parent)) {
  53. struct cxl_ep *ep;
  54. ep = cxl_ep_load(iter, cxlmd);
  55. ep->next = down;
  56. }
  57. endpoint = devm_cxl_add_port(&parent_port->dev, &cxlmd->dev,
  58. cxlds->component_reg_phys, parent_dport);
  59. if (IS_ERR(endpoint))
  60. return PTR_ERR(endpoint);
  61. rc = cxl_endpoint_autoremove(cxlmd, endpoint);
  62. if (rc)
  63. return rc;
  64. if (!endpoint->dev.driver) {
  65. dev_err(&cxlmd->dev, "%s failed probe\n",
  66. dev_name(&endpoint->dev));
  67. return -ENXIO;
  68. }
  69. return 0;
  70. }
  71. static int cxl_mem_probe(struct device *dev)
  72. {
  73. struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
  74. struct cxl_port *parent_port;
  75. struct cxl_dport *dport;
  76. struct dentry *dentry;
  77. int rc;
  78. /*
  79. * Someone is trying to reattach this device after it lost its port
  80. * connection (an endpoint port previously registered by this memdev was
  81. * disabled). This racy check is ok because if the port is still gone,
  82. * no harm done, and if the port hierarchy comes back it will re-trigger
  83. * this probe. Port rescan and memdev detach work share the same
  84. * single-threaded workqueue.
  85. */
  86. if (work_pending(&cxlmd->detach_work))
  87. return -EBUSY;
  88. dentry = cxl_debugfs_create_dir(dev_name(dev));
  89. debugfs_create_devm_seqfile(dev, "dpamem", dentry, cxl_mem_dpa_show);
  90. rc = devm_add_action_or_reset(dev, remove_debugfs, dentry);
  91. if (rc)
  92. return rc;
  93. rc = devm_cxl_enumerate_ports(cxlmd);
  94. if (rc)
  95. return rc;
  96. parent_port = cxl_mem_find_port(cxlmd, &dport);
  97. if (!parent_port) {
  98. dev_err(dev, "CXL port topology not found\n");
  99. return -ENXIO;
  100. }
  101. device_lock(&parent_port->dev);
  102. if (!parent_port->dev.driver) {
  103. dev_err(dev, "CXL port topology %s not enabled\n",
  104. dev_name(&parent_port->dev));
  105. rc = -ENXIO;
  106. goto unlock;
  107. }
  108. rc = devm_cxl_add_endpoint(cxlmd, dport);
  109. unlock:
  110. device_unlock(&parent_port->dev);
  111. put_device(&parent_port->dev);
  112. if (rc)
  113. return rc;
  114. /*
  115. * The kernel may be operating out of CXL memory on this device,
  116. * there is no spec defined way to determine whether this device
  117. * preserves contents over suspend, and there is no simple way
  118. * to arrange for the suspend image to avoid CXL memory which
  119. * would setup a circular dependency between PCI resume and save
  120. * state restoration.
  121. *
  122. * TODO: support suspend when all the regions this device is
  123. * hosting are locked and covered by the system address map,
  124. * i.e. platform firmware owns restoring the HDM configuration
  125. * that it locked.
  126. */
  127. cxl_mem_active_inc();
  128. return devm_add_action_or_reset(dev, enable_suspend, NULL);
  129. }
  130. static struct cxl_driver cxl_mem_driver = {
  131. .name = "cxl_mem",
  132. .probe = cxl_mem_probe,
  133. .id = CXL_DEVICE_MEMORY_EXPANDER,
  134. };
  135. module_cxl_driver(cxl_mem_driver);
  136. MODULE_LICENSE("GPL v2");
  137. MODULE_IMPORT_NS(CXL);
  138. MODULE_ALIAS_CXL(CXL_DEVICE_MEMORY_EXPANDER);
  139. /*
  140. * create_endpoint() wants to validate port driver attach immediately after
  141. * endpoint registration.
  142. */
  143. MODULE_SOFTDEP("pre: cxl_port");