  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/platform_device.h>
  3. #include <linux/memregion.h>
  4. #include <linux/module.h>
  5. #include <linux/dax.h>
  6. #include <linux/mm.h>
/*
 * Module parameter "disable" (read-only, 0444): when set, skip all hmem
 * platform-device registration.
 */
static bool nohmem;
module_param_named(disable, nohmem, bool, 0444);
/*
 * Parent resource under which every range this driver claims is tracked.
 * Spans the maximal address range (0 .. -1) so any claimed region nests
 * inside it; used to detect and reject duplicate registrations.
 */
static struct resource hmem_active = {
	.name = "HMEM devices",
	.start = 0,
	.end = -1,
	.flags = IORESOURCE_MEM,
};
  15. void hmem_register_device(int target_nid, struct resource *r)
  16. {
  17. /* define a clean / non-busy resource for the platform device */
  18. struct resource res = {
  19. .start = r->start,
  20. .end = r->end,
  21. .flags = IORESOURCE_MEM,
  22. .desc = IORES_DESC_SOFT_RESERVED,
  23. };
  24. struct platform_device *pdev;
  25. struct memregion_info info;
  26. int rc, id;
  27. if (nohmem)
  28. return;
  29. rc = region_intersects(res.start, resource_size(&res), IORESOURCE_MEM,
  30. IORES_DESC_SOFT_RESERVED);
  31. if (rc != REGION_INTERSECTS)
  32. return;
  33. id = memregion_alloc(GFP_KERNEL);
  34. if (id < 0) {
  35. pr_err("memregion allocation failure for %pr\n", &res);
  36. return;
  37. }
  38. pdev = platform_device_alloc("hmem", id);
  39. if (!pdev) {
  40. pr_err("hmem device allocation failure for %pr\n", &res);
  41. goto out_pdev;
  42. }
  43. if (!__request_region(&hmem_active, res.start, resource_size(&res),
  44. dev_name(&pdev->dev), 0)) {
  45. dev_dbg(&pdev->dev, "hmem range %pr already active\n", &res);
  46. goto out_active;
  47. }
  48. pdev->dev.numa_node = numa_map_to_online_node(target_nid);
  49. info = (struct memregion_info) {
  50. .target_node = target_nid,
  51. };
  52. rc = platform_device_add_data(pdev, &info, sizeof(info));
  53. if (rc < 0) {
  54. pr_err("hmem memregion_info allocation failure for %pr\n", &res);
  55. goto out_resource;
  56. }
  57. rc = platform_device_add_resources(pdev, &res, 1);
  58. if (rc < 0) {
  59. pr_err("hmem resource allocation failure for %pr\n", &res);
  60. goto out_resource;
  61. }
  62. rc = platform_device_add(pdev);
  63. if (rc < 0) {
  64. dev_err(&pdev->dev, "device add failed for %pr\n", &res);
  65. goto out_resource;
  66. }
  67. return;
  68. out_resource:
  69. __release_region(&hmem_active, res.start, resource_size(&res));
  70. out_active:
  71. platform_device_put(pdev);
  72. out_pdev:
  73. memregion_free(id);
  74. }
  75. static __init int hmem_register_one(struct resource *res, void *data)
  76. {
  77. hmem_register_device(phys_to_target_node(res->start), res);
  78. return 0;
  79. }
  80. static __init int hmem_init(void)
  81. {
  82. walk_iomem_res_desc(IORES_DESC_SOFT_RESERVED,
  83. IORESOURCE_MEM, 0, -1, NULL, hmem_register_one);
  84. return 0;
  85. }
  86. /*
  87. * As this is a fallback for address ranges unclaimed by the ACPI HMAT
  88. * parsing it must be at an initcall level greater than hmat_init().
  89. */
  90. late_initcall(hmem_init);