qcom_dt_parser.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_reserved_mem.h>
#include <linux/qcom_dma_heap.h>

#include "qcom_dt_parser.h"

#ifdef CONFIG_PANIC_ON_QCOM_DMA_HEAPS_FAILURE
#define QCOM_DMA_HEAP_WARN(fmt...) panic(fmt)
#else /* CONFIG_PANIC_ON_QCOM_DMA_HEAPS_FAILURE */
#define QCOM_DMA_HEAP_WARN(fmt...) WARN(1, fmt)
#endif /* CONFIG_PANIC_ON_QCOM_DMA_HEAPS_FAILURE */
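
/*
 * For reference, a hypothetical devicetree child node carrying the
 * properties parsed by populate_heap() below. The node name, heap name,
 * property values, and the memory-region label are illustrative, not
 * taken from a real board file:
 *
 *	qcom,example-heap {
 *		qcom,dma-heap-name = "qcom,example";
 *		qcom,dma-heap-type = <2>;
 *		qcom,uncached-heap;
 *		qcom,max-align = <9>;
 *		memory-region = <&example_heap_region>;
 *	};
 */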
static int populate_heap(struct device_node *node,
			 struct platform_heap *heap)
{
	int ret;

	/* Mandatory properties */
	ret = of_property_read_string(node, "qcom,dma-heap-name", &heap->name);
	if (ret)
		goto err;

	ret = of_property_read_u32(node, "qcom,dma-heap-type", &heap->type);
	if (ret) {
		pr_err("Reading %s property in node %s failed with err %d.\n",
		       "qcom,dma-heap-type", of_node_full_name(node), ret);
		goto err;
	}

	/*
	 * Optional properties: -EINVAL means the property is simply
	 * absent, which is not an error here.
	 */
	heap->is_uncached = of_property_read_bool(node, "qcom,uncached-heap");

	ret = of_property_read_u32(node, "qcom,token", &heap->token);
	if (ret && ret != -EINVAL)
		goto err;

	ret = of_property_read_u32(node, "qcom,max-align", &heap->max_align);
	if (ret && ret != -EINVAL)
		goto err;

	return 0;

err:
	if (ret)
		QCOM_DMA_HEAP_WARN("%s: Unable to populate heap %s, err: %d\n",
				   __func__, of_node_full_name(node), ret);
	return ret;
}
void free_pdata(const struct platform_data *pdata)
{
	kfree(pdata->heaps);
	kfree(pdata);
}
static int heap_dt_init(struct device_node *mem_node,
			struct platform_heap *heap)
{
	const __be32 *basep;
	u64 base, size;
	struct device *dev = heap->dev;
	struct reserved_mem *rmem;
	int ret = 0;

	rmem = of_reserved_mem_lookup(mem_node);
	if (!rmem) {
		dev_err(dev, "Failed to find reserved memory region\n");
		return -EINVAL;
	}
	/*
	 * We only need to call this when the memory-region is managed by
	 * a reserved memory region driver (e.g. CMA, coherent, etc.). In
	 * that case, it will have ops for device-specific initialization
	 * of the memory region. Otherwise, we have a pure carveout, which
	 * need not be initialized.
	 */
	if (rmem->ops) {
		ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
		if (ret) {
			dev_err(dev,
				"Failed to initialize memory region rc: %d\n",
				ret);
			return ret;
		}
	}

	basep = of_get_address(mem_node, 0, &size, NULL);
	if (basep) {
		base = of_translate_address(mem_node, basep);
		if (base != OF_BAD_ADDR) {
			heap->base = base;
			heap->size = size;
		} else {
			ret = -EINVAL;
			dev_err(dev, "Failed to get heap base/size\n");
			of_reserved_mem_device_release(dev);
		}
	}

	heap->is_nomap = of_property_read_bool(mem_node, "no-map");

#if defined(CONFIG_RBIN)
	/* Fall back to the reserved_mem base/size for "rbin" regions */
	if (strncmp(rmem->name, "rbin", 4) == 0) {
		if (!heap->base && !heap->size && rmem->base && rmem->size) {
			heap->base = rmem->base;
			heap->size = rmem->size;
		}
	}
#endif

	return ret;
}
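
/*
 * For reference, a hypothetical reserved-memory node of the kind
 * heap_dt_init() expects as @mem_node. The label, unit address, and
 * size are illustrative; "reg" and "no-map" are the properties the
 * code above actually reads:
 *
 *	reserved-memory {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *		ranges;
 *
 *		example_heap_region: example-region@a0000000 {
 *			reg = <0x0 0xa0000000 0x0 0x400000>;
 *			no-map;
 *		};
 *	};
 */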
static void release_reserved_memory_regions(struct platform_heap *heaps,
					    int idx)
{
	struct device *dev;
	struct device_node *node, *mem_node;

	for (idx = idx - 1; idx >= 0; idx--) {
		dev = heaps[idx].dev;
		node = dev->of_node;
		mem_node = of_parse_phandle(node, "memory-region", 0);
		if (mem_node)
			of_reserved_mem_device_release(dev);
		of_node_put(mem_node);
	}
}
struct platform_data *parse_heap_dt(struct platform_device *pdev)
{
	struct platform_data *pdata = NULL;
	struct device_node *node;
	struct device_node *mem_node;
	struct platform_device *new_dev = NULL;
	const struct device_node *dt_node = pdev->dev.of_node;
	int ret;
	int idx = 0;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	for_each_available_child_of_node(dt_node, node)
		pdata->nr++;

	/*
	 * No heaps defined in the devicetree. However, there may be other
	 * heaps (e.g. system heaps) that do not need to be defined in the
	 * devicetree.
	 */
	if (!pdata->nr)
		goto out;

	pdata->heaps = kcalloc(pdata->nr, sizeof(*pdata->heaps), GFP_KERNEL);
	if (!pdata->heaps) {
		kfree(pdata);
		return ERR_PTR(-ENOMEM);
	}

	for_each_available_child_of_node(dt_node, node) {
		new_dev = of_platform_device_create(node, NULL, &pdev->dev);
		if (!new_dev) {
			pr_err("Failed to create device %s\n", node->name);
			ret = -EINVAL;
			goto free_heaps;
		}
		of_dma_configure(&new_dev->dev, node, true);

		pdata->heaps[idx].dev = &new_dev->dev;
		ret = populate_heap(node, &pdata->heaps[idx]);
		if (ret)
			goto free_heaps;
		/* Drop the memory-region reference on both paths */
		mem_node = of_parse_phandle(node, "memory-region", 0);
		if (mem_node) {
			ret = heap_dt_init(mem_node, &pdata->heaps[idx]);
			of_node_put(mem_node);
			if (ret)
				goto free_heaps;
		}
		++idx;
	}

out:
	return pdata;

free_heaps:
	of_node_put(node);
	release_reserved_memory_regions(pdata->heaps, idx);
	free_pdata(pdata);
	return ERR_PTR(ret);
}
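
/*
 * Usage sketch (not part of this file): a probe function would
 * typically call parse_heap_dt() and hand each populated entry to a
 * heap-specific setup routine. example_add_heap() is hypothetical;
 * only parse_heap_dt() and free_pdata() come from this file.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct platform_data *pdata;
 *		int i;
 *
 *		pdata = parse_heap_dt(pdev);
 *		if (IS_ERR(pdata))
 *			return PTR_ERR(pdata);
 *
 *		for (i = 0; i < pdata->nr; i++)
 *			example_add_heap(&pdata->heaps[i]);
 *
 *		free_pdata(pdata);
 *		return 0;
 *	}
 */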