mem-buf-dev.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/dma-mapping.h>
#include <linux/of_reserved_mem.h>
#include <soc/qcom/secure_buffer.h>
#include <linux/mem-buf.h>

#include "mem-buf-dev.h"
#include "mem-buf-ids.h"

/* Platform device backing the driver; set at probe, cleared at remove */
struct device *mem_buf_dev;
EXPORT_SYMBOL(mem_buf_dev);

/* One of MEM_BUF_CAP_SUPPLIER, MEM_BUF_CAP_CONSUMER, or MEM_BUF_CAP_DUAL */
unsigned char mem_buf_capability;
EXPORT_SYMBOL(mem_buf_capability);
int mem_buf_hyp_assign_table(struct sg_table *sgt, u32 *src_vmid, int source_nelems,
			     int *dest_vmids, int *dest_perms, int dest_nelems)
{
	char *verb;
	int ret;

	if (!mem_buf_vm_uses_hyp_assign())
		return 0;

	verb = *src_vmid == current_vmid ? "Assign" : "Unassign";

	pr_debug("%s memory to target VMIDs\n", verb);
	ret = hyp_assign_table(sgt, src_vmid, source_nelems, dest_vmids,
			       dest_perms, dest_nelems);
	if (ret < 0)
		pr_err("Failed to %s memory for rmt allocation rc: %d\n", verb, ret);
	else
		pr_debug("Memory %s to target VMIDs\n", verb);

	return ret;
}
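/*
 * Direction note (editorial comment, inferred from the code above): when
 * *src_vmid equals current_vmid the wrapper is donating memory away from
 * the local VM ("Assign"); any other source VMID means the memory is being
 * taken back ("Unassign"). Both directions are the same hyp_assign_table()
 * call with the VMID lists swapping roles, as the two callers below show.
 */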
int mem_buf_assign_mem(u32 op, struct sg_table *sgt,
		       struct mem_buf_lend_kernel_arg *arg)
{
	int src_vmid[] = {current_vmid};
	int src_perms[] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int ret, ret2;

	if (!sgt || !arg->nr_acl_entries || !arg->vmids || !arg->perms)
		return -EINVAL;

	ret = mem_buf_hyp_assign_table(sgt, src_vmid, ARRAY_SIZE(src_vmid),
				       arg->vmids, arg->perms, arg->nr_acl_entries);
	if (ret)
		return ret;

	ret = mem_buf_assign_mem_gunyah(op, sgt, arg);
	if (ret) {
		/* Gunyah step failed; hand the memory back to the local VM */
		ret2 = mem_buf_hyp_assign_table(sgt, arg->vmids, arg->nr_acl_entries,
						src_vmid, src_perms,
						ARRAY_SIZE(src_vmid));
		if (ret2 < 0) {
			pr_err("hyp_assign failed while recovering from another error: %d\n",
			       ret2);
			return -EADDRNOTAVAIL;
		}
	}

	return ret;
}
EXPORT_SYMBOL(mem_buf_assign_mem);
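/*
 * Hedged usage sketch (not part of the original driver): lending the pages
 * behind an sg_table to a single remote VM. MEM_BUF_OP_LEND and VMID_REMOTE
 * are illustrative placeholders; only the nr_acl_entries/vmids/perms fields
 * and the PERM_* flags are taken from this file.
 *
 *	int vmids[] = { VMID_REMOTE };
 *	int perms[] = { PERM_READ | PERM_WRITE };
 *	struct mem_buf_lend_kernel_arg arg = {
 *		.nr_acl_entries = ARRAY_SIZE(vmids),
 *		.vmids = vmids,
 *		.perms = perms,
 *	};
 *	int ret = mem_buf_assign_mem(MEM_BUF_OP_LEND, sgt, &arg);
 *
 * On success the remote VM holds the access rights; on failure the helper
 * has already attempted to hyp-assign the memory back to the local VM.
 */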
int mem_buf_unassign_mem(struct sg_table *sgt, int *src_vmids,
			 unsigned int nr_acl_entries,
			 gh_memparcel_handle_t memparcel_hdl)
{
	int dst_vmid[] = {current_vmid};
	int dst_perm[] = {PERM_READ | PERM_WRITE | PERM_EXEC};
	int ret;

	if (!sgt || !src_vmids || !nr_acl_entries)
		return -EINVAL;

	if (memparcel_hdl != MEM_BUF_MEMPARCEL_INVALID) {
		ret = mem_buf_unassign_mem_gunyah(memparcel_hdl);
		if (ret)
			return ret;
	}

	ret = mem_buf_hyp_assign_table(sgt, src_vmids, nr_acl_entries,
				       dst_vmid, dst_perm, ARRAY_SIZE(dst_vmid));
	return ret;
}
EXPORT_SYMBOL(mem_buf_unassign_mem);
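/*
 * Continuing the sketch above: reclaiming the memory for the local VM. A
 * real caller would pass the memparcel handle produced by the Gunyah lend
 * path; MEM_BUF_MEMPARCEL_INVALID (checked in this file) skips the Gunyah
 * release step and only performs the hyp-assign back to current_vmid.
 *
 *	ret = mem_buf_unassign_mem(sgt, vmids, ARRAY_SIZE(vmids),
 *				   MEM_BUF_MEMPARCEL_INVALID);
 */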
static int mem_buf_probe(struct platform_device *pdev)
{
	int ret, unused;
	struct device *dev = &pdev->dev;
	u64 dma_mask = IS_ENABLED(CONFIG_ARM64) ? DMA_BIT_MASK(64) :
		DMA_BIT_MASK(32);

	if (of_property_match_string(dev->of_node, "qcom,mem-buf-capabilities",
				     "supplier") >= 0)
		mem_buf_capability = MEM_BUF_CAP_SUPPLIER;
	else if (of_property_match_string(dev->of_node,
					  "qcom,mem-buf-capabilities",
					  "consumer") >= 0)
		mem_buf_capability = MEM_BUF_CAP_CONSUMER;
	else if (of_property_match_string(dev->of_node,
					  "qcom,mem-buf-capabilities",
					  "dual") >= 0)
		mem_buf_capability = MEM_BUF_CAP_DUAL;
	else
		mem_buf_capability = 0;

	ret = dma_set_mask_and_coherent(dev, dma_mask);
	if (ret) {
		dev_err(dev, "Unable to set dma mask: %d\n", ret);
		return ret;
	}

	if (of_find_property(dev->of_node, "memory-region", &unused)) {
		ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
		if (ret) {
			dev_err(dev, "Failed to get memory-region property %d\n", ret);
			return ret;
		}
	}

	ret = mem_buf_vm_init(dev);
	if (ret) {
		dev_err(dev, "mem_buf_vm_init failed %d\n", ret);
		return ret;
	}

	mem_buf_dev = dev;
	return 0;
}
static int mem_buf_remove(struct platform_device *pdev)
{
	mem_buf_dev = NULL;
	return 0;
}

static const struct of_device_id mem_buf_match_tbl[] = {
	{.compatible = "qcom,mem-buf"},
	{},
};
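/*
 * Illustrative devicetree node, inferred from the properties parsed in
 * mem_buf_probe() above (an assumption; the platform's dt-bindings are
 * authoritative). "memory-region" is optional and names the reserved
 * memory pool to attach to the device:
 *
 *	mem-buf {
 *		compatible = "qcom,mem-buf";
 *		qcom,mem-buf-capabilities = "dual";
 *		memory-region = <&mem_buf_reserved>;
 *	};
 */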
static struct platform_driver mem_buf_driver = {
	.probe = mem_buf_probe,
	.remove = mem_buf_remove,
	.driver = {
		.name = "mem-buf",
		.of_match_table = of_match_ptr(mem_buf_match_tbl),
	},
};
static int __init mem_buf_dev_init(void)
{
	return platform_driver_register(&mem_buf_driver);
}
module_init(mem_buf_dev_init);

static void __exit mem_buf_dev_exit(void)
{
	mem_buf_vm_exit();
	platform_driver_unregister(&mem_buf_driver);
}
module_exit(mem_buf_dev_exit);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Memory Buffer Sharing driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);