mem-buf-ids.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "mem_buf_vm: " fmt

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/xarray.h>
#include <soc/qcom/secure_buffer.h>
#include "mem-buf-dev.h"
#include "mem-buf-ids.h"

#define DEVNAME "mem_buf_vm"
#define NUM_MEM_BUF_VM_MINORS 128

static dev_t mem_buf_vm_devt;
static struct class *mem_buf_vm_class;

/*
 * VM objects have the same lifetime as this module.
 */
static DEFINE_XARRAY_ALLOC(mem_buf_vm_minors);
static DEFINE_XARRAY(mem_buf_vms);
int current_vmid;
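
/*
 * Declare a statically-defined peripheral VM. allowed_api == 0 means the
 * VM does not require the Gunyah transfer API.
 */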
#define PERIPHERAL_VM(_uname, _lname)			\
static struct mem_buf_vm vm_ ## _lname = {		\
	.name = "qcom," #_lname,			\
	.vmid = VMID_ ## _uname,			\
	.allowed_api = 0,				\
}

PERIPHERAL_VM(CP_TOUCH, cp_touch);
PERIPHERAL_VM(CP_BITSTREAM, cp_bitstream);
PERIPHERAL_VM(CP_PIXEL, cp_pixel);
PERIPHERAL_VM(CP_NON_PIXEL, cp_non_pixel);
PERIPHERAL_VM(CP_CAMERA, cp_camera);
PERIPHERAL_VM(CP_SEC_DISPLAY, cp_sec_display);
PERIPHERAL_VM(CP_SPSS_SP, cp_spss_sp);
PERIPHERAL_VM(CP_CAMERA_PREVIEW, cp_camera_preview);
PERIPHERAL_VM(CP_SPSS_SP_SHARED, cp_spss_sp_shared);
PERIPHERAL_VM(CP_SPSS_HLOS_SHARED, cp_spss_hlos_shared);
PERIPHERAL_VM(CP_CDSP, cp_cdsp);
PERIPHERAL_VM(CP_APP, cp_app);

static struct mem_buf_vm vm_trusted_vm = {
	.name = "qcom,trusted_vm",
	.vmid = VMID_TVM,
	.allowed_api = MEM_BUF_API_GUNYAH,
};

static struct mem_buf_vm vm_oemvm = {
	.name = "qcom,oemvm",
	.vmid = VMID_OEMVM,
	.allowed_api = MEM_BUF_API_GUNYAH,
};

static struct mem_buf_vm vm_hlos = {
	.name = "qcom,hlos",
	.vmid = VMID_HLOS,
	.allowed_api = 0,
};
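
/* Statically-known VMs registered by mem_buf_vm_init(). */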
struct mem_buf_vm *pdata_array[] = {
	&vm_trusted_vm,
	&vm_oemvm,
	&vm_hlos,
	&vm_cp_touch,
	&vm_cp_bitstream,
	&vm_cp_pixel,
	&vm_cp_non_pixel,
	&vm_cp_camera,
	&vm_cp_sec_display,
	&vm_cp_spss_sp,
	&vm_cp_camera_preview,
	&vm_cp_spss_sp_shared,
	&vm_cp_spss_hlos_shared,
	&vm_cp_cdsp,
	&vm_cp_app,
	NULL,
};
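
/* Return the VMID this module was configured with in mem_buf_vm_init(). */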
int mem_buf_current_vmid(void)
{
	return current_vmid;
}
EXPORT_SYMBOL(mem_buf_current_vmid);

/*
 * Opening this file takes a reference on vm->dev's kobject (see
 * chrdev_open()), so the private data cannot be freed out from under us.
 */
static int mem_buf_vm_open(struct inode *inode, struct file *file)
{
	struct mem_buf_vm *vm;

	vm = container_of(inode->i_cdev, struct mem_buf_vm, cdev);
	file->private_data = vm;
	return 0;
}

static const struct file_operations mem_buf_vm_fops = {
	.open = mem_buf_vm_open,
};
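
/* The hyp_assign interface is used only when this driver runs as HLOS. */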
bool mem_buf_vm_uses_hyp_assign(void)
{
	return current_vmid == VMID_HLOS;
}
EXPORT_SYMBOL(mem_buf_vm_uses_hyp_assign);

/*
 * Use the Gunyah API if any VM in the source or destination list requires it.
 */
int mem_buf_vm_uses_gunyah(int *vmids, unsigned int nr_acl_entries)
{
	struct mem_buf_vm *vm;
	int i;

	for (i = 0; i < nr_acl_entries; i++) {
		vm = xa_load(&mem_buf_vms, vmids[i]);
		if (!vm) {
			pr_err_ratelimited("No vm with vmid=0x%x\n", vmids[i]);
			return -EINVAL;
		}
		if (vm->allowed_api & MEM_BUF_API_GUNYAH)
			return true;
	}

	vm = xa_load(&mem_buf_vms, current_vmid);
	if (!vm) {
		pr_err_ratelimited("No vm with vmid=0x%x\n", current_vmid);
		/* xa_load() returned NULL, not an ERR_PTR, so return a real error */
		return -EINVAL;
	}
	if (vm->allowed_api & MEM_BUF_API_GUNYAH)
		return true;

	return false;
}
EXPORT_SYMBOL(mem_buf_vm_uses_gunyah);
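
/* Translate an open /dev/mem_buf_vm/<name> fd into the VMID it represents. */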
int mem_buf_fd_to_vmid(int fd)
{
	int ret = -EINVAL;
	struct mem_buf_vm *vm;
	struct file *file;

	file = fget(fd);
	if (!file)
		return -EINVAL;

	if (file->f_op != &mem_buf_vm_fops) {
		pr_err_ratelimited("Invalid vm file type\n");
		fput(file);
		return -EINVAL;
	}

	vm = file->private_data;
	ret = vm->vmid;
	fput(file);
	return ret;
}
EXPORT_SYMBOL(mem_buf_fd_to_vmid);
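
/* Device release callback: frees the mem_buf_vm that embeds the struct device. */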
static void mem_buf_vm_device_release(struct device *dev)
{
	struct mem_buf_vm *vm;

	vm = container_of(dev, struct mem_buf_vm, dev);
	kfree(vm);
}

/*
 * The caller must fill in all fields of new_vm except for cdev and dev.
 */
static int mem_buf_vm_add(struct mem_buf_vm *new_vm)
{
	struct mem_buf_vm *vm;
	struct device *dev;
	int minor, ret;
	unsigned long idx;

	xa_for_each(&mem_buf_vm_minors, idx, vm) {
		if (!strcmp(vm->name, new_vm->name)) {
			pr_err("duplicate vm %s\n", vm->name);
			ret = -EINVAL;
			goto err_duplicate;
		}
	}

	ret = xa_alloc(&mem_buf_vm_minors, &minor, new_vm,
		       XA_LIMIT(0, NUM_MEM_BUF_VM_MINORS - 1), GFP_KERNEL);
	if (ret < 0) {
		pr_err("no more minors\n");
		goto err_devt;
	}

	cdev_init(&new_vm->cdev, &mem_buf_vm_fops);
	dev = &new_vm->dev;
	device_initialize(dev);
	dev->devt = MKDEV(MAJOR(mem_buf_vm_devt), minor);
	dev->class = mem_buf_vm_class;
	dev->parent = NULL;
	dev->release = mem_buf_vm_device_release;
	dev_set_drvdata(dev, new_vm);
	dev_set_name(dev, "%s", new_vm->name);

	ret = xa_err(xa_store(&mem_buf_vms, new_vm->vmid, new_vm, GFP_KERNEL));
	if (ret)
		goto err_xa_store;

	ret = cdev_device_add(&new_vm->cdev, dev);
	if (ret) {
		pr_err("Adding cdev %s failed\n", new_vm->name);
		goto err_cdev_add;
	}
	return 0;

err_cdev_add:
	xa_erase(&mem_buf_vms, new_vm->vmid);
err_xa_store:
	xa_erase(&mem_buf_vm_minors, minor);
	put_device(dev);
err_devt:
err_duplicate:
	return ret;
}
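
/* Register a statically-defined VM by duplicating its platform data. */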
static int mem_buf_vm_add_pdata(struct mem_buf_vm *pdata)
{
	struct mem_buf_vm *vm;
	int ret;

	vm = kmemdup(pdata, sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	ret = mem_buf_vm_add(vm);
	if (ret) {
		kfree(vm);
		return ret;
	}
	return 0;
}
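
/* Register "qcom,self" as an alias for the VM this driver is running on. */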
static int mem_buf_vm_add_self(void)
{
	struct mem_buf_vm *vm, *self;
	int ret;

	vm = xa_load(&mem_buf_vms, current_vmid);
	if (!vm)
		/* xa_load() returned NULL, not an ERR_PTR, so return a real error */
		return -EINVAL;

	self = kzalloc(sizeof(*self), GFP_KERNEL);
	if (!self)
		return -ENOMEM;

	/* Create an aliased name */
	self->name = "qcom,self";
	self->vmid = vm->vmid;
	self->allowed_api = vm->allowed_api;

	ret = mem_buf_vm_add(self);
	if (ret) {
		kfree(self);
		return ret;
	}
	return 0;
}
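
/* Place all VM device nodes under /dev/mem_buf_vm/. */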
static char *mem_buf_vm_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "mem_buf_vm/%s", dev_name(dev));
}
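
/* class_for_each_device() callback: tear down the cdev and device of one VM. */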
static int mem_buf_vm_put_class_device_cb(struct device *dev, void *data)
{
	struct mem_buf_vm *vm = container_of(dev, struct mem_buf_vm, dev);

	cdev_device_del(&vm->cdev, dev);
	return 0;
}
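
/*
 * Read the local VMID from the device tree, create the mem_buf_vm class and
 * char device region, then register all statically-known VMs plus the
 * "qcom,self" alias.
 */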
int mem_buf_vm_init(struct device *dev)
{
	struct mem_buf_vm **p;
	int ret, vmid;

	ret = of_property_read_u32(dev->of_node, "qcom,vmid", &vmid);
	if (ret) {
		dev_err(dev, "missing qcom,vmid property\n");
		return ret;
	}
	current_vmid = vmid;

	ret = alloc_chrdev_region(&mem_buf_vm_devt, 0, NUM_MEM_BUF_VM_MINORS,
				  DEVNAME);
	if (ret)
		return ret;

	mem_buf_vm_class = class_create(THIS_MODULE, DEVNAME);
	if (IS_ERR(mem_buf_vm_class)) {
		ret = PTR_ERR(mem_buf_vm_class);
		goto err_class_create;
	}
	mem_buf_vm_class->devnode = mem_buf_vm_devnode;

	for (p = pdata_array; *p; p++) {
		ret = mem_buf_vm_add_pdata(*p);
		if (ret)
			goto err_pdata;
	}

	ret = mem_buf_vm_add_self();
	if (ret)
		goto err_self;

	return 0;

err_self:
err_pdata:
	xa_destroy(&mem_buf_vms);
	xa_destroy(&mem_buf_vm_minors);
	class_for_each_device(mem_buf_vm_class, NULL, NULL,
			      mem_buf_vm_put_class_device_cb);
	class_destroy(mem_buf_vm_class);
err_class_create:
	unregister_chrdev_region(mem_buf_vm_devt, NUM_MEM_BUF_VM_MINORS);
	return ret;
}
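
/* Undo mem_buf_vm_init(): remove every VM device, then release the class and chrdev region. */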
void mem_buf_vm_exit(void)
{
	xa_destroy(&mem_buf_vms);
	xa_destroy(&mem_buf_vm_minors);
	class_for_each_device(mem_buf_vm_class, NULL, NULL,
			      mem_buf_vm_put_class_device_cb);
	class_destroy(mem_buf_vm_class);
	unregister_chrdev_region(mem_buf_vm_devt, NUM_MEM_BUF_VM_MINORS);
}