secure_buffer.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Google, Inc
 * Copyright (c) 2011-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/qcom_scm.h>
#include <soc/qcom/secure_buffer.h>

#define CREATE_TRACE_POINTS
#include "trace_secure_buffer.h"

#include <linux/stackdepot.h>

#define BATCH_MAX_SIZE SZ_2M
#define BATCH_MAX_SECTIONS 32

static struct device *qcom_secure_buffer_dev;
static bool vmid_cp_camera_preview_ro;

struct hyp_assign_debug_track {
	depot_stack_handle_t hdl;
	int vmids[10];
	int perms[10];
	int nr_acl_entries;
	u32 refcount;
};

#if IS_ENABLED(CONFIG_HYP_ASSIGN_DEBUG)
/*
 * Contains a pointer to struct hyp_assign_debug_track for each pfn which
 * is in an assigned state.
 */
static DEFINE_XARRAY(xa_pfns);
static DEFINE_MUTEX(xarray_lock);
static depot_stack_handle_t failure_handle;

#define HYP_ASSIGN_STACK_DEPTH (16)
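
/*
 * Capture a placeholder stack trace at init time; its depot handle is used
 * as a fallback when stack_depot_save() fails in alloc_debug_tracking().
 */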
static depot_stack_handle_t create_dummy_stack(void)
{
	unsigned long entries[4];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	return stack_depot_save(entries, nr_entries, GFP_KERNEL);
}
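
/*
 * Report a reclaim of unassigned memory or a double assignment: print the
 * offending pfn, the stack that owns the current assignment (if any) and
 * its VMID/permission list, then BUG().
 */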
static void hyp_assign_show_err(const char *msg, unsigned long pfn,
				struct hyp_assign_debug_track *track)
{
	int i;
	unsigned long *stack_entries;
	unsigned int nr_stack_entries;

	pr_err("HYP_ASSIGN_DEBUG: %s pfn=0x%lx\n", msg, pfn);
	if (!track)
		goto out;

	pr_err("currently assigned to:\n");
	nr_stack_entries = stack_depot_fetch(track->hdl, &stack_entries);
	stack_trace_print(stack_entries, nr_stack_entries, 0);

	for (i = 0; i < track->nr_acl_entries; i++) {
		pr_err("VMID: %d %s%s%s\n",
		       track->vmids[i],
		       track->perms[i] & PERM_READ ? "R" : " ",
		       track->perms[i] & PERM_WRITE ? "W" : " ",
		       track->perms[i] & PERM_EXEC ? "X" : " ");
	}
out:
	BUG();
}
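
/*
 * Allocate a tracking record holding the caller's stack trace and the
 * destination VMID/permission list, with an initial refcount of one.
 */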
static struct hyp_assign_debug_track *
alloc_debug_tracking(int *dst_vmids, int *dst_perms, int dest_nelems)
{
	unsigned long stack_entries[HYP_ASSIGN_STACK_DEPTH];
	u32 nr_stack_entries;
	struct hyp_assign_debug_track *track;
	u32 nr_acl_entries;

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (!track)
		return NULL;

	nr_acl_entries = min_t(u32, dest_nelems, ARRAY_SIZE(track->vmids));
	track->nr_acl_entries = nr_acl_entries;
	memcpy(track->vmids, dst_vmids, nr_acl_entries * sizeof(*dst_vmids));
	memcpy(track->perms, dst_perms, nr_acl_entries * sizeof(*dst_perms));

	nr_stack_entries = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 2);
	track->hdl = stack_depot_save(stack_entries, nr_stack_entries, GFP_KERNEL);
	if (!track->hdl)
		track->hdl = failure_handle;

	track->refcount = 1;
	return track;
}

/* caller holds xarray_lock */
static void get_track(struct hyp_assign_debug_track *track)
{
	track->refcount++;
}

/* caller holds xarray_lock */
static void put_track(struct hyp_assign_debug_track *track)
{
	if (!track)
		return;

	track->refcount--;
	if (!track->refcount)
		kfree(track);
}
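
/*
 * An assignment whose only destination is HLOS with full RWX permissions is
 * treated as a reclaim of the memory.
 */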
static bool is_reclaim(struct qcom_scm_current_perm_info *newvms, size_t newvms_sz)
{
	int vmid;
	int perm;

	vmid = le32_to_cpu(newvms->vmid);
	perm = le32_to_cpu(newvms->perm);

	return (newvms_sz == sizeof(*newvms)) &&
	       (vmid == VMID_HLOS) &&
	       (perm == (PERM_READ | PERM_WRITE | PERM_EXEC));
}
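
/*
 * Sanity-check the request against the per-pfn xarray: a reclaim must only
 * cover pfns that are currently assigned, and a fresh assignment must not
 * cover pfns that already are.
 */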
static void check_debug_tracking(struct qcom_scm_mem_map_info *mem_regions,
				 size_t mem_regions_sz, u32 *srcvms,
				 size_t src_sz,
				 struct qcom_scm_current_perm_info *newvms,
				 size_t newvms_sz)
{
	struct qcom_scm_mem_map_info *p, *mem_regions_end;
	unsigned long pfn;
	bool reclaim = is_reclaim(newvms, newvms_sz);
	struct hyp_assign_debug_track *track;

	mem_regions_end = mem_regions + mem_regions_sz / sizeof(*mem_regions);

	mutex_lock(&xarray_lock);
	for (p = mem_regions; p < mem_regions_end; p++) {
		unsigned long start_pfn;
		unsigned long nr_pages;

		start_pfn = PHYS_PFN(le64_to_cpu(p->mem_addr));
		nr_pages = le64_to_cpu(p->mem_size) >> PAGE_SHIFT;

		for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
			track = xa_load(&xa_pfns, pfn);

			if (reclaim && !track) {
				hyp_assign_show_err("PFN not assigned",
						    pfn, NULL);
				break;
			} else if (!reclaim && track) {
				hyp_assign_show_err("PFN already assigned",
						    pfn, track);
				break;
			}
		}
	}
	mutex_unlock(&xarray_lock);
}
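
/*
 * After a successful assignment, drop the tracking entry for each reclaimed
 * pfn or store @new (taking one reference per pfn) for newly assigned ones.
 */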
static void update_debug_tracking(struct qcom_scm_mem_map_info *mem_regions,
				  size_t mem_regions_sz, u32 *srcvms,
				  size_t src_sz,
				  struct qcom_scm_current_perm_info *newvms,
				  size_t newvms_sz,
				  struct hyp_assign_debug_track *new)
{
	struct qcom_scm_mem_map_info *p, *mem_regions_end;
	unsigned long pfn;
	bool reclaim = is_reclaim(newvms, newvms_sz);
	struct hyp_assign_debug_track *track;

	mem_regions_end = mem_regions + mem_regions_sz / sizeof(*mem_regions);

	mutex_lock(&xarray_lock);
	for (p = mem_regions; p < mem_regions_end; p++) {
		unsigned long start_pfn;
		unsigned long nr_pages;

		start_pfn = PHYS_PFN(le64_to_cpu(p->mem_addr));
		nr_pages = le64_to_cpu(p->mem_size) >> PAGE_SHIFT;

		for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
			if (reclaim) {
				track = xa_erase(&xa_pfns, pfn);
				put_track(track);
			} else {
				get_track(new);
				xa_store(&xa_pfns, pfn, new, GFP_KERNEL);
			}
		}
	}
	mutex_unlock(&xarray_lock);
}

#else /* CONFIG_HYP_ASSIGN_DEBUG */
static struct hyp_assign_debug_track *
alloc_debug_tracking(int *dst_vmids, int *dst_perms, int dest_nelems)
{
	return NULL;
}

static void put_track(struct hyp_assign_debug_track *track)
{
}

static void check_debug_tracking(struct qcom_scm_mem_map_info *mem_regions,
				 size_t mem_regions_sz, u32 *srcvms,
				 size_t src_sz,
				 struct qcom_scm_current_perm_info *newvms,
				 size_t newvms_sz)
{
}

static void update_debug_tracking(struct qcom_scm_mem_map_info *mem_regions,
				  size_t mem_regions_sz, u32 *srcvms,
				  size_t src_sz,
				  struct qcom_scm_current_perm_info *newvms,
				  size_t newvms_sz,
				  struct hyp_assign_debug_track *new)
{
}
#endif /* CONFIG_HYP_ASSIGN_DEBUG */
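
/*
 * Build the destination VMID/permission array in the format expected by the
 * SCM call; the caller maps it for DMA and frees it.
 */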
static struct qcom_scm_current_perm_info *
populate_dest_info(int *dest_vmids, int nelements, int *dest_perms,
		   size_t *size_in_bytes)
{
	struct qcom_scm_current_perm_info *dest_info;
	int i;
	size_t size;

	/* Ensure allocated size is less than PAGE_ALLOC_COSTLY_ORDER */
	size = nelements * sizeof(*dest_info);
	if (size > PAGE_SIZE)
		return NULL;

	dest_info = kzalloc(size, GFP_KERNEL);
	if (!dest_info)
		return NULL;

	for (i = 0; i < nelements; i++)
		qcom_scm_populate_vmperm_info(&dest_info[i], dest_vmids[i],
					      dest_perms[i]);

	*size_in_bytes = size;
	return dest_info;
}
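
/*
 * Copy scatterlist entries into @sgt_copy until the batch reaches
 * BATCH_MAX_SECTIONS entries or BATCH_MAX_SIZE bytes, returning the number
 * of entries filled in and the next unprocessed entry in @next_sgl.
 */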
static unsigned int get_batches_from_sgl(struct qcom_scm_mem_map_info *sgt_copy,
					 struct scatterlist *sgl,
					 struct scatterlist **next_sgl)
{
	u64 batch_size = 0;
	unsigned int i = 0;
	struct scatterlist *curr_sgl = sgl;

	/* Ensure no zero size batches */
	do {
		qcom_scm_populate_mem_map_info(&sgt_copy[i],
					       page_to_phys(sg_page(curr_sgl)),
					       curr_sgl->length);
		batch_size += curr_sgl->length;
		curr_sgl = sg_next(curr_sgl);
		i++;
	} while (curr_sgl && i < BATCH_MAX_SECTIONS &&
		 curr_sgl->length + batch_size < BATCH_MAX_SIZE);

	*next_sgl = curr_sgl;
	return i;
}
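
/*
 * Walk the scatterlist in batches, mapping each batch for DMA and issuing
 * one qcom_scm_assign_mem_regions() call per batch. Any failure is reported
 * as -EADDRNOTAVAIL since the state of the memory is no longer known.
 */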
static int batched_hyp_assign(struct sg_table *table, u32 *source_vmids,
			      size_t source_size,
			      struct qcom_scm_current_perm_info *destvms,
			      size_t destvms_size,
			      struct hyp_assign_debug_track *track)
{
	unsigned int batch_start = 0;
	unsigned int batches_processed;
	unsigned int i = 0;
	u64 total_delta;
	struct scatterlist *curr_sgl = table->sgl;
	struct scatterlist *next_sgl;
	int ret = 0;
	ktime_t batch_assign_start_ts;
	ktime_t first_assign_ts;
	struct qcom_scm_mem_map_info *mem_regions_buf =
		kcalloc(BATCH_MAX_SECTIONS, sizeof(*mem_regions_buf),
			GFP_KERNEL);
	dma_addr_t entries_dma_addr;
	size_t mem_regions_buf_size;

	if (!mem_regions_buf)
		return -ENOMEM;

	first_assign_ts = ktime_get();
	while (batch_start < table->nents) {
		batches_processed = get_batches_from_sgl(mem_regions_buf,
							 curr_sgl, &next_sgl);
		curr_sgl = next_sgl;
		mem_regions_buf_size = batches_processed *
				       sizeof(*mem_regions_buf);
		entries_dma_addr = dma_map_single(qcom_secure_buffer_dev,
						  mem_regions_buf,
						  mem_regions_buf_size,
						  DMA_TO_DEVICE);
		if (dma_mapping_error(qcom_secure_buffer_dev,
				      entries_dma_addr)) {
			ret = -EADDRNOTAVAIL;
			break;
		}

		check_debug_tracking(mem_regions_buf, mem_regions_buf_size,
				     source_vmids, source_size,
				     destvms, destvms_size);

		trace_hyp_assign_batch_start(mem_regions_buf,
					     batches_processed);
		batch_assign_start_ts = ktime_get();
		ret = qcom_scm_assign_mem_regions(mem_regions_buf,
						  mem_regions_buf_size,
						  source_vmids, source_size,
						  destvms, destvms_size);
		trace_hyp_assign_batch_end(ret, ktime_us_delta(ktime_get(),
					   batch_assign_start_ts));
		dma_unmap_single(qcom_secure_buffer_dev, entries_dma_addr,
				 mem_regions_buf_size, DMA_TO_DEVICE);
		i++;

		if (ret) {
			pr_info("%s: Failed to assign memory protection, ret = %d\n",
				__func__, ret);
			/*
			 * Make it clear to clients that the memory may no
			 * longer be in a usable state.
			 */
			ret = -EADDRNOTAVAIL;
			break;
		}

		update_debug_tracking(mem_regions_buf, mem_regions_buf_size,
				      source_vmids, source_size,
				      destvms, destvms_size, track);
		batch_start += batches_processed;
	}
	total_delta = ktime_us_delta(ktime_get(), first_assign_ts);
	trace_hyp_assign_end(total_delta, div64_u64(total_delta, i));
	kfree(mem_regions_buf);
	return ret;
}
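
/*
 * Tag page->private for every page covered by the table so that
 * page_accessible() can tell whether a page is currently HLOS-accessible.
 */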
static inline void set_each_page_of_sg(struct sg_table *table, u64 flag)
{
	struct scatterlist *sg;
	int npages;
	int i = 0;

	for_each_sg(table->sgl, sg, table->nents, i) {
		npages = sg->length / PAGE_SIZE;
		if (sg->length % PAGE_SIZE)
			npages++;
		while (npages--)
			set_page_private(nth_page(sg_page(sg), npages), flag);
	}
}

#define SECURE_PAGE_MAGIC 0xEEEEEEEE
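
/*
 * Returns 0 if the page is currently marked secure (not HLOS-accessible),
 * 1 otherwise.
 */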
int page_accessible(unsigned long pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (page->private == SECURE_PAGE_MAGIC)
		return 0;
	else
		return 1;
}

/*
 * When -EADDRNOTAVAIL is returned the memory may no longer be in
 * a usable state and should no longer be accessed by the HLOS.
 */
int hyp_assign_table(struct sg_table *table,
		     u32 *source_vm_list, int source_nelems,
		     int *dest_vmids, int *dest_perms,
		     int dest_nelems)
{
	int ret = 0;
	u32 *source_vm_copy;
	size_t source_vm_copy_size;
	struct qcom_scm_current_perm_info *dest_vm_copy;
	size_t dest_vm_copy_size;
	dma_addr_t source_dma_addr, dest_dma_addr;
	struct hyp_assign_debug_track *track;

	if (!qcom_secure_buffer_dev)
		return -EPROBE_DEFER;

	if (!table || !table->sgl || !source_vm_list || !source_nelems ||
	    !dest_vmids || !dest_perms || !dest_nelems || !table->nents)
		return -EINVAL;

	/*
	 * We can only pass cache-aligned sizes to hypervisor, so we need
	 * to kmalloc and memcpy the source_vm_list here.
	 */
	source_vm_copy_size = sizeof(*source_vm_copy) * source_nelems;
	source_vm_copy = kmemdup(source_vm_list, source_vm_copy_size,
				 GFP_KERNEL);
	if (!source_vm_copy)
		return -ENOMEM;

	source_dma_addr = dma_map_single(qcom_secure_buffer_dev, source_vm_copy,
					 source_vm_copy_size, DMA_TO_DEVICE);
	if (dma_mapping_error(qcom_secure_buffer_dev, source_dma_addr)) {
		ret = -ENOMEM;
		goto out_free_source;
	}

	dest_vm_copy = populate_dest_info(dest_vmids, dest_nelems, dest_perms,
					  &dest_vm_copy_size);
	if (!dest_vm_copy) {
		ret = -ENOMEM;
		goto out_unmap_source;
	}

	dest_dma_addr = dma_map_single(qcom_secure_buffer_dev, dest_vm_copy,
				       dest_vm_copy_size, DMA_TO_DEVICE);
	if (dma_mapping_error(qcom_secure_buffer_dev, dest_dma_addr)) {
		ret = -ENOMEM;
		goto out_free_dest;
	}

	/* Save stacktrace & hyp_assign parameters */
	track = alloc_debug_tracking(dest_vmids, dest_perms, dest_nelems);
#if IS_ENABLED(CONFIG_HYP_ASSIGN_DEBUG)
	if (!track) {
		ret = -ENOMEM;
		dma_unmap_single(qcom_secure_buffer_dev, dest_dma_addr,
				 dest_vm_copy_size, DMA_TO_DEVICE);
		goto out_free_dest;
	}
#endif /* CONFIG_HYP_ASSIGN_DEBUG */

	trace_hyp_assign_info(source_vm_list, source_nelems, dest_vmids,
			      dest_perms, dest_nelems);

	ret = batched_hyp_assign(table, source_vm_copy, source_vm_copy_size,
				 dest_vm_copy, dest_vm_copy_size, track);
	if (!ret) {
		while (dest_nelems--) {
			if (dest_vmids[dest_nelems] == VMID_HLOS)
				break;
		}
		if (dest_nelems == -1)
			set_each_page_of_sg(table, SECURE_PAGE_MAGIC);
		else
			set_each_page_of_sg(table, 0);
	}

	dma_unmap_single(qcom_secure_buffer_dev, dest_dma_addr,
			 dest_vm_copy_size, DMA_TO_DEVICE);

	/* Drop initial refcount from alloc_debug_tracking */
	put_track(track);

out_free_dest:
	kfree(dest_vm_copy);
out_unmap_source:
	dma_unmap_single(qcom_secure_buffer_dev, source_dma_addr,
			 source_vm_copy_size, DMA_TO_DEVICE);
out_free_source:
	kfree(source_vm_copy);
	return ret;
}
EXPORT_SYMBOL(hyp_assign_table);

const char *msm_secure_vmid_to_string(int secure_vmid)
{
	switch (secure_vmid) {
	case VMID_TZ:
		return "VMID_TZ";
	case VMID_HLOS:
		return "VMID_HLOS";
	case VMID_CP_TOUCH:
		return "VMID_CP_TOUCH";
	case VMID_CP_BITSTREAM:
		return "VMID_CP_BITSTREAM";
	case VMID_CP_PIXEL:
		return "VMID_CP_PIXEL";
	case VMID_CP_NON_PIXEL:
		return "VMID_CP_NON_PIXEL";
	case VMID_CP_CAMERA:
		return "VMID_CP_CAMERA";
	case VMID_HLOS_FREE:
		return "VMID_HLOS_FREE";
	case VMID_MSS_MSA:
		return "VMID_MSS_MSA";
	case VMID_MSS_NONMSA:
		return "VMID_MSS_NONMSA";
	case VMID_CP_SEC_DISPLAY:
		return "VMID_CP_SEC_DISPLAY";
	case VMID_CP_APP:
		return "VMID_CP_APP";
	case VMID_LPASS:
		return "VMID_LPASS";
	case VMID_WLAN:
		return "VMID_WLAN";
	case VMID_WLAN_CE:
		return "VMID_WLAN_CE";
	case VMID_CP_CAMERA_PREVIEW:
		return "VMID_CP_CAMERA_PREVIEW";
	case VMID_CP_SPSS_SP:
		return "VMID_CP_SPSS_SP";
	case VMID_CP_SPSS_SP_SHARED:
		return "VMID_CP_SPSS_SP_SHARED";
	case VMID_CP_SPSS_HLOS_SHARED:
		return "VMID_CP_SPSS_HLOS_SHARED";
	case VMID_ADSP_HEAP:
		return "VMID_ADSP_HEAP";
	case VMID_INVAL:
		return "VMID_INVAL";
	case VMID_NAV:
		return "VMID_NAV";
	default:
		return "Unknown VMID";
	}
}
EXPORT_SYMBOL(msm_secure_vmid_to_string);
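
/*
 * Default permissions to request when assigning memory to a VMID: read-only
 * for secure display (and camera preview when the DT marks it read-only),
 * RWX for CDSP, and RW for everything else.
 */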
u32 msm_secure_get_vmid_perms(u32 vmid)
{
	if (vmid == VMID_CP_SEC_DISPLAY || (vmid == VMID_CP_CAMERA_PREVIEW &&
					    vmid_cp_camera_preview_ro))
		return PERM_READ;
	else if (vmid == VMID_CP_CDSP)
		return PERM_READ | PERM_WRITE | PERM_EXEC;
	else
		return PERM_READ | PERM_WRITE;
}
EXPORT_SYMBOL(msm_secure_get_vmid_perms);
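
/*
 * The device probed here is cached in qcom_secure_buffer_dev and used for
 * dma_map_single() on the VMID, permission and region arrays passed to the
 * hypervisor.
 */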
static int qcom_secure_buffer_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	if (IS_ENABLED(CONFIG_ARM64)) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(64));
		if (ret)
			return ret;
	}

	qcom_secure_buffer_dev = dev;
	vmid_cp_camera_preview_ro = of_property_read_bool(dev->of_node,
					"qcom,vmid-cp-camera-preview-ro");
	return 0;
}

static const struct of_device_id qcom_secure_buffer_of_match[] = {
	{.compatible = "qcom,secure-buffer"},
	{}
};
MODULE_DEVICE_TABLE(of, qcom_secure_buffer_of_match);

static struct platform_driver qcom_secure_buffer_driver = {
	.probe = qcom_secure_buffer_probe,
	.driver = {
		.name = "qcom_secure_buffer",
		.of_match_table = qcom_secure_buffer_of_match,
	},
};

static int __init qcom_secure_buffer_init(void)
{
#if IS_ENABLED(CONFIG_HYP_ASSIGN_DEBUG)
	failure_handle = create_dummy_stack();
#endif
	return platform_driver_register(&qcom_secure_buffer_driver);
}
subsys_initcall(qcom_secure_buffer_init);

static void __exit qcom_secure_buffer_exit(void)
{
	return platform_driver_unregister(&qcom_secure_buffer_driver);
}
module_exit(qcom_secure_buffer_exit);

MODULE_LICENSE("GPL v2");