qcedev_smmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Qti (or) Qualcomm Technologies Inc CE device driver.
 *
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/list.h>
#include "linux/qcedev.h"
#include "qcedevi.h"
#include "qcedev_smmu.h"
#include "soc/qcom/secure_buffer.h"
#include <linux/mem-buf.h>

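/*
 * Bind a context bank to its platform device and set up its DMA
 * constraints: allocate dev->dma_parms if not already present, then
 * configure the maximum segment size and segment boundary.
 */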
static int qcedev_setup_context_bank(struct context_bank_info *cb,
		struct device *dev)
{
	if (!dev || !cb) {
		pr_err("%s err: invalid input params\n", __func__);
		return -EINVAL;
	}
	cb->dev = dev;

	if (!dev->dma_parms) {
		dev->dma_parms = devm_kzalloc(dev,
				sizeof(*dev->dma_parms), GFP_KERNEL);
		if (!dev->dma_parms)
			return -ENOMEM;
	}
	dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));

	return 0;
}

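/*
 * Parse a context bank child node: allocate a context_bank_info, add it to
 * the driver's context bank list, read the "label" and
 * "qcom,secure-context-bank" properties, and set up DMA parameters for the
 * context bank device.
 */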
int qcedev_parse_context_bank(struct platform_device *pdev)
{
	struct qcedev_control *podev;
	struct context_bank_info *cb = NULL;
	struct device_node *np = NULL;
	int rc = 0;

	if (!pdev) {
		pr_err("%s err: invalid platform devices\n", __func__);
		return -EINVAL;
	}
	if (!pdev->dev.parent) {
		pr_err("%s err: failed to find a parent for %s\n",
			__func__, dev_name(&pdev->dev));
		return -EINVAL;
	}

	podev = dev_get_drvdata(pdev->dev.parent);
	np = pdev->dev.of_node;

	cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		pr_err("%s ERROR = Failed to allocate cb\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&cb->list);
	list_add_tail(&cb->list, &podev->context_banks);

	rc = of_property_read_string(np, "label", &cb->name);
	if (rc)
		pr_debug("%s ERROR = Unable to read label\n", __func__);

	cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");

	rc = qcedev_setup_context_bank(cb, &pdev->dev);
	if (rc) {
		pr_err("%s err: cannot setup context bank %d\n", __func__, rc);
		goto err_setup_cb;
	}

	return 0;

err_setup_cb:
	list_del(&cb->list);
	devm_kfree(&pdev->dev, cb);
	return rc;
}

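/*
 * Create a memory client handle. Only MEM_ION (dma-buf backed buffers)
 * is supported.
 */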
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
{
	struct qcedev_mem_client *mem_client = NULL;

	if (mtype != MEM_ION) {
		pr_err("%s: err: Mem type not supported\n", __func__);
		goto err;
	}

	mem_client = kzalloc(sizeof(*mem_client), GFP_KERNEL);
	if (!mem_client)
		goto err;
	mem_client->mtype = mtype;

	return mem_client;
err:
	return NULL;
}

void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
{
	kfree(mem_client);
}

static bool is_iommu_present(struct qcedev_handle *qce_hndl)
{
	return !list_empty(&qce_hndl->cntl->context_banks);
}

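/*
 * Return the first context bank whose security attribute matches the
 * requested one, or NULL if no such context bank is registered.
 */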
static struct context_bank_info *get_context_bank(
		struct qcedev_handle *qce_hndl, bool is_secure)
{
	struct qcedev_control *podev = qce_hndl->cntl;
	struct context_bank_info *cb = NULL, *match = NULL;

	list_for_each_entry(cb, &podev->context_banks, list) {
		if (cb->is_secure == is_secure) {
			match = cb;
			break;
		}
	}

	return match;
}

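/*
 * Map a dma-buf fd through the matching context bank: attach the buffer to
 * the context bank device, map the attachment to obtain an IOVA, validate
 * the mapped size against the requested size, and record the mapping
 * details in binfo for later unmapping.
 */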
static int ion_map_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client, int fd,
		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
	int rc = 0;
	struct dma_buf *buf = NULL;
	struct dma_buf_attachment *attach = NULL;
	struct sg_table *table = NULL;
	struct context_bank_info *cb = NULL;

	buf = dma_buf_get(fd);
	if (IS_ERR_OR_NULL(buf))
		return -EINVAL;

	if (is_iommu_present(qce_hndl)) {
		cb = get_context_bank(qce_hndl,
				!mem_buf_dma_buf_exclusive_owner(buf));
		if (!cb) {
			pr_err("%s: err: failed to get context bank info\n",
				__func__);
			rc = -EIO;
			goto map_err;
		}

		/* Prepare a dma buf for dma on the given device */
		attach = dma_buf_attach(buf, cb->dev);
		if (IS_ERR_OR_NULL(attach)) {
			rc = PTR_ERR(attach) ?: -ENOMEM;
			pr_err("%s: err: failed to attach dmabuf\n", __func__);
			goto map_err;
		}

		/* Get the scatterlist for the given attachment */
		attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
		table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
		if (IS_ERR_OR_NULL(table)) {
			rc = PTR_ERR(table) ?: -ENOMEM;
			pr_err("%s: err: failed to map table\n", __func__);
			goto map_table_err;
		}

		if (table->sgl) {
			binfo->ion_buf.iova = sg_dma_address(table->sgl);
			binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
			if (binfo->ion_buf.mapped_buf_size < fd_size) {
				pr_err("%s: err: mapping failed, size mismatch\n",
					__func__);
				rc = -ENOMEM;
				goto map_sg_err;
			}
		} else {
			pr_err("%s: err: sg list is NULL\n", __func__);
			rc = -ENOMEM;
			goto map_sg_err;
		}

		binfo->ion_buf.mapping_info.dev = cb->dev;
		binfo->ion_buf.mapping_info.mapping = cb->mapping;
		binfo->ion_buf.mapping_info.table = table;
		binfo->ion_buf.mapping_info.attach = attach;
		binfo->ion_buf.mapping_info.buf = buf;
		binfo->ion_buf.ion_fd = fd;
	} else {
		pr_err("%s: err: smmu not enabled\n", __func__);
		rc = -EIO;
		goto map_err;
	}

	return 0;

map_sg_err:
	dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
map_table_err:
	dma_buf_detach(buf, attach);
map_err:
	dma_buf_put(buf);
	return rc;
}

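/*
 * Undo ion_map_buffer(): unmap the dma-buf attachment, detach from the
 * context bank device, and drop the dma-buf reference.
 */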
static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_reg_buf_info *binfo)
{
	struct dma_mapping_info *mapping_info = &binfo->ion_buf.mapping_info;

	if (is_iommu_present(qce_hndl)) {
		dma_buf_unmap_attachment(mapping_info->attach,
			mapping_info->table, DMA_BIDIRECTIONAL);
		dma_buf_detach(mapping_info->buf, mapping_info->attach);
		dma_buf_put(mapping_info->buf);
	}

	return 0;
}

static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client, int fd,
		unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
	int rc = -1;

	switch (mem_client->mtype) {
	case MEM_ION:
		rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
		break;
	default:
		pr_err("%s: err: Mem type not supported\n", __func__);
		break;
	}

	if (rc)
		pr_err("%s: err: failed to map buffer\n", __func__);

	return rc;
}

static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
		struct qcedev_mem_client *mem_client,
		struct qcedev_reg_buf_info *binfo)
{
	int rc = -1;

	switch (mem_client->mtype) {
	case MEM_ION:
		rc = ion_unmap_buffer(qce_hndl, binfo);
		break;
	default:
		pr_err("%s: err: Mem type not supported\n", __func__);
		break;
	}

	if (rc)
		pr_err("%s: err: failed to unmap buffer\n", __func__);

	return rc;
}

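/*
 * Look up the fd in the per-handle registered buffer list and reuse the
 * existing mapping if one is found (bumping its reference count); otherwise
 * create a new mapping and add it to the list. On success, *vaddr holds the
 * device address of the buffer adjusted by offset.
 */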
int qcedev_check_and_map_buffer(void *handle,
		int fd, unsigned int offset, unsigned int fd_size,
		unsigned long long *vaddr)
{
	bool found = false;
	struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	int rc = 0;
	unsigned long mapped_size = 0;

	if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}

	mem_client = qce_hndl->cntl->mem_client;
	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Check if the buffer fd is already mapped */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
		if (temp->ion_buf.ion_fd == fd) {
			found = true;
			*vaddr = temp->ion_buf.iova;
			mapped_size = temp->ion_buf.mapped_buf_size;
			atomic_inc(&temp->ref_count);
			break;
		}
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	/* If buffer fd is not mapped then create a fresh mapping */
	if (!found) {
		pr_debug("%s: info: ion fd not registered with driver\n",
			__func__);
		binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
		if (!binfo) {
			pr_err("%s: err: failed to allocate binfo\n",
				__func__);
			rc = -ENOMEM;
			goto error;
		}
		rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
				fd_size, binfo);
		if (rc) {
			pr_err("%s: err: failed to map fd (%d) error = %d\n",
				__func__, fd, rc);
			goto error;
		}

		*vaddr = binfo->ion_buf.iova;
		mapped_size = binfo->ion_buf.mapped_buf_size;
		atomic_inc(&binfo->ref_count);

		/* Add buffer mapping information to regd buffer list */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
	}

	/* Make sure the offset is within the mapped range */
	if (offset >= mapped_size) {
		pr_err("%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
			__func__, offset, mapped_size, fd);
		rc = -ERANGE;
		goto unmap;
	}

	/* Return the mapped virtual address adjusted by offset */
	*vaddr += offset;

	return 0;

unmap:
	if (!found) {
		/*
		 * The fresh mapping was already added to the registered
		 * buffer list; remove it before unmapping and freeing it.
		 */
		mutex_lock(&qce_hndl->registeredbufs.lock);
		list_del(&binfo->list);
		mutex_unlock(&qce_hndl->registeredbufs.lock);
		qcedev_unmap_buffer(handle, mem_client, binfo);
	}

error:
	kfree(binfo);
	return rc;
}

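/*
 * Drop one reference on the registered buffer matching fd and release the
 * mapping once the reference count reaches zero.
 */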
int qcedev_check_and_unmap_buffer(void *handle, int fd)
{
	struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	bool found = false;

	if (!handle || fd < 0) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}

	mem_client = qce_hndl->cntl->mem_client;
	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	/* Check if the buffer fd is mapped and present in the regd list. */
	mutex_lock(&qce_hndl->registeredbufs.lock);
	list_for_each_entry_safe(binfo, dummy,
			&qce_hndl->registeredbufs.list, list) {
		if (binfo->ion_buf.ion_fd == fd) {
			found = true;
			atomic_dec(&binfo->ref_count);

			/* Unmap only if there are no more references */
			if (atomic_read(&binfo->ref_count) == 0) {
				qcedev_unmap_buffer(qce_hndl,
					mem_client, binfo);
				list_del(&binfo->list);
				kfree(binfo);
			}
			break;
		}
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	if (!found) {
		pr_err("%s: err: calling unmap on unknown fd %d\n",
			__func__, fd);
		return -EINVAL;
	}

	return 0;
}

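/*
 * Release every buffer mapping still registered on this handle.
 */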
int qcedev_unmap_all_buffers(void *handle)
{
	struct qcedev_reg_buf_info *binfo = NULL;
	struct qcedev_mem_client *mem_client = NULL;
	struct qcedev_handle *qce_hndl = handle;
	struct list_head *pos;

	if (!handle) {
		pr_err("%s: err: invalid input arguments\n", __func__);
		return -EINVAL;
	}

	if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
		pr_err("%s: err: invalid qcedev handle\n", __func__);
		return -EINVAL;
	}

	mem_client = qce_hndl->cntl->mem_client;
	if (mem_client->mtype != MEM_ION)
		return -EPERM;

	mutex_lock(&qce_hndl->registeredbufs.lock);
	while (!list_empty(&qce_hndl->registeredbufs.list)) {
		pos = qce_hndl->registeredbufs.list.next;
		binfo = list_entry(pos, struct qcedev_reg_buf_info, list);
		if (binfo)
			qcedev_unmap_buffer(qce_hndl, mem_client, binfo);
		list_del(pos);
		kfree(binfo);
	}
	mutex_unlock(&qce_hndl->registeredbufs.lock);

	return 0;
}