qcedev_smmu.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * QTI (Qualcomm Technologies, Inc.) CE device driver.
 *
 * Copyright (c) 2018-2021, The Linux Foundation. All rights reserved.
 */
#include <linux/dma-mapping.h>
#include <linux/qcom-dma-mapping.h>
#include <linux/list.h>
#include "linux/qcedev.h"
#include "qcedevi.h"
#include "qcedev_smmu.h"
#include "soc/qcom/secure_buffer.h"
#include <linux/mem-buf.h>
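
/*
 * Configure DMA parameters for the device backing a context bank:
 * allocate dev->dma_parms if it is not already present and set the
 * maximum segment size and segment boundary used for SMMU mappings.
 */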
static int qcedev_setup_context_bank(struct context_bank_info *cb,
                                     struct device *dev)
{
        if (!dev || !cb) {
                pr_err("%s err: invalid input params\n", __func__);
                return -EINVAL;
        }
        cb->dev = dev;

        if (!dev->dma_parms) {
                dev->dma_parms = devm_kzalloc(dev,
                                sizeof(*dev->dma_parms), GFP_KERNEL);
                if (!dev->dma_parms)
                        return -ENOMEM;
        }
        dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
        dma_set_seg_boundary(dev, (unsigned long)DMA_BIT_MASK(64));
        return 0;
}
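
/*
 * Parse a context bank child node: read its "label" and
 * "qcom,secure-context-bank" properties, add it to the parent
 * qcedev_control's context bank list, and set up its DMA parameters.
 */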
int qcedev_parse_context_bank(struct platform_device *pdev)
{
        struct qcedev_control *podev;
        struct context_bank_info *cb = NULL;
        struct device_node *np = NULL;
        int rc = 0;

        if (!pdev) {
                pr_err("%s err: invalid platform devices\n", __func__);
                return -EINVAL;
        }
        if (!pdev->dev.parent) {
                pr_err("%s err: failed to find a parent for %s\n",
                        __func__, dev_name(&pdev->dev));
                return -EINVAL;
        }

        podev = dev_get_drvdata(pdev->dev.parent);
        np = pdev->dev.of_node;

        cb = devm_kzalloc(&pdev->dev, sizeof(*cb), GFP_KERNEL);
        if (!cb) {
                pr_err("%s ERROR = Failed to allocate cb\n", __func__);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&cb->list);
        list_add_tail(&cb->list, &podev->context_banks);

        rc = of_property_read_string(np, "label", &cb->name);
        if (rc)
                pr_debug("%s ERROR = Unable to read label\n", __func__);

        cb->is_secure = of_property_read_bool(np, "qcom,secure-context-bank");

        rc = qcedev_setup_context_bank(cb, &pdev->dev);
        if (rc) {
                pr_err("%s err: cannot setup context bank %d\n", __func__, rc);
                goto err_setup_cb;
        }

        return 0;

err_setup_cb:
        list_del(&cb->list);
        devm_kfree(&pdev->dev, cb);
        return rc;
}
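
/*
 * Allocate a memory client handle. Only ION (dma-buf fd based) buffers
 * are supported; any other memory type is rejected.
 */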
struct qcedev_mem_client *qcedev_mem_new_client(enum qcedev_mem_type mtype)
{
        struct qcedev_mem_client *mem_client = NULL;

        if (mtype != MEM_ION) {
                pr_err("%s: err: Mem type not supported\n", __func__);
                goto err;
        }

        mem_client = kzalloc(sizeof(*mem_client), GFP_KERNEL);
        if (!mem_client)
                goto err;
        mem_client->mtype = mtype;

        return mem_client;

err:
        return NULL;
}

void qcedev_mem_delete_client(struct qcedev_mem_client *mem_client)
{
        kfree(mem_client);
}
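
/*
 * An IOMMU is treated as present when at least one context bank was
 * parsed from the device tree. get_context_bank() returns the first
 * bank whose secure/non-secure attribute matches the request.
 */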
static bool is_iommu_present(struct qcedev_handle *qce_hndl)
{
        return !list_empty(&qce_hndl->cntl->context_banks);
}

static struct context_bank_info *get_context_bank(
                struct qcedev_handle *qce_hndl, bool is_secure)
{
        struct qcedev_control *podev = qce_hndl->cntl;
        struct context_bank_info *cb = NULL, *match = NULL;

        list_for_each_entry(cb, &podev->context_banks, list) {
                if (cb->is_secure == is_secure) {
                        match = cb;
                        break;
                }
        }
        return match;
}
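
/*
 * Map a dma-buf fd into the crypto engine's SMMU domain: pick the
 * matching context bank, attach and map the dma-buf, and record the
 * resulting IOVA, mapped size and mapping handles in @binfo.
 */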
static int ion_map_buffer(struct qcedev_handle *qce_hndl,
                struct qcedev_mem_client *mem_client, int fd,
                unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
        int rc = 0;
        struct dma_buf *buf = NULL;
        struct dma_buf_attachment *attach = NULL;
        struct sg_table *table = NULL;
        struct context_bank_info *cb = NULL;

        buf = dma_buf_get(fd);
        if (IS_ERR_OR_NULL(buf))
                return -EINVAL;

        if (is_iommu_present(qce_hndl)) {
                cb = get_context_bank(qce_hndl,
                                !mem_buf_dma_buf_exclusive_owner(buf));
                if (!cb) {
                        pr_err("%s: err: failed to get context bank info\n",
                                __func__);
                        rc = -EIO;
                        goto map_err;
                }

                /* Prepare a dma buf for dma on the given device */
                attach = dma_buf_attach(buf, cb->dev);
                if (IS_ERR_OR_NULL(attach)) {
                        rc = PTR_ERR(attach) ?: -ENOMEM;
                        pr_err("%s: err: failed to attach dmabuf\n", __func__);
                        goto map_err;
                }

                /* Get the scatterlist for the given attachment */
                attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
                table = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
                if (IS_ERR_OR_NULL(table)) {
                        rc = PTR_ERR(table) ?: -ENOMEM;
                        pr_err("%s: err: failed to map table\n", __func__);
                        goto map_table_err;
                }

                if (table->sgl) {
                        binfo->ion_buf.iova = sg_dma_address(table->sgl);
                        binfo->ion_buf.mapped_buf_size = sg_dma_len(table->sgl);
                        if (binfo->ion_buf.mapped_buf_size < fd_size) {
                                pr_err("%s: err: mapping failed, size mismatch\n",
                                        __func__);
                                rc = -ENOMEM;
                                goto map_sg_err;
                        }
                } else {
                        pr_err("%s: err: sg list is NULL\n", __func__);
                        rc = -ENOMEM;
                        goto map_sg_err;
                }

                binfo->ion_buf.mapping_info.dev = cb->dev;
                binfo->ion_buf.mapping_info.mapping = cb->mapping;
                binfo->ion_buf.mapping_info.table = table;
                binfo->ion_buf.mapping_info.attach = attach;
                binfo->ion_buf.mapping_info.buf = buf;
                binfo->ion_buf.ion_fd = fd;
        } else {
                pr_err("%s: err: smmu not enabled\n", __func__);
                rc = -EIO;
                goto map_err;
        }

        return 0;

map_sg_err:
        dma_buf_unmap_attachment(attach, table, DMA_BIDIRECTIONAL);
map_table_err:
        dma_buf_detach(buf, attach);
map_err:
        dma_buf_put(buf);
        return rc;
}
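
/*
 * Undo ion_map_buffer(): unmap the attachment, detach from the device
 * and drop the dma-buf reference recorded in the mapping info.
 */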
static int ion_unmap_buffer(struct qcedev_handle *qce_hndl,
                struct qcedev_reg_buf_info *binfo)
{
        struct dma_mapping_info *mapping_info = &binfo->ion_buf.mapping_info;

        if (is_iommu_present(qce_hndl)) {
                dma_buf_unmap_attachment(mapping_info->attach,
                        mapping_info->table, DMA_BIDIRECTIONAL);
                dma_buf_detach(mapping_info->buf, mapping_info->attach);
                dma_buf_put(mapping_info->buf);
        }
        return 0;
}
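
/*
 * Thin dispatchers that route map/unmap requests by memory type;
 * MEM_ION is currently the only supported type.
 */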
static int qcedev_map_buffer(struct qcedev_handle *qce_hndl,
                struct qcedev_mem_client *mem_client, int fd,
                unsigned int fd_size, struct qcedev_reg_buf_info *binfo)
{
        int rc = -1;

        switch (mem_client->mtype) {
        case MEM_ION:
                rc = ion_map_buffer(qce_hndl, mem_client, fd, fd_size, binfo);
                break;
        default:
                pr_err("%s: err: Mem type not supported\n", __func__);
                break;
        }

        if (rc)
                pr_err("%s: err: failed to map buffer\n", __func__);

        return rc;
}

static int qcedev_unmap_buffer(struct qcedev_handle *qce_hndl,
                struct qcedev_mem_client *mem_client,
                struct qcedev_reg_buf_info *binfo)
{
        int rc = -1;

        switch (mem_client->mtype) {
        case MEM_ION:
                rc = ion_unmap_buffer(qce_hndl, binfo);
                break;
        default:
                pr_err("%s: err: Mem type not supported\n", __func__);
                break;
        }

        if (rc)
                pr_err("%s: err: failed to unmap buffer\n", __func__);

        return rc;
}
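
/*
 * Map (or reuse an existing mapping for) the dma-buf behind @fd and
 * return its device virtual address, adjusted by @offset. Mappings are
 * reference counted per fd and tracked on the handle's registered
 * buffer list.
 *
 * Illustrative call sequence (hypothetical caller context, not part of
 * this file):
 *
 *      unsigned long long dev_addr;
 *      int rc = qcedev_check_and_map_buffer(qce_hndl, fd, 0, len,
 *                                           &dev_addr);
 *      if (!rc) {
 *              ... program the crypto engine with dev_addr ...
 *              qcedev_check_and_unmap_buffer(qce_hndl, fd);
 *      }
 */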
int qcedev_check_and_map_buffer(void *handle,
                int fd, unsigned int offset, unsigned int fd_size,
                unsigned long long *vaddr)
{
        bool found = false;
        struct qcedev_reg_buf_info *binfo = NULL, *temp = NULL;
        struct qcedev_mem_client *mem_client = NULL;
        struct qcedev_handle *qce_hndl = handle;
        int rc = 0;
        unsigned long mapped_size = 0;

        if (!handle || !vaddr || fd < 0 || offset >= fd_size) {
                pr_err("%s: err: invalid input arguments\n", __func__);
                return -EINVAL;
        }

        if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
                pr_err("%s: err: invalid qcedev handle\n", __func__);
                return -EINVAL;
        }

        mem_client = qce_hndl->cntl->mem_client;
        if (mem_client->mtype != MEM_ION)
                return -EPERM;

        /* Check if the buffer fd is already mapped */
        mutex_lock(&qce_hndl->registeredbufs.lock);
        list_for_each_entry(temp, &qce_hndl->registeredbufs.list, list) {
                if (temp->ion_buf.ion_fd == fd) {
                        found = true;
                        *vaddr = temp->ion_buf.iova;
                        mapped_size = temp->ion_buf.mapped_buf_size;
                        atomic_inc(&temp->ref_count);
                        break;
                }
        }
        mutex_unlock(&qce_hndl->registeredbufs.lock);

        /* If buffer fd is not mapped then create a fresh mapping */
        if (!found) {
                pr_debug("%s: info: ion fd not registered with driver\n",
                        __func__);
                binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
                if (!binfo) {
                        pr_err("%s: err: failed to allocate binfo\n",
                                __func__);
                        rc = -ENOMEM;
                        goto error;
                }
                rc = qcedev_map_buffer(qce_hndl, mem_client, fd,
                        fd_size, binfo);
                if (rc) {
                        pr_err("%s: err: failed to map fd (%d) error = %d\n",
                                __func__, fd, rc);
                        goto error;
                }

                *vaddr = binfo->ion_buf.iova;
                mapped_size = binfo->ion_buf.mapped_buf_size;
                atomic_inc(&binfo->ref_count);

                /* Add buffer mapping information to regd buffer list */
                mutex_lock(&qce_hndl->registeredbufs.lock);
                list_add_tail(&binfo->list, &qce_hndl->registeredbufs.list);
                mutex_unlock(&qce_hndl->registeredbufs.lock);
        }

        /* Make sure the offset is within the mapped range */
        if (offset >= mapped_size) {
                pr_err("%s: err: Offset (%u) exceeds mapped size(%lu) for fd: %d\n",
                        __func__, offset, mapped_size, fd);
                rc = -ERANGE;
                goto unmap;
        }

        /* return the mapped virtual address adjusted by offset */
        *vaddr += offset;

        return 0;

unmap:
        if (!found)
                qcedev_unmap_buffer(handle, mem_client, binfo);

error:
        kfree(binfo);
        return rc;
}
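
/*
 * Drop one reference on the mapping registered for @fd; when the
 * reference count reaches zero the buffer is unmapped and removed
 * from the registered buffer list.
 */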
int qcedev_check_and_unmap_buffer(void *handle, int fd)
{
        struct qcedev_reg_buf_info *binfo = NULL, *dummy = NULL;
        struct qcedev_mem_client *mem_client = NULL;
        struct qcedev_handle *qce_hndl = handle;
        bool found = false;

        if (!handle || fd < 0) {
                pr_err("%s: err: invalid input arguments\n", __func__);
                return -EINVAL;
        }

        if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
                pr_err("%s: err: invalid qcedev handle\n", __func__);
                return -EINVAL;
        }

        mem_client = qce_hndl->cntl->mem_client;
        if (mem_client->mtype != MEM_ION)
                return -EPERM;

        /* Check if the buffer fd is mapped and present in the regd list. */
        mutex_lock(&qce_hndl->registeredbufs.lock);
        list_for_each_entry_safe(binfo, dummy,
                        &qce_hndl->registeredbufs.list, list) {
                if (binfo->ion_buf.ion_fd == fd) {
                        found = true;
                        atomic_dec(&binfo->ref_count);

                        /* Unmap only if there are no more references */
                        if (atomic_read(&binfo->ref_count) == 0) {
                                qcedev_unmap_buffer(qce_hndl,
                                        mem_client, binfo);
                                list_del(&binfo->list);
                                kfree(binfo);
                        }
                        break;
                }
        }
        mutex_unlock(&qce_hndl->registeredbufs.lock);

        if (!found) {
                pr_err("%s: err: calling unmap on unknown fd %d\n",
                        __func__, fd);
                return -EINVAL;
        }

        return 0;
}
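
/*
 * Tear down every mapping left on the handle's registered buffer list,
 * regardless of reference count, and free the associated bookkeeping.
 */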
int qcedev_unmap_all_buffers(void *handle)
{
        struct qcedev_reg_buf_info *binfo = NULL;
        struct qcedev_mem_client *mem_client = NULL;
        struct qcedev_handle *qce_hndl = handle;
        struct list_head *pos;

        if (!handle) {
                pr_err("%s: err: invalid input arguments\n", __func__);
                return -EINVAL;
        }

        if (!qce_hndl->cntl || !qce_hndl->cntl->mem_client) {
                pr_err("%s: err: invalid qcedev handle\n", __func__);
                return -EINVAL;
        }

        mem_client = qce_hndl->cntl->mem_client;
        if (mem_client->mtype != MEM_ION)
                return -EPERM;

        mutex_lock(&qce_hndl->registeredbufs.lock);
        while (!list_empty(&qce_hndl->registeredbufs.list)) {
                pos = qce_hndl->registeredbufs.list.next;
                binfo = list_entry(pos, struct qcedev_reg_buf_info, list);
                if (binfo)
                        qcedev_unmap_buffer(qce_hndl, mem_client, binfo);
                list_del(pos);
                kfree(binfo);
        }
        mutex_unlock(&qce_hndl->registeredbufs.lock);

        return 0;
}