// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/kthread.h>
#include <linux/qcom-iommu-util.h>

#include <dt-bindings/arm/msm/qti-smmu-proxy-dt-ids.h>

#include "qti-smmu-proxy-common.h"

#define RECEIVER_COMPAT_STR "smmu-proxy-receiver"
#define CB_COMPAT_STR "smmu-proxy-cb"
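
/*
 * Global state for the proxy receiver: msgq_hdl is the Gunyah message queue
 * used to exchange map/unmap requests and responses, while buffer_state_arr
 * tracks every retrieved memparcel handle. Each smmu_proxy_buffer_state
 * records the backing DMA-BUF, whether the buffer is access-locked, and the
 * attachment/sg_table for each context bank it is currently mapped to;
 * cb_map_counts[] counts live mappings per context bank so SMRs can be
 * programmed on the first map and released on the last unmap.
 */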
static void *msgq_hdl;

struct smmu_proxy_buffer_cb_info {
	bool mapped;
	struct dma_buf_attachment *attachment;
	struct sg_table *sg_table;
};

struct smmu_proxy_buffer_state {
	bool locked;
	struct smmu_proxy_buffer_cb_info cb_info[QTI_SMMU_PROXY_CB_IDS_LEN];
	struct dma_buf *dmabuf;
};

static DEFINE_MUTEX(buffer_state_lock);
static DEFINE_XARRAY(buffer_state_arr);

static unsigned int cb_map_counts[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
struct device *cb_devices[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };

struct task_struct *receiver_msgq_handler_thread;
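
/*
 * zero_dma_buf() - CPU-map @dmabuf and overwrite its entire contents with
 * zeroes, bracketing the memset with begin/end CPU access calls so the
 * cleared contents are flushed for device access. Used before a buffer is
 * first handed to a context bank and before it is released back.
 */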
static int zero_dma_buf(struct dma_buf *dmabuf)
{
	int ret;
	struct iosys_map vmap_struct = {0};

	ret = dma_buf_vmap(dmabuf, &vmap_struct);
	if (ret) {
		pr_err("%s: dma_buf_vmap() failed with %d\n", __func__, ret);
		return ret;
	}

	/* Use DMA_TO_DEVICE since we are not reading anything */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret) {
		pr_err("%s: dma_buf_begin_cpu_access() failed with %d\n", __func__, ret);
		goto unmap;
	}

	memset(vmap_struct.vaddr, 0, dmabuf->size);

	ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret)
		pr_err("%s: dma_buf_end_cpu_access() failed with %d\n", __func__, ret);

unmap:
	dma_buf_vunmap(dmabuf, &vmap_struct);
	if (ret)
		pr_err("%s: Failed to properly zero the DMA-BUF\n", __func__);

	return ret;
}
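
/*
 * iommu_unmap_and_relinquish() - Tear down every context-bank mapping held
 * for the buffer identified by memparcel handle @hdl, unprogramming the SMRs
 * of any context bank whose map count drops to zero, then zero the buffer
 * and drop the proxy's DMA-BUF reference. Fails if the buffer is locked.
 */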
static int iommu_unmap_and_relinquish(u32 hdl)
{
	int cb_id, ret = 0;
	struct smmu_proxy_buffer_state *buf_state;

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, hdl);
	if (!buf_state) {
		pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, hdl);
		ret = -EINVAL;
		goto out;
	}

	if (buf_state->locked) {
		pr_err("%s: handle 0x%x is locked!\n", __func__, hdl);
		ret = -EINVAL;
		goto out;
	}

	for (cb_id = 0; cb_id < QTI_SMMU_PROXY_CB_IDS_LEN; cb_id++) {
		if (buf_state->cb_info[cb_id].mapped) {
			dma_buf_unmap_attachment(buf_state->cb_info[cb_id].attachment,
						 buf_state->cb_info[cb_id].sg_table,
						 DMA_BIDIRECTIONAL);
			dma_buf_detach(buf_state->dmabuf,
				       buf_state->cb_info[cb_id].attachment);
			buf_state->cb_info[cb_id].mapped = false;

			/* If nothing left is mapped for this CB, unprogram its SMR */
			cb_map_counts[cb_id]--;
			if (!cb_map_counts[cb_id]) {
				ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
				if (ret) {
					pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
					       __func__, cb_id, ret);
					break;
				}
			}
		}
	}

	ret = zero_dma_buf(buf_state->dmabuf);
	if (!ret) {
		dma_buf_put(buf_state->dmabuf);
		flush_delayed_fput();
	}

	xa_erase(&buffer_state_arr, hdl);
	kfree(buf_state);

out:
	mutex_unlock(&buffer_state_lock);
	return ret;
}
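
/*
 * process_unmap_request() - Handle an SMMU_PROXY_UNMAP message: release the
 * mappings for the handle in @req and send an SMMU_PROXY_UNMAP_RESP carrying
 * the result back over the message queue.
 */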
static int process_unmap_request(struct smmu_proxy_unmap_req *req, size_t size)
{
	struct smmu_proxy_unmap_resp *resp;
	int ret = 0;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		pr_err("%s: Failed to allocate memory for response\n", __func__);
		return -ENOMEM;
	}

	ret = iommu_unmap_and_relinquish(req->hdl);

	resp->hdr.msg_type = SMMU_PROXY_UNMAP_RESP;
	resp->hdr.msg_size = sizeof(*resp);
	resp->hdr.ret = ret;

	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
	if (ret < 0)
		pr_err("%s: failed to send response to unmapping request rc: %d\n", __func__, ret);
	else
		pr_debug("%s: response to unmapping request sent\n", __func__);

	kfree(resp);
	return ret;
}
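
/*
 * retrieve_and_iommu_map() - Retrieve the shared memparcel described by
 * @retrieve_arg as a DMA-BUF (zeroing it on first retrieval), attach and map
 * it to the context bank device for @cb_id, and record the mapping in the
 * buffer state xarray. Programs the context bank's SMRs via
 * qcom_iommu_sid_switch() when its first buffer is mapped. Returns the
 * single-segment sg_table on success or an ERR_PTR on failure.
 */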
static inline
struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retrieve_arg,
					u32 cb_id)
{
	int ret;
	struct dma_buf *dmabuf;
	bool new_buf = false;
	struct smmu_proxy_buffer_state *buf_state;
	struct dma_buf_attachment *attachment;
	struct sg_table *table;

	if (cb_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
		pr_err("%s: CB ID %d too large\n", __func__, cb_id);
		return ERR_PTR(-EINVAL);
	}

	if (!cb_devices[cb_id]) {
		pr_err("%s: CB of ID %d not defined\n", __func__, cb_id);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, retrieve_arg->memparcel_hdl);
	if (buf_state) {
		if (buf_state->cb_info[cb_id].mapped) {
			table = buf_state->cb_info[cb_id].sg_table;
			goto unlock;
		}
		if (buf_state->locked) {
			pr_err("%s: handle 0x%llx is locked!\n", __func__,
			       retrieve_arg->memparcel_hdl);
			ret = -EINVAL;
			goto unlock_err;
		}
		dmabuf = buf_state->dmabuf;
	} else {
		new_buf = true;
		dmabuf = mem_buf_retrieve(retrieve_arg);
		if (IS_ERR(dmabuf)) {
			ret = PTR_ERR(dmabuf);
			pr_err("%s: Failed to retrieve DMA-BUF rc: %d\n", __func__, ret);
			goto unlock_err;
		}

		ret = zero_dma_buf(dmabuf);
		if (ret) {
			pr_err("%s: Failed to zero the DMA-BUF rc: %d\n", __func__, ret);
			goto free_buf;
		}

		buf_state = kzalloc(sizeof(*buf_state), GFP_KERNEL);
		if (!buf_state) {
			pr_err("%s: Unable to allocate memory for buf_state\n",
			       __func__);
			ret = -ENOMEM;
			goto free_buf;
		}
		buf_state->dmabuf = dmabuf;
	}

	attachment = dma_buf_attach(dmabuf, cb_devices[cb_id]);
	if (IS_ERR(attachment)) {
		ret = PTR_ERR(attachment);
		pr_err("%s: Failed to attach rc: %d\n", __func__, ret);
		goto free_buf_state;
	}

	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
	if (IS_ERR(table)) {
		ret = PTR_ERR(table);
		pr_err("%s: Failed to map rc: %d\n", __func__, ret);
		goto detach;
	}

	if (table->nents != 1) {
		ret = -EINVAL;
		pr_err("%s: Buffer not mapped as one segment!\n", __func__);
		goto unmap;
	}

	buf_state->cb_info[cb_id].mapped = true;
	buf_state->cb_info[cb_id].attachment = attachment;
	buf_state->cb_info[cb_id].sg_table = table;

	if (!cb_map_counts[cb_id]) {
		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_ACQUIRE);
		if (ret) {
			pr_err("%s: Failed to program SMRs for cb_id %d rc: %d\n", __func__,
			       cb_id, ret);
			goto unmap;
		}
	}
	cb_map_counts[cb_id]++;

	ret = xa_err(xa_store(&buffer_state_arr, retrieve_arg->memparcel_hdl, buf_state,
			      GFP_KERNEL));
	if (ret < 0) {
		pr_err("%s: Failed to store new buffer in xarray rc: %d\n", __func__,
		       ret);
		goto dec_cb_map_count;
	}

unlock:
	mutex_unlock(&buffer_state_lock);
	return table;

dec_cb_map_count:
	cb_map_counts[cb_id]--;
	if (!cb_map_counts[cb_id]) {
		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
		if (ret)
			pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
			       __func__, cb_id, ret);
	}
unmap:
	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(dmabuf, attachment);
free_buf_state:
	if (new_buf)
		kfree(buf_state);
free_buf:
	if (new_buf)
		dma_buf_put(dmabuf);
unlock_err:
	mutex_unlock(&buffer_state_lock);
	return ERR_PTR(ret);
}
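
/*
 * process_map_request() - Handle an SMMU_PROXY_MAP message: validate the
 * variable-length ACL against the received message size, build the
 * mem_buf_retrieve_kernel_arg, map the buffer into the requested context
 * bank, and reply with an SMMU_PROXY_MAP_RESP containing the IOVA and
 * mapping length. The mapping is rolled back if the response cannot be sent.
 */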
static int process_map_request(struct smmu_proxy_map_req *req, size_t size)
{
	struct smmu_proxy_map_resp *resp;
	int ret = 0;
	u32 n_acl_entries = req->acl_desc.n_acl_entries;
	size_t map_req_len = offsetof(struct smmu_proxy_map_req,
				      acl_desc.acl_entries[n_acl_entries]);
	struct mem_buf_retrieve_kernel_arg retrieve_arg = {0};
	int i;
	struct sg_table *table;

	/*
	 * Last entry of smmu_proxy_map_req is an array of arbitrary length.
	 * Validate that the number of entries fits within the buffer given
	 * to us by the message queue.
	 */
	if (map_req_len > size) {
		pr_err("%s: Reported size of smmu_proxy_map_request (%zu bytes) greater than message size given by message queue (%zu bytes)\n",
		       __func__, map_req_len, size);
		return -EINVAL;
	}

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		pr_err("%s: Failed to allocate memory for response\n", __func__);
		return -ENOMEM;
	}

	retrieve_arg.vmids = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.vmids), GFP_KERNEL);
	if (!retrieve_arg.vmids) {
		ret = -ENOMEM;
		goto free_resp;
	}

	retrieve_arg.perms = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.perms), GFP_KERNEL);
	if (!retrieve_arg.perms) {
		ret = -ENOMEM;
		goto free_vmids;
	}

	retrieve_arg.memparcel_hdl = req->hdl;
	retrieve_arg.sender_vmid = VMID_HLOS;
	retrieve_arg.nr_acl_entries = n_acl_entries;

	for (i = 0; i < n_acl_entries; i++) {
		retrieve_arg.vmids[i] = req->acl_desc.acl_entries[i].vmid;
		retrieve_arg.perms[i] = req->acl_desc.acl_entries[i].perms;
	}

	table = retrieve_and_iommu_map(&retrieve_arg, req->cb_id);
	if (IS_ERR(table)) {
		ret = PTR_ERR(table);
		goto free_perms;
	}

	resp->hdr.msg_type = SMMU_PROXY_MAP_RESP;
	resp->hdr.msg_size = sizeof(*resp);
	resp->hdr.ret = ret;
	resp->iova = sg_dma_address(table->sgl);
	resp->mapping_len = sg_dma_len(table->sgl);

	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
	if (ret < 0) {
		pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
		iommu_unmap_and_relinquish(req->hdl);
	} else {
		pr_debug("%s: response to mapping request sent\n", __func__);
	}

free_perms:
	kfree(retrieve_arg.perms);
free_vmids:
	kfree(retrieve_arg.vmids);
free_resp:
	kfree(resp);
	return ret;
}
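
/*
 * smmu_proxy_process_msg() - Dispatch a single message received on the
 * message queue by type (map or unmap). On a malformed message or a handler
 * failure, send an SMMU_PROXY_ERR_RESP carrying the error code back to the
 * sender.
 */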
static void smmu_proxy_process_msg(void *buf, size_t size)
{
	struct smmu_proxy_msg_hdr *msg_hdr = buf;
	struct smmu_proxy_resp_hdr *resp;
	int ret = -EINVAL;

	if (size < sizeof(*msg_hdr) || msg_hdr->msg_size != size) {
		pr_err("%s: message received is not of a proper size: 0x%zx, 0x%lx\n",
		       __func__, size, msg_hdr->msg_size);
		goto handle_err;
	}

	switch (msg_hdr->msg_type) {
	case SMMU_PROXY_MAP:
		ret = process_map_request(buf, size);
		break;
	case SMMU_PROXY_UNMAP:
		ret = process_unmap_request(buf, size);
		break;
	default:
		pr_err("%s: received message of unknown type: %d\n", __func__,
		       msg_hdr->msg_type);
	}

	if (!ret)
		return;

handle_err:
	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		pr_err("%s: Failed to allocate memory for response\n", __func__);
		return;
	}

	resp->msg_type = SMMU_PROXY_ERR_RESP;
	resp->msg_size = sizeof(*resp);
	resp->ret = ret;

	ret = gh_msgq_send(msgq_hdl, resp, resp->msg_size, 0);
	if (ret < 0)
		pr_err("%s: failed to send error response rc: %d\n", __func__, ret);
	else
		pr_debug("%s: error response sent\n", __func__);

	kfree(resp);
}
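
/*
 * receiver_msgq_handler() - Kthread main loop: block on the message queue and
 * hand each received message to smmu_proxy_process_msg() until the thread is
 * asked to stop.
 */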
static int receiver_msgq_handler(void *msgq_hdl)
{
	void *buf;
	size_t size;
	int ret;

	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		ret = gh_msgq_recv(msgq_hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
		if (ret < 0)
			pr_err_ratelimited("%s failed to receive message rc: %d\n", __func__, ret);
		else
			smmu_proxy_process_msg(buf, size);
	}

	kfree(buf);
	return 0;
}
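
/*
 * smmu_proxy_ac_lock_toggle() - Look up the buffer backing @dma_buf_fd by its
 * memparcel handle and set its locked state to @lock. A locked buffer cannot
 * be mapped to additional context banks or unmapped until it is unlocked.
 */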
static int smmu_proxy_ac_lock_toggle(int dma_buf_fd, bool lock)
{
	int ret = 0;
	struct smmu_proxy_buffer_state *buf_state;
	struct dma_buf *dmabuf;
	u32 handle;

	dmabuf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dmabuf)) {
		pr_err("%s: unable to get dma-buf from FD %d, rc: %ld\n", __func__,
		       dma_buf_fd, PTR_ERR(dmabuf));
		return PTR_ERR(dmabuf);
	}

	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &handle);
	if (ret) {
		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
		goto free_buf;
	}

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, handle);
	if (!buf_state) {
		pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, handle);
		ret = -EINVAL;
		goto out;
	}

	if (buf_state->locked == lock) {
		pr_err("%s: handle 0x%x already %s!\n", __func__, handle,
		       lock ? "locked" : "unlocked");
		ret = -EINVAL;
		goto out;
	}

	buf_state->locked = lock;
out:
	mutex_unlock(&buffer_state_lock);
free_buf:
	dma_buf_put(dmabuf);
	return ret;
}
/*
 * Iterate over all buffers mapped to any of the context banks listed in
 * @context_bank_id_array, and zero out those buffers. If zeroing any buffer
 * fails, we bail out with an error and disregard the rest of the buffers.
 */
int smmu_proxy_clear_all_buffers(void __user *context_bank_id_array,
				 __u32 num_cb_ids)
{
	unsigned long handle;
	struct smmu_proxy_buffer_state *buf_state;
	__u32 cb_ids[QTI_SMMU_PROXY_CB_IDS_LEN];
	int i, ret = 0;
	bool found_mapped_cb;

	/* Checking this allows us to keep cb_id_arr fixed in length */
	if (num_cb_ids > QTI_SMMU_PROXY_CB_IDS_LEN) {
		pr_err("%s: Invalid number of CB IDs: %u\n", __func__, num_cb_ids);
		return -EINVAL;
	}

	ret = copy_struct_from_user(&cb_ids, sizeof(cb_ids), context_bank_id_array,
				    sizeof(cb_ids));
	if (ret) {
		pr_err("%s: Failed to get CB IDs from user space rc %d\n", __func__, ret);
		return ret;
	}

	for (i = 0; i < num_cb_ids; i++) {
		if (cb_ids[i] >= QTI_SMMU_PROXY_CB_IDS_LEN) {
			pr_err("%s: Invalid CB ID of %u at pos %d\n", __func__, cb_ids[i], i);
			return -EINVAL;
		}
	}

	mutex_lock(&buffer_state_lock);
	xa_for_each(&buffer_state_arr, handle, buf_state) {
		found_mapped_cb = false;
		for (i = 0; i < num_cb_ids; i++) {
			if (buf_state->cb_info[cb_ids[i]].mapped) {
				found_mapped_cb = true;
				break;
			}
		}
		if (!found_mapped_cb)
			continue;

		ret = zero_dma_buf(buf_state->dmabuf);
		if (ret) {
			pr_err("%s: Failed to zero buffer for handle 0x%lx rc: %d\n",
			       __func__, handle, ret);
			break;
		}
	}
	mutex_unlock(&buffer_state_lock);

	return ret;
}
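
/*
 * smmu_proxy_get_dma_buf() - Install a new file descriptor for the DMA-BUF
 * already retrieved for the memparcel handle in @get_dma_buf_ctl, taking an
 * extra reference on the buffer, and return the FD to the caller through the
 * same structure.
 */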
static int smmu_proxy_get_dma_buf(struct smmu_proxy_get_dma_buf_ctl *get_dma_buf_ctl)
{
	struct smmu_proxy_buffer_state *buf_state;
	int fd, ret = 0;

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, get_dma_buf_ctl->memparcel_hdl);
	if (!buf_state) {
		pr_err("%s: handle 0x%llx unknown to proxy driver!\n", __func__,
		       get_dma_buf_ctl->memparcel_hdl);
		ret = -EINVAL;
		goto out;
	}

	get_dma_buf(buf_state->dmabuf);
	fd = dma_buf_fd(buf_state->dmabuf, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		pr_err("%s: Failed to install FD for dma-buf rc: %d\n", __func__,
		       ret);
		dma_buf_put(buf_state->dmabuf);
	} else {
		get_dma_buf_ctl->dma_buf_fd = fd;
	}
out:
	mutex_unlock(&buffer_state_lock);

	return ret;
}
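
/*
 * smmu_proxy_dev_ioctl() - Character-device ioctl entry point. Copies the
 * argument in from user space based on the ioctl's declared size and
 * direction, dispatches to the lock/unlock, wipe, or get-DMA-BUF helpers,
 * and copies results back out for _IOC_READ commands.
 */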
static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	unsigned int dir = _IOC_DIR(cmd);
	union smmu_proxy_ioctl_arg ioctl_arg;
	int ret;

	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
		return -EINVAL;

	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	if (!(dir & _IOC_WRITE))
		memset(&ioctl_arg, 0, sizeof(ioctl_arg));

	switch (cmd) {
	case QTI_SMMU_PROXY_AC_LOCK_BUFFER:
	{
		struct smmu_proxy_acl_ctl *acl_ctl =
			&ioctl_arg.acl_ctl;

		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, true);
		if (ret)
			return ret;

		break;
	}
	case QTI_SMMU_PROXY_AC_UNLOCK_BUFFER:
	{
		struct smmu_proxy_acl_ctl *acl_ctl =
			&ioctl_arg.acl_ctl;

		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, false);
		if (ret)
			return ret;

		break;
	}
	case QTI_SMMU_PROXY_WIPE_BUFFERS:
	{
		struct smmu_proxy_wipe_buf_ctl *wipe_buf_ctl =
			&ioctl_arg.wipe_buf_ctl;

		ret = smmu_proxy_clear_all_buffers((void *)wipe_buf_ctl->context_bank_id_array,
						   wipe_buf_ctl->num_cb_ids);
		break;
	}
	case QTI_SMMU_PROXY_GET_DMA_BUF:
	{
		ret = smmu_proxy_get_dma_buf(&ioctl_arg.get_dma_buf_ctl);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &ioctl_arg,
				 _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}

static const struct file_operations smmu_proxy_dev_fops = {
	.unlocked_ioctl = smmu_proxy_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
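
/*
 * receiver_probe_handler() - Probe path for the receiver node: register the
 * SMMU-proxy message queue, start the kthread that services incoming
 * map/unmap requests, and create the character device exposing the ioctls
 * above.
 */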
static int receiver_probe_handler(struct device *dev)
{
	int ret = 0;

	msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
	if (IS_ERR(msgq_hdl)) {
		ret = PTR_ERR(msgq_hdl);
		dev_err(dev, "Queue registration failed: %d!\n", ret);
		return ret;
	}

	receiver_msgq_handler_thread = kthread_run(receiver_msgq_handler, msgq_hdl,
						   "smmu_proxy_msgq_handler");
	if (IS_ERR(receiver_msgq_handler_thread)) {
		ret = PTR_ERR(receiver_msgq_handler_thread);
		dev_err(dev, "Failed to launch receiver_msgq_handler thread: %d\n", ret);
		goto free_msgq;
	}

	ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
	if (ret) {
		pr_err("Failed to create character device with error %d\n", ret);
		goto free_kthread;
	}

	return 0;

free_kthread:
	kthread_stop(receiver_msgq_handler_thread);
free_msgq:
	gh_msgq_unregister(msgq_hdl);
	return ret;
}
static int proxy_fault_handler(struct iommu_domain *domain, struct device *dev,
			       unsigned long iova, int flags, void *token)
{
	dev_err(dev, "Context fault with IOVA %lx and fault flags %d\n", iova, flags);
	return -EINVAL;
}
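
/*
 * cb_probe_handler() - Probe path for a context bank node: read its
 * "qti,cb-id" property, configure the DMA segment size and mask, install the
 * fault handler on the device's IOMMU domain, and record the device in
 * cb_devices[] so buffers can later be mapped to it.
 */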
static int cb_probe_handler(struct device *dev)
{
	int ret;
	unsigned int context_bank_id;
	struct iommu_domain *domain;

	ret = of_property_read_u32(dev->of_node, "qti,cb-id", &context_bank_id);
	if (ret) {
		dev_err(dev, "Failed to read qti,cb-id property for device\n");
		return -EINVAL;
	}

	if (context_bank_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
		dev_err(dev, "Invalid CB ID: %u\n", context_bank_id);
		return -EINVAL;
	}

	if (cb_devices[context_bank_id]) {
		dev_err(dev, "Context bank %u is already populated\n", context_bank_id);
		return -EINVAL;
	}

	ret = dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "Failed to set segment size\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "Failed to set DMA-MASK\n");
		return ret;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(domain)) {
		dev_err(dev, "%s: Failed to get iommu domain\n", __func__);
		return -EINVAL;
	}

	iommu_set_fault_handler(domain, proxy_fault_handler, NULL);
	cb_devices[context_bank_id] = dev;

	return 0;
}
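
/*
 * smmu_proxy_probe() - Common platform probe: dispatch to the context-bank or
 * receiver probe path based on which compatible string matched.
 */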
static int smmu_proxy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (of_device_is_compatible(dev->of_node, CB_COMPAT_STR)) {
		return cb_probe_handler(dev);
	} else if (of_device_is_compatible(dev->of_node, RECEIVER_COMPAT_STR)) {
		return receiver_probe_handler(dev);
	} else {
		return -EINVAL;
	}
}

static const struct of_device_id smmu_proxy_match_table[] = {
	{.compatible = RECEIVER_COMPAT_STR},
	{.compatible = CB_COMPAT_STR},
	{},
};

static struct platform_driver smmu_proxy_driver = {
	.probe = smmu_proxy_probe,
	.driver = {
		.name = "qti-smmu-proxy",
		.of_match_table = smmu_proxy_match_table,
	},
};
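
/*
 * init_smmu_proxy_driver() - Module entry point: query the CSF version and
 * refuse to load when the reported version indicates CSF 2.5 is not in use,
 * otherwise register the platform driver for the receiver and context-bank
 * devices.
 */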
int __init init_smmu_proxy_driver(void)
{
	int ret;
	struct csf_version csf_version;

	ret = smmu_proxy_get_csf_version(&csf_version);
	if (ret) {
		pr_err("%s: Unable to get CSF version\n", __func__);
		return ret;
	}

	if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
		pr_err("%s: CSF 2.5 not in use, not loading module\n", __func__);
		return -EINVAL;
	}

	return platform_driver_register(&smmu_proxy_driver);
}
module_init(init_smmu_proxy_driver);

MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");