qti-smmu-proxy-tvm.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/kthread.h>
#include <linux/qcom-iommu-util.h>
#include <dt-bindings/arm/msm/qti-smmu-proxy-dt-ids.h>

#include "qti-smmu-proxy-common.h"

#define RECEIVER_COMPAT_STR "smmu-proxy-receiver"
#define CB_COMPAT_STR "smmu-proxy-cb"
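
/*
 * File-scope state (descriptive comment, added): the Gunyah message queue
 * handle used to exchange map/unmap messages with the primary VM, plus
 * per-buffer bookkeeping. Each memparcel handle maps to a
 * smmu_proxy_buffer_state entry in buffer_state_arr, recording the retrieved
 * dma-buf, whether the buffer is locked against unmapping, and the
 * attachment/sg_table for every context bank it is mapped into.
 * cb_map_counts tracks live mappings per context bank so its SMRs are
 * programmed on first map and released on last unmap.
 */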
static void *msgq_hdl;

struct smmu_proxy_buffer_cb_info {
	bool mapped;
	struct dma_buf_attachment *attachment;
	struct sg_table *sg_table;
};

struct smmu_proxy_buffer_state {
	bool locked;
	struct smmu_proxy_buffer_cb_info cb_info[QTI_SMMU_PROXY_CB_IDS_LEN];
	struct dma_buf *dmabuf;
};

static DEFINE_MUTEX(buffer_state_lock);
static DEFINE_XARRAY(buffer_state_arr);

static unsigned int cb_map_counts[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };
struct device *cb_devices[QTI_SMMU_PROXY_CB_IDS_LEN] = { 0 };

struct task_struct *receiver_msgq_handler_thread;
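
/*
 * Zero a dma-buf through its kernel vmap. Called before a newly retrieved
 * buffer is mapped and before a buffer is relinquished, presumably so stale
 * contents are not carried across owners. (Descriptive comment, added.)
 */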
static int zero_dma_buf(struct dma_buf *dmabuf)
{
	int ret;
	struct iosys_map vmap_struct = {0};

	ret = dma_buf_vmap(dmabuf, &vmap_struct);
	if (ret) {
		pr_err("%s: dma_buf_vmap() failed with %d\n", __func__, ret);
		return ret;
	}

	/* Use DMA_TO_DEVICE since we are not reading anything */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret) {
		pr_err("%s: dma_buf_begin_cpu_access() failed with %d\n", __func__, ret);
		goto unmap;
	}

	memset(vmap_struct.vaddr, 0, dmabuf->size);

	ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret)
		pr_err("%s: dma_buf_end_cpu_access() failed with %d\n", __func__, ret);

unmap:
	dma_buf_vunmap(dmabuf, &vmap_struct);
	if (ret)
		pr_err("%s: Failed to properly zero the DMA-BUF\n", __func__);

	return ret;
}
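
/*
 * Tear down every context-bank mapping for the buffer identified by @hdl,
 * releasing the SMRs of any context bank whose last mapping just went away,
 * then zero the buffer and drop the proxy's dma-buf reference. Rejected if
 * the handle is unknown or the buffer is locked via
 * smmu_proxy_ac_lock_toggle(). (Descriptive comment, added.)
 */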
static int iommu_unmap_and_relinquish(u32 hdl)
{
	int cb_id, ret = 0;
	struct smmu_proxy_buffer_state *buf_state;

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, hdl);
	if (!buf_state) {
		pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, hdl);
		ret = -EINVAL;
		goto out;
	}

	if (buf_state->locked) {
		pr_err("%s: handle 0x%x is locked!\n", __func__, hdl);
		ret = -EINVAL;
		goto out;
	}

	for (cb_id = 0; cb_id < QTI_SMMU_PROXY_CB_IDS_LEN; cb_id++) {
		if (buf_state->cb_info[cb_id].mapped) {
			dma_buf_unmap_attachment(buf_state->cb_info[cb_id].attachment,
						 buf_state->cb_info[cb_id].sg_table,
						 DMA_BIDIRECTIONAL);
			dma_buf_detach(buf_state->dmabuf,
				       buf_state->cb_info[cb_id].attachment);
			buf_state->cb_info[cb_id].mapped = false;

			/* If nothing left is mapped for this CB, unprogram its SMR */
			cb_map_counts[cb_id]--;
			if (!cb_map_counts[cb_id]) {
				ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
				if (ret) {
					pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
					       __func__, cb_id, ret);
					break;
				}
			}
		}
	}

	ret = zero_dma_buf(buf_state->dmabuf);
	if (!ret) {
		dma_buf_put(buf_state->dmabuf);
		flush_delayed_fput();
	}

	xa_erase(&buffer_state_arr, hdl);
	kfree(buf_state);

out:
	mutex_unlock(&buffer_state_lock);
	return ret;
}
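
/*
 * Handle an SMMU_PROXY_UNMAP message: perform the unmap/relinquish and send
 * an SMMU_PROXY_UNMAP_RESP carrying the result back over the message queue.
 * (Descriptive comment, added.)
 */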
static int process_unmap_request(struct smmu_proxy_unmap_req *req, size_t size)
{
	struct smmu_proxy_unmap_resp *resp;
	int ret = 0;

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		pr_err("%s: Failed to allocate memory for response\n", __func__);
		return -ENOMEM;
	}

	ret = iommu_unmap_and_relinquish(req->hdl);

	resp->hdr.msg_type = SMMU_PROXY_UNMAP_RESP;
	resp->hdr.msg_size = sizeof(*resp);
	resp->hdr.ret = ret;
	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
	if (ret < 0)
		pr_err("%s: failed to send response to unmap request rc: %d\n", __func__, ret);
	else
		pr_debug("%s: response to unmap request sent\n", __func__);

	kfree(resp);
	return ret;
}
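
/*
 * Retrieve the dma-buf backing @retrieve_arg->memparcel_hdl (or reuse the
 * cached state if the handle has been seen before), attach and map it into
 * context bank @cb_id, and program that context bank's SMRs on its first
 * mapping. Returns the single-segment sg_table on success; on failure the
 * mapping, attachment, SMR programming, and any newly created state are
 * unwound in reverse order. (Descriptive comment, added.)
 */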
static inline
struct sg_table *retrieve_and_iommu_map(struct mem_buf_retrieve_kernel_arg *retrieve_arg,
					u32 cb_id)
{
	int ret;
	struct dma_buf *dmabuf;
	bool new_buf = false;
	struct smmu_proxy_buffer_state *buf_state;
	struct dma_buf_attachment *attachment;
	struct sg_table *table;

	if (cb_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
		pr_err("%s: CB ID %d too large\n", __func__, cb_id);
		return ERR_PTR(-EINVAL);
	}

	if (!cb_devices[cb_id]) {
		pr_err("%s: CB of ID %d not defined\n", __func__, cb_id);
		return ERR_PTR(-EINVAL);
	}

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, retrieve_arg->memparcel_hdl);
	if (buf_state) {
		if (buf_state->cb_info[cb_id].mapped) {
			table = buf_state->cb_info[cb_id].sg_table;
			goto unlock;
		}
		if (buf_state->locked) {
			pr_err("%s: handle 0x%x is locked!\n", __func__,
			       retrieve_arg->memparcel_hdl);
			ret = -EINVAL;
			goto unlock_err;
		}
		dmabuf = buf_state->dmabuf;
	} else {
		new_buf = true;
		dmabuf = mem_buf_retrieve(retrieve_arg);
		if (IS_ERR(dmabuf)) {
			ret = PTR_ERR(dmabuf);
			pr_err("%s: Failed to retrieve DMA-BUF rc: %d\n", __func__, ret);
			goto unlock_err;
		}

		ret = zero_dma_buf(dmabuf);
		if (ret) {
			pr_err("%s: Failed to zero the DMA-BUF rc: %d\n", __func__, ret);
			goto free_buf;
		}

		buf_state = kzalloc(sizeof(*buf_state), GFP_KERNEL);
		if (!buf_state) {
			pr_err("%s: Unable to allocate memory for buf_state\n",
			       __func__);
			ret = -ENOMEM;
			goto free_buf;
		}

		buf_state->dmabuf = dmabuf;
	}

	attachment = dma_buf_attach(dmabuf, cb_devices[cb_id]);
	if (IS_ERR(attachment)) {
		ret = PTR_ERR(attachment);
		pr_err("%s: Failed to attach rc: %d\n", __func__, ret);
		goto free_buf_state;
	}

	table = dma_buf_map_attachment(attachment, DMA_BIDIRECTIONAL);
	if (IS_ERR(table)) {
		ret = PTR_ERR(table);
		pr_err("%s: Failed to map rc: %d\n", __func__, ret);
		goto detach;
	}

	if (table->nents != 1) {
		ret = -EINVAL;
		pr_err("%s: Buffer not mapped as one segment!\n", __func__);
		goto unmap;
	}

	buf_state->cb_info[cb_id].mapped = true;
	buf_state->cb_info[cb_id].attachment = attachment;
	buf_state->cb_info[cb_id].sg_table = table;

	if (!cb_map_counts[cb_id]) {
		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_ACQUIRE);
		if (ret) {
			pr_err("%s: Failed to program SMRs for cb_id %d rc: %d\n", __func__,
			       cb_id, ret);
			goto unmap;
		}
	}
	cb_map_counts[cb_id]++;

	ret = xa_err(xa_store(&buffer_state_arr, retrieve_arg->memparcel_hdl, buf_state,
			      GFP_KERNEL));
	if (ret < 0) {
		pr_err("%s: Failed to store new buffer in xarray rc: %d\n", __func__,
		       ret);
		goto dec_cb_map_count;
	}

unlock:
	mutex_unlock(&buffer_state_lock);
	return table;

dec_cb_map_count:
	cb_map_counts[cb_id]--;
	if (!cb_map_counts[cb_id]) {
		ret = qcom_iommu_sid_switch(cb_devices[cb_id], SID_RELEASE);
		if (ret)
			pr_err("%s: Failed to unprogram SMR for cb_id %d rc: %d\n",
			       __func__, cb_id, ret);
	}
unmap:
	dma_buf_unmap_attachment(attachment, table, DMA_BIDIRECTIONAL);
detach:
	dma_buf_detach(dmabuf, attachment);
free_buf_state:
	if (new_buf)
		kfree(buf_state);
free_buf:
	if (new_buf)
		dma_buf_put(dmabuf);
unlock_err:
	mutex_unlock(&buffer_state_lock);
	return ERR_PTR(ret);
}
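
/*
 * Handle an SMMU_PROXY_MAP message from the primary VM: validate the
 * variable-length ACL against the received message size, retrieve and map
 * the memparcel into the requested context bank, and reply with an
 * SMMU_PROXY_MAP_RESP carrying the IOVA and mapping length. If the response
 * cannot be sent, the mapping is rolled back rather than left dangling.
 * (Descriptive comment, added.)
 */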
static int process_map_request(struct smmu_proxy_map_req *req, size_t size)
{
	struct smmu_proxy_map_resp *resp;
	int ret = 0;
	u32 n_acl_entries = req->acl_desc.n_acl_entries;
	size_t map_req_len = offsetof(struct smmu_proxy_map_req,
				      acl_desc.acl_entries[n_acl_entries]);
	struct mem_buf_retrieve_kernel_arg retrieve_arg = {0};
	int i;
	struct sg_table *table;

	/*
	 * Last entry of smmu_proxy_map_req is an array of arbitrary length.
	 * Validate that the number of entries fits within the buffer given
	 * to us by the message queue.
	 */
	if (map_req_len > size) {
		pr_err("%s: Reported size of smmu_proxy_map_request (%zu bytes) greater than message size given by message queue (%zu bytes)\n",
		       __func__, map_req_len, size);
		return -EINVAL;
	}

	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		pr_err("%s: Failed to allocate memory for response\n", __func__);
		return -ENOMEM;
	}

	retrieve_arg.vmids = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.vmids), GFP_KERNEL);
	if (!retrieve_arg.vmids) {
		ret = -ENOMEM;
		goto free_resp;
	}

	retrieve_arg.perms = kmalloc_array(n_acl_entries, sizeof(*retrieve_arg.perms), GFP_KERNEL);
	if (!retrieve_arg.perms) {
		ret = -ENOMEM;
		goto free_vmids;
	}

	retrieve_arg.fd_flags = O_RDWR;
	retrieve_arg.memparcel_hdl = req->hdl;
	retrieve_arg.sender_vmid = VMID_HLOS;
	retrieve_arg.nr_acl_entries = n_acl_entries;

	for (i = 0; i < n_acl_entries; i++) {
		retrieve_arg.vmids[i] = req->acl_desc.acl_entries[i].vmid;
		retrieve_arg.perms[i] = req->acl_desc.acl_entries[i].perms;
	}

	table = retrieve_and_iommu_map(&retrieve_arg, req->cb_id);
	if (IS_ERR(table)) {
		ret = PTR_ERR(table);
		goto free_perms;
	}

	resp->hdr.msg_type = SMMU_PROXY_MAP_RESP;
	resp->hdr.msg_size = sizeof(*resp);
	resp->hdr.ret = ret;
	resp->iova = sg_dma_address(table->sgl);
	resp->mapping_len = sg_dma_len(table->sgl);
	ret = gh_msgq_send(msgq_hdl, resp, resp->hdr.msg_size, 0);
	if (ret < 0) {
		pr_err("%s: failed to send response to mapping request rc: %d\n", __func__, ret);
		iommu_unmap_and_relinquish(req->hdl);
	} else {
		pr_debug("%s: response to mapping request sent\n", __func__);
	}

free_perms:
	kfree(retrieve_arg.perms);
free_vmids:
	kfree(retrieve_arg.vmids);
free_resp:
	kfree(resp);
	return ret;
}
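
/*
 * Dispatch a received message by type. Malformed or unknown messages, and
 * requests that fail before a reply is sent, are answered with an
 * SMMU_PROXY_ERR_RESP so the sender is not left waiting.
 * (Descriptive comment, added.)
 */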
static void smmu_proxy_process_msg(void *buf, size_t size)
{
	struct smmu_proxy_msg_hdr *msg_hdr = buf;
	struct smmu_proxy_resp_hdr *resp;
	int ret = -EINVAL;

	if (size < sizeof(*msg_hdr) || msg_hdr->msg_size != size) {
		pr_err("%s: message received is not of a proper size: 0x%zx, 0x%x\n",
		       __func__, size, msg_hdr->msg_size);
		goto handle_err;
	}

	switch (msg_hdr->msg_type) {
	case SMMU_PROXY_MAP:
		ret = process_map_request(buf, size);
		break;
	case SMMU_PROXY_UNMAP:
		ret = process_unmap_request(buf, size);
		break;
	default:
		pr_err("%s: received message of unknown type: %d\n", __func__,
		       msg_hdr->msg_type);
	}

	if (!ret)
		return;

handle_err:
	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
	if (!resp) {
		pr_err("%s: Failed to allocate memory for response\n", __func__);
		return;
	}

	resp->msg_type = SMMU_PROXY_ERR_RESP;
	resp->msg_size = sizeof(*resp);
	resp->ret = ret;
	ret = gh_msgq_send(msgq_hdl, resp, resp->msg_size, 0);
	if (ret < 0)
		pr_err("%s: failed to send error response rc: %d\n", __func__, ret);
	else
		pr_debug("%s: error response sent\n", __func__);

	kfree(resp);
}
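
/*
 * Receiver kthread: block on the Gunyah message queue and feed each received
 * message to smmu_proxy_process_msg() until the thread is asked to stop.
 * (Descriptive comment, added.)
 */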
static int receiver_msgq_handler(void *msgq_hdl)
{
	void *buf;
	size_t size;
	int ret;

	buf = kzalloc(GH_MSGQ_MAX_MSG_SIZE_BYTES, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (!kthread_should_stop()) {
		ret = gh_msgq_recv(msgq_hdl, buf, GH_MSGQ_MAX_MSG_SIZE_BYTES, &size, 0);
		if (ret < 0) {
			pr_err_ratelimited("%s failed to receive message rc: %d\n", __func__, ret);
		} else {
			smmu_proxy_process_msg(buf, size);
		}
	}

	kfree(buf);
	return 0;
}
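
/*
 * Lock or unlock the buffer backing @dma_buf_fd against unmapping. While
 * locked, unmap requests for the buffer, and fresh context-bank mappings of
 * the already-known handle, are rejected with -EINVAL.
 * (Descriptive comment, added.)
 */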
static int smmu_proxy_ac_lock_toggle(int dma_buf_fd, bool lock)
{
	int ret = 0;
	struct smmu_proxy_buffer_state *buf_state;
	struct dma_buf *dmabuf;
	u32 handle;

	dmabuf = dma_buf_get(dma_buf_fd);
	if (IS_ERR(dmabuf)) {
		pr_err("%s: unable to get dma-buf from FD %d, rc: %ld\n", __func__,
		       dma_buf_fd, PTR_ERR(dmabuf));
		return PTR_ERR(dmabuf);
	}

	ret = mem_buf_dma_buf_get_memparcel_hdl(dmabuf, &handle);
	if (ret) {
		pr_err("%s: Failed to get memparcel handle rc: %d\n", __func__, ret);
		goto free_buf;
	}

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, handle);
	if (!buf_state) {
		pr_err("%s: handle 0x%x unknown to proxy driver!\n", __func__, handle);
		ret = -EINVAL;
		goto out;
	}

	if (buf_state->locked == lock) {
		pr_err("%s: handle 0x%x already %s!\n", __func__, handle,
		       lock ? "locked" : "unlocked");
		ret = -EINVAL;
		goto out;
	}

	buf_state->locked = lock;
out:
	mutex_unlock(&buffer_state_lock);
free_buf:
	dma_buf_put(dmabuf);
	return ret;
}

/*
 * Iterate over all buffers mapped to any of the context banks listed in
 * @context_bank_id_array and zero out those buffers. If zeroing any buffer
 * fails, bail out with an error and disregard the remaining buffers.
 */
int smmu_proxy_clear_all_buffers(void __user *context_bank_id_array,
				 __u32 num_cb_ids)
{
	unsigned long handle;
	struct smmu_proxy_buffer_state *buf_state;
	__u32 cb_ids[QTI_SMMU_PROXY_CB_IDS_LEN];
	int i, ret = 0;
	bool found_mapped_cb;

	/* Checking this allows us to keep cb_id_arr fixed in length */
	if (num_cb_ids > QTI_SMMU_PROXY_CB_IDS_LEN) {
		pr_err("%s: Invalid number of CB IDs: %u\n", __func__, num_cb_ids);
		return -EINVAL;
	}

	ret = copy_struct_from_user(&cb_ids, sizeof(cb_ids), context_bank_id_array,
				    sizeof(cb_ids));
	if (ret) {
		pr_err("%s: Failed to get CB IDs from user space rc %d\n", __func__, ret);
		return ret;
	}

	for (i = 0; i < num_cb_ids; i++) {
		if (cb_ids[i] >= QTI_SMMU_PROXY_CB_IDS_LEN) {
			pr_err("%s: Invalid CB ID of %u at pos %d\n", __func__, cb_ids[i], i);
			return -EINVAL;
		}
	}

	mutex_lock(&buffer_state_lock);
	xa_for_each(&buffer_state_arr, handle, buf_state) {
		found_mapped_cb = false;
		for (i = 0; i < num_cb_ids; i++) {
			if (buf_state->cb_info[cb_ids[i]].mapped) {
				found_mapped_cb = true;
				break;
			}
		}
		if (!found_mapped_cb)
			continue;

		ret = zero_dma_buf(buf_state->dmabuf);
		if (ret) {
			pr_err("%s: zero_dma_buf() failed with %d\n", __func__, ret);
			break;
		}
	}
	mutex_unlock(&buffer_state_lock);

	return ret;
}
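
/*
 * Install a new fd referencing the dma-buf that the proxy retrieved for
 * @get_dma_buf_ctl->memparcel_hdl and return it to user space through the
 * ioctl argument. (Descriptive comment, added.)
 */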
static int smmu_proxy_get_dma_buf(struct smmu_proxy_get_dma_buf_ctl *get_dma_buf_ctl)
{
	struct smmu_proxy_buffer_state *buf_state;
	int fd, ret = 0;

	mutex_lock(&buffer_state_lock);
	buf_state = xa_load(&buffer_state_arr, get_dma_buf_ctl->memparcel_hdl);
	if (!buf_state) {
		pr_err("%s: handle 0x%llx unknown to proxy driver!\n", __func__,
		       get_dma_buf_ctl->memparcel_hdl);
		ret = -EINVAL;
		goto out;
	}

	get_dma_buf(buf_state->dmabuf);
	fd = dma_buf_fd(buf_state->dmabuf, O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		pr_err("%s: Failed to install FD for dma-buf rc: %d\n", __func__,
		       ret);
		dma_buf_put(buf_state->dmabuf);
	} else {
		get_dma_buf_ctl->dma_buf_fd = fd;
	}
out:
	mutex_unlock(&buffer_state_lock);

	return ret;
}
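
/*
 * ioctl entry point for the TVM-side character device. Arguments are staged
 * through a union sized for the largest command: the user buffer is copied
 * in (and zeroed if the command has no _IOC_WRITE direction), the command is
 * dispatched, and results are copied back out for _IOC_READ commands.
 * (Descriptive comment, added.)
 */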
static long smmu_proxy_dev_ioctl(struct file *filp, unsigned int cmd,
				 unsigned long arg)
{
	unsigned int dir = _IOC_DIR(cmd);
	union smmu_proxy_ioctl_arg ioctl_arg;
	int ret;

	if (_IOC_SIZE(cmd) > sizeof(ioctl_arg))
		return -EINVAL;

	if (copy_from_user(&ioctl_arg, (void __user *)arg, _IOC_SIZE(cmd)))
		return -EFAULT;

	if (!(dir & _IOC_WRITE))
		memset(&ioctl_arg, 0, sizeof(ioctl_arg));

	switch (cmd) {
	case QTI_SMMU_PROXY_AC_LOCK_BUFFER:
	{
		struct smmu_proxy_acl_ctl *acl_ctl =
			&ioctl_arg.acl_ctl;

		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, true);
		if (ret)
			return ret;

		break;
	}
	case QTI_SMMU_PROXY_AC_UNLOCK_BUFFER:
	{
		struct smmu_proxy_acl_ctl *acl_ctl =
			&ioctl_arg.acl_ctl;

		ret = smmu_proxy_ac_lock_toggle(acl_ctl->dma_buf_fd, false);
		if (ret)
			return ret;

		break;
	}
	case QTI_SMMU_PROXY_WIPE_BUFFERS:
	{
		struct smmu_proxy_wipe_buf_ctl *wipe_buf_ctl =
			&ioctl_arg.wipe_buf_ctl;

		ret = smmu_proxy_clear_all_buffers((void *)wipe_buf_ctl->context_bank_id_array,
						   wipe_buf_ctl->num_cb_ids);
		break;
	}
	case QTI_SMMU_PROXY_GET_DMA_BUF:
	{
		ret = smmu_proxy_get_dma_buf(&ioctl_arg.get_dma_buf_ctl);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &ioctl_arg,
				 _IOC_SIZE(cmd)))
			return -EFAULT;
	}

	return 0;
}

static const struct file_operations smmu_proxy_dev_fops = {
	.unlocked_ioctl = smmu_proxy_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};
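
/*
 * Probe path for the message-queue receiver node: register the Gunyah
 * message queue, start the receiver kthread, and create the character
 * device exposing the lock/unlock/wipe/get-dma-buf ioctls.
 * (Descriptive comment, added.)
 */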
static int receiver_probe_handler(struct device *dev)
{
	int ret = 0;

	msgq_hdl = gh_msgq_register(GH_MSGQ_LABEL_SMMU_PROXY);
	if (IS_ERR(msgq_hdl)) {
		ret = PTR_ERR(msgq_hdl);
		dev_err(dev, "Queue registration failed: %ld!\n", PTR_ERR(msgq_hdl));
		return ret;
	}

	receiver_msgq_handler_thread = kthread_run(receiver_msgq_handler, msgq_hdl,
						   "smmu_proxy_msgq_handler");
	if (IS_ERR(receiver_msgq_handler_thread)) {
		ret = PTR_ERR(receiver_msgq_handler_thread);
		dev_err(dev, "Failed to launch receiver_msgq_handler thread: %ld\n",
			PTR_ERR(receiver_msgq_handler_thread));
		goto free_msgq;
	}

	ret = smmu_proxy_create_dev(&smmu_proxy_dev_fops);
	if (ret) {
		pr_err("Failed to create character device with error %d\n", ret);
		goto free_kthread;
	}

	return 0;

free_kthread:
	kthread_stop(receiver_msgq_handler_thread);
free_msgq:
	gh_msgq_unregister(msgq_hdl);
	return ret;
}

static int proxy_fault_handler(struct iommu_domain *domain, struct device *dev,
			       unsigned long iova, int flags, void *token)
{
	dev_err(dev, "Context fault with IOVA %lx and fault flags %d\n", iova, flags);

	return -EINVAL;
}
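
/*
 * Probe path for a context-bank node: read its qti,cb-id, configure the DMA
 * segment size and mask, install the fault handler on its IOMMU domain, and
 * record the device so map requests can target this context bank.
 * (Descriptive comment, added.)
 */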
static int cb_probe_handler(struct device *dev)
{
	int ret;
	unsigned int context_bank_id;
	struct iommu_domain *domain;

	ret = of_property_read_u32(dev->of_node, "qti,cb-id", &context_bank_id);
	if (ret) {
		dev_err(dev, "Failed to read qti,cb-id property for device\n");
		return -EINVAL;
	}

	if (context_bank_id >= QTI_SMMU_PROXY_CB_IDS_LEN) {
		dev_err(dev, "Invalid CB ID: %u\n", context_bank_id);
		return -EINVAL;
	}

	if (cb_devices[context_bank_id]) {
		dev_err(dev, "Context bank %u is already populated\n", context_bank_id);
		return -EINVAL;
	}

	ret = dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "Failed to set segment size\n");
		return ret;
	}

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_err(dev, "Failed to set DMA-MASK\n");
		return ret;
	}

	domain = iommu_get_domain_for_dev(dev);
	if (IS_ERR_OR_NULL(domain)) {
		dev_err(dev, "%s: Failed to get iommu domain\n", __func__);
		return -EINVAL;
	}

	iommu_set_fault_handler(domain, proxy_fault_handler, NULL);
	cb_devices[context_bank_id] = dev;

	return 0;
}

static int smmu_proxy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;

	if (of_device_is_compatible(dev->of_node, CB_COMPAT_STR)) {
		return cb_probe_handler(dev);
	} else if (of_device_is_compatible(dev->of_node, RECEIVER_COMPAT_STR)) {
		return receiver_probe_handler(dev);
	} else {
		return -EINVAL;
	}
}

static const struct of_device_id smmu_proxy_match_table[] = {
	{.compatible = RECEIVER_COMPAT_STR},
	{.compatible = CB_COMPAT_STR},
	{},
};

static struct platform_driver smmu_proxy_driver = {
	.probe = smmu_proxy_probe,
	.driver = {
		.name = "qti-smmu-proxy",
		.of_match_table = smmu_proxy_match_table,
	},
};
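
/*
 * Module init: query the CSF version and refuse to load when CSF 2.5 is not
 * in use (the arch_ver 2 / max_ver 0 combination), then register the
 * platform driver. (Descriptive comment, added.)
 */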
int __init init_smmu_proxy_driver(void)
{
	int ret;
	struct csf_version csf_version;

	ret = smmu_proxy_get_csf_version(&csf_version);
	if (ret) {
		pr_err("%s: Unable to get CSF version\n", __func__);
		return ret;
	}

	if (csf_version.arch_ver == 2 && csf_version.max_ver == 0) {
		pr_err("%s: CSF 2.5 not in use, not loading module\n", __func__);
		return -EINVAL;
	}

	return platform_driver_register(&smmu_proxy_driver);
}
module_init(init_smmu_proxy_driver);

MODULE_IMPORT_NS(DMA_BUF);
MODULE_LICENSE("GPL v2");