msm_cvp_buf.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/pid.h>
#include <linux/fdtable.h>
#include <linux/rcupdate.h>
#include <linux/fs.h>
#include <linux/dma-buf.h>
#include <linux/sched/task.h>

#include "msm_cvp_common.h"
#include "cvp_hfi_api.h"
#include "msm_cvp_debug.h"
#include "msm_cvp_core.h"
#include "msm_cvp_dsp.h"
#define CLEAR_USE_BITMAP(idx, inst) \
	do { \
		clear_bit(idx, &inst->dma_cache.usage_bitmap); \
		dprintk(CVP_MEM, "clear %x bit %d dma_cache bitmap 0x%llx\n", \
			hash32_ptr(inst->session), idx, \
			inst->dma_cache.usage_bitmap); \
	} while (0)

#define SET_USE_BITMAP(idx, inst) \
	do { \
		set_bit(idx, &inst->dma_cache.usage_bitmap); \
		dprintk(CVP_MEM, "Set %x bit %d dma_cache bitmap 0x%llx\n", \
			hash32_ptr(inst->session), idx, \
			inst->dma_cache.usage_bitmap); \
	} while (0)
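
/*
 * Editor's note: the usage bitmap mirrors which dma_cache.entries[] slots
 * currently back a live mapping. A set bit marks a slot as in use; the
 * authoritative liveness signal is smem->refcount, and the bit is cleared
 * once that count drops to zero. Callers hold inst->dma_cache.lock around
 * both macros.
 */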
void print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
		struct msm_cvp_smem *smem)
{
	if (!(tag & msm_cvp_debug) || !inst || !smem)
		return;

	if (smem->dma_buf) {
		dprintk(tag,
			"%s: %x : %s size %d flags %#x iova %#x idx %d ref %d",
			str, hash32_ptr(inst->session), smem->dma_buf->name,
			smem->size, smem->flags, smem->device_addr,
			smem->bitmap_index, atomic_read(&smem->refcount));
	}
}
static void print_internal_buffer(u32 tag, const char *str,
		struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf)
{
	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
		return;

	if (cbuf->smem->dma_buf) {
		dprintk(tag,
			"%s: %x : fd %d off %d %s size %d iova %#x",
			str, hash32_ptr(inst->session), cbuf->fd,
			cbuf->offset, cbuf->smem->dma_buf->name, cbuf->size,
			cbuf->smem->device_addr);
	} else {
		dprintk(tag,
			"%s: %x : idx %2d fd %d off %d size %d iova %#x",
			str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
			cbuf->offset, cbuf->size, cbuf->smem->device_addr);
	}
}
void print_cvp_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
		struct cvp_internal_buf *cbuf)
{
	if (!(tag & msm_cvp_debug) || !inst || !cbuf || !cbuf->smem)
		return;

	dprintk(tag, "%s addr: %x size %u\n", str,
		cbuf->smem->device_addr, cbuf->size);
}

void print_client_buffer(u32 tag, const char *str,
		struct msm_cvp_inst *inst, struct eva_kmd_buffer *cbuf)
{
	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
		return;

	dprintk(tag,
		"%s: %x : idx %2d fd %d off %d size %d type %d flags 0x%x\n",
		str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
		cbuf->offset, cbuf->size, cbuf->type, cbuf->flags);
}
static bool __is_buf_valid(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct cvp_hal_session *session;
	struct cvp_internal_buf *cbuf = NULL;
	bool found = false;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return false;
	}

	if (buf->fd < 0) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return false;
	}

	if (buf->offset) {
		dprintk(CVP_ERR,
			"%s: offset is deprecated, set to 0.\n",
			__func__);
		return false;
	}

	session = (struct cvp_hal_session *)inst->session;

	mutex_lock(&inst->cvpdspbufs.lock);
	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			if (cbuf->size != buf->size) {
				dprintk(CVP_ERR, "%s: buf size mismatch\n",
					__func__);
				mutex_unlock(&inst->cvpdspbufs.lock);
				return false;
			}
			found = true;
			break;
		}
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	if (found) {
		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
		return false;
	}

	return true;
}
static struct file *msm_cvp_fget(unsigned int fd, struct task_struct *task,
		fmode_t mask, unsigned int refs)
{
	struct files_struct *files = task->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu_many(file, refs))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}
static struct dma_buf *cvp_dma_buf_get(struct file *file, int fd,
		struct task_struct *task)
{
	if (file->f_op != gfa_cv.dmabuf_f_op) {
		dprintk(CVP_WARN, "fd doesn't refer to dma_buf\n");
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
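
/*
 * Editor's note (assumption): msm_cvp_fget() mirrors the kernel's __fget()
 * so the driver can look up an fd in a specific task's file table, and
 * cvp_dma_buf_get() open-codes the dma_buf_get() check by comparing f_op
 * against the dma-buf file operations cached in gfa_cv, since
 * is_dma_buf_file() is private to the dma-buf core. The file reference
 * taken above stands in for the get_dma_buf() reference.
 */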
int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
{
	int rc = 0;
	struct cvp_internal_buf *cbuf = NULL;
	struct msm_cvp_smem *smem = NULL;
	struct dma_buf *dma_buf = NULL;
	struct file *file;

	if (!__is_buf_valid(inst, buf))
		return -EINVAL;

	if (!inst->task)
		return -EINVAL;

	file = msm_cvp_fget(buf->fd, inst->task, FMODE_PATH, 1);
	if (file == NULL) {
		dprintk(CVP_WARN, "%s fail to get file from fd\n", __func__);
		return -EINVAL;
	}

	dma_buf = cvp_dma_buf_get(file, buf->fd, inst->task);
	if (dma_buf == ERR_PTR(-EINVAL)) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		rc = -EINVAL;
		goto exit;
	}

	dprintk(CVP_MEM, "dma_buf from internal %pK\n", dma_buf);

	cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
	if (!cbuf) {
		rc = -ENOMEM;
		goto exit;
	}

	smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!smem) {
		rc = -ENOMEM;
		goto exit;
	}

	smem->dma_buf = dma_buf;
	smem->bitmap_index = MAX_DMABUF_NUMS;
	dprintk(CVP_MEM, "%s: dma_buf = %pK\n", __func__, dma_buf);
	rc = msm_cvp_map_smem(inst, smem, "map dsp");
	if (rc) {
		print_client_buffer(CVP_ERR, "map failed", inst, buf);
		goto exit;
	}

	cbuf->smem = smem;
	cbuf->fd = buf->fd;
	cbuf->size = buf->size;
	cbuf->offset = buf->offset;
	cbuf->ownership = CLIENT;
	cbuf->index = buf->index;

	buf->reserved[0] = (uint32_t)smem->device_addr;

	mutex_lock(&inst->cvpdspbufs.lock);
	list_add_tail(&cbuf->list, &inst->cvpdspbufs.list);
	mutex_unlock(&inst->cvpdspbufs.lock);

	return rc;

exit:
	fput(file);
	if (smem) {
		if (smem->device_addr) {
			msm_cvp_unmap_smem(inst, smem, "unmap dsp");
			msm_cvp_smem_put_dma_buf(smem->dma_buf);
		}
		kmem_cache_free(cvp_driver->smem_cache, smem);
	}
	if (cbuf)
		kmem_cache_free(cvp_driver->buf_cache, cbuf);

	return rc;
}
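
/*
 * Editor's note (assumption about intent, inferred from the exit label
 * above): on success the file reference taken by msm_cvp_fget() is not
 * dropped here; it pins the fd for the lifetime of the DSP mapping. Only
 * the error path releases it with fput().
 */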
int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
{
	int rc = 0;
	bool found;
	struct cvp_internal_buf *cbuf;
	struct cvp_hal_session *session;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&inst->cvpdspbufs.lock);
	found = false;
	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			found = true;
			break;
		}
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	if (!found) {
		print_client_buffer(CVP_ERR, "invalid", inst, buf);
		return -EINVAL;
	}

	if (cbuf->smem->device_addr) {
		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
	}

	mutex_lock(&inst->cvpdspbufs.lock);
	list_del(&cbuf->list);
	mutex_unlock(&inst->cvpdspbufs.lock);

	kmem_cache_free(cvp_driver->smem_cache, cbuf->smem);
	kmem_cache_free(cvp_driver->buf_cache, cbuf);

	return rc;
}
void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type,
		u32 offset, u32 size)
{
	enum smem_cache_ops cache_op;

	if (msm_cvp_cacheop_disabled)
		return;

	if (!smem) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	switch (type) {
	case EVA_KMD_BUFTYPE_INPUT:
		cache_op = SMEM_CACHE_CLEAN;
		break;
	case EVA_KMD_BUFTYPE_OUTPUT:
		cache_op = SMEM_CACHE_INVALIDATE;
		break;
	default:
		cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
	}

	dprintk(CVP_MEM,
		"%s: cache operation enabled for dma_buf: %pK, cache_op: %d, offset: %d, size: %d\n",
		__func__, smem->dma_buf, cache_op, offset, size);
	msm_cvp_smem_cache_operations(smem->dma_buf, cache_op, offset, size);
}
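
/*
 * Editor's summary: input buffers (hardware reads) are cleaned so pending
 * CPU writes reach memory; output buffers (hardware writes) are
 * invalidated so the CPU does not read stale cache lines; combined or
 * unknown types fall through to the conservative clean+invalidate.
 */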
static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
		struct dma_buf *dma_buf)
{
	struct msm_cvp_smem *smem;
	int i;

	if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
		return NULL;

	mutex_lock(&inst->dma_cache.lock);
	for (i = 0; i < inst->dma_cache.nr; i++)
		if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
			SET_USE_BITMAP(i, inst);
			smem = inst->dma_cache.entries[i];
			smem->bitmap_index = i;
			atomic_inc(&smem->refcount);
			/*
			 * The cache already holds a dma_buf reference from
			 * when this entry was added, so drop the lookup's
			 * reference to avoid double counting.
			 */
			msm_cvp_smem_put_dma_buf(smem->dma_buf);
			mutex_unlock(&inst->dma_cache.lock);
			print_smem(CVP_MEM, "found", inst, smem);
			return smem;
		}
	mutex_unlock(&inst->dma_cache.lock);

	return NULL;
}
static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
		struct msm_cvp_smem *smem)
{
	unsigned int i;
	struct msm_cvp_smem *smem2;

	mutex_lock(&inst->dma_cache.lock);
	if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
		inst->dma_cache.entries[inst->dma_cache.nr] = smem;
		SET_USE_BITMAP(inst->dma_cache.nr, inst);
		smem->bitmap_index = inst->dma_cache.nr;
		inst->dma_cache.nr++;
		i = smem->bitmap_index;
	} else {
		i = find_first_zero_bit(&inst->dma_cache.usage_bitmap,
				MAX_DMABUF_NUMS);
		if (i < MAX_DMABUF_NUMS) {
			smem2 = inst->dma_cache.entries[i];
			msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
			msm_cvp_smem_put_dma_buf(smem2->dma_buf);
			kmem_cache_free(cvp_driver->smem_cache, smem2);

			inst->dma_cache.entries[i] = smem;
			smem->bitmap_index = i;
			SET_USE_BITMAP(i, inst);
		} else {
			dprintk(CVP_WARN, "%s: not enough memory\n", __func__);
			mutex_unlock(&inst->dma_cache.lock);
			return -ENOMEM;
		}
	}

	atomic_inc(&smem->refcount);
	mutex_unlock(&inst->dma_cache.lock);
	dprintk(CVP_MEM, "Add entry %d into cache\n", i);

	return 0;
}
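
/*
 * Editor's note: once the cache is full, eviction picks the first slot
 * whose usage bit is clear, i.e. an entry whose refcount has dropped to
 * zero. This is not LRU; it simply reuses the lowest-indexed idle slot.
 * -ENOMEM here only means "no idle slot": msm_cvp_session_get_smem()
 * treats it as non-fatal and keeps the mapping alive outside the cache.
 */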
static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
		struct cvp_buf_type *buf)
{
	int rc = 0, found = 1;
	struct msm_cvp_smem *smem = NULL;
	struct dma_buf *dma_buf = NULL;

	if (buf->fd < 0) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return NULL;
	}

	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
	if (!dma_buf) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return NULL;
	}

	smem = msm_cvp_session_find_smem(inst, dma_buf);
	if (!smem) {
		found = 0;
		smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
		if (!smem) {
			/* Drop the reference taken by the fd lookup above */
			msm_cvp_smem_put_dma_buf(dma_buf);
			return NULL;
		}

		smem->dma_buf = dma_buf;
		smem->bitmap_index = MAX_DMABUF_NUMS;
		rc = msm_cvp_map_smem(inst, smem, "map cpu");
		if (rc)
			goto exit;
		if (buf->size > smem->size ||
				buf->size > smem->size - buf->offset) {
			dprintk(CVP_ERR, "%s: invalid offset %d or size %d for a new entry\n",
				__func__, buf->offset, buf->size);
			goto exit2;
		}
		rc = msm_cvp_session_add_smem(inst, smem);
		if (rc && rc != -ENOMEM)
			goto exit2;
	}

	if (buf->size > smem->size || buf->size > smem->size - buf->offset) {
		dprintk(CVP_ERR, "%s: invalid offset %d or size %d\n",
			__func__, buf->offset, buf->size);
		if (found) {
			mutex_lock(&inst->dma_cache.lock);
			atomic_dec(&smem->refcount);
			mutex_unlock(&inst->dma_cache.lock);
			return NULL;
		}
		goto exit2;
	}

	return smem;

exit2:
	msm_cvp_unmap_smem(inst, smem, "unmap cpu");
exit:
	msm_cvp_smem_put_dma_buf(dma_buf);
	kmem_cache_free(cvp_driver->smem_cache, smem);
	smem = NULL;
	return smem;
}
static u32 msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
		struct cvp_buf_type *buf)
{
	u32 iova = 0;
	struct msm_cvp_smem *smem = NULL;
	struct cvp_internal_buf *pbuf;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		/* Returns an iova; 0 is the failure value, not -EINVAL */
		return 0;
	}

	pbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
	if (!pbuf)
		return 0;

	smem = msm_cvp_session_get_smem(inst, buf);
	if (!smem)
		goto exit;

	smem->flags |= SMEM_PERSIST;

	pbuf->smem = smem;
	pbuf->fd = buf->fd;
	pbuf->size = buf->size;
	pbuf->offset = buf->offset;
	pbuf->ownership = CLIENT;

	mutex_lock(&inst->persistbufs.lock);
	list_add_tail(&pbuf->list, &inst->persistbufs.list);
	mutex_unlock(&inst->persistbufs.lock);

	print_internal_buffer(CVP_MEM, "map persist", inst, pbuf);

	iova = smem->device_addr + buf->offset;

	return iova;

exit:
	kmem_cache_free(cvp_driver->buf_cache, pbuf);
	return 0;
}
u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
		struct cvp_buf_type *buf,
		struct msm_cvp_frame *frame)
{
	u32 iova = 0;
	struct msm_cvp_smem *smem = NULL;
	u32 nr;
	u32 type;

	if (!inst || !frame) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return 0;
	}

	nr = frame->nr;
	if (nr == MAX_FRAME_BUFFER_NUMS) {
		dprintk(CVP_ERR, "%s: max frame buffer reached\n", __func__);
		return 0;
	}

	smem = msm_cvp_session_get_smem(inst, buf);
	if (!smem)
		return 0;

	frame->bufs[nr].fd = buf->fd;
	frame->bufs[nr].smem = smem;
	frame->bufs[nr].size = buf->size;
	frame->bufs[nr].offset = buf->offset;

	print_internal_buffer(CVP_MEM, "map cpu", inst, &frame->bufs[nr]);

	frame->nr++;

	type = EVA_KMD_BUFTYPE_INPUT | EVA_KMD_BUFTYPE_OUTPUT;
	msm_cvp_cache_operations(smem, type, buf->offset, buf->size);

	iova = smem->device_addr + buf->offset;

	return iova;
}
static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
		struct msm_cvp_frame *frame)
{
	u32 i;
	u32 type;
	struct msm_cvp_smem *smem = NULL;
	struct cvp_internal_buf *buf;

	type = EVA_KMD_BUFTYPE_OUTPUT;

	for (i = 0; i < frame->nr; ++i) {
		buf = &frame->bufs[i];
		smem = buf->smem;
		msm_cvp_cache_operations(smem, type, buf->offset, buf->size);

		if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
			/* smem not in dmamap cache */
			msm_cvp_unmap_smem(inst, smem, "unmap cpu");
			dma_heap_buffer_free(smem->dma_buf);
			kmem_cache_free(cvp_driver->smem_cache, smem);
			buf->smem = NULL;
		} else {
			mutex_lock(&inst->dma_cache.lock);
			if (atomic_dec_and_test(&smem->refcount)) {
				CLEAR_USE_BITMAP(smem->bitmap_index, inst);
				print_smem(CVP_MEM, "Map dereference",
					inst, smem);
			}
			mutex_unlock(&inst->dma_cache.lock);
		}
	}

	kmem_cache_free(cvp_driver->frame_cache, frame);
}
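
/*
 * Editor's note: mapping a frame buffer uses INPUT|OUTPUT, which falls
 * through to clean+invalidate in msm_cvp_cache_operations() before the
 * hardware touches the buffer; unmap uses OUTPUT only, invalidating so
 * the CPU observes what the hardware wrote.
 */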
void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
{
	struct msm_cvp_frame *frame, *dummy1;
	bool found;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	ktid &= (FENCE_BIT - 1);
	dprintk(CVP_MEM, "%s: (%#x) unmap frame %llu\n",
		__func__, hash32_ptr(inst->session), ktid);

	found = false;
	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		if (frame->ktid == ktid) {
			found = true;
			list_del(&frame->list);
			break;
		}
	}
	mutex_unlock(&inst->frames.lock);

	if (found)
		msm_cvp_unmap_frame_buf(inst, frame);
	else
		dprintk(CVP_WARN, "%s frame %llu not found!\n", __func__, ktid);
}
int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	struct cvp_internal_buf *pbuf, *dummy;
	u64 ktid;
	int rc = 0;
	struct msm_cvp_smem *smem = NULL;

	if (!offset || !buf_num)
		return rc;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = cmd_hdr->client_data.kdata & (FENCE_BIT - 1);

	mutex_lock(&inst->persistbufs.lock);
	list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list, list) {
		if (pbuf->ktid == ktid && pbuf->ownership == CLIENT) {
			list_del(&pbuf->list);
			smem = pbuf->smem;

			dprintk(CVP_MEM, "unmap persist: %x %d %d %#x",
				hash32_ptr(inst->session), pbuf->fd,
				pbuf->size, smem->device_addr);

			if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
				/* smem not in dmamap cache */
				msm_cvp_unmap_smem(inst, smem, "unmap cpu");
				dma_heap_buffer_free(smem->dma_buf);
				kmem_cache_free(cvp_driver->smem_cache, smem);
				pbuf->smem = NULL;
			} else {
				mutex_lock(&inst->dma_cache.lock);
				if (atomic_dec_and_test(&smem->refcount))
					CLEAR_USE_BITMAP(smem->bitmap_index,
						inst);
				mutex_unlock(&inst->dma_cache.lock);
			}

			kmem_cache_free(cvp_driver->buf_cache, pbuf);
		}
	}
	mutex_unlock(&inst->persistbufs.lock);

	return rc;
}
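
/*
 * Editor's note: the helpers below walk buffer descriptors embedded in an
 * HFI packet. in_pkt->pkt_data[] is an array of u32 words, so each step
 * advances `offset` by sizeof(struct cvp_buf_type) >> 2, the descriptor
 * size in 32-bit words, and each descriptor's fd field is rewritten in
 * place with the device iova before the packet goes to firmware.
 */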
int msm_cvp_mark_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	struct cvp_internal_buf *pbuf, *dummy;
	u64 ktid;
	struct cvp_buf_type *buf;
	int i, rc = 0;

	if (!offset || !buf_num)
		return 0;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
	ktid &= (FENCE_BIT - 1);
	cmd_hdr->client_data.kdata = ktid;

	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		mutex_lock(&inst->persistbufs.lock);
		list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list,
				list) {
			if (pbuf->ownership == CLIENT) {
				if (pbuf->fd == buf->fd &&
						pbuf->size == buf->size)
					buf->fd = pbuf->smem->device_addr;
				rc = 1;
				break;
			}
		}
		mutex_unlock(&inst->persistbufs.lock);

		if (!rc) {
			dprintk(CVP_ERR, "%s No persist buf %d found\n",
				__func__, buf->fd);
			rc = -EFAULT;
			break;
		}
		pbuf->ktid = ktid;
		rc = 0;
	}

	return rc;
}
int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_buf_type *buf;
	int i;
	u32 iova;

	if (!offset || !buf_num)
		return 0;

	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		iova = msm_cvp_map_user_persist_buf(inst, buf);
		if (!iova) {
			dprintk(CVP_ERR,
				"%s: buf %d register failed.\n",
				__func__, i);
			return -EINVAL;
		}
		buf->fd = iova;
	}

	return 0;
}
int msm_cvp_map_frame(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_buf_type *buf;
	int i;
	u32 iova;
	u64 ktid;
	struct msm_cvp_frame *frame;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;

	if (!offset || !buf_num)
		return 0;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
	ktid &= (FENCE_BIT - 1);
	cmd_hdr->client_data.kdata = ktid;

	frame = kmem_cache_zalloc(cvp_driver->frame_cache, GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->ktid = ktid;
	frame->nr = 0;
	frame->pkt_type = cmd_hdr->packet_type;

	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		iova = msm_cvp_map_frame_buf(inst, buf, frame);
		if (!iova) {
			dprintk(CVP_ERR,
				"%s: buf %d register failed.\n",
				__func__, i);
			msm_cvp_unmap_frame_buf(inst, frame);
			return -EINVAL;
		}
		buf->fd = iova;
	}

	mutex_lock(&inst->frames.lock);
	list_add_tail(&frame->list, &inst->frames.list);
	mutex_unlock(&inst->frames.lock);
	dprintk(CVP_MEM, "%s: map frame %llu\n", __func__, ktid);

	return 0;
}
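
/*
 * Editor's note: ktid is a per-core monotonically increasing transaction
 * id stashed in the packet's client_data.kdata; masking with
 * (FENCE_BIT - 1) keeps the fence marker bit clear, which is why
 * msm_cvp_unmap_frame() applies the same mask before matching
 * frame->ktid.
 */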
int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
{
	int rc = 0, i;
	struct cvp_internal_buf *cbuf, *dummy;
	struct msm_cvp_frame *frame, *dummy1;
	struct msm_cvp_smem *smem;
	struct cvp_hal_session *session;

	session = (struct cvp_hal_session *)inst->session;

	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		list_del(&frame->list);
		msm_cvp_unmap_frame_buf(inst, frame);
	}
	mutex_unlock(&inst->frames.lock);

	mutex_lock(&inst->dma_cache.lock);
	for (i = 0; i < inst->dma_cache.nr; i++) {
		smem = inst->dma_cache.entries[i];
		if (atomic_read(&smem->refcount) == 0) {
			print_smem(CVP_MEM, "free", inst, smem);
		} else if (!(smem->flags & SMEM_PERSIST)) {
			print_smem(CVP_WARN, "in use", inst, smem);
		}
		msm_cvp_unmap_smem(inst, smem, "unmap cpu");
		msm_cvp_smem_put_dma_buf(smem->dma_buf);
		kmem_cache_free(cvp_driver->smem_cache, smem);
		inst->dma_cache.entries[i] = NULL;
	}
	mutex_unlock(&inst->dma_cache.lock);

	mutex_lock(&inst->cvpdspbufs.lock);
	list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list, list) {
		print_internal_buffer(CVP_MEM, "remove dspbufs", inst, cbuf);
		if (cbuf->ownership == CLIENT) {
			rc = cvp_dsp_deregister_buffer(hash32_ptr(session),
				cbuf->fd, cbuf->smem->dma_buf->size, cbuf->size,
				cbuf->offset, cbuf->index,
				(uint32_t)cbuf->smem->device_addr);
			if (rc)
				dprintk(CVP_ERR,
					"%s: failed dsp deregistration fd=%d rc=%d",
					__func__, cbuf->fd, rc);

			msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
			msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
		} else if (cbuf->ownership == DSP) {
			rc = cvp_dsp_fastrpc_unmap(inst->process_id, cbuf);
			if (rc)
				dprintk(CVP_ERR,
					"%s: failed to unmap buf from DSP\n",
					__func__);

			rc = cvp_release_dsp_buffers(inst, cbuf);
			if (rc)
				dprintk(CVP_ERR,
					"%s Fail to free buffer 0x%x\n",
					__func__, rc);
		}
		list_del(&cbuf->list);
		kmem_cache_free(cvp_driver->buf_cache, cbuf);
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	return rc;
}
void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst)
{
	struct cvp_internal_buf *buf;
	int i;

	if (!inst) {
		dprintk(CVP_ERR, "%s - invalid param %pK\n",
			__func__, inst);
		return;
	}

	dprintk(CVP_ERR, "active session cmd %d\n", inst->cur_cmd_type);
	dprintk(CVP_ERR,
		"---Buffer details for inst: %pK of type: %d---\n",
		inst, inst->session_type);

	mutex_lock(&inst->dma_cache.lock);
	dprintk(CVP_ERR, "dma cache:\n");
	if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
		for (i = 0; i < inst->dma_cache.nr; i++)
			print_smem(CVP_ERR, "bufdump", inst,
				inst->dma_cache.entries[i]);
	mutex_unlock(&inst->dma_cache.lock);

	mutex_lock(&inst->cvpdspbufs.lock);
	dprintk(CVP_ERR, "dsp buffer list:\n");
	list_for_each_entry(buf, &inst->cvpdspbufs.list, list)
		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
	mutex_unlock(&inst->cvpdspbufs.lock);

	mutex_lock(&inst->persistbufs.lock);
	dprintk(CVP_ERR, "persist buffer list:\n");
	list_for_each_entry(buf, &inst->persistbufs.list, list)
		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
	mutex_unlock(&inst->persistbufs.lock);
}
struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
		u32 buffer_size)
{
	struct cvp_internal_buf *buf;
	struct msm_cvp_list *buf_list;
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return NULL;
	}

	buf_list = &inst->persistbufs;

	if (!buffer_size)
		return NULL;

	/* PERSIST buffer requires secure mapping
	 * Disable and wait for hyp_assign available
	 */
	smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;

	buf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
	if (!buf) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc;
	}

	buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc_smem;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
		goto err_no_mem;
	}

	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DRIVER;

	mutex_lock(&buf_list->lock);
	list_add_tail(&buf->list, &buf_list->list);
	mutex_unlock(&buf_list->lock);

	return buf;

err_no_mem:
	kmem_cache_free(cvp_driver->smem_cache, buf->smem);
fail_kzalloc_smem:
	kmem_cache_free(cvp_driver->buf_cache, buf);
fail_kzalloc:
	return NULL;
}
int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
{
	struct msm_cvp_smem *smem;
	struct list_head *ptr, *next;
	struct cvp_internal_buf *buf;
	int rc = 0;
	struct msm_cvp_core *core;
	struct cvp_hfi_device *hdev;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	core = inst->core;
	if (!core) {
		dprintk(CVP_ERR, "Invalid core pointer = %pK\n", core);
		return -EINVAL;
	}

	hdev = core->device;
	if (!hdev) {
		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "release persist buffer!\n");

	mutex_lock(&inst->persistbufs.lock);
	/* Workaround for FW: release buffer means release all */
	if (inst->state <= MSM_CVP_CLOSE_DONE) {
		rc = call_hfi_op(hdev, session_release_buffers,
				(void *)inst->session);
		if (!rc) {
			mutex_unlock(&inst->persistbufs.lock);
			rc = wait_for_sess_signal_receipt(inst,
				HAL_SESSION_RELEASE_BUFFER_DONE);
			if (rc)
				dprintk(CVP_WARN,
					"%s: wait for signal failed, rc %d\n",
					__func__, rc);
			mutex_lock(&inst->persistbufs.lock);
		} else {
			dprintk(CVP_WARN, "Fail to send Rel prst buf\n");
		}
	}

	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
		buf = list_entry(ptr, struct cvp_internal_buf, list);
		smem = buf->smem;
		if (!smem) {
			dprintk(CVP_ERR, "%s invalid smem\n", __func__);
			mutex_unlock(&inst->persistbufs.lock);
			return -EINVAL;
		}

		list_del(&buf->list);

		if (buf->ownership == DRIVER) {
			dprintk(CVP_MEM,
				"%s: %x : fd %d %s size %d",
				"free arp", hash32_ptr(inst->session), buf->fd,
				smem->dma_buf->name, buf->size);
			msm_cvp_smem_free(smem);
			kmem_cache_free(cvp_driver->smem_cache, smem);
		}
		buf->smem = NULL;
		kmem_cache_free(cvp_driver->buf_cache, buf);
	}
	mutex_unlock(&inst->persistbufs.lock);

	return rc;
}
int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
		struct cvp_internal_buf *buf,
		u32 buffer_size,
		u32 secure_type)
{
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return -EINVAL;
	}

	if (!buf)
		return -EINVAL;

	if (!buffer_size)
		return -EINVAL;

	switch (secure_type) {
	case 0:
		break;
	case 1:
		smem_flags |= SMEM_SECURE | SMEM_PIXEL;
		break;
	case 2:
		smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
		break;
	default:
		dprintk(CVP_ERR, "%s Invalid secure_type %d\n",
			__func__, secure_type);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "%s smem_flags 0x%x\n", __func__, smem_flags);

	buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		rc = -ENOMEM;
		goto fail_kzalloc_smem_cache;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate DSP buffer memory\n");
		goto err_no_mem;
	}

	dprintk(CVP_MEM, "%s dma_buf %pK\n", __func__, buf->smem->dma_buf);
	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DSP;

	return rc;

err_no_mem:
	kmem_cache_free(cvp_driver->smem_cache, buf->smem);
fail_kzalloc_smem_cache:
	return rc;
}
int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
		struct cvp_internal_buf *buf)
{
	struct msm_cvp_smem *smem;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	if (!buf) {
		dprintk(CVP_ERR, "Invalid buffer pointer = %pK\n", buf);
		return -EINVAL;
	}

	smem = buf->smem;
	if (!smem) {
		dprintk(CVP_ERR, "%s invalid smem\n", __func__);
		return -EINVAL;
	}

	if (buf->ownership == DSP) {
		dprintk(CVP_MEM,
			"%s: %x : fd %x %s size %d",
			__func__, hash32_ptr(inst->session), buf->fd,
			smem->dma_buf->name, buf->size);
		msm_cvp_smem_free(smem);
		kmem_cache_free(cvp_driver->smem_cache, smem);
	} else {
		dprintk(CVP_ERR,
			"%s: wrong owner %d %x : fd %x %s size %d",
			__func__, buf->ownership, hash32_ptr(inst->session),
			buf->fd, smem->dma_buf->name, buf->size);
	}

	return rc;
}
int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct cvp_hfi_device *hdev;
	struct cvp_hal_session *session;
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!buf->index)
		return 0;

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = EVA_KMD_REGISTER_BUFFER;
	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	hdev = inst->core->device;
	print_client_buffer(CVP_HFI, "register", inst, buf);

	rc = msm_cvp_map_buf_dsp(inst, buf);
	dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
		buf->fd, buf->reserved[0]);
exit:
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}
int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!buf->index)
		return 0;

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = EVA_KMD_UNREGISTER_BUFFER;
	print_client_buffer(CVP_HFI, "unregister", inst, buf);

	rc = msm_cvp_unmap_buf_dsp(inst, buf);
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}