msm_cvp_buf.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 */

#include <linux/pid.h>
#include <linux/fdtable.h>
#include <linux/rcupdate.h>
#include <linux/fs.h>
#include <linux/dma-buf.h>
#include <linux/sched/task.h>

#include "msm_cvp_common.h"
#include "cvp_hfi_api.h"
#include "msm_cvp_debug.h"
#include "msm_cvp_core.h"
#include "msm_cvp_dsp.h"

#define CLEAR_USE_BITMAP(idx, inst) \
	do { \
		clear_bit(idx, &inst->dma_cache.usage_bitmap); \
		dprintk(CVP_MEM, "clear %x bit %d dma_cache bitmap 0x%llx\n", \
			hash32_ptr(inst->session), smem->bitmap_index, \
			inst->dma_cache.usage_bitmap); \
	} while (0)

#define SET_USE_BITMAP(idx, inst) \
	do { \
		set_bit(idx, &inst->dma_cache.usage_bitmap); \
		dprintk(CVP_MEM, "Set %x bit %d dma_cache bitmap 0x%llx\n", \
			hash32_ptr(inst->session), idx, \
			inst->dma_cache.usage_bitmap); \
	} while (0)

void print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
		struct msm_cvp_smem *smem)
{
	if (!(tag & msm_cvp_debug) || !inst || !smem)
		return;

	if (smem->dma_buf) {
		dprintk(tag,
			"%s: %x : %s size %d flags %#x iova %#x idx %d ref %d",
			str, hash32_ptr(inst->session), smem->dma_buf->name,
			smem->size, smem->flags, smem->device_addr,
			smem->bitmap_index, atomic_read(&smem->refcount));
	}
}

static void print_internal_buffer(u32 tag, const char *str,
		struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf)
{
	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
		return;

	if (cbuf->smem->dma_buf) {
		dprintk(tag,
			"%s: %x : fd %d off %d %s size %d iova %#x",
			str, hash32_ptr(inst->session), cbuf->fd,
			cbuf->offset, cbuf->smem->dma_buf->name, cbuf->size,
			cbuf->smem->device_addr);
	} else {
		dprintk(tag,
			"%s: %x : idx %2d fd %d off %d size %d iova %#x",
			str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
			cbuf->offset, cbuf->size, cbuf->smem->device_addr);
	}
}

void print_cvp_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
		struct cvp_internal_buf *cbuf)
{
	dprintk(tag, "%s addr: %x size %u\n", str,
		cbuf->smem->device_addr, cbuf->size);
}

void print_client_buffer(u32 tag, const char *str,
		struct msm_cvp_inst *inst, struct eva_kmd_buffer *cbuf)
{
	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
		return;

	dprintk(tag,
		"%s: %x : idx %2d fd %d off %d size %d type %d flags 0x%x\n",
		str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
		cbuf->offset, cbuf->size, cbuf->type, cbuf->flags);
}

static bool __is_buf_valid(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct cvp_hal_session *session;
	struct cvp_internal_buf *cbuf = NULL;
	bool found = false;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return false;
	}

	if (buf->fd < 0) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return false;
	}

	if (buf->offset) {
		dprintk(CVP_ERR,
			"%s: offset is deprecated, set to 0.\n",
			__func__);
		return false;
	}

	session = (struct cvp_hal_session *)inst->session;

	mutex_lock(&inst->cvpdspbufs.lock);
	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			if (cbuf->size != buf->size) {
				dprintk(CVP_ERR, "%s: buf size mismatch\n",
					__func__);
				mutex_unlock(&inst->cvpdspbufs.lock);
				return false;
			}
			found = true;
			break;
		}
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	if (found) {
		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
		return false;
	}

	return true;
}

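/*
 * Look up @fd in @task's file table and take @refs references on the
 * struct file, rejecting files whose f_mode intersects @mask.  The lookup
 * is retried if the reference cannot be taken, since dup2() may have
 * installed a new file in the slot concurrently.
 */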
static struct file *msm_cvp_fget(unsigned int fd, struct task_struct *task,
		fmode_t mask, unsigned int refs)
{
	struct files_struct *files = task->files;
	struct file *file;

	rcu_read_lock();
loop:
	file = fcheck_files(files, fd);
	if (file) {
		/* File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer)
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu_many(file, refs))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

static struct dma_buf *cvp_dma_buf_get(struct file *file, int fd,
		struct task_struct *task)
{
	if (file->f_op != gfa_cv.dmabuf_f_op) {
		dprintk(CVP_WARN, "fd doesn't refer to dma_buf\n");
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}

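/*
 * Map a client-supplied dma-buf fd for DSP use: resolve the fd through the
 * owning task's file table, map it into the CVP SMMU, and queue the
 * resulting buffer on inst->cvpdspbufs.  The device address is returned to
 * userspace via buf->reserved[0].
 */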
int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
{
	int rc = 0;
	struct cvp_internal_buf *cbuf = NULL;
	struct msm_cvp_smem *smem = NULL;
	struct dma_buf *dma_buf = NULL;
	struct file *file;

	if (!__is_buf_valid(inst, buf))
		return -EINVAL;

	if (!inst->task)
		return -EINVAL;

	file = msm_cvp_fget(buf->fd, inst->task, FMODE_PATH, 1);
	if (file == NULL) {
		dprintk(CVP_WARN, "%s fail to get file from fd\n", __func__);
		return -EINVAL;
	}

	dma_buf = cvp_dma_buf_get(file, buf->fd, inst->task);
	if (dma_buf == ERR_PTR(-EINVAL)) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		rc = -EINVAL;
		goto exit;
	}

	dprintk(CVP_MEM, "dma_buf from internal %llu\n", dma_buf);

	cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
	if (!cbuf) {
		rc = -ENOMEM;
		goto exit;
	}

	smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!smem) {
		rc = -ENOMEM;
		goto exit;
	}

	smem->dma_buf = dma_buf;
	smem->file = file;
	smem->bitmap_index = MAX_DMABUF_NUMS;
	dprintk(CVP_MEM, "%s: dma_buf = %llx\n", __func__, dma_buf);
	rc = msm_cvp_map_smem(inst, smem, "map dsp");
	if (rc) {
		print_client_buffer(CVP_ERR, "map failed", inst, buf);
		goto exit;
	}

	cbuf->smem = smem;
	cbuf->fd = buf->fd;
	cbuf->size = buf->size;
	cbuf->offset = buf->offset;
	cbuf->ownership = CLIENT;
	cbuf->index = buf->index;

	buf->reserved[0] = (uint32_t)smem->device_addr;

	mutex_lock(&inst->cvpdspbufs.lock);
	list_add_tail(&cbuf->list, &inst->cvpdspbufs.list);
	mutex_unlock(&inst->cvpdspbufs.lock);

	return rc;

exit:
	fput(file);
	if (smem) {
		if (smem->device_addr) {
			msm_cvp_unmap_smem(inst, smem, "unmap dsp");
			msm_cvp_smem_put_dma_buf(smem->dma_buf);
		}
		kmem_cache_free(cvp_driver->smem_cache, smem);
	}
	if (cbuf)
		kmem_cache_free(cvp_driver->buf_cache, cbuf);

	return rc;
}

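/*
 * Undo msm_cvp_map_buf_dsp(): find the entry on inst->cvpdspbufs that
 * matches the client fd, unmap it from the SMMU, drop the dma-buf and
 * file references, and free the bookkeeping structures.
 */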
int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
{
	int rc = 0;
	bool found;
	struct cvp_internal_buf *cbuf;
	struct cvp_hal_session *session;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&inst->cvpdspbufs.lock);
	found = false;
	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			found = true;
			break;
		}
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	if (!found) {
		print_client_buffer(CVP_ERR, "invalid", inst, buf);
		return -EINVAL;
	}

	if (cbuf->smem->device_addr) {
		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
		fput(cbuf->smem->file);
	}

	mutex_lock(&inst->cvpdspbufs.lock);
	list_del(&cbuf->list);
	mutex_unlock(&inst->cvpdspbufs.lock);

	kmem_cache_free(cvp_driver->smem_cache, cbuf->smem);
	kmem_cache_free(cvp_driver->buf_cache, cbuf);

	return rc;
}

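/*
 * Perform a CPU cache maintenance operation on @smem based on the buffer
 * type: clean for input buffers, invalidate for output buffers, and
 * clean+invalidate for anything else.  No-op when cache operations are
 * disabled via msm_cvp_cacheop_disabled.
 */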
void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type,
		u32 offset, u32 size)
{
	enum smem_cache_ops cache_op;

	if (msm_cvp_cacheop_disabled)
		return;

	if (!smem) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	switch (type) {
	case EVA_KMD_BUFTYPE_INPUT:
		cache_op = SMEM_CACHE_CLEAN;
		break;
	case EVA_KMD_BUFTYPE_OUTPUT:
		cache_op = SMEM_CACHE_INVALIDATE;
		break;
	default:
		cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
	}

	dprintk(CVP_MEM,
		"%s: cache operation enabled for dma_buf: %llx, cache_op: %d, offset: %d, size: %d\n",
		__func__, smem->dma_buf, cache_op, offset, size);
	msm_cvp_smem_cache_operations(smem->dma_buf, cache_op, offset, size);
}

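/*
 * Look up @dma_buf in the per-instance dma_cache.  On a hit the entry's
 * usage bit is set, its refcount is incremented, and the extra dma-buf
 * reference taken by the caller is dropped so it is not counted twice.
 */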
static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
		struct dma_buf *dma_buf)
{
	struct msm_cvp_smem *smem;
	int i;

	if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
		return NULL;

	mutex_lock(&inst->dma_cache.lock);
	for (i = 0; i < inst->dma_cache.nr; i++)
		if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
			SET_USE_BITMAP(i, inst);
			smem = inst->dma_cache.entries[i];
			smem->bitmap_index = i;
			atomic_inc(&smem->refcount);
			/*
			 * If we find it, it means we already increased
			 * refcount before, so we put it to avoid double
			 * incremental.
			 */
			msm_cvp_smem_put_dma_buf(smem->dma_buf);
			mutex_unlock(&inst->dma_cache.lock);
			print_smem(CVP_MEM, "found", inst, smem);
			return smem;
		}
	mutex_unlock(&inst->dma_cache.lock);

	return NULL;
}

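/*
 * Insert @smem into the per-instance dma_cache.  If the cache is full, the
 * first entry whose usage bit is clear is unmapped, released and replaced;
 * if every entry is still in use the call fails with -ENOMEM.
 */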
static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
		struct msm_cvp_smem *smem)
{
	unsigned int i;
	struct msm_cvp_smem *smem2;

	mutex_lock(&inst->dma_cache.lock);
	if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
		inst->dma_cache.entries[inst->dma_cache.nr] = smem;
		SET_USE_BITMAP(inst->dma_cache.nr, inst);
		smem->bitmap_index = inst->dma_cache.nr;
		inst->dma_cache.nr++;
		i = smem->bitmap_index;
	} else {
		i = find_first_zero_bit(&inst->dma_cache.usage_bitmap,
				MAX_DMABUF_NUMS);
		if (i < MAX_DMABUF_NUMS) {
			smem2 = inst->dma_cache.entries[i];
			msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
			msm_cvp_smem_put_dma_buf(smem2->dma_buf);
			kmem_cache_free(cvp_driver->smem_cache, smem2);

			inst->dma_cache.entries[i] = smem;
			smem->bitmap_index = i;
			SET_USE_BITMAP(i, inst);
		} else {
			dprintk(CVP_WARN, "%s: not enough memory\n", __func__);
			mutex_unlock(&inst->dma_cache.lock);
			return -ENOMEM;
		}
	}
	atomic_inc(&smem->refcount);
	mutex_unlock(&inst->dma_cache.lock);
	dprintk(CVP_MEM, "Add entry %d into cache\n", i);

	return 0;
}

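/*
 * Resolve @buf->fd to a mapped msm_cvp_smem.  A cached mapping is reused
 * when available; otherwise the dma-buf is mapped into the SMMU, validated
 * against the requested offset/size, and added to the dma_cache.
 */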
static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
		struct cvp_buf_type *buf)
{
	int rc = 0, found = 1;
	struct msm_cvp_smem *smem = NULL;
	struct dma_buf *dma_buf = NULL;

	if (buf->fd < 0) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return NULL;
	}

	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
	if (!dma_buf) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return NULL;
	}

	smem = msm_cvp_session_find_smem(inst, dma_buf);
	if (!smem) {
		found = 0;
		smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
		if (!smem)
			return NULL;

		smem->dma_buf = dma_buf;
		smem->bitmap_index = MAX_DMABUF_NUMS;
		rc = msm_cvp_map_smem(inst, smem, "map cpu");
		if (rc)
			goto exit;
		if (buf->size > smem->size || buf->size > smem->size - buf->offset) {
			dprintk(CVP_ERR, "%s: invalid offset %d or size %d for a new entry\n",
				__func__, buf->offset, buf->size);
			goto exit2;
		}
		rc = msm_cvp_session_add_smem(inst, smem);
		if (rc && rc != -ENOMEM)
			goto exit2;
	}

	if (buf->size > smem->size || buf->size > smem->size - buf->offset) {
		dprintk(CVP_ERR, "%s: invalid offset %d or size %d\n",
			__func__, buf->offset, buf->size);
		if (found) {
			mutex_lock(&inst->dma_cache.lock);
			atomic_dec(&smem->refcount);
			mutex_unlock(&inst->dma_cache.lock);
			return NULL;
		}
		goto exit2;
	}

	return smem;

exit2:
	msm_cvp_unmap_smem(inst, smem, "unmap cpu");
exit:
	msm_cvp_smem_put_dma_buf(dma_buf);
	kmem_cache_free(cvp_driver->smem_cache, smem);
	smem = NULL;
	return smem;
}

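/*
 * Map a user persistent buffer, record it on inst->persistbufs and return
 * the device IOVA (device_addr + offset), or 0 on failure.
 */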
static u32 msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
		struct cvp_buf_type *buf)
{
	u32 iova = 0;
	struct msm_cvp_smem *smem = NULL;
	struct cvp_internal_buf *pbuf;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	pbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
	if (!pbuf)
		return 0;

	smem = msm_cvp_session_get_smem(inst, buf);
	if (!smem)
		goto exit;

	pbuf->smem = smem;
	pbuf->fd = buf->fd;
	pbuf->size = buf->size;
	pbuf->offset = buf->offset;
	pbuf->ownership = CLIENT;

	mutex_lock(&inst->persistbufs.lock);
	list_add_tail(&pbuf->list, &inst->persistbufs.list);
	mutex_unlock(&inst->persistbufs.lock);

	print_internal_buffer(CVP_MEM, "map persist", inst, pbuf);

	iova = smem->device_addr + buf->offset;

	return iova;

exit:
	kmem_cache_free(cvp_driver->buf_cache, pbuf);
	return 0;
}

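/*
 * Map one frame buffer, record it in @frame->bufs[], perform the CPU cache
 * maintenance for it, and return the device IOVA (0 on failure or when the
 * per-frame buffer limit is reached).
 */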
u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
		struct cvp_buf_type *buf,
		struct msm_cvp_frame *frame)
{
	u32 iova = 0;
	struct msm_cvp_smem *smem = NULL;
	u32 nr;
	u32 type;

	if (!inst || !frame) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return 0;
	}

	nr = frame->nr;
	if (nr == MAX_FRAME_BUFFER_NUMS) {
		dprintk(CVP_ERR, "%s: max frame buffer reached\n", __func__);
		return 0;
	}

	smem = msm_cvp_session_get_smem(inst, buf);
	if (!smem)
		return 0;

	frame->bufs[nr].fd = buf->fd;
	frame->bufs[nr].smem = smem;
	frame->bufs[nr].size = buf->size;
	frame->bufs[nr].offset = buf->offset;

	print_internal_buffer(CVP_MEM, "map cpu", inst, &frame->bufs[nr]);

	frame->nr++;

	type = EVA_KMD_BUFTYPE_INPUT | EVA_KMD_BUFTYPE_OUTPUT;
	msm_cvp_cache_operations(smem, type, buf->offset, buf->size);

	iova = smem->device_addr + buf->offset;

	return iova;
}

static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
		struct msm_cvp_frame *frame)
{
	u32 i;
	u32 type;
	struct msm_cvp_smem *smem = NULL;
	struct cvp_internal_buf *buf;

	type = EVA_KMD_BUFTYPE_OUTPUT;

	for (i = 0; i < frame->nr; ++i) {
		buf = &frame->bufs[i];
		smem = buf->smem;
		msm_cvp_cache_operations(smem, type, buf->offset, buf->size);

		if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
			/* smem not in dmamap cache */
			msm_cvp_unmap_smem(inst, smem, "unmap cpu");
			dma_heap_buffer_free(smem->dma_buf);
			kmem_cache_free(cvp_driver->smem_cache, smem);
			buf->smem = NULL;
		} else {
			mutex_lock(&inst->dma_cache.lock);
			if (atomic_dec_and_test(&smem->refcount)) {
				CLEAR_USE_BITMAP(smem->bitmap_index, inst);
				print_smem(CVP_MEM, "Map dereference",
					inst, smem);
			}
			mutex_unlock(&inst->dma_cache.lock);
		}
	}

	kmem_cache_free(cvp_driver->frame_cache, frame);
}

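/*
 * Find the frame identified by kernel transaction id @ktid on inst->frames,
 * remove it from the list and release all of its buffer mappings.
 */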
void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
{
	struct msm_cvp_frame *frame, *dummy1;
	bool found;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	ktid &= (FENCE_BIT - 1);
	dprintk(CVP_MEM, "%s: (%#x) unmap frame %llu\n",
		__func__, hash32_ptr(inst->session), ktid);

	found = false;
	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		if (frame->ktid == ktid) {
			found = true;
			list_del(&frame->list);
			break;
		}
	}
	mutex_unlock(&inst->frames.lock);

	if (found)
		msm_cvp_unmap_frame_buf(inst, frame);
	else
		dprintk(CVP_WARN, "%s frame %llu not found!\n", __func__, ktid);
}

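/*
 * Release the CLIENT-owned persistent buffers that were registered under
 * the kernel transaction id carried in @in_pkt, unmapping them from the
 * SMMU or dropping their dma_cache reference as appropriate.
 */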
int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	struct cvp_internal_buf *pbuf, *dummy;
	u64 ktid;
	int rc = 0;
	struct msm_cvp_smem *smem = NULL;

	if (!offset || !buf_num)
		return rc;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = cmd_hdr->client_data.kdata & (FENCE_BIT - 1);

	mutex_lock(&inst->persistbufs.lock);
	list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list, list) {
		if (pbuf->ktid == ktid && pbuf->ownership == CLIENT) {
			list_del(&pbuf->list);
			smem = pbuf->smem;

			dprintk(CVP_MEM, "unmap persist: %x %d %d %#x",
				hash32_ptr(inst->session), pbuf->fd,
				pbuf->size, smem->device_addr);

			if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
				/* smem not in dmamap cache */
				msm_cvp_unmap_smem(inst, smem, "unmap cpu");
				dma_heap_buffer_free(smem->dma_buf);
				kmem_cache_free(cvp_driver->smem_cache, smem);
				pbuf->smem = NULL;
			} else {
				mutex_lock(&inst->dma_cache.lock);
				if (atomic_dec_and_test(&smem->refcount))
					CLEAR_USE_BITMAP(smem->bitmap_index,
						inst);
				mutex_unlock(&inst->dma_cache.lock);
			}

			kmem_cache_free(cvp_driver->buf_cache, pbuf);
		}
	}
	mutex_unlock(&inst->persistbufs.lock);

	return rc;
}

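/*
 * Assign a fresh kernel transaction id to @in_pkt and walk its buffer
 * descriptors, rewriting each fd to the device address of the matching
 * CLIENT-owned entry on inst->persistbufs and tagging that entry with the
 * new id.
 */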
int msm_cvp_mark_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	struct cvp_internal_buf *pbuf, *dummy;
	u64 ktid;
	struct cvp_buf_type *buf;
	int i, rc = 0;

	if (!offset || !buf_num)
		return 0;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
	ktid &= (FENCE_BIT - 1);
	cmd_hdr->client_data.kdata = ktid;

	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		mutex_lock(&inst->persistbufs.lock);
		list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list,
				list) {
			if (pbuf->ownership == CLIENT) {
				if (pbuf->fd == buf->fd &&
					pbuf->size == buf->size)
					buf->fd = pbuf->smem->device_addr;
				rc = 1;
				break;
			}
		}
		mutex_unlock(&inst->persistbufs.lock);

		if (!rc) {
			dprintk(CVP_ERR, "%s No persist buf %d found\n",
				__func__, buf->fd);
			rc = -EFAULT;
			break;
		}
		pbuf->ktid = ktid;
		rc = 0;
	}

	return rc;
}

int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_buf_type *buf;
	int i;
	u32 iova;

	if (!offset || !buf_num)
		return 0;

	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		iova = msm_cvp_map_user_persist_buf(inst, buf);
		if (!iova) {
			dprintk(CVP_ERR,
				"%s: buf %d register failed.\n",
				__func__, i);
			return -EINVAL;
		}
		buf->fd = iova;
	}

	return 0;
}

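/*
 * Map every buffer referenced by an HFI frame packet: allocate a
 * msm_cvp_frame tagged with a new kernel transaction id, map each buffer
 * descriptor to an IOVA (patching the fd field in place), and queue the
 * frame on inst->frames.
 */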
int msm_cvp_map_frame(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_buf_type *buf;
	int i;
	u32 iova;
	u64 ktid;
	struct msm_cvp_frame *frame;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;

	if (!offset || !buf_num)
		return 0;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
	ktid &= (FENCE_BIT - 1);
	cmd_hdr->client_data.kdata = ktid;

	frame = kmem_cache_zalloc(cvp_driver->frame_cache, GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->ktid = ktid;
	frame->nr = 0;
	frame->pkt_type = cmd_hdr->packet_type;

	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		iova = msm_cvp_map_frame_buf(inst, buf, frame);
		if (!iova) {
			dprintk(CVP_ERR,
				"%s: buf %d register failed.\n",
				__func__, i);
			msm_cvp_unmap_frame_buf(inst, frame);
			return -EINVAL;
		}
		buf->fd = iova;
	}

	mutex_lock(&inst->frames.lock);
	list_add_tail(&frame->list, &inst->frames.list);
	mutex_unlock(&inst->frames.lock);

	dprintk(CVP_MEM, "%s: map frame %llu\n", __func__, ktid);

	return 0;
}

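/*
 * Tear down all buffer state for a session: release queued frames, flush
 * the dma_cache, and deregister/free every DSP buffer still on
 * inst->cvpdspbufs, whether owned by the CLIENT or the DSP.
 */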
int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
{
	int rc = 0, i;
	struct cvp_internal_buf *cbuf, *dummy;
	struct msm_cvp_frame *frame, *dummy1;
	struct msm_cvp_smem *smem;
	struct cvp_hal_session *session;

	session = (struct cvp_hal_session *)inst->session;

	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		list_del(&frame->list);
		msm_cvp_unmap_frame_buf(inst, frame);
	}
	mutex_unlock(&inst->frames.lock);

	mutex_lock(&inst->dma_cache.lock);
	for (i = 0; i < inst->dma_cache.nr; i++) {
		smem = inst->dma_cache.entries[i];
		if (atomic_read(&smem->refcount) == 0) {
			print_smem(CVP_MEM, "free", inst, smem);
		} else {
			print_smem(CVP_WARN, "in use", inst, smem);
		}
		msm_cvp_unmap_smem(inst, smem, "unmap cpu");
		msm_cvp_smem_put_dma_buf(smem->dma_buf);
		kmem_cache_free(cvp_driver->smem_cache, smem);
		inst->dma_cache.entries[i] = NULL;
	}
	mutex_unlock(&inst->dma_cache.lock);

	mutex_lock(&inst->cvpdspbufs.lock);
	list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list, list) {
		print_internal_buffer(CVP_MEM, "remove dspbufs", inst, cbuf);
		if (cbuf->ownership == CLIENT) {
			rc = cvp_dsp_deregister_buffer(hash32_ptr(session),
				cbuf->fd, cbuf->smem->dma_buf->size, cbuf->size,
				cbuf->offset, cbuf->index,
				(uint32_t)cbuf->smem->device_addr);
			if (rc)
				dprintk(CVP_ERR,
					"%s: failed dsp deregistration fd=%d rc=%d",
					__func__, cbuf->fd, rc);

			msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
			msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
		} else if (cbuf->ownership == DSP) {
			rc = cvp_dsp_fastrpc_unmap(inst->process_id, cbuf);
			if (rc)
				dprintk(CVP_ERR,
					"%s: failed to unmap buf from DSP\n",
					__func__);

			rc = cvp_release_dsp_buffers(inst, cbuf);
			if (rc)
				dprintk(CVP_ERR,
					"%s Fail to free buffer 0x%x\n",
					__func__, rc);
		}
		list_del(&cbuf->list);
		kmem_cache_free(cvp_driver->buf_cache, cbuf);
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	return rc;
}

void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst)
{
	struct cvp_internal_buf *buf;
	int i;

	if (!inst) {
		dprintk(CVP_ERR, "%s - invalid param %pK\n",
			__func__, inst);
		return;
	}

	dprintk(CVP_ERR, "active session cmd %d\n", inst->cur_cmd_type);
	dprintk(CVP_ERR,
		"---Buffer details for inst: %pK of type: %d---\n",
		inst, inst->session_type);

	mutex_lock(&inst->dma_cache.lock);
	dprintk(CVP_ERR, "dma cache:\n");
	if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
		for (i = 0; i < inst->dma_cache.nr; i++)
			print_smem(CVP_ERR, "bufdump", inst,
				inst->dma_cache.entries[i]);
	mutex_unlock(&inst->dma_cache.lock);

	mutex_lock(&inst->cvpdspbufs.lock);
	dprintk(CVP_ERR, "dsp buffer list:\n");
	list_for_each_entry(buf, &inst->cvpdspbufs.list, list)
		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
	mutex_unlock(&inst->cvpdspbufs.lock);

	mutex_lock(&inst->persistbufs.lock);
	dprintk(CVP_ERR, "persist buffer list:\n");
	list_for_each_entry(buf, &inst->persistbufs.list, list)
		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
	mutex_unlock(&inst->persistbufs.lock);
}

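/*
 * Allocate a DRIVER-owned secure persistent (ARP) buffer of @buffer_size
 * bytes and add it to inst->persistbufs.  Returns the new buffer, or NULL
 * on allocation failure.
 */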
struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
		u32 buffer_size)
{
	struct cvp_internal_buf *buf;
	struct msm_cvp_list *buf_list;
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return NULL;
	}

	buf_list = &inst->persistbufs;

	if (!buffer_size)
		return NULL;

	/* PERSIST buffer requires secure mapping
	 * Disable and wait for hyp_assign available
	 */
	smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;

	buf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
	if (!buf) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc;
	}

	buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
		goto err_no_mem;
	}

	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DRIVER;

	mutex_lock(&buf_list->lock);
	list_add_tail(&buf->list, &buf_list->list);
	mutex_unlock(&buf_list->lock);

	return buf;

err_no_mem:
	kmem_cache_free(cvp_driver->buf_cache, buf);
fail_kzalloc:
	return NULL;
}

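/*
 * Release every buffer on inst->persistbufs.  The firmware is first asked
 * to release its buffers (a single call releases them all), then each
 * DRIVER-owned ARP buffer is freed and the list is emptied.
 */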
int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
{
	struct msm_cvp_smem *smem;
	struct list_head *ptr, *next;
	struct cvp_internal_buf *buf;
	int rc = 0;
	struct msm_cvp_core *core;
	struct cvp_hfi_device *hdev;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	core = inst->core;
	if (!core) {
		dprintk(CVP_ERR, "Invalid core pointer = %pK\n", core);
		return -EINVAL;
	}
	hdev = core->device;
	if (!hdev) {
		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "release persist buffer!\n");

	mutex_lock(&inst->persistbufs.lock);
	/* Workaround for FW: release buffer means release all */
	if (inst->state <= MSM_CVP_CLOSE_DONE) {
		rc = call_hfi_op(hdev, session_release_buffers,
				(void *)inst->session);
		if (!rc) {
			mutex_unlock(&inst->persistbufs.lock);
			rc = wait_for_sess_signal_receipt(inst,
				HAL_SESSION_RELEASE_BUFFER_DONE);
			if (rc)
				dprintk(CVP_WARN,
					"%s: wait for signal failed, rc %d\n",
					__func__, rc);
			mutex_lock(&inst->persistbufs.lock);
		} else {
			dprintk(CVP_WARN, "Fail to send Rel prst buf\n");
		}
	}

	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
		buf = list_entry(ptr, struct cvp_internal_buf, list);
		smem = buf->smem;
		if (!smem) {
			dprintk(CVP_ERR, "%s invalid smem\n", __func__);
			mutex_unlock(&inst->persistbufs.lock);
			return -EINVAL;
		}

		list_del(&buf->list);

		if (buf->ownership == DRIVER) {
			dprintk(CVP_MEM,
				"%s: %x : fd %d %s size %d",
				"free arp", hash32_ptr(inst->session), buf->fd,
				smem->dma_buf->name, buf->size);
			msm_cvp_smem_free(smem);
			kmem_cache_free(cvp_driver->smem_cache, smem);
		}
		buf->smem = NULL;
		kmem_cache_free(cvp_driver->buf_cache, buf);
	}
	mutex_unlock(&inst->persistbufs.lock);

	return rc;
}

int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
		struct cvp_internal_buf *buf,
		u32 buffer_size,
		u32 secure_type)
{
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return -EINVAL;
	}

	if (!buf)
		return -EINVAL;

	if (!buffer_size)
		return -EINVAL;

	switch (secure_type) {
	case 0:
		break;
	case 1:
		smem_flags |= SMEM_SECURE | SMEM_PIXEL;
		break;
	case 2:
		smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
		break;
	default:
		dprintk(CVP_ERR, "%s Invalid secure_type %d\n",
			__func__, secure_type);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "%s smem_flags 0x%x\n", __func__, smem_flags);

	buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc_smem_cache;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
		goto err_no_mem;
	}

	dprintk(CVP_MEM, "%s dma_buf %pK\n", __func__, buf->smem->dma_buf);

	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DSP;

	return rc;

err_no_mem:
	kmem_cache_free(cvp_driver->smem_cache, buf->smem);
fail_kzalloc_smem_cache:
	return rc;
}

int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
		struct cvp_internal_buf *buf)
{
	struct msm_cvp_smem *smem;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	if (!buf) {
		dprintk(CVP_ERR, "Invalid buffer pointer = %pK\n", inst);
		return -EINVAL;
	}

	smem = buf->smem;
	if (!smem) {
		dprintk(CVP_ERR, "%s invalid smem\n", __func__);
		return -EINVAL;
	}

	if (buf->ownership == DSP) {
		dprintk(CVP_MEM,
			"%s: %x : fd %x %s size %d",
			__func__, hash32_ptr(inst->session), buf->fd,
			smem->dma_buf->name, buf->size);
		msm_cvp_smem_free(smem);
		kmem_cache_free(cvp_driver->smem_cache, smem);
	} else {
		dprintk(CVP_ERR,
			"%s: wrong owner %d %x : fd %x %s size %d",
			__func__, buf->ownership, hash32_ptr(inst->session),
			buf->fd, smem->dma_buf->name, buf->size);
	}

	return rc;
}

int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct cvp_hfi_device *hdev;
	struct cvp_hal_session *session;
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!buf->index)
		return 0;

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = EVA_KMD_REGISTER_BUFFER;
	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
		rc = -EINVAL;
		goto exit;
	}
	hdev = inst->core->device;
	print_client_buffer(CVP_HFI, "register", inst, buf);

	rc = msm_cvp_map_buf_dsp(inst, buf);
	dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
		buf->fd, buf->reserved[0]);
exit:
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}

int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (!buf->index)
		return 0;

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	inst->cur_cmd_type = EVA_KMD_UNREGISTER_BUFFER;
	print_client_buffer(CVP_HFI, "unregister", inst, buf);

	rc = msm_cvp_unmap_buf_dsp(inst, buf);
	inst->cur_cmd_type = 0;
	cvp_put_inst(s);
	return rc;
}