msm_cvp_buf.c
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. #include <linux/pid.h>
  6. #include <linux/fdtable.h>
  7. #include <linux/rcupdate.h>
  8. #include <linux/fs.h>
  9. #include <linux/dma-buf.h>
  10. #include <linux/sched/task.h>
  11. #include <linux/version.h>
  12. #include "msm_cvp_common.h"
  13. #include "cvp_hfi_api.h"
  14. #include "msm_cvp_debug.h"
  15. #include "msm_cvp_core.h"
  16. #include "msm_cvp_dsp.h"
  17. #define CLEAR_USE_BITMAP(idx, inst) \
  18. do { \
  19. clear_bit(idx, &inst->dma_cache.usage_bitmap); \
  20. dprintk(CVP_MEM, "clear %x bit %d dma_cache bitmap 0x%llx\n", \
  21. hash32_ptr(inst->session), smem->bitmap_index, \
  22. inst->dma_cache.usage_bitmap); \
  23. } while (0)
  24. #define SET_USE_BITMAP(idx, inst) \
  25. do { \
  26. set_bit(idx, &inst->dma_cache.usage_bitmap); \
  27. dprintk(CVP_MEM, "Set %x bit %d dma_cache bitmap 0x%llx\n", \
  28. hash32_ptr(inst->session), idx, \
  29. inst->dma_cache.usage_bitmap); \
  30. } while (0)
  31. static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst* inst);
  32. static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
  33. unsigned int num_layers, struct eva_kmd_wncc_metadata** wncc_metadata);
  34. int print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
  35. struct msm_cvp_smem *smem)
  36. {
  37. if (!(tag & msm_cvp_debug))
  38. return 0;
  39. if (!inst || !smem) {
  40. dprintk(CVP_ERR, "Invalid inst 0x%llx or smem 0x%llx\n",
  41. inst, smem);
  42. return -EINVAL;
  43. }
  44. if (smem->dma_buf) {
  45. dprintk(tag,
  46. "%s: %x : %s size %d flags %#x iova %#x idx %d ref %d",
  47. str, hash32_ptr(inst->session), smem->dma_buf->name,
  48. smem->size, smem->flags, smem->device_addr,
  49. smem->bitmap_index, atomic_read(&smem->refcount));
  50. }
  51. return 0;
  52. }
  53. static void print_internal_buffer(u32 tag, const char *str,
  54. struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf)
  55. {
  56. if (!(tag & msm_cvp_debug) || !inst || !cbuf)
  57. return;
  58. if (cbuf->smem->dma_buf) {
  59. dprintk(tag,
  60. "%s: %x : fd %d off %d %s size %d iova %#x",
  61. str, hash32_ptr(inst->session), cbuf->fd,
  62. cbuf->offset, cbuf->smem->dma_buf->name, cbuf->size,
  63. cbuf->smem->device_addr);
  64. } else {
  65. dprintk(tag,
  66. "%s: %x : idx %2d fd %d off %d size %d iova %#x",
  67. str, hash32_ptr(inst->session), cbuf->fd,
  68. cbuf->offset, cbuf->size, cbuf->smem->device_addr);
  69. }
  70. }
  71. void print_cvp_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
  72. struct cvp_internal_buf *cbuf)
  73. {
  74. dprintk(tag, "%s addr: %x size %u\n", str,
  75. cbuf->smem->device_addr, cbuf->size);
  76. }
  77. static void _log_smem(struct inst_snapshot *snapshot, struct msm_cvp_inst *inst,
  78. struct msm_cvp_smem *smem, bool logging)
  79. {
  80. if (print_smem(CVP_ERR, "bufdump", inst, smem))
  81. return;
  82. if (!logging || !snapshot)
  83. return;
  84. if (snapshot && snapshot->smem_index < MAX_ENTRIES) {
  85. struct smem_data *s;
  86. s = &snapshot->smem_log[snapshot->smem_index];
  87. snapshot->smem_index++;
  88. s->size = smem->size;
  89. s->flags = smem->flags;
  90. s->device_addr = smem->device_addr;
  91. s->bitmap_index = smem->bitmap_index;
  92. s->refcount = atomic_read(&smem->refcount);
  93. }
  94. }
  95. static void _log_buf(struct inst_snapshot *snapshot, enum smem_prop prop,
  96. struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf,
  97. bool logging)
  98. {
  99. struct cvp_buf_data *buf = NULL;
  100. u32 index;
  101. print_cvp_buffer(CVP_ERR, "bufdump", inst, cbuf);
  102. if (!logging)
  103. return;
  104. if (snapshot) {
  105. if (prop == SMEM_ADSP && snapshot->dsp_index < MAX_ENTRIES) {
  106. index = snapshot->dsp_index;
  107. buf = &snapshot->dsp_buf_log[index];
  108. snapshot->dsp_index++;
  109. } else if (prop == SMEM_PERSIST &&
  110. snapshot->persist_index < MAX_ENTRIES) {
  111. index = snapshot->persist_index;
  112. buf = &snapshot->persist_buf_log[index];
  113. snapshot->persist_index++;
  114. }
  115. if (buf) {
  116. buf->device_addr = cbuf->smem->device_addr;
  117. buf->size = cbuf->size;
  118. }
  119. }
  120. }
  121. void print_client_buffer(u32 tag, const char *str,
  122. struct msm_cvp_inst *inst, struct eva_kmd_buffer *cbuf)
  123. {
  124. if (!(tag & msm_cvp_debug) || !str || !inst || !cbuf)
  125. return;
  126. dprintk(tag,
  127. "%s: %x : idx %2d fd %d off %d size %d type %d flags 0x%x"
  128. " reserved[0] %u\n",
  129. str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
  130. cbuf->offset, cbuf->size, cbuf->type, cbuf->flags,
  131. cbuf->reserved[0]);
  132. }
  133. static bool __is_buf_valid(struct msm_cvp_inst *inst,
  134. struct eva_kmd_buffer *buf)
  135. {
  136. struct cvp_hal_session *session;
  137. struct cvp_internal_buf *cbuf = NULL;
  138. bool found = false;
  139. if (!inst || !inst->core || !buf) {
  140. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  141. return false;
  142. }
  143. if (buf->fd < 0) {
  144. dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
  145. return false;
  146. }
  147. if (buf->offset) {
  148. dprintk(CVP_ERR,
  149. "%s: offset is deprecated, set to 0.\n",
  150. __func__);
  151. return false;
  152. }
  153. session = (struct cvp_hal_session *)inst->session;
  154. mutex_lock(&inst->cvpdspbufs.lock);
  155. list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
  156. if (cbuf->fd == buf->fd) {
  157. if (cbuf->size != buf->size) {
  158. dprintk(CVP_ERR, "%s: buf size mismatch\n",
  159. __func__);
  160. mutex_unlock(&inst->cvpdspbufs.lock);
  161. return false;
  162. }
  163. found = true;
  164. break;
  165. }
  166. }
  167. mutex_unlock(&inst->cvpdspbufs.lock);
  168. if (found) {
  169. print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
  170. return false;
  171. }
  172. return true;
  173. }
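/*
 * Look up @fd in @task's file table and take @refs references on the
 * struct file, rejecting files whose f_mode matches @mask. Modelled on
 * the kernel's __fget(); the retry loop covers dup2() races under RCU.
 */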
  174. static struct file *msm_cvp_fget(unsigned int fd, struct task_struct *task,
  175. fmode_t mask, unsigned int refs)
  176. {
  177. struct files_struct *files = task->files;
  178. struct file *file;
  179. if (!files)
  180. return NULL;
  181. rcu_read_lock();
  182. loop:
  183. #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0))
  184. file = fcheck_files(files, fd);
  185. #else
  186. file = files_lookup_fd_rcu(files, fd);
  187. #endif
  188. if (file) {
  189. /* File object ref couldn't be taken.
  190. * dup2() atomicity guarantee is the reason
  191. * we loop to catch the new file (or NULL pointer)
  192. */
  193. if (file->f_mode & mask)
  194. file = NULL;
  195. else if (!get_file_rcu_many(file, refs))
  196. goto loop;
  197. }
  198. rcu_read_unlock();
  199. return file;
  200. }
  201. static struct dma_buf *cvp_dma_buf_get(struct file *file, int fd,
  202. struct task_struct *task)
  203. {
  204. if (file->f_op != gfa_cv.dmabuf_f_op) {
  205. dprintk(CVP_WARN, "fd doesn't refer to dma_buf\n");
  206. return ERR_PTR(-EINVAL);
  207. }
  208. return file->private_data;
  209. }
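/*
 * Map a client-supplied dma-buf fd for DSP use: validate the fd, take a
 * reference via the owning task's file table, map it into the CVP SMMU
 * and track it on inst->cvpdspbufs. The device address is returned to
 * user space through buf->reserved[0].
 */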
  210. int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
  211. {
  212. int rc = 0;
  213. struct cvp_internal_buf *cbuf = NULL;
  214. struct msm_cvp_smem *smem = NULL;
  215. struct dma_buf *dma_buf = NULL;
  216. struct file *file;
  217. if (!__is_buf_valid(inst, buf))
  218. return -EINVAL;
  219. if (!inst->task)
  220. return -EINVAL;
  221. file = msm_cvp_fget(buf->fd, inst->task, FMODE_PATH, 1);
  222. if (file == NULL) {
  223. dprintk(CVP_WARN, "%s fail to get file from fd\n", __func__);
  224. return -EINVAL;
  225. }
  226. dma_buf = cvp_dma_buf_get(
  227. file,
  228. buf->fd,
  229. inst->task);
  230. if (dma_buf == ERR_PTR(-EINVAL)) {
  231. dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
  232. rc = -EINVAL;
  233. goto exit;
  234. }
  235. dprintk(CVP_MEM, "dma_buf from internal %llx\n", dma_buf);
  236. cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
  237. if (!cbuf) {
  238. rc = -ENOMEM;
  239. goto exit;
  240. }
  241. smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
  242. if (!smem) {
  243. rc = -ENOMEM;
  244. goto exit;
  245. }
  246. smem->dma_buf = dma_buf;
  247. smem->bitmap_index = MAX_DMABUF_NUMS;
  248. dprintk(CVP_MEM, "%s: dma_buf = %llx\n", __func__, dma_buf);
  249. rc = msm_cvp_map_smem(inst, smem, "map dsp");
  250. if (rc) {
  251. print_client_buffer(CVP_ERR, "map failed", inst, buf);
  252. goto exit;
  253. }
  254. cbuf->smem = smem;
  255. cbuf->fd = buf->fd;
  256. cbuf->size = buf->size;
  257. cbuf->offset = buf->offset;
  258. cbuf->ownership = CLIENT;
  259. cbuf->index = buf->index;
  260. buf->reserved[0] = (uint32_t)smem->device_addr;
  261. mutex_lock(&inst->cvpdspbufs.lock);
  262. list_add_tail(&cbuf->list, &inst->cvpdspbufs.list);
  263. mutex_unlock(&inst->cvpdspbufs.lock);
  264. return rc;
  265. exit:
  266. fput(file);
  267. if (smem) {
  268. if (smem->device_addr)
  269. msm_cvp_unmap_smem(inst, smem, "unmap dsp");
  270. msm_cvp_smem_put_dma_buf(smem->dma_buf);
  271. kmem_cache_free(cvp_driver->smem_cache, smem);
  272. }
  273. if (cbuf)
  274. kmem_cache_free(cvp_driver->buf_cache, cbuf);
  275. return rc;
  276. }
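/*
 * Reverse of msm_cvp_map_buf_dsp(): find the tracked buffer by fd,
 * unmap it from the SMMU, drop the dma-buf reference and free the
 * bookkeeping structures.
 */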
  277. int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
  278. {
  279. int rc = 0;
  280. bool found;
  281. struct cvp_internal_buf *cbuf;
  282. struct cvp_hal_session *session;
  283. if (!inst || !inst->core || !buf) {
  284. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  285. return -EINVAL;
  286. }
  287. session = (struct cvp_hal_session *)inst->session;
  288. if (!session) {
  289. dprintk(CVP_ERR, "%s: invalid session\n", __func__);
  290. return -EINVAL;
  291. }
  292. mutex_lock(&inst->cvpdspbufs.lock);
  293. found = false;
  294. list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
  295. if (cbuf->fd == buf->fd) {
  296. found = true;
  297. break;
  298. }
  299. }
  300. mutex_unlock(&inst->cvpdspbufs.lock);
  301. if (!found) {
  302. print_client_buffer(CVP_ERR, "invalid", inst, buf);
  303. return -EINVAL;
  304. }
  305. if (cbuf->smem->device_addr) {
  306. msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
  307. msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
  308. }
  309. mutex_lock(&inst->cvpdspbufs.lock);
  310. list_del(&cbuf->list);
  311. mutex_unlock(&inst->cvpdspbufs.lock);
  312. kmem_cache_free(cvp_driver->smem_cache, cbuf->smem);
  313. kmem_cache_free(cvp_driver->buf_cache, cbuf);
  314. return rc;
  315. }
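/*
 * Map a WNCC source buffer and register it in the per-instance look-up
 * table (up to EVA_KMD_WNCC_MAX_SRC_BUFS entries). The assigned buffer
 * id is returned in buf->reserved[0] and mirrored in cbuf->ktid so it
 * can be resolved later by msm_cvp_proc_oob_wncc().
 */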
  316. int msm_cvp_map_buf_wncc(struct msm_cvp_inst *inst,
  317. struct eva_kmd_buffer *buf)
  318. {
  319. int rc = 0, i;
  320. bool found = false;
  321. struct cvp_internal_buf* cbuf;
  322. struct msm_cvp_smem* smem = NULL;
  323. struct dma_buf* dma_buf = NULL;
  324. if (!inst || !inst->core || !buf) {
  325. dprintk(CVP_ERR, "%s: invalid params", __func__);
  326. return -EINVAL;
  327. }
  328. if (!inst->session) {
  329. dprintk(CVP_ERR, "%s: invalid session", __func__);
  330. return -EINVAL;
  331. }
  332. if (buf->index) {
  333. dprintk(CVP_ERR, "%s: buf index is NOT 0 fd=%d",
  334. __func__, buf->fd);
  335. return -EINVAL;
  336. }
  337. if (buf->fd < 0) {
  338. dprintk(CVP_ERR, "%s: invalid fd = %d", __func__, buf->fd);
  339. return -EINVAL;
  340. }
  341. if (buf->offset) {
  342. dprintk(CVP_ERR, "%s: offset is not supported, set to 0.",
  343. __func__);
  344. return -EINVAL;
  345. }
  346. mutex_lock(&inst->cvpwnccbufs.lock);
  347. list_for_each_entry(cbuf, &inst->cvpwnccbufs.list, list) {
  348. if (cbuf->fd == buf->fd) {
  349. if (cbuf->size != buf->size) {
  350. dprintk(CVP_ERR, "%s: buf size mismatch",
  351. __func__);
  352. mutex_unlock(&inst->cvpwnccbufs.lock);
  353. return -EINVAL;
  354. }
  355. found = true;
  356. break;
  357. }
  358. }
  359. mutex_unlock(&inst->cvpwnccbufs.lock);
  360. if (found) {
  361. print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
  362. return -EINVAL;
  363. }
  364. dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
  365. if (!dma_buf) {
  366. dprintk(CVP_ERR, "%s: invalid fd = %d", __func__, buf->fd);
  367. return -EINVAL;
  368. }
  369. cbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
  370. if (!cbuf) {
  371. msm_cvp_smem_put_dma_buf(dma_buf);
  372. return -ENOMEM;
  373. }
  374. smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
  375. if (!smem) {
  376. kmem_cache_free(cvp_driver->buf_cache, cbuf);
  377. msm_cvp_smem_put_dma_buf(dma_buf);
  378. return -ENOMEM;
  379. }
  380. smem->dma_buf = dma_buf;
  381. smem->bitmap_index = MAX_DMABUF_NUMS;
  382. dprintk(CVP_MEM, "%s: dma_buf = %llx", __func__, dma_buf);
  383. rc = msm_cvp_map_smem(inst, smem, "map wncc");
  384. if (rc) {
  385. dprintk(CVP_ERR, "%s: map failed", __func__);
  386. print_client_buffer(CVP_ERR, __func__, inst, buf);
  387. goto exit;
  388. }
  389. cbuf->smem = smem;
  390. cbuf->fd = buf->fd;
  391. cbuf->size = buf->size;
  392. cbuf->offset = buf->offset;
  393. cbuf->ownership = CLIENT;
  394. cbuf->index = buf->index;
  395. /* Added for PreSil/RUMI testing */
  396. #ifdef USE_PRESIL
  397. dprintk(CVP_DBG,
  398. "wncc buffer is %x for cam_presil_send_buffer"
  399. " with MAP_ADDR_OFFSET %x",
  400. (u64)(smem->device_addr) - MAP_ADDR_OFFSET, MAP_ADDR_OFFSET);
  401. cam_presil_send_buffer((u64)smem->dma_buf, 0,
  402. (u32)cbuf->offset, (u32)cbuf->size,
  403. (u64)(smem->device_addr) - MAP_ADDR_OFFSET);
  404. #endif
  405. mutex_lock(&inst->cvpwnccbufs.lock);
  406. if (inst->cvpwnccbufs_table == NULL) {
  407. inst->cvpwnccbufs_table =
  408. (struct msm_cvp_wncc_buffer*) kzalloc(
  409. sizeof(struct msm_cvp_wncc_buffer) *
  410. EVA_KMD_WNCC_MAX_SRC_BUFS,
  411. GFP_KERNEL);
  412. if (!inst->cvpwnccbufs_table) {
  413. mutex_unlock(&inst->cvpwnccbufs.lock);
  414. rc = -ENOMEM;
  goto exit;
  415. }
  416. }
  417. list_add_tail(&cbuf->list, &inst->cvpwnccbufs.list);
  418. for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS; i++)
  419. {
  420. if (inst->cvpwnccbufs_table[i].iova == 0)
  421. {
  422. inst->cvpwnccbufs_num++;
  423. inst->cvpwnccbufs_table[i].fd = buf->fd;
  424. inst->cvpwnccbufs_table[i].iova = smem->device_addr;
  425. inst->cvpwnccbufs_table[i].size = smem->size;
  426. /* buf reserved[0] used to store wncc src buf id */
  427. buf->reserved[0] = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
  428. /* cbuf ktid used to store wncc src buf id */
  429. cbuf->ktid = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
  430. dprintk(CVP_MEM, "%s: wncc buf iova: 0x%08X",
  431. __func__, inst->cvpwnccbufs_table[i].iova);
  432. break;
  433. }
  434. }
  435. if (i == EVA_KMD_WNCC_MAX_SRC_BUFS) {
  436. dprintk(CVP_ERR,
  437. "%s: wncc buf table full - max (%u) already registered",
  438. __func__, EVA_KMD_WNCC_MAX_SRC_BUFS);
  439. /* _wncc_print_cvpwnccbufs_table(inst); */
  440. mutex_unlock(&inst->cvpwnccbufs.lock);
  441. rc = -EDQUOT;
  442. goto exit;
  443. }
  444. mutex_unlock(&inst->cvpwnccbufs.lock);
  445. return rc;
  446. exit:
  447. if (smem->device_addr)
  448. msm_cvp_unmap_smem(inst, smem, "unmap wncc");
  449. msm_cvp_smem_put_dma_buf(smem->dma_buf);
  450. kmem_cache_free(cvp_driver->buf_cache, cbuf);
  451. cbuf = NULL;
  452. kmem_cache_free(cvp_driver->smem_cache, smem);
  453. smem = NULL;
  454. return rc;
  455. }
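/*
 * Unmap a WNCC source buffer identified by the buffer id previously
 * returned in buf->reserved[0]; clears its look-up table slot and frees
 * the table once the last buffer is gone.
 */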
  456. int msm_cvp_unmap_buf_wncc(struct msm_cvp_inst *inst,
  457. struct eva_kmd_buffer *buf)
  458. {
  459. int rc = 0;
  460. bool found;
  461. struct cvp_internal_buf *cbuf;
  462. uint32_t buf_id, buf_idx;
  463. if (!inst || !inst->core || !buf) {
  464. dprintk(CVP_ERR, "%s: invalid params", __func__);
  465. return -EINVAL;
  466. }
  467. if (!inst->session) {
  468. dprintk(CVP_ERR, "%s: invalid session", __func__);
  469. return -EINVAL;
  470. }
  471. if (buf->index) {
  472. dprintk(CVP_ERR, "%s: buf index is NOT 0 fd=%d",
  473. __func__, buf->fd);
  474. return -EINVAL;
  475. }
  476. buf_id = buf->reserved[0];
  477. if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET || buf_id >=
  478. (EVA_KMD_WNCC_MAX_SRC_BUFS + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET)) {
  479. dprintk(CVP_ERR, "%s: invalid buffer id %d",
  480. __func__, buf->reserved[0]);
  481. return -EINVAL;
  482. }
  483. mutex_lock(&inst->cvpwnccbufs.lock);
  484. if (inst->cvpwnccbufs_num == 0) {
  485. dprintk(CVP_ERR, "%s: no wncc buffers currently mapped", __func__);
  486. mutex_unlock(&inst->cvpwnccbufs.lock);
  487. return -EINVAL;
  488. }
  489. buf_idx = buf_id - EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
  490. if (inst->cvpwnccbufs_table[buf_idx].iova == 0) {
  491. dprintk(CVP_ERR, "%s: buffer id %d not found",
  492. __func__, buf_id);
  493. mutex_unlock(&inst->cvpwnccbufs.lock);
  494. return -EINVAL;
  495. }
  496. buf->fd = inst->cvpwnccbufs_table[buf_idx].fd;
  497. found = false;
  498. list_for_each_entry(cbuf, &inst->cvpwnccbufs.list, list) {
  499. if (cbuf->fd == buf->fd) {
  500. found = true;
  501. break;
  502. }
  503. }
  504. if (!found) {
  505. dprintk(CVP_ERR, "%s: buffer id %d not found",
  506. __func__, buf_id);
  507. print_client_buffer(CVP_ERR, __func__, inst, buf);
  508. _wncc_print_cvpwnccbufs_table(inst);
  509. mutex_unlock(&inst->cvpwnccbufs.lock);
  510. return -EINVAL;
  511. }
  512. mutex_unlock(&inst->cvpwnccbufs.lock);
  513. if (cbuf->smem->device_addr) {
  514. msm_cvp_unmap_smem(inst, cbuf->smem, "unmap wncc");
  515. msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
  516. }
  517. mutex_lock(&inst->cvpwnccbufs.lock);
  518. list_del(&cbuf->list);
  519. inst->cvpwnccbufs_table[buf_idx].fd = 0;
  520. inst->cvpwnccbufs_table[buf_idx].iova = 0;
  521. inst->cvpwnccbufs_table[buf_idx].size = 0;
  522. inst->cvpwnccbufs_num--;
  523. if (inst->cvpwnccbufs_num == 0) {
  524. kfree(inst->cvpwnccbufs_table);
  525. inst->cvpwnccbufs_table = NULL;
  526. }
  527. mutex_unlock(&inst->cvpwnccbufs.lock);
  528. kmem_cache_free(cvp_driver->smem_cache, cbuf->smem);
  529. kmem_cache_free(cvp_driver->buf_cache, cbuf);
  530. return rc;
  531. }
  532. static void _wncc_print_oob(struct eva_kmd_oob_wncc* wncc_oob)
  533. {
  534. u32 i, j;
  535. if (!wncc_oob) {
  536. dprintk(CVP_ERR, "%s: invalid params", __func__);
  537. return;
  538. }
  539. dprintk(CVP_DBG, "%s: wncc OOB --", __func__);
  540. dprintk(CVP_DBG, "%s: num_layers: %u", __func__, wncc_oob->num_layers);
  541. for (i = 0; i < wncc_oob->num_layers; i++) {
  542. dprintk(CVP_DBG, "%s: layers[%u].num_addrs: %u",
  543. __func__, i, wncc_oob->layers[i].num_addrs);
  544. for (j = 0; j < wncc_oob->layers[i].num_addrs; j++) {
  545. dprintk(CVP_DBG,
  546. "%s: layers[%u].addrs[%u]: %04u 0x%08x",
  547. __func__, i, j,
  548. wncc_oob->layers[i].addrs[j].buffer_id,
  549. wncc_oob->layers[i].addrs[j].offset);
  550. }
  551. }
  552. }
  553. static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst* inst)
  554. {
  555. u32 i, entries = 0;
  556. if (!inst) {
  557. dprintk(CVP_ERR, "%s: invalid params", __func__);
  558. return;
  559. }
  560. if (inst->cvpwnccbufs_num == 0) {
  561. dprintk(CVP_DBG, "%s: wncc buffer look-up table is empty",
  562. __func__);
  563. return;
  564. }
  565. if (!inst->cvpwnccbufs_table) {
  566. dprintk(CVP_ERR, "%s: invalid params", __func__);
  567. return;
  568. }
  569. dprintk(CVP_DBG, "%s: wncc buffer table:", __func__);
  570. for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS &&
  571. entries < inst->cvpwnccbufs_num; i++) {
  572. if (inst->cvpwnccbufs_table[i].iova != 0) {
  573. dprintk(CVP_DBG,
  574. "%s: buf_idx=%04d --> "
  575. "fd=%03d, iova=0x%08x, size=%d",
  576. __func__, i,
  577. inst->cvpwnccbufs_table[i].fd,
  578. inst->cvpwnccbufs_table[i].iova,
  579. inst->cvpwnccbufs_table[i].size);
  580. entries++;
  581. }
  582. }
  583. }
  584. static void _wncc_print_metadata_buf(u32 num_layers, u32 num_addrs,
  585. struct eva_kmd_wncc_metadata** wncc_metadata)
  586. {
  587. u32 i, j, iova;
  588. if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS ||
  589. !wncc_metadata) {
  590. dprintk(CVP_ERR, "%s: invalid params", __func__);
  591. return;
  592. }
  593. dprintk(CVP_DBG, "%s: wncc metadata buffers --", __func__);
  594. dprintk(CVP_DBG, "%s: num_layers: %u", __func__, num_layers);
  595. dprintk(CVP_DBG, "%s: num_addrs: %u", __func__, num_addrs);
  596. for (i = 0; i < num_layers; i++) {
  597. for (j = 0; j < num_addrs; j++) {
  598. iova = (wncc_metadata[i][j].iova_msb << 22) |
  599. wncc_metadata[i][j].iova_lsb;
  600. dprintk(CVP_DBG,
  601. "%s: wncc_metadata[%u][%u]: "
  602. "%4u %3u %4u %3u 0x%08x %1u %4d %4d %4d %4d",
  603. __func__, i, j,
  604. wncc_metadata[i][j].loc_x_dec,
  605. wncc_metadata[i][j].loc_x_frac,
  606. wncc_metadata[i][j].loc_y_dec,
  607. wncc_metadata[i][j].loc_y_frac,
  608. iova,
  609. wncc_metadata[i][j].scale_idx,
  610. wncc_metadata[i][j].aff_coeff_3,
  611. wncc_metadata[i][j].aff_coeff_2,
  612. wncc_metadata[i][j].aff_coeff_1,
  613. wncc_metadata[i][j].aff_coeff_0);
  614. }
  615. }
  616. }
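/*
 * Copy the WNCC out-of-band data from the user-space pointer embedded
 * in the HFI packet, validating the OOB type, layer count and per-layer
 * address counts before pulling in the address arrays.
 */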
  617. static int _wncc_copy_oob_from_user(struct eva_kmd_hfi_packet* in_pkt,
  618. struct eva_kmd_oob_wncc* wncc_oob)
  619. {
  620. int rc = 0;
  621. u32 oob_type;
  622. struct eva_kmd_oob_wncc* wncc_oob_u;
  623. struct eva_kmd_oob_wncc* wncc_oob_k;
  624. unsigned int i;
  625. u32 num_addrs;
  626. if (!in_pkt || !wncc_oob) {
  627. dprintk(CVP_ERR, "%s: invalid params", __func__);
  628. return -EINVAL;
  629. }
  630. if (!access_ok(in_pkt->oob_buf, sizeof(*in_pkt->oob_buf))) {
  631. dprintk(CVP_ERR, "%s: invalid OOB buf pointer", __func__);
  632. return -EINVAL;
  633. }
  634. rc = get_user(oob_type, &in_pkt->oob_buf->oob_type);
  635. if (rc)
  636. return rc;
  637. if (oob_type != EVA_KMD_OOB_WNCC) {
  638. dprintk(CVP_ERR, "%s: incorrect OOB type (%d) for wncc",
  639. __func__, oob_type);
  640. return -EINVAL;
  641. }
  642. wncc_oob_u = &in_pkt->oob_buf->wncc;
  643. wncc_oob_k = wncc_oob;
  644. rc = get_user(wncc_oob_k->num_layers, &wncc_oob_u->num_layers);
  645. if (rc)
  646. return rc;
  647. if (wncc_oob_k->num_layers < 1 ||
  648. wncc_oob_k->num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
  649. dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
  650. return -EINVAL;
  651. }
  652. for (i = 0; i < wncc_oob_k->num_layers; i++) {
  653. rc = get_user(wncc_oob_k->layers[i].num_addrs,
  654. &wncc_oob_u->layers[i].num_addrs);
  655. if (rc)
  656. break;
  657. num_addrs = wncc_oob_k->layers[i].num_addrs;
  658. if (num_addrs < 1 || num_addrs > EVA_KMD_WNCC_MAX_ADDRESSES) {
  659. dprintk(CVP_ERR,
  660. "%s: invalid wncc num addrs for layer %u",
  661. __func__, i);
  662. rc = -EINVAL;
  663. break;
  664. }
  665. rc = copy_from_user(wncc_oob_k->layers[i].addrs,
  666. wncc_oob_u->layers[i].addrs,
  667. wncc_oob_k->layers[i].num_addrs *
  668. sizeof(struct eva_kmd_wncc_addr));
  669. if (rc)
  670. break;
  671. }
  672. if (false)
  673. _wncc_print_oob(wncc_oob);
  674. return rc;
  675. }
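/*
 * vmap each per-layer WNCC metadata dma-buf referenced by the HFI
 * packet so the driver can patch device addresses into it; on any
 * failure the layers mapped so far are unwound via
 * _wncc_unmap_metadata_bufs().
 */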
  676. static int _wncc_map_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
  677. unsigned int num_layers, struct eva_kmd_wncc_metadata** wncc_metadata)
  678. {
  679. int rc = 0, i;
  680. struct cvp_buf_type* wncc_metadata_bufs;
  681. struct dma_buf* dmabuf;
  682. struct dma_buf_map map;
  683. if (!in_pkt || !wncc_metadata ||
  684. num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
  685. dprintk(CVP_ERR, "%s: invalid params", __func__);
  686. return -EINVAL;
  687. }
  688. wncc_metadata_bufs = (struct cvp_buf_type*)
  689. &in_pkt->pkt_data[EVA_KMD_WNCC_HFI_METADATA_BUFS_OFFSET];
  690. for (i = 0; i < num_layers; i++) {
  691. dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
  692. if (IS_ERR(dmabuf)) {
  693. rc = PTR_ERR(dmabuf);
  694. dprintk(CVP_ERR,
  695. "%s: dma_buf_get() failed for "
  696. "wncc_metadata_bufs[%d], rc %d",
  697. __func__, i, rc);
  698. break;
  699. }
  700. rc = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
  701. if (rc) {
  702. dprintk(CVP_ERR,
  703. "%s: dma_buf_begin_cpu_access() failed "
  704. "for wncc_metadata_bufs[%d], rc %d",
  705. __func__, i, rc);
  706. dma_buf_put(dmabuf);
  707. break;
  708. }
  709. rc = dma_buf_vmap(dmabuf, &map);
  710. if (rc) {
  711. dprintk(CVP_ERR,
  712. "%s: dma_buf_vmap() failed for "
  713. "wncc_metadata_bufs[%d]",
  714. __func__, i);
  715. dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
  716. dma_buf_put(dmabuf);
  717. break;
  718. }
  719. dprintk(CVP_DBG,
  720. "%s: wncc_metadata_bufs[%d] map.is_iomem is %d",
  721. __func__, i, map.is_iomem);
  722. wncc_metadata[i] = (struct eva_kmd_wncc_metadata*)map.vaddr;
  723. dma_buf_put(dmabuf);
  724. }
  725. if (rc)
  726. _wncc_unmap_metadata_bufs(in_pkt, i, wncc_metadata);
  727. return rc;
  728. }
  729. static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet* in_pkt,
  730. unsigned int num_layers, struct eva_kmd_wncc_metadata** wncc_metadata)
  731. {
  732. int rc = 0, i;
  733. struct cvp_buf_type* wncc_metadata_bufs;
  734. struct dma_buf* dmabuf;
  735. struct dma_buf_map map;
  736. if (!in_pkt || !wncc_metadata ||
  737. num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
  738. dprintk(CVP_ERR, "%s: invalid params", __func__);
  739. return -EINVAL;
  740. }
  741. wncc_metadata_bufs = (struct cvp_buf_type*)
  742. &in_pkt->pkt_data[EVA_KMD_WNCC_HFI_METADATA_BUFS_OFFSET];
  743. for (i = 0; i < num_layers; i++) {
  744. if (!wncc_metadata[i]) {
  745. rc = -EINVAL;
  746. break;
  747. }
  748. dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
  749. if (IS_ERR(dmabuf)) {
  750. rc = PTR_ERR(dmabuf);
  751. dprintk(CVP_ERR,
  752. "%s: dma_buf_get() failed for "
  753. "wncc_metadata_bufs[%d], rc %d",
  754. __func__, i, rc);
  755. break;
  756. }
  757. dma_buf_map_set_vaddr(&map, wncc_metadata[i]);
  758. dma_buf_vunmap(dmabuf, &map);
  759. wncc_metadata[i] = NULL;
  760. rc = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
  761. dma_buf_put(dmabuf);
  762. if (rc) {
  763. dprintk(CVP_ERR,
  764. "%s: dma_buf_end_cpu_access() failed "
  765. "for wncc_metadata_bufs[%d], rc %d",
  766. __func__, i, rc);
  767. break;
  768. }
  769. }
  770. return rc;
  771. }
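/*
 * Handle the WNCC out-of-band payload of a WARP_NCC_FRAME packet:
 * copy the OOB data from user space, vmap the metadata buffers, then
 * translate each (buffer id, offset) pair into an IOVA using the
 * per-instance WNCC look-up table and write it into the metadata.
 */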
  772. static int msm_cvp_proc_oob_wncc(struct msm_cvp_inst* inst,
  773. struct eva_kmd_hfi_packet* in_pkt)
  774. {
  775. int rc = 0;
  776. struct eva_kmd_oob_wncc* wncc_oob;
  777. struct eva_kmd_wncc_metadata* wncc_metadata[EVA_KMD_WNCC_MAX_LAYERS];
  778. unsigned int i, j;
  779. bool empty = false;
  780. u32 buf_id, buf_idx, buf_offset, iova;
  781. if (!inst || !inst->core || !in_pkt) {
  782. dprintk(CVP_ERR, "%s: invalid params", __func__);
  783. return -EINVAL;
  784. }
  785. wncc_oob = (struct eva_kmd_oob_wncc*)kzalloc(
  786. sizeof(struct eva_kmd_oob_wncc), GFP_KERNEL);
  787. if (!wncc_oob)
  788. return -ENOMEM;
  789. rc = _wncc_copy_oob_from_user(in_pkt, wncc_oob);
  790. if (rc) {
  791. dprintk(CVP_ERR, "%s: OOB buf copying failed", __func__);
  792. goto exit;
  793. }
  794. rc = _wncc_map_metadata_bufs(in_pkt,
  795. wncc_oob->num_layers, wncc_metadata);
  796. if (rc) {
  797. dprintk(CVP_ERR, "%s: failed to map wncc metadata bufs",
  798. __func__);
  799. goto exit;
  800. }
  801. mutex_lock(&inst->cvpwnccbufs.lock);
  802. if (inst->cvpwnccbufs_num == 0 || inst->cvpwnccbufs_table == NULL) {
  803. dprintk(CVP_ERR, "%s: no wncc bufs currently mapped", __func__);
  804. empty = true;
  805. rc = -EINVAL;
  806. }
  807. for (i = 0; !empty && i < wncc_oob->num_layers; i++) {
  808. for (j = 0; j < wncc_oob->layers[i].num_addrs; j++) {
  809. buf_id = wncc_oob->layers[i].addrs[j].buffer_id;
  810. if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET ||
  811. buf_id >= (EVA_KMD_WNCC_SRC_BUF_ID_OFFSET +
  812. EVA_KMD_WNCC_MAX_SRC_BUFS)) {
  813. dprintk(CVP_ERR,
  814. "%s: invalid wncc buf id %u "
  815. "in layer #%u address #%u",
  816. __func__, buf_id, i, j);
  817. rc = -EINVAL;
  818. break;
  819. }
  820. buf_idx = buf_id - EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
  821. if (inst->cvpwnccbufs_table[buf_idx].iova == 0) {
  822. dprintk(CVP_ERR,
  823. "%s: unmapped wncc buf id %u "
  824. "in layer #%u address #%u",
  825. __func__, buf_id, i, j);
  826. /* _wncc_print_cvpwnccbufs_table(inst); */
  827. rc = -EINVAL;
  828. break;
  829. }
  830. buf_offset = wncc_oob->layers[i].addrs[j].offset;
  831. if (buf_offset >=
  832. inst->cvpwnccbufs_table[buf_idx].size) {
  833. /* NOTE: This buffer offset validation is
  834. * not comprehensive since wncc src image
  835. * resolution information is not known to
  836. * KMD. UMD is responsible for comprehensive
  837. * validation.
  838. */
  839. dprintk(CVP_ERR,
  840. "%s: invalid wncc buf offset %u "
  841. "in layer #%u address #%u",
  842. __func__, buf_offset, i, j);
  843. rc = -EINVAL;
  844. break;
  845. }
  846. iova = inst->cvpwnccbufs_table[buf_idx].iova +
  847. buf_offset;
  848. wncc_metadata[i][j].iova_lsb = iova;
  849. wncc_metadata[i][j].iova_msb = iova >> 22;
  850. }
  851. }
  852. mutex_unlock(&inst->cvpwnccbufs.lock);
  853. if (false)
  854. _wncc_print_metadata_buf(wncc_oob->num_layers,
  855. wncc_oob->layers[0].num_addrs, wncc_metadata);
  856. if (_wncc_unmap_metadata_bufs(in_pkt,
  857. wncc_oob->num_layers, wncc_metadata)) {
  858. dprintk(CVP_ERR, "%s: failed to unmap wncc metadata bufs",
  859. __func__);
  860. }
  861. exit:
  862. kfree(wncc_oob);
  863. return rc;
  864. }
  865. int msm_cvp_proc_oob(struct msm_cvp_inst* inst,
  866. struct eva_kmd_hfi_packet* in_pkt)
  867. {
  868. int rc = 0;
  869. struct cvp_hfi_cmd_session_hdr* cmd_hdr =
  870. (struct cvp_hfi_cmd_session_hdr*)in_pkt;
  871. if (!inst || !inst->core || !in_pkt) {
  872. dprintk(CVP_ERR, "%s: invalid params", __func__);
  873. return -EINVAL;
  874. }
  875. switch (cmd_hdr->packet_type) {
  876. case HFI_CMD_SESSION_CVP_WARP_NCC_FRAME:
  877. rc = msm_cvp_proc_oob_wncc(inst, in_pkt);
  878. break;
  879. default:
  880. break;
  881. }
  882. return rc;
  883. }
  884. void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type,
  885. u32 offset, u32 size)
  886. {
  887. enum smem_cache_ops cache_op;
  888. if (msm_cvp_cacheop_disabled)
  889. return;
  890. if (!smem) {
  891. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  892. return;
  893. }
  894. switch (type) {
  895. case EVA_KMD_BUFTYPE_INPUT:
  896. cache_op = SMEM_CACHE_CLEAN;
  897. break;
  898. case EVA_KMD_BUFTYPE_OUTPUT:
  899. cache_op = SMEM_CACHE_INVALIDATE;
  900. break;
  901. default:
  902. cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
  903. }
  904. dprintk(CVP_MEM,
  905. "%s: cache operation enabled for dma_buf: %llx, cache_op: %d, offset: %d, size: %d\n",
  906. __func__, smem->dma_buf, cache_op, offset, size);
  907. msm_cvp_smem_cache_operations(smem->dma_buf, cache_op, offset, size);
  908. }
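/*
 * Search the per-instance dma_cache for an smem entry already mapped
 * for @dma_buf; on a hit the entry's use bit and refcount are bumped
 * and the caller's extra dma-buf reference is dropped.
 */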
  909. static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
  910. struct dma_buf *dma_buf)
  911. {
  912. struct msm_cvp_smem *smem;
  913. int i;
  914. if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
  915. return NULL;
  916. mutex_lock(&inst->dma_cache.lock);
  917. for (i = 0; i < inst->dma_cache.nr; i++)
  918. if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
  919. SET_USE_BITMAP(i, inst);
  920. smem = inst->dma_cache.entries[i];
  921. smem->bitmap_index = i;
  922. atomic_inc(&smem->refcount);
  923. /*
  924. * If we find it, the dma_buf reference was already
  925. * taken when the entry was cached, so drop the extra
  926. * reference here to avoid double counting.
  927. */
  928. msm_cvp_smem_put_dma_buf(smem->dma_buf);
  929. mutex_unlock(&inst->dma_cache.lock);
  930. print_smem(CVP_MEM, "found", inst, smem);
  931. return smem;
  932. }
  933. mutex_unlock(&inst->dma_cache.lock);
  934. return NULL;
  935. }
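/*
 * Insert @smem into the dma_cache, either in a fresh slot or by
 * evicting an entry whose use bit is clear; returns -ENOMEM when all
 * MAX_DMABUF_NUMS slots are busy so the caller can fall back to
 * per-frame mapping.
 */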
  936. static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
  937. struct msm_cvp_smem *smem)
  938. {
  939. unsigned int i;
  940. struct msm_cvp_smem *smem2;
  941. mutex_lock(&inst->dma_cache.lock);
  942. if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
  943. inst->dma_cache.entries[inst->dma_cache.nr] = smem;
  944. SET_USE_BITMAP(inst->dma_cache.nr, inst);
  945. smem->bitmap_index = inst->dma_cache.nr;
  946. inst->dma_cache.nr++;
  947. i = smem->bitmap_index;
  948. } else {
  949. i = find_first_zero_bit(&inst->dma_cache.usage_bitmap,
  950. MAX_DMABUF_NUMS);
  951. if (i < MAX_DMABUF_NUMS) {
  952. smem2 = inst->dma_cache.entries[i];
  953. msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
  954. msm_cvp_smem_put_dma_buf(smem2->dma_buf);
  955. kmem_cache_free(cvp_driver->smem_cache, smem2);
  956. inst->dma_cache.entries[i] = smem;
  957. smem->bitmap_index = i;
  958. SET_USE_BITMAP(i, inst);
  959. } else {
  960. dprintk(CVP_WARN,
  961. "%s: reached limit, fallback to frame mapping list\n"
  962. , __func__);
  963. mutex_unlock(&inst->dma_cache.lock);
  964. return -ENOMEM;
  965. }
  966. }
  967. atomic_inc(&smem->refcount);
  968. mutex_unlock(&inst->dma_cache.lock);
  969. dprintk(CVP_MEM, "Add entry %d into cache\n", i);
  970. return 0;
  971. }
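/*
 * Resolve a cvp_buf_type to an smem mapping: reuse a dma_cache entry if
 * one exists, otherwise map the dma-buf and try to cache it. The offset
 * and size from user space are validated against the mapped size.
 */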
  972. static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
  973. struct cvp_buf_type *buf)
  974. {
  975. int rc = 0, found = 1;
  976. struct msm_cvp_smem *smem = NULL;
  977. struct dma_buf *dma_buf = NULL;
  978. if (buf->fd < 0) {
  979. dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
  980. return NULL;
  981. }
  982. dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
  983. if (!dma_buf) {
  984. dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
  985. return NULL;
  986. }
  987. smem = msm_cvp_session_find_smem(inst, dma_buf);
  988. if (!smem) {
  989. found = 0;
  990. smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
  991. if (!smem)
  992. return NULL;
  993. smem->dma_buf = dma_buf;
  994. smem->bitmap_index = MAX_DMABUF_NUMS;
  995. rc = msm_cvp_map_smem(inst, smem, "map cpu");
  996. if (rc)
  997. goto exit;
  998. if (buf->size > smem->size || buf->size > smem->size - buf->offset) {
  999. dprintk(CVP_ERR, "%s: invalid offset %d or size %d for a new entry\n",
  1000. __func__, buf->offset, buf->size);
  1001. goto exit2;
  1002. }
  1003. rc = msm_cvp_session_add_smem(inst, smem);
  1004. if (rc && rc != -ENOMEM)
  1005. goto exit2;
  1006. }
  1007. if (buf->size > smem->size || buf->size > smem->size - buf->offset) {
  1008. dprintk(CVP_ERR, "%s: invalid offset %d or size %d\n",
  1009. __func__, buf->offset, buf->size);
  1010. if (found) {
  1011. mutex_lock(&inst->dma_cache.lock);
  1012. atomic_dec(&smem->refcount);
  1013. mutex_unlock(&inst->dma_cache.lock);
  1014. return NULL;
  1015. }
  1016. goto exit2;
  1017. }
  1018. return smem;
  1019. exit2:
  1020. msm_cvp_unmap_smem(inst, smem, "unmap cpu");
  1021. exit:
  1022. msm_cvp_smem_put_dma_buf(dma_buf);
  1023. kmem_cache_free(cvp_driver->smem_cache, smem);
  1024. smem = NULL;
  1025. return smem;
  1026. }
  1027. static u32 msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
  1028. struct cvp_buf_type *buf)
  1029. {
  1030. u32 iova = 0;
  1031. struct msm_cvp_smem *smem = NULL;
  1032. struct cvp_internal_buf *pbuf;
  1033. if (!inst) {
  1034. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  1035. return -EINVAL;
  1036. }
  1037. pbuf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
  1038. if (!pbuf)
  1039. return 0;
  1040. smem = msm_cvp_session_get_smem(inst, buf);
  1041. if (!smem)
  1042. goto exit;
  1043. smem->flags |= SMEM_PERSIST;
  1044. pbuf->smem = smem;
  1045. pbuf->fd = buf->fd;
  1046. pbuf->size = buf->size;
  1047. pbuf->offset = buf->offset;
  1048. pbuf->ownership = CLIENT;
  1049. mutex_lock(&inst->persistbufs.lock);
  1050. list_add_tail(&pbuf->list, &inst->persistbufs.list);
  1051. mutex_unlock(&inst->persistbufs.lock);
  1052. print_internal_buffer(CVP_MEM, "map persist", inst, pbuf);
  1053. iova = smem->device_addr + buf->offset;
  1054. return iova;
  1055. exit:
  1056. kmem_cache_free(cvp_driver->buf_cache, pbuf);
  1057. return 0;
  1058. }
  1059. u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
  1060. struct cvp_buf_type *buf,
  1061. struct msm_cvp_frame *frame)
  1062. {
  1063. u32 iova = 0;
  1064. struct msm_cvp_smem *smem = NULL;
  1065. u32 nr;
  1066. u32 type;
  1067. if (!inst || !frame) {
  1068. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  1069. return 0;
  1070. }
  1071. nr = frame->nr;
  1072. if (nr == MAX_FRAME_BUFFER_NUMS) {
  1073. dprintk(CVP_ERR, "%s: max frame buffer reached\n", __func__);
  1074. return 0;
  1075. }
  1076. smem = msm_cvp_session_get_smem(inst, buf);
  1077. if (!smem)
  1078. return 0;
  1079. frame->bufs[nr].fd = buf->fd;
  1080. frame->bufs[nr].smem = smem;
  1081. frame->bufs[nr].size = buf->size;
  1082. frame->bufs[nr].offset = buf->offset;
  1083. print_internal_buffer(CVP_MEM, "map cpu", inst, &frame->bufs[nr]);
  1084. frame->nr++;
  1085. type = EVA_KMD_BUFTYPE_INPUT | EVA_KMD_BUFTYPE_OUTPUT;
  1086. msm_cvp_cache_operations(smem, type, buf->offset, buf->size);
  1087. iova = smem->device_addr + buf->offset;
  1088. return iova;
  1089. }
  1090. static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
  1091. struct msm_cvp_frame *frame)
  1092. {
  1093. u32 i;
  1094. u32 type;
  1095. struct msm_cvp_smem *smem = NULL;
  1096. struct cvp_internal_buf *buf;
  1097. type = EVA_KMD_BUFTYPE_OUTPUT;
  1098. for (i = 0; i < frame->nr; ++i) {
  1099. buf = &frame->bufs[i];
  1100. smem = buf->smem;
  1101. msm_cvp_cache_operations(smem, type, buf->offset, buf->size);
  1102. if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
  1103. /* smem not in dmamap cache */
  1104. msm_cvp_unmap_smem(inst, smem, "unmap cpu");
  1105. dma_heap_buffer_free(smem->dma_buf);
  1106. kmem_cache_free(cvp_driver->smem_cache, smem);
  1107. buf->smem = NULL;
  1108. } else {
  1109. mutex_lock(&inst->dma_cache.lock);
  1110. if (atomic_dec_and_test(&smem->refcount)) {
  1111. CLEAR_USE_BITMAP(smem->bitmap_index, inst);
  1112. print_smem(CVP_MEM, "Map dereference",
  1113. inst, smem);
  1114. }
  1115. mutex_unlock(&inst->dma_cache.lock);
  1116. }
  1117. }
  1118. kmem_cache_free(cvp_driver->frame_cache, frame);
  1119. }
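/*
 * Tear down all buffer mappings of the frame identified by @ktid (the
 * fence bits are masked off first), if it is still on inst->frames.
 */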
  1120. void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
  1121. {
  1122. struct msm_cvp_frame *frame, *dummy1;
  1123. bool found;
  1124. if (!inst) {
  1125. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  1126. return;
  1127. }
  1128. ktid &= (FENCE_BIT - 1);
  1129. dprintk(CVP_MEM, "%s: (%#x) unmap frame %llu\n",
  1130. __func__, hash32_ptr(inst->session), ktid);
  1131. found = false;
  1132. mutex_lock(&inst->frames.lock);
  1133. list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
  1134. if (frame->ktid == ktid) {
  1135. found = true;
  1136. list_del(&frame->list);
  1137. break;
  1138. }
  1139. }
  1140. mutex_unlock(&inst->frames.lock);
  1141. if (found)
  1142. msm_cvp_unmap_frame_buf(inst, frame);
  1143. else
  1144. dprintk(CVP_WARN, "%s frame %llu not found!\n", __func__, ktid);
  1145. }
  1146. int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
  1147. struct eva_kmd_hfi_packet *in_pkt,
  1148. unsigned int offset, unsigned int buf_num)
  1149. {
  1150. struct cvp_hfi_cmd_session_hdr *cmd_hdr;
  1151. struct cvp_internal_buf *pbuf, *dummy;
  1152. u64 ktid;
  1153. int rc = 0;
  1154. struct msm_cvp_smem *smem = NULL;
  1155. if (!offset || !buf_num)
  1156. return rc;
  1157. cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
  1158. ktid = cmd_hdr->client_data.kdata & (FENCE_BIT - 1);
  1159. mutex_lock(&inst->persistbufs.lock);
  1160. list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list, list) {
  1161. if (pbuf->ktid == ktid && pbuf->ownership == CLIENT) {
  1162. list_del(&pbuf->list);
  1163. smem = pbuf->smem;
  1164. dprintk(CVP_MEM, "unmap persist: %x %d %d %#x",
  1165. hash32_ptr(inst->session), pbuf->fd,
  1166. pbuf->size, smem->device_addr);
  1167. if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
  1168. /* smem not in dmamap cache */
  1169. msm_cvp_unmap_smem(inst, smem,
  1170. "unmap cpu");
  1171. dma_heap_buffer_free(smem->dma_buf);
  1172. kmem_cache_free(
  1173. cvp_driver->smem_cache,
  1174. smem);
  1175. pbuf->smem = NULL;
  1176. } else {
  1177. mutex_lock(&inst->dma_cache.lock);
  1178. if (atomic_dec_and_test(&smem->refcount))
  1179. CLEAR_USE_BITMAP(
  1180. smem->bitmap_index,
  1181. inst);
  1182. mutex_unlock(&inst->dma_cache.lock);
  1183. }
  1184. kmem_cache_free(cvp_driver->buf_cache, pbuf);
  1185. }
  1186. }
  1187. mutex_unlock(&inst->persistbufs.lock);
  1188. return rc;
  1189. }
  1190. int msm_cvp_mark_user_persist(struct msm_cvp_inst *inst,
  1191. struct eva_kmd_hfi_packet *in_pkt,
  1192. unsigned int offset, unsigned int buf_num)
  1193. {
  1194. struct cvp_hfi_cmd_session_hdr *cmd_hdr;
  1195. struct cvp_internal_buf *pbuf, *dummy;
  1196. u64 ktid;
  1197. struct cvp_buf_type *buf;
  1198. int i, rc = 0;
  1199. if (!offset || !buf_num)
  1200. return 0;
  1201. cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
  1202. ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
  1203. ktid &= (FENCE_BIT - 1);
  1204. cmd_hdr->client_data.kdata = ktid;
  1205. for (i = 0; i < buf_num; i++) {
  1206. buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
  1207. offset += sizeof(*buf) >> 2;
  1208. if (buf->fd < 0 || !buf->size)
  1209. continue;
  1210. mutex_lock(&inst->persistbufs.lock);
  1211. list_for_each_entry_safe(pbuf, dummy, &inst->persistbufs.list,
  1212. list) {
  1213. if (pbuf->ownership == CLIENT &&
  1214. pbuf->fd == buf->fd &&
  1215. pbuf->size == buf->size) {
  1216. buf->fd = pbuf->smem->device_addr;
  1217. rc = 1;
  1218. break;
  1219. }
  1220. }
  1221. mutex_unlock(&inst->persistbufs.lock);
  1222. if (!rc) {
  1223. dprintk(CVP_ERR, "%s No persist buf %d found\n",
  1224. __func__, buf->fd);
  1225. rc = -EFAULT;
  1226. break;
  1227. }
  1228. pbuf->ktid = ktid;
  1229. rc = 0;
  1230. }
  1231. return rc;
  1232. }
  1233. int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
  1234. struct eva_kmd_hfi_packet *in_pkt,
  1235. unsigned int offset, unsigned int buf_num)
  1236. {
  1237. struct cvp_buf_type *buf;
  1238. int i;
  1239. u32 iova;
  1240. if (!offset || !buf_num)
  1241. return 0;
  1242. for (i = 0; i < buf_num; i++) {
  1243. buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
  1244. offset += sizeof(*buf) >> 2;
  1245. if (buf->fd < 0 || !buf->size)
  1246. continue;
  1247. iova = msm_cvp_map_user_persist_buf(inst, buf);
  1248. if (!iova) {
  1249. dprintk(CVP_ERR,
  1250. "%s: buf %d register failed.\n",
  1251. __func__, i);
  1252. return -EINVAL;
  1253. }
  1254. buf->fd = iova;
  1255. }
  1256. return 0;
  1257. }
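/*
 * Map every buffer referenced by an HFI frame packet: allocate a
 * msm_cvp_frame tagged with a fresh kernel transaction id (also written
 * into the packet header), replace each fd in the packet with its IOVA
 * and queue the frame on inst->frames for later unmapping.
 */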
  1258. int msm_cvp_map_frame(struct msm_cvp_inst *inst,
  1259. struct eva_kmd_hfi_packet *in_pkt,
  1260. unsigned int offset, unsigned int buf_num)
  1261. {
  1262. struct cvp_buf_type *buf;
  1263. int i;
  1264. u32 iova;
  1265. u64 ktid;
  1266. struct msm_cvp_frame *frame;
  1267. struct cvp_hfi_cmd_session_hdr *cmd_hdr;
  1268. if (!offset || !buf_num)
  1269. return 0;
  1270. cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
  1271. ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
  1272. ktid &= (FENCE_BIT - 1);
  1273. cmd_hdr->client_data.kdata = ktid;
  1274. frame = kmem_cache_zalloc(cvp_driver->frame_cache, GFP_KERNEL);
  1275. if (!frame)
  1276. return -ENOMEM;
  1277. frame->ktid = ktid;
  1278. frame->nr = 0;
  1279. frame->pkt_type = cmd_hdr->packet_type;
  1280. for (i = 0; i < buf_num; i++) {
  1281. buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
  1282. offset += sizeof(*buf) >> 2;
  1283. if (buf->fd < 0 || !buf->size)
  1284. continue;
  1285. iova = msm_cvp_map_frame_buf(inst, buf, frame);
  1286. if (!iova) {
  1287. dprintk(CVP_ERR,
  1288. "%s: buf %d register failed.\n",
  1289. __func__, i);
  1290. msm_cvp_unmap_frame_buf(inst, frame);
  1291. return -EINVAL;
  1292. }
  1293. buf->fd = iova;
  1294. }
  1295. mutex_lock(&inst->frames.lock);
  1296. list_add_tail(&frame->list, &inst->frames.list);
  1297. mutex_unlock(&inst->frames.lock);
  1298. dprintk(CVP_MEM, "%s: map frame %llu\n", __func__, ktid);
  1299. return 0;
  1300. }
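/*
 * Session teardown: release all frame mappings, drain the dma_cache,
 * deregister/unmap DSP buffers and unmap any remaining WNCC buffers.
 */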
  1301. int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
  1302. {
  1303. int rc = 0, i;
  1304. struct cvp_internal_buf *cbuf, *dummy;
  1305. struct msm_cvp_frame *frame, *dummy1;
  1306. struct msm_cvp_smem *smem;
  1307. struct cvp_hal_session *session;
  1308. struct eva_kmd_buffer buf;
  1309. session = (struct cvp_hal_session *)inst->session;
  1310. mutex_lock(&inst->frames.lock);
  1311. list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
  1312. list_del(&frame->list);
  1313. msm_cvp_unmap_frame_buf(inst, frame);
  1314. }
  1315. mutex_unlock(&inst->frames.lock);
  1316. mutex_lock(&inst->dma_cache.lock);
  1317. for (i = 0; i < inst->dma_cache.nr; i++) {
  1318. smem = inst->dma_cache.entries[i];
  1319. if (atomic_read(&smem->refcount) == 0) {
  1320. print_smem(CVP_MEM, "free", inst, smem);
  1321. } else if (!(smem->flags & SMEM_PERSIST)) {
  1322. print_smem(CVP_WARN, "in use", inst, smem);
  1323. }
  1324. msm_cvp_unmap_smem(inst, smem, "unmap cpu");
  1325. msm_cvp_smem_put_dma_buf(smem->dma_buf);
  1326. kmem_cache_free(cvp_driver->smem_cache, smem);
  1327. inst->dma_cache.entries[i] = NULL;
  1328. }
  1329. mutex_unlock(&inst->dma_cache.lock);
  1330. mutex_lock(&inst->cvpdspbufs.lock);
  1331. list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list, list) {
  1332. print_internal_buffer(CVP_MEM, "remove dspbufs", inst, cbuf);
  1333. if (cbuf->ownership == CLIENT) {
  1334. rc = cvp_dsp_deregister_buffer(hash32_ptr(session),
  1335. cbuf->fd, cbuf->smem->dma_buf->size, cbuf->size,
  1336. cbuf->offset, cbuf->index,
  1337. (uint32_t)cbuf->smem->device_addr);
  1338. if (rc)
  1339. dprintk(CVP_ERR,
  1340. "%s: failed dsp deregistration fd=%d rc=%d",
  1341. __func__, cbuf->fd, rc);
  1342. msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
  1343. msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
  1344. } else if (cbuf->ownership == DSP) {
  1345. rc = cvp_dsp_fastrpc_unmap(inst->process_id, cbuf);
  1346. if (rc)
  1347. dprintk(CVP_ERR,
  1348. "%s: failed to unmap buf from DSP\n",
  1349. __func__);
  1350. rc = cvp_release_dsp_buffers(inst, cbuf);
  1351. if (rc)
  1352. dprintk(CVP_ERR,
  1353. "%s Fail to free buffer 0x%x\n",
  1354. __func__, rc);
  1355. }
  1356. list_del(&cbuf->list);
  1357. kmem_cache_free(cvp_driver->buf_cache, cbuf);
  1358. }
  1359. mutex_unlock(&inst->cvpdspbufs.lock);
  1360. mutex_lock(&inst->cvpwnccbufs.lock);
  1361. if (inst->cvpwnccbufs_num != 0)
  1362. dprintk(CVP_WARN, "%s: cvpwnccbufs not empty, contains %d bufs",
  1363. __func__, inst->cvpwnccbufs_num);
  1364. list_for_each_entry_safe(cbuf, dummy, &inst->cvpwnccbufs.list, list) {
  1365. print_internal_buffer(CVP_MEM, "remove wnccbufs", inst, cbuf);
  1366. buf.fd = cbuf->fd;
  1367. buf.reserved[0] = cbuf->ktid;
  1368. mutex_unlock(&inst->cvpwnccbufs.lock);
  1369. msm_cvp_unmap_buf_wncc(inst, &buf);
  1370. mutex_lock(&inst->cvpwnccbufs.lock);
  1371. }
  1372. mutex_unlock(&inst->cvpwnccbufs.lock);
  1373. return rc;
  1374. }
  1375. void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log)
  1376. {
  1377. struct cvp_internal_buf *buf;
  1378. struct msm_cvp_core *core;
  1379. struct inst_snapshot *snap = NULL;
  1380. int i;
  1381. if (!inst) {
  1382. dprintk(CVP_ERR, "%s - invalid param %pK\n",
  1383. __func__, inst);
  1384. return;
  1385. }
  1386. core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
  1387. if (log && core->log.snapshot_index < 16) {
  1388. snap = &core->log.snapshot[core->log.snapshot_index];
  1389. snap->session = inst->session;
  1390. core->log.snapshot_index++;
  1391. }
  1392. dprintk(CVP_ERR,
  1393. "---Buffer details for inst: %pK of type: %d---\n",
  1394. inst, inst->session_type);
  1395. mutex_lock(&inst->dma_cache.lock);
  1396. dprintk(CVP_ERR, "dma cache: %d\n", inst->dma_cache.nr);
  1397. if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
  1398. for (i = 0; i < inst->dma_cache.nr; i++)
  1399. _log_smem(snap, inst, inst->dma_cache.entries[i], log);
  1400. mutex_unlock(&inst->dma_cache.lock);
  1401. mutex_lock(&inst->cvpdspbufs.lock);
  1402. dprintk(CVP_ERR, "dsp buffer list:\n");
  1403. list_for_each_entry(buf, &inst->cvpdspbufs.list, list)
  1404. _log_buf(snap, SMEM_ADSP, inst, buf, log);
  1405. mutex_unlock(&inst->cvpdspbufs.lock);
  1406. mutex_lock(&inst->cvpwnccbufs.lock);
  1407. dprintk(CVP_ERR, "wncc buffer list:\n");
  1408. list_for_each_entry(buf, &inst->cvpwnccbufs.list, list)
  1409. print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
  1410. mutex_unlock(&inst->cvpwnccbufs.lock);
  1411. mutex_lock(&inst->persistbufs.lock);
  1412. dprintk(CVP_ERR, "persist buffer list:\n");
  1413. list_for_each_entry(buf, &inst->persistbufs.list, list)
  1414. _log_buf(snap, SMEM_PERSIST, inst, buf, log);
  1415. mutex_unlock(&inst->persistbufs.lock);
  1416. }
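/*
 * Allocate a driver-owned ARP (internal persist) buffer of @buffer_size
 * bytes, mapped secure/non-pixel, and queue it on inst->persistbufs.
 */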
struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
					u32 buffer_size)
{
	struct cvp_internal_buf *buf;
	struct msm_cvp_list *buf_list;
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return NULL;
	}

	buf_list = &inst->persistbufs;

	if (!buffer_size)
		return NULL;

	/* PERSIST buffer requires secure mapping
	 * Disable and wait for hyp_assign available
	 */
	smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;

	buf = kmem_cache_zalloc(cvp_driver->buf_cache, GFP_KERNEL);
	if (!buf) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc;
	}

	buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto err_no_mem;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
		goto err_no_mem;
	}

	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DRIVER;

	mutex_lock(&buf_list->lock);
	list_add_tail(&buf->list, &buf_list->list);
	mutex_unlock(&buf_list->lock);

	return buf;

err_no_mem:
	/* Free the partially built buffer instead of leaking it. */
	if (buf->smem)
		kmem_cache_free(cvp_driver->smem_cache, buf->smem);
	kmem_cache_free(cvp_driver->buf_cache, buf);
fail_kzalloc:
	return NULL;
}
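
/*
 * Release all persist (ARP) buffers of an instance. If the session has not
 * gone past MSM_CVP_CLOSE_DONE, firmware is first asked to release its
 * buffers (for this FW, one release request means "release all"); every
 * driver-owned entry on inst->persistbufs is then freed.
 */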
int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
{
	struct msm_cvp_smem *smem;
	struct list_head *ptr, *next;
	struct cvp_internal_buf *buf;
	int rc = 0;
	struct msm_cvp_core *core;
	struct cvp_hfi_device *hdev;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	core = inst->core;
	if (!core) {
		dprintk(CVP_ERR, "Invalid core pointer = %pK\n", core);
		return -EINVAL;
	}

	hdev = core->device;
	if (!hdev) {
		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", hdev);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "release persist buffer!\n");

	mutex_lock(&inst->persistbufs.lock);
	/* Workaround for FW: release buffer means release all */
	if (inst->state <= MSM_CVP_CLOSE_DONE) {
		rc = call_hfi_op(hdev, session_release_buffers,
				(void *)inst->session);
		if (!rc) {
			mutex_unlock(&inst->persistbufs.lock);
			rc = wait_for_sess_signal_receipt(inst,
				HAL_SESSION_RELEASE_BUFFER_DONE);
			if (rc)
				dprintk(CVP_WARN,
					"%s: wait for signal failed, rc %d\n",
					__func__, rc);
			mutex_lock(&inst->persistbufs.lock);
		} else {
			dprintk(CVP_WARN, "Fail to send Rel prst buf\n");
		}
	}

	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
		buf = list_entry(ptr, struct cvp_internal_buf, list);
		smem = buf->smem;
		if (!smem) {
			dprintk(CVP_ERR, "%s invalid smem\n", __func__);
			mutex_unlock(&inst->persistbufs.lock);
			return -EINVAL;
		}

		list_del(&buf->list);

		if (buf->ownership == DRIVER) {
			dprintk(CVP_MEM,
				"%s: %x : fd %d %s size %d",
				"free arp", hash32_ptr(inst->session), buf->fd,
				smem->dma_buf->name, buf->size);
			msm_cvp_smem_free(smem);
			kmem_cache_free(cvp_driver->smem_cache, smem);
		}
		buf->smem = NULL;
		kmem_cache_free(cvp_driver->buf_cache, buf);
	}
	mutex_unlock(&inst->persistbufs.lock);

	return rc;
}
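
/*
 * Allocate the smem backing a DSP-owned internal buffer into @buf.
 * @secure_type selects the mapping: 0 = non-secure, 1 = secure pixel,
 * 2 = secure non-pixel.
 */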
int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
			struct cvp_internal_buf *buf,
			u32 buffer_size,
			u32 secure_type)
{
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return -EINVAL;
	}

	if (!buf)
		return -EINVAL;

	if (!buffer_size)
		return -EINVAL;

	switch (secure_type) {
	case 0:
		break;
	case 1:
		smem_flags |= SMEM_SECURE | SMEM_PIXEL;
		break;
	case 2:
		smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
		break;
	default:
		dprintk(CVP_ERR, "%s Invalid secure_type %d\n",
			__func__, secure_type);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "%s smem_flags 0x%x\n", __func__, smem_flags);

	buf->smem = kmem_cache_zalloc(cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		/* Return an error instead of 0 when the allocation fails. */
		rc = -ENOMEM;
		goto fail_kzalloc_smem_cache;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate DSP buffer memory\n");
		goto err_no_mem;
	}

	dprintk(CVP_MEM, "%s dma_buf %pK\n", __func__, buf->smem->dma_buf);
	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DSP;

	return rc;

err_no_mem:
	kmem_cache_free(cvp_driver->smem_cache, buf->smem);
	buf->smem = NULL;
fail_kzalloc_smem_cache:
	return rc;
}
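
/*
 * Free the smem backing a DSP-owned internal buffer. Buffers with any other
 * ownership are only reported as an error, not freed.
 */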
int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
			struct cvp_internal_buf *buf)
{
	struct msm_cvp_smem *smem;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	if (!buf) {
		dprintk(CVP_ERR, "Invalid buffer pointer = %pK\n", buf);
		return -EINVAL;
	}

	smem = buf->smem;
	if (!smem) {
		dprintk(CVP_ERR, "%s invalid smem\n", __func__);
		return -EINVAL;
	}

	if (buf->ownership == DSP) {
		dprintk(CVP_MEM,
			"%s: %x : fd %x %s size %d",
			__func__, hash32_ptr(inst->session), buf->fd,
			smem->dma_buf->name, buf->size);
		msm_cvp_smem_free(smem);
		kmem_cache_free(cvp_driver->smem_cache, smem);
	} else {
		dprintk(CVP_ERR,
			"%s: wrong owner %d %x : fd %x %s size %d",
			__func__, buf->ownership, hash32_ptr(inst->session),
			buf->fd, smem->dma_buf->name, buf->size);
	}

	return rc;
}
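
/*
 * Register a client buffer with the session: a non-zero buf->index maps the
 * buffer for the DSP, index 0 maps it through the WNCC path. The fd and
 * buf->reserved[0] (treated as the mapped iova) are logged after mapping.
 */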
int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct cvp_hfi_device *hdev;
	struct cvp_hal_session *session;
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	hdev = inst->core->device;
	print_client_buffer(CVP_HFI, "register", inst, buf);

	if (buf->index)
		rc = msm_cvp_map_buf_dsp(inst, buf);
	else
		rc = msm_cvp_map_buf_wncc(inst, buf);
	dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
			buf->fd, buf->reserved[0]);
exit:
	cvp_put_inst(s);
	return rc;
}
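
/*
 * Counterpart of msm_cvp_register_buffer(): unmap a client buffer from the
 * DSP (buf->index != 0) or WNCC (buf->index == 0) path.
 */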
int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	print_client_buffer(CVP_HFI, "unregister", inst, buf);

	if (buf->index)
		rc = msm_cvp_unmap_buf_dsp(inst, buf);
	else
		rc = msm_cvp_unmap_buf_wncc(inst, buf);

	cvp_put_inst(s);
	return rc;
}