msm_cvp_buf.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/pid.h>
#include <linux/fdtable.h>
#include <linux/rcupdate.h>
#include <linux/fs.h>
#include <linux/dma-buf.h>
#include <linux/sched/task.h>
#include <linux/version.h>
#include "msm_cvp_common.h"
#include "cvp_hfi_api.h"
#include "msm_cvp_debug.h"
#include "msm_cvp_core.h"
#include "msm_cvp_dsp.h"
#include "eva_shared_def.h"

#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 16, 0))
#define eva_buf_map dma_buf_map
#define _buf_map_set_vaddr dma_buf_map_set_vaddr
#else
#define eva_buf_map iosys_map
#define _buf_map_set_vaddr iosys_map_set_vaddr
#endif

#define CLEAR_USE_BITMAP(idx, inst) \
	do { \
		clear_bit(idx, &inst->dma_cache.usage_bitmap); \
		dprintk(CVP_MEM, "clear %x bit %d dma_cache bitmap 0x%llx\n", \
			hash32_ptr(inst->session), smem->bitmap_index, \
			inst->dma_cache.usage_bitmap); \
	} while (0)

#define SET_USE_BITMAP(idx, inst) \
	do { \
		set_bit(idx, &inst->dma_cache.usage_bitmap); \
		dprintk(CVP_MEM, "Set %x bit %d dma_cache bitmap 0x%llx\n", \
			hash32_ptr(inst->session), idx, \
			inst->dma_cache.usage_bitmap); \
	} while (0)

struct cvp_oob_pool wncc_buf_pool;

static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst *inst);
static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet *in_pkt,
	struct eva_kmd_oob_wncc *wncc_oob,
	struct eva_kmd_wncc_metadata **wncc_metadata);

void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log);
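
/*
 * print_smem() - log one shared-memory mapping for debugging.
 * Emits an "UNUSED mapping" line when the refcount has dropped to zero,
 * otherwise dumps size, flags, iova, bitmap index, packet type and fd.
 */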
int print_smem(u32 tag, const char *str, struct msm_cvp_inst *inst,
		struct msm_cvp_smem *smem)
{
	int i;
	char name[PKT_NAME_LEN] = "Unknown";

	if (!(tag & msm_cvp_debug))
		return 0;

	if (!inst || !smem) {
		dprintk(CVP_ERR, "Invalid inst 0x%llx or smem 0x%llx\n",
				inst, smem);
		return -EINVAL;
	}

	if (smem->dma_buf) {
		i = get_pkt_index_from_type(smem->pkt_type);
		if (i > 0)
			strlcpy(name, cvp_hfi_defs[i].name, PKT_NAME_LEN);

		if (!atomic_read(&smem->refcount))
			dprintk(tag,
				" UNUSED mapping %s: 0x%llx size %d iova %#x idx %d pkt_type %s buf_idx %#x fd %d",
				str, smem->dma_buf,
				smem->size, smem->device_addr, smem->bitmap_index, name, smem->buf_idx, smem->fd);
		else
			dprintk(tag,
				"%s: %x : 0x%llx size %d flags %#x iova %#x idx %d ref %d pkt_type %s buf_idx %#x fd %d",
				str, hash32_ptr(inst->session), smem->dma_buf,
				smem->size, smem->flags, smem->device_addr,
				smem->bitmap_index, atomic_read(&smem->refcount),
				name, smem->buf_idx, smem->fd);
	}
	return 0;
}

static void print_internal_buffer(u32 tag, const char *str,
		struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf)
{
	if (!(tag & msm_cvp_debug) || !inst || !cbuf)
		return;

	if (cbuf->smem->dma_buf) {
		dprintk(tag,
			"%s: %x : fd %d off %d 0x%llx %s size %d iova %#x",
			str, hash32_ptr(inst->session), cbuf->fd,
			cbuf->offset, cbuf->smem->dma_buf, cbuf->smem->dma_buf->name,
			cbuf->size, cbuf->smem->device_addr);
	} else {
		dprintk(tag,
			"%s: %x : idx %2d fd %d off %d size %d iova %#x",
			str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
			cbuf->offset, cbuf->size, cbuf->smem->device_addr);
	}
}

void print_cvp_buffer(u32 tag, const char *str, struct msm_cvp_inst *inst,
		struct cvp_internal_buf *cbuf)
{
	if (!inst || !cbuf) {
		dprintk(CVP_ERR,
			"%s Invalid params inst %pK, cbuf %pK\n",
			str, inst, cbuf);
		return;
	}

	print_smem(tag, str, inst, cbuf->smem);
}

static void _log_smem(struct inst_snapshot *snapshot, struct msm_cvp_inst *inst,
		struct msm_cvp_smem *smem, bool logging)
{
	if (print_smem(CVP_ERR, "bufdump", inst, smem))
		return;
	if (!logging || !snapshot)
		return;
	if (snapshot->smem_index < MAX_ENTRIES) {
		struct smem_data *s;

		s = &snapshot->smem_log[snapshot->smem_index];
		snapshot->smem_index++;
		s->size = smem->size;
		s->flags = smem->flags;
		s->device_addr = smem->device_addr;
		s->bitmap_index = smem->bitmap_index;
		s->refcount = atomic_read(&smem->refcount);
		s->pkt_type = smem->pkt_type;
		s->buf_idx = smem->buf_idx;
	}
}

static void _log_buf(struct inst_snapshot *snapshot, enum smem_prop prop,
		struct msm_cvp_inst *inst, struct cvp_internal_buf *cbuf,
		bool logging)
{
	struct cvp_buf_data *buf = NULL;
	u32 index;

	print_cvp_buffer(CVP_ERR, "bufdump", inst, cbuf);
	if (!logging)
		return;
	if (snapshot) {
		if (prop == SMEM_CDSP && snapshot->dsp_index < MAX_ENTRIES) {
			index = snapshot->dsp_index;
			buf = &snapshot->dsp_buf_log[index];
			snapshot->dsp_index++;
		} else if (prop == SMEM_PERSIST &&
				snapshot->persist_index < MAX_ENTRIES) {
			index = snapshot->persist_index;
			buf = &snapshot->persist_buf_log[index];
			snapshot->persist_index++;
		}
		if (buf) {
			buf->device_addr = cbuf->smem->device_addr;
			buf->size = cbuf->size;
		}
	}
}

void print_client_buffer(u32 tag, const char *str,
		struct msm_cvp_inst *inst, struct eva_kmd_buffer *cbuf)
{
	if (!(tag & msm_cvp_debug) || !str || !inst || !cbuf)
		return;

	dprintk(tag,
		"%s: %x : idx %2d fd %d off %d size %d type %d flags 0x%x reserved[0] %u\n",
		str, hash32_ptr(inst->session), cbuf->index, cbuf->fd,
		cbuf->offset, cbuf->size, cbuf->type, cbuf->flags,
		cbuf->reserved[0]);
}
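
/*
 * __is_buf_valid() - sanity-check a client buffer before the DSP map path.
 * Rejects negative fds, non-zero offsets (deprecated) and fds already
 * registered on the cvpdspbufs list with a different size.
 */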
static bool __is_buf_valid(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct cvp_hal_session *session;
	struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
	bool found = false;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return false;
	}

	if (buf->fd < 0) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return false;
	}

	if (buf->offset) {
		dprintk(CVP_ERR,
			"%s: offset is deprecated, set to 0.\n",
			__func__);
		return false;
	}

	session = (struct cvp_hal_session *)inst->session;

	mutex_lock(&inst->cvpdspbufs.lock);
	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			if (cbuf->size != buf->size) {
				dprintk(CVP_ERR, "%s: buf size mismatch\n",
					__func__);
				mutex_unlock(&inst->cvpdspbufs.lock);
				return false;
			}
			found = true;
			break;
		}
	}
	mutex_unlock(&inst->cvpdspbufs.lock);
	if (found) {
		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
		return false;
	}

	return true;
}
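
/*
 * msm_cvp_fget() - fd lookup against another task's file table.
 * Mirrors fget() but resolves @fd in @task->files instead of current's,
 * under RCU, retrying when the file reference cannot be taken because
 * the entry is being replaced by a concurrent dup2().
 */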
static struct file *msm_cvp_fget(unsigned int fd, struct task_struct *task,
		fmode_t mask, unsigned int refs)
{
	struct files_struct *files = task->files;
	struct file *file;

	if (!files)
		return NULL;

	rcu_read_lock();
loop:
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0))
	file = fcheck_files(files, fd);
#else
	file = files_lookup_fd_rcu(files, fd);
#endif
	if (file) {
		/*
		 * File object ref couldn't be taken.
		 * dup2() atomicity guarantee is the reason
		 * we loop to catch the new file (or NULL pointer).
		 */
		if (file->f_mode & mask)
			file = NULL;
		else if (!get_file_rcu(file))
			goto loop;
	}
	rcu_read_unlock();

	return file;
}

static struct dma_buf *cvp_dma_buf_get(struct file *file, int fd,
		struct task_struct *task)
{
	if (file->f_op != gfa_cv.dmabuf_f_op) {
		dprintk(CVP_WARN, "fd doesn't refer to dma_buf\n");
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
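
/*
 * msm_cvp_map_buf_dsp() - register and map a DSP client buffer.
 * Resolves the client fd in the owning task's fd table, validates the
 * backing dma-buf, maps it into the CVP SMMU and queues the resulting
 * cvp_internal_buf on inst->cvpdspbufs. The iova is returned to the
 * client through buf->reserved[0].
 */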
int msm_cvp_map_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
{
	int rc = 0;
	struct cvp_internal_buf *cbuf = NULL;
	struct msm_cvp_smem *smem = NULL;
	struct dma_buf *dma_buf = NULL;
	struct file *file;

	if (!__is_buf_valid(inst, buf))
		return -EINVAL;

	if (!inst->task)
		return -EINVAL;

	file = msm_cvp_fget(buf->fd, inst->task, FMODE_PATH, 1);
	if (file == NULL) {
		dprintk(CVP_WARN, "%s fail to get file from fd %d %s\n",
			__func__, buf->fd, inst->proc_name);
		return -EINVAL;
	}

	dma_buf = cvp_dma_buf_get(file, buf->fd, inst->task);
	if (dma_buf == ERR_PTR(-EINVAL)) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		rc = -EINVAL;
		goto exit;
	}

	if (dma_buf->size < buf->size) {
		dprintk(CVP_ERR, "%s DSP client buffer too large %d > %d\n",
			__func__, buf->size, dma_buf->size);
		rc = -EINVAL;
		goto exit;
	}

	dprintk(CVP_MEM, "dma_buf from internal %llx\n", dma_buf);

	cbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
	if (!cbuf) {
		rc = -ENOMEM;
		goto exit;
	}

	smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
	if (!smem) {
		rc = -ENOMEM;
		goto exit;
	}

	smem->dma_buf = dma_buf;
	smem->bitmap_index = MAX_DMABUF_NUMS;
	smem->pkt_type = 0;
	smem->buf_idx = 0;
	smem->fd = buf->fd;
	dprintk(CVP_MEM, "%s: dma_buf = %llx\n", __func__, dma_buf);
	rc = msm_cvp_map_smem(inst, smem, "map dsp");
	if (rc) {
		print_client_buffer(CVP_ERR, "map failed", inst, buf);
		goto exit;
	}

	atomic_inc(&smem->refcount);
	cbuf->smem = smem;
	cbuf->fd = buf->fd;
	cbuf->size = buf->size;
	cbuf->offset = buf->offset;
	cbuf->ownership = CLIENT;
	cbuf->index = buf->index;

	buf->reserved[0] = (uint32_t)smem->device_addr;

	mutex_lock(&inst->cvpdspbufs.lock);
	list_add_tail(&cbuf->list, &inst->cvpdspbufs.list);
	mutex_unlock(&inst->cvpdspbufs.lock);

	return rc;

exit:
	fput(file);
	if (smem) {
		if (smem->device_addr)
			msm_cvp_unmap_smem(inst, smem, "unmap dsp");
		msm_cvp_smem_put_dma_buf(smem->dma_buf);
		cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
	}
	if (cbuf)
		cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);

	return rc;
}
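
/*
 * msm_cvp_unmap_buf_dsp() - tear down a DSP client mapping by fd.
 * The smem is snapshotted into the unused_dsp_bufs ring (for post-mortem
 * debug) before it is unmapped, its dma-buf reference dropped and the
 * bookkeeping structs returned to their kmem caches.
 */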
int msm_cvp_unmap_buf_dsp(struct msm_cvp_inst *inst, struct eva_kmd_buffer *buf)
{
	int rc = 0;
	bool found;
	struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
	struct cvp_hal_session *session;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&inst->cvpdspbufs.lock);
	found = false;
	list_for_each_entry(cbuf, &inst->cvpdspbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			found = true;
			break;
		}
	}
	if (!found) {
		mutex_unlock(&inst->cvpdspbufs.lock);
		print_client_buffer(CVP_ERR, "invalid", inst, buf);
		return -EINVAL;
	}

	if (cbuf->smem->device_addr) {
		u64 idx = inst->unused_dsp_bufs.ktid;

		inst->unused_dsp_bufs.smem[idx] = *(cbuf->smem);
		inst->unused_dsp_bufs.nr++;
		inst->unused_dsp_bufs.nr =
			(inst->unused_dsp_bufs.nr > MAX_FRAME_BUFFER_NUMS) ?
			MAX_FRAME_BUFFER_NUMS : inst->unused_dsp_bufs.nr;
		inst->unused_dsp_bufs.ktid = ++idx % MAX_FRAME_BUFFER_NUMS;

		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
		atomic_dec(&cbuf->smem->refcount);
	}
	list_del(&cbuf->list);
	mutex_unlock(&inst->cvpdspbufs.lock);

	cvp_kmem_cache_free(&cvp_driver->smem_cache, cbuf->smem);
	cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
	return rc;
}
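
/*
 * msm_cvp_map_buf_wncc() - register a WNCC source buffer.
 * Besides mapping the dma-buf, the iova is published in the per-instance
 * cvpwnccbufs_table; the slot index plus EVA_KMD_WNCC_SRC_BUF_ID_OFFSET
 * becomes the buffer id handed back to the client in buf->reserved[0]
 * and later referenced by WNCC out-of-band data.
 */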
int msm_cvp_map_buf_wncc(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	int rc = 0, i;
	bool found = false;
	struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
	struct msm_cvp_smem *smem = NULL;
	struct dma_buf *dma_buf = NULL;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return -EINVAL;
	}

	if (!inst->session) {
		dprintk(CVP_ERR, "%s: invalid session", __func__);
		return -EINVAL;
	}

	if (buf->index) {
		dprintk(CVP_ERR, "%s: buf index is NOT 0 fd=%d",
			__func__, buf->fd);
		return -EINVAL;
	}

	if (buf->fd < 0) {
		dprintk(CVP_ERR, "%s: invalid fd = %d", __func__, buf->fd);
		return -EINVAL;
	}

	if (buf->offset) {
		dprintk(CVP_ERR, "%s: offset is not supported, set to 0.",
			__func__);
		return -EINVAL;
	}

	mutex_lock(&inst->cvpwnccbufs.lock);
	list_for_each_entry(cbuf, &inst->cvpwnccbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			if (cbuf->size != buf->size) {
				dprintk(CVP_ERR, "%s: buf size mismatch",
					__func__);
				mutex_unlock(&inst->cvpwnccbufs.lock);
				return -EINVAL;
			}
			found = true;
			break;
		}
	}
	mutex_unlock(&inst->cvpwnccbufs.lock);
	if (found) {
		print_internal_buffer(CVP_ERR, "duplicate", inst, cbuf);
		return -EINVAL;
	}

	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
	if (!dma_buf) {
		dprintk(CVP_ERR, "%s: invalid fd = %d", __func__, buf->fd);
		return -EINVAL;
	}

	cbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
	if (!cbuf) {
		msm_cvp_smem_put_dma_buf(dma_buf);
		return -ENOMEM;
	}

	smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
	if (!smem) {
		cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
		msm_cvp_smem_put_dma_buf(dma_buf);
		return -ENOMEM;
	}

	smem->dma_buf = dma_buf;
	smem->bitmap_index = MAX_DMABUF_NUMS;
	smem->pkt_type = 0;
	smem->buf_idx = 0;
	smem->fd = buf->fd;
	dprintk(CVP_MEM, "%s: dma_buf = %llx", __func__, dma_buf);
	rc = msm_cvp_map_smem(inst, smem, "map wncc");
	if (rc) {
		dprintk(CVP_ERR, "%s: map failed", __func__);
		print_client_buffer(CVP_ERR, __func__, inst, buf);
		goto exit;
	}

	cbuf->smem = smem;
	cbuf->fd = buf->fd;
	cbuf->size = buf->size;
	cbuf->offset = buf->offset;
	cbuf->ownership = CLIENT;
	cbuf->index = buf->index;

	/* Added for PreSil/RUMI testing */
#ifdef USE_PRESIL
	dprintk(CVP_DBG,
		"wncc buffer is %x for cam_presil_send_buffer"
		" with MAP_ADDR_OFFSET %x",
		(u64)(smem->device_addr) - MAP_ADDR_OFFSET, MAP_ADDR_OFFSET);
	cam_presil_send_buffer((u64)smem->dma_buf, 0,
		(u32)cbuf->offset, (u32)cbuf->size,
		(u64)(smem->device_addr) - MAP_ADDR_OFFSET);
#endif

	mutex_lock(&inst->cvpwnccbufs.lock);
	if (inst->cvpwnccbufs_table == NULL) {
		inst->cvpwnccbufs_table =
			(struct msm_cvp_wncc_buffer *)kzalloc(
				sizeof(struct msm_cvp_wncc_buffer) *
				EVA_KMD_WNCC_MAX_SRC_BUFS,
				GFP_KERNEL);
		if (!inst->cvpwnccbufs_table) {
			mutex_unlock(&inst->cvpwnccbufs.lock);
			rc = -ENOMEM;
			goto exit;
		}
	}

	list_add_tail(&cbuf->list, &inst->cvpwnccbufs.list);
	for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS; i++) {
		if (inst->cvpwnccbufs_table[i].iova == 0) {
			inst->cvpwnccbufs_num++;
			inst->cvpwnccbufs_table[i].fd = buf->fd;
			inst->cvpwnccbufs_table[i].iova = smem->device_addr;
			inst->cvpwnccbufs_table[i].size = smem->size;

			/* buf reserved[0] used to store wncc src buf id */
			buf->reserved[0] = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
			/* cbuf ktid used to store wncc src buf id */
			cbuf->ktid = i + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;

			dprintk(CVP_MEM, "%s: wncc buf iova: 0x%08X",
				__func__, inst->cvpwnccbufs_table[i].iova);
			break;
		}
	}
	if (i == EVA_KMD_WNCC_MAX_SRC_BUFS) {
		dprintk(CVP_ERR,
			"%s: wncc buf table full - max (%u) already registered",
			__func__, EVA_KMD_WNCC_MAX_SRC_BUFS);
		/* _wncc_print_cvpwnccbufs_table(inst); */
		list_del(&cbuf->list);
		mutex_unlock(&inst->cvpwnccbufs.lock);
		rc = -EDQUOT;
		goto exit;
	}
	mutex_unlock(&inst->cvpwnccbufs.lock);

	return rc;

exit:
	if (smem->device_addr)
		msm_cvp_unmap_smem(inst, smem, "unmap wncc");
	msm_cvp_smem_put_dma_buf(smem->dma_buf);
	cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
	cbuf = NULL;
	cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
	smem = NULL;
	return rc;
}
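
/*
 * msm_cvp_unmap_buf_wncc() - release a WNCC source buffer by buffer id.
 * The id from buf->reserved[0] is translated back to a table slot, the
 * smem is parked in the unused_wncc_bufs ring for debug, unmapped, and
 * the look-up table entry (and the table itself, once empty) is freed.
 */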
int msm_cvp_unmap_buf_wncc(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	int rc = 0;
	bool found;
	struct cvp_internal_buf *cbuf = (struct cvp_internal_buf *)0xdeadbeef;
	uint32_t buf_id, buf_idx;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return -EINVAL;
	}

	if (!inst->session) {
		dprintk(CVP_ERR, "%s: invalid session", __func__);
		return -EINVAL;
	}

	if (buf->index) {
		dprintk(CVP_ERR, "%s: buf index is NOT 0 fd=%d",
			__func__, buf->fd);
		return -EINVAL;
	}

	buf_id = buf->reserved[0];
	if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET || buf_id >=
		(EVA_KMD_WNCC_MAX_SRC_BUFS + EVA_KMD_WNCC_SRC_BUF_ID_OFFSET)) {
		dprintk(CVP_ERR, "%s: invalid buffer id %d",
			__func__, buf->reserved[0]);
		return -EINVAL;
	}

	mutex_lock(&inst->cvpwnccbufs.lock);
	if (inst->cvpwnccbufs_num == 0) {
		dprintk(CVP_ERR, "%s: no wncc buffers currently mapped",
			__func__);
		mutex_unlock(&inst->cvpwnccbufs.lock);
		return -EINVAL;
	}

	buf_idx = buf_id - EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
	if (inst->cvpwnccbufs_table[buf_idx].iova == 0) {
		dprintk(CVP_ERR, "%s: buffer id %d not found",
			__func__, buf_id);
		mutex_unlock(&inst->cvpwnccbufs.lock);
		return -EINVAL;
	}

	buf->fd = inst->cvpwnccbufs_table[buf_idx].fd;
	found = false;
	list_for_each_entry(cbuf, &inst->cvpwnccbufs.list, list) {
		if (cbuf->fd == buf->fd) {
			found = true;
			break;
		}
	}
	if (!found) {
		dprintk(CVP_ERR, "%s: buffer id %d not found",
			__func__, buf_id);
		print_client_buffer(CVP_ERR, __func__, inst, buf);
		_wncc_print_cvpwnccbufs_table(inst);
		mutex_unlock(&inst->cvpwnccbufs.lock);
		return -EINVAL;
	}

	if (cbuf->smem->device_addr) {
		u64 idx = inst->unused_wncc_bufs.ktid;

		inst->unused_wncc_bufs.smem[idx] = *(cbuf->smem);
		inst->unused_wncc_bufs.nr++;
		inst->unused_wncc_bufs.nr =
			(inst->unused_wncc_bufs.nr > NUM_WNCC_BUFS) ?
			NUM_WNCC_BUFS : inst->unused_wncc_bufs.nr;
		inst->unused_wncc_bufs.ktid = ++idx % NUM_WNCC_BUFS;
	}
	mutex_unlock(&inst->cvpwnccbufs.lock);

	if (cbuf->smem->device_addr) {
		msm_cvp_unmap_smem(inst, cbuf->smem, "unmap wncc");
		msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
	}

	mutex_lock(&inst->cvpwnccbufs.lock);
	list_del(&cbuf->list);
	inst->cvpwnccbufs_table[buf_idx].fd = 0;
	inst->cvpwnccbufs_table[buf_idx].iova = 0;
	inst->cvpwnccbufs_table[buf_idx].size = 0;
	inst->cvpwnccbufs_num--;
	if (inst->cvpwnccbufs_num == 0) {
		kfree(inst->cvpwnccbufs_table);
		inst->cvpwnccbufs_table = NULL;
	}
	mutex_unlock(&inst->cvpwnccbufs.lock);

	cvp_kmem_cache_free(&cvp_driver->smem_cache, cbuf->smem);
	cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
	return rc;
}

static void _wncc_print_oob(struct eva_kmd_oob_wncc *wncc_oob)
{
	u32 i, j;

	if (!wncc_oob) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return;
	}

	dprintk(CVP_DBG, "%s: wncc OOB --", __func__);
	dprintk(CVP_DBG, "%s: num_layers: %u", __func__, wncc_oob->num_layers);
	for (i = 0; i < wncc_oob->num_layers; i++) {
		dprintk(CVP_DBG, "%s: layers[%u].num_addrs: %u",
			__func__, i, wncc_oob->layers[i].num_addrs);

		for (j = 0; j < wncc_oob->layers[i].num_addrs; j++) {
			dprintk(CVP_DBG,
				"%s: layers[%u].addrs[%u]: %04u 0x%08x",
				__func__, i, j,
				wncc_oob->layers[i].addrs[j].buffer_id,
				wncc_oob->layers[i].addrs[j].offset);
		}
	}
}

static void _wncc_print_cvpwnccbufs_table(struct msm_cvp_inst *inst)
{
	u32 i, entries = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return;
	}

	if (inst->cvpwnccbufs_num == 0) {
		dprintk(CVP_DBG, "%s: wncc buffer look-up table is empty",
			__func__);
		return;
	}

	if (!inst->cvpwnccbufs_table) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return;
	}

	dprintk(CVP_DBG, "%s: wncc buffer table:", __func__);
	for (i = 0; i < EVA_KMD_WNCC_MAX_SRC_BUFS &&
		entries < inst->cvpwnccbufs_num; i++) {
		if (inst->cvpwnccbufs_table[i].iova != 0) {
			dprintk(CVP_DBG,
				"%s: buf_idx=%04d --> fd=%03d, iova=0x%08x, size=%d",
				__func__, i,
				inst->cvpwnccbufs_table[i].fd,
				inst->cvpwnccbufs_table[i].iova,
				inst->cvpwnccbufs_table[i].size);
			entries++;
		}
	}
}

static void _wncc_print_metadata_buf(u32 num_layers, u32 num_addrs,
		struct eva_kmd_wncc_metadata **wncc_metadata)
{
	u32 i, j, iova;

	if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS ||
		!wncc_metadata) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return;
	}

	dprintk(CVP_DBG, "%s: wncc metadata buffers --", __func__);
	dprintk(CVP_DBG, "%s: num_layers: %u", __func__, num_layers);
	dprintk(CVP_DBG, "%s: num_addrs: %u", __func__, num_addrs);
	for (i = 0; i < num_layers; i++) {
		for (j = 0; j < num_addrs; j++) {
			iova = (wncc_metadata[i][j].iova_msb << 22) |
				wncc_metadata[i][j].iova_lsb;
			dprintk(CVP_DBG,
				"%s: wncc_metadata[%u][%u]: %4u %3u %4u %3u 0x%08x %1u %4d %4d %4d %4d",
				__func__, i, j,
				wncc_metadata[i][j].loc_x_dec,
				wncc_metadata[i][j].loc_x_frac,
				wncc_metadata[i][j].loc_y_dec,
				wncc_metadata[i][j].loc_y_frac,
				iova,
				wncc_metadata[i][j].scale_idx,
				wncc_metadata[i][j].aff_coeff_3,
				wncc_metadata[i][j].aff_coeff_2,
				wncc_metadata[i][j].aff_coeff_1,
				wncc_metadata[i][j].aff_coeff_0);
		}
	}
}
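
/*
 * _wncc_copy_oob_from_user() - pull WNCC out-of-band data into the kernel.
 * Each user pointer is range-checked with access_ok() before get_user()/
 * copy_from_user(), and metadata_bufs_offset, num_layers and per-layer
 * num_addrs are bounds-checked against the packet and the WNCC limits.
 */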
static int _wncc_copy_oob_from_user(struct eva_kmd_hfi_packet *in_pkt,
		struct eva_kmd_oob_wncc *wncc_oob)
{
	int rc = 0;
	u32 oob_type = 0;
	struct eva_kmd_oob_buf *oob_buf_u;
	struct eva_kmd_oob_wncc *wncc_oob_u;
	struct eva_kmd_oob_wncc *wncc_oob_k;
	unsigned int i;
	u32 num_addrs;

	if (!in_pkt || !wncc_oob) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return -EINVAL;
	}

	oob_buf_u = in_pkt->oob_buf;
	if (!access_ok(oob_buf_u, sizeof(*oob_buf_u))) {
		dprintk(CVP_ERR, "%s: invalid OOB buf pointer", __func__);
		return -EINVAL;
	}

	if (!access_ok(&oob_buf_u->oob_type, sizeof(oob_buf_u->oob_type))) {
		dprintk(CVP_ERR,
			"%s: bad OOB buf pointer, oob_type inaccessible",
			__func__);
		return -EINVAL;
	}
	rc = get_user(oob_type, &oob_buf_u->oob_type);
	if (rc)
		return rc;
	if (oob_type != EVA_KMD_OOB_WNCC) {
		dprintk(CVP_ERR, "%s: incorrect OOB type (%d) for wncc",
			__func__, oob_type);
		return -EINVAL;
	}

	wncc_oob_u = &oob_buf_u->wncc;
	wncc_oob_k = wncc_oob;

	if (!access_ok(&wncc_oob_u->metadata_bufs_offset,
			sizeof(wncc_oob_u->metadata_bufs_offset))) {
		dprintk(CVP_ERR,
			"%s: bad OOB buf pointer, wncc.metadata_bufs_offset inaccessible",
			__func__);
		return -EINVAL;
	}
	rc = get_user(wncc_oob_k->metadata_bufs_offset,
		&wncc_oob_u->metadata_bufs_offset);
	if (rc)
		return rc;
	if (wncc_oob_k->metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
		- sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
		dprintk(CVP_ERR, "%s: invalid wncc metadata bufs offset",
			__func__);
		return -EINVAL;
	}

	if (!access_ok(&wncc_oob_u->num_layers,
			sizeof(wncc_oob_u->num_layers))) {
		dprintk(CVP_ERR,
			"%s: bad OOB buf pointer, wncc.num_layers inaccessible",
			__func__);
		return -EINVAL;
	}
	rc = get_user(wncc_oob_k->num_layers, &wncc_oob_u->num_layers);
	if (rc)
		return rc;
	if (wncc_oob_k->num_layers < 1 ||
		wncc_oob_k->num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
		dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
		return -EINVAL;
	}

	for (i = 0; i < wncc_oob_k->num_layers; i++) {
		if (!access_ok(&wncc_oob_u->layers[i].num_addrs,
				sizeof(wncc_oob_u->layers[i].num_addrs))) {
			dprintk(CVP_ERR,
				"%s: bad OOB buf pointer, wncc.layers[%u].num_addrs inaccessible",
				__func__, i);
			return -EINVAL;
		}
		rc = get_user(wncc_oob_k->layers[i].num_addrs,
			&wncc_oob_u->layers[i].num_addrs);
		if (rc)
			break;

		num_addrs = wncc_oob_k->layers[i].num_addrs;
		if (num_addrs < 1 || num_addrs > EVA_KMD_WNCC_MAX_ADDRESSES) {
			dprintk(CVP_ERR,
				"%s: invalid wncc num addrs for layer %u",
				__func__, i);
			rc = -EINVAL;
			break;
		}

		if (!access_ok(wncc_oob_u->layers[i].addrs,
				num_addrs * sizeof(struct eva_kmd_wncc_addr)) ||
			!access_ok(&wncc_oob_u->layers[i].addrs[num_addrs - 1],
				sizeof(struct eva_kmd_wncc_addr))) {
			dprintk(CVP_ERR,
				"%s: bad OOB buf pointer, wncc.layers[%u].addrs inaccessible",
				__func__, i);
			return -EINVAL;
		}

		rc = copy_from_user(wncc_oob_k->layers[i].addrs,
			wncc_oob_u->layers[i].addrs,
			num_addrs * sizeof(struct eva_kmd_wncc_addr));
		if (rc)
			break;
	}

	if (false)
		_wncc_print_oob(wncc_oob);

	return rc;
}
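
/*
 * _wncc_map_metadata_bufs() - vmap the per-layer WNCC metadata dma-bufs.
 * Each fd embedded in the HFI packet at metadata_bufs_offset is resolved,
 * size-checked against the layer's address count, prepared with
 * dma_buf_begin_cpu_access() and vmapped so the kernel can patch iovas
 * into the metadata in place. On any failure, layers mapped so far are
 * rolled back via _wncc_unmap_metadata_bufs().
 */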
static int _wncc_map_metadata_bufs(struct eva_kmd_hfi_packet *in_pkt,
		struct eva_kmd_oob_wncc *wncc_oob,
		struct eva_kmd_wncc_metadata **wncc_metadata)
{
	int rc = 0, i;
	struct cvp_buf_type *wncc_metadata_bufs;
	struct dma_buf *dmabuf;
	struct eva_buf_map map;
	__u32 num_layers, metadata_bufs_offset;

	if (!in_pkt || !wncc_metadata || !wncc_oob) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return -EINVAL;
	}

	num_layers = wncc_oob->num_layers;
	metadata_bufs_offset = wncc_oob->metadata_bufs_offset;
	if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
		dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
		return -EINVAL;
	}
	if (metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
		- sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
		dprintk(CVP_ERR, "%s: invalid wncc metadata bufs offset",
			__func__);
		return -EINVAL;
	}

	wncc_metadata_bufs = (struct cvp_buf_type *)
		&in_pkt->pkt_data[metadata_bufs_offset];
	for (i = 0; i < num_layers; i++) {
		dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
		if (IS_ERR(dmabuf)) {
			rc = PTR_ERR(dmabuf);
			dprintk(CVP_ERR,
				"%s: dma_buf_get() failed for wncc_metadata_bufs[%d], rc %d",
				__func__, i, rc);
			break;
		}

		if (dmabuf->size < wncc_oob->layers[i].num_addrs *
			sizeof(struct eva_kmd_wncc_metadata)) {
			dprintk(CVP_ERR,
				"%s: wncc_metadata_bufs[%d] size insufficient for num addrs in oob",
				__func__, i);
			dma_buf_put(dmabuf);
			rc = -EINVAL;
			break;
		}

		rc = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
		if (rc) {
			dprintk(CVP_ERR,
				"%s: dma_buf_begin_cpu_access() failed for wncc_metadata_bufs[%d], rc %d",
				__func__, i, rc);
			dma_buf_put(dmabuf);
			break;
		}

		rc = dma_buf_vmap(dmabuf, &map);
		if (rc) {
			dprintk(CVP_ERR,
				"%s: dma_buf_vmap() failed for wncc_metadata_bufs[%d]",
				__func__, i);
			dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
			dma_buf_put(dmabuf);
			break;
		}
		dprintk(CVP_DBG,
			"%s: wncc_metadata_bufs[%d] map.is_iomem is %d",
			__func__, i, map.is_iomem);

		wncc_metadata[i] = (struct eva_kmd_wncc_metadata *)map.vaddr;
		dma_buf_put(dmabuf);
	}

	if (rc)
		_wncc_unmap_metadata_bufs(in_pkt, wncc_oob, wncc_metadata);

	return rc;
}

static int _wncc_unmap_metadata_bufs(struct eva_kmd_hfi_packet *in_pkt,
		struct eva_kmd_oob_wncc *wncc_oob,
		struct eva_kmd_wncc_metadata **wncc_metadata)
{
	int rc = 0, i;
	struct cvp_buf_type *wncc_metadata_bufs;
	struct dma_buf *dmabuf;
	struct eva_buf_map map;
	__u32 num_layers, metadata_bufs_offset;

	if (!in_pkt || !wncc_metadata || !wncc_oob) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return -EINVAL;
	}

	num_layers = wncc_oob->num_layers;
	metadata_bufs_offset = wncc_oob->metadata_bufs_offset;
	if (num_layers < 1 || num_layers > EVA_KMD_WNCC_MAX_LAYERS) {
		dprintk(CVP_ERR, "%s: invalid wncc num layers", __func__);
		return -EINVAL;
	}
	if (metadata_bufs_offset > ((sizeof(in_pkt->pkt_data)
		- sizeof(struct cvp_buf_type)) / sizeof(__u32))) {
		dprintk(CVP_ERR, "%s: invalid wncc metadata bufs offset",
			__func__);
		return -EINVAL;
	}

	wncc_metadata_bufs = (struct cvp_buf_type *)
		&in_pkt->pkt_data[metadata_bufs_offset];
	for (i = 0; i < num_layers; i++) {
		if (!wncc_metadata[i])
			continue;

		dmabuf = dma_buf_get(wncc_metadata_bufs[i].fd);
		if (IS_ERR(dmabuf)) {
			rc = PTR_ERR(dmabuf);
			dprintk(CVP_ERR,
				"%s: dma_buf_get() failed for wncc_metadata_bufs[%d], rc %d",
				__func__, i, rc);
			break;
		}

		_buf_map_set_vaddr(&map, wncc_metadata[i]);
		dma_buf_vunmap(dmabuf, &map);
		wncc_metadata[i] = NULL;

		rc = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
		dma_buf_put(dmabuf);
		if (rc) {
			dprintk(CVP_ERR,
				"%s: dma_buf_end_cpu_access() failed for wncc_metadata_bufs[%d], rc %d",
				__func__, i, rc);
			break;
		}
	}

	return rc;
}
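
/*
 * WNCC OOB buffer pool: struct eva_kmd_oob_wncc is large, so (presumably
 * to keep it off the stack and avoid per-call allocation) a small pool of
 * NUM_WNCC_BUFS preallocated buffers is handed out under
 * wncc_buf_pool.lock, falling back to kzalloc when the pool is exhausted;
 * bitmap_idx 0xff marks a fallback allocation for free_wncc_buf().
 */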
static int init_wncc_bufs(void)
{
	int i;

	for (i = 0; i < NUM_WNCC_BUFS; i++) {
		wncc_buf_pool.bufs[i] = (struct eva_kmd_oob_wncc *)kzalloc(
			sizeof(struct eva_kmd_oob_wncc), GFP_KERNEL);
		if (!wncc_buf_pool.bufs[i]) {
			i--;
			goto exit_fail;
		}
	}
	wncc_buf_pool.used_bitmap = 0;
	wncc_buf_pool.allocated = true;
	return 0;

exit_fail:
	while (i >= 0) {
		kfree(wncc_buf_pool.bufs[i]);
		i--;
	}
	return -ENOMEM;
}

static int alloc_wncc_buf(struct wncc_oob_buf *wob)
{
	int rc, i;

	mutex_lock(&wncc_buf_pool.lock);
	if (!wncc_buf_pool.allocated) {
		rc = init_wncc_bufs();
		if (rc) {
			mutex_unlock(&wncc_buf_pool.lock);
			return rc;
		}
	}

	for (i = 0; i < NUM_WNCC_BUFS; i++) {
		if (!(wncc_buf_pool.used_bitmap & BIT(i))) {
			wncc_buf_pool.used_bitmap |= BIT(i);
			wob->bitmap_idx = i;
			wob->buf = wncc_buf_pool.bufs[i];
			mutex_unlock(&wncc_buf_pool.lock);
			return 0;
		}
	}
	mutex_unlock(&wncc_buf_pool.lock);

	wob->bitmap_idx = 0xff;
	wob->buf = (struct eva_kmd_oob_wncc *)kzalloc(
		sizeof(struct eva_kmd_oob_wncc), GFP_KERNEL);
	if (!wob->buf)
		rc = -ENOMEM;
	else
		rc = 0;

	return rc;
}

static void free_wncc_buf(struct wncc_oob_buf *wob)
{
	if (!wob)
		return;

	if (wob->bitmap_idx == 0xff) {
		kfree(wob->buf);
		return;
	}

	if (wob->bitmap_idx < NUM_WNCC_BUFS) {
		mutex_lock(&wncc_buf_pool.lock);
		wncc_buf_pool.used_bitmap &= ~BIT(wob->bitmap_idx);
		memset(wob->buf, 0, sizeof(struct eva_kmd_oob_wncc));
		wob->buf = NULL;
		mutex_unlock(&wncc_buf_pool.lock);
	}
}
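
/*
 * msm_cvp_proc_oob_wncc() - patch WNCC metadata with kernel-mapped iovas.
 * Copies the OOB descriptor from user space, vmaps the metadata buffers,
 * then resolves every (buffer_id, offset) pair through cvpwnccbufs_table
 * and writes the resulting iova into the metadata, split across
 * iova_lsb and iova_msb at bit 22, before unmapping again.
 */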
static int msm_cvp_proc_oob_wncc(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt)
{
	int rc = 0;
	struct eva_kmd_oob_wncc *wncc_oob;
	struct wncc_oob_buf wob;
	struct eva_kmd_wncc_metadata *wncc_metadata[EVA_KMD_WNCC_MAX_LAYERS];
	unsigned int i, j;
	bool empty = false;
	u32 buf_id, buf_idx, buf_offset, iova;

	if (!inst || !inst->core || !in_pkt) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return -EINVAL;
	}

	rc = alloc_wncc_buf(&wob);
	if (rc)
		return -ENOMEM;

	wncc_oob = wob.buf;
	rc = _wncc_copy_oob_from_user(in_pkt, wncc_oob);
	if (rc) {
		dprintk(CVP_ERR, "%s: OOB buf copying failed", __func__);
		goto exit;
	}

	memset(wncc_metadata, 0,
		sizeof(*wncc_metadata) * EVA_KMD_WNCC_MAX_LAYERS);
	rc = _wncc_map_metadata_bufs(in_pkt, wncc_oob, wncc_metadata);
	if (rc) {
		dprintk(CVP_ERR, "%s: failed to map wncc metadata bufs",
			__func__);
		goto exit;
	}

	mutex_lock(&inst->cvpwnccbufs.lock);
	if (inst->cvpwnccbufs_num == 0 || inst->cvpwnccbufs_table == NULL) {
		dprintk(CVP_ERR, "%s: no wncc bufs currently mapped", __func__);
		empty = true;
		rc = -EINVAL;
	}

	for (i = 0; !empty && i < wncc_oob->num_layers; i++) {
		for (j = 0; j < wncc_oob->layers[i].num_addrs; j++) {
			buf_id = wncc_oob->layers[i].addrs[j].buffer_id;
			if (buf_id < EVA_KMD_WNCC_SRC_BUF_ID_OFFSET ||
				buf_id >= (EVA_KMD_WNCC_SRC_BUF_ID_OFFSET +
					EVA_KMD_WNCC_MAX_SRC_BUFS)) {
				dprintk(CVP_ERR,
					"%s: invalid wncc buf id %u in layer #%u address #%u",
					__func__, buf_id, i, j);
				rc = -EINVAL;
				break;
			}

			buf_idx = buf_id - EVA_KMD_WNCC_SRC_BUF_ID_OFFSET;
			if (inst->cvpwnccbufs_table[buf_idx].iova == 0) {
				dprintk(CVP_ERR,
					"%s: unmapped wncc buf id %u in layer #%u address #%u",
					__func__, buf_id, i, j);
				/* _wncc_print_cvpwnccbufs_table(inst); */
				rc = -EINVAL;
				break;
			}

			buf_offset = wncc_oob->layers[i].addrs[j].offset;
			if (buf_offset >=
				inst->cvpwnccbufs_table[buf_idx].size) {
				/*
				 * NOTE: This buffer offset validation is
				 * not comprehensive since wncc src image
				 * resolution information is not known to
				 * KMD. UMD is responsible for comprehensive
				 * validation.
				 */
				dprintk(CVP_ERR,
					"%s: invalid wncc buf offset %u in layer #%u address #%u",
					__func__, buf_offset, i, j);
				rc = -EINVAL;
				break;
			}

			iova = inst->cvpwnccbufs_table[buf_idx].iova +
				buf_offset;
			wncc_metadata[i][j].iova_lsb = iova;
			wncc_metadata[i][j].iova_msb = iova >> 22;
		}
	}
	mutex_unlock(&inst->cvpwnccbufs.lock);

	if (false)
		_wncc_print_metadata_buf(wncc_oob->num_layers,
			wncc_oob->layers[0].num_addrs, wncc_metadata);

	if (_wncc_unmap_metadata_bufs(in_pkt, wncc_oob, wncc_metadata)) {
		dprintk(CVP_ERR, "%s: failed to unmap wncc metadata bufs",
			__func__);
	}

exit:
	free_wncc_buf(&wob);
	return rc;
}

int msm_cvp_proc_oob(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt)
{
	int rc = 0;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr =
		(struct cvp_hfi_cmd_session_hdr *)in_pkt;

	if (!inst || !inst->core || !in_pkt) {
		dprintk(CVP_ERR, "%s: invalid params", __func__);
		return -EINVAL;
	}

	switch (cmd_hdr->packet_type) {
	case HFI_CMD_SESSION_CVP_WARP_NCC_FRAME:
		rc = msm_cvp_proc_oob_wncc(inst, in_pkt);
		break;
	default:
		break;
	}

	return rc;
}

void msm_cvp_cache_operations(struct msm_cvp_smem *smem, u32 type,
		u32 offset, u32 size)
{
	enum smem_cache_ops cache_op;

	if (msm_cvp_cacheop_disabled)
		return;

	if (!smem) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	switch (type) {
	case EVA_KMD_BUFTYPE_INPUT:
		cache_op = SMEM_CACHE_CLEAN;
		break;
	case EVA_KMD_BUFTYPE_OUTPUT:
		cache_op = SMEM_CACHE_INVALIDATE;
		break;
	default:
		cache_op = SMEM_CACHE_CLEAN_INVALIDATE;
	}

	dprintk(CVP_MEM,
		"%s: cache operation enabled for dma_buf: %llx, cache_op: %d, offset: %d, size: %d\n",
		__func__, smem->dma_buf, cache_op, offset, size);
	msm_cvp_smem_cache_operations(smem->dma_buf, cache_op, offset, size);
}
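
/*
 * msm_cvp_session_find_smem() - look up an already-mapped dma-buf.
 * Checks the per-instance dma_cache first, then the persist list, then
 * all queued frames. A dma_cache hit takes a refcount but immediately
 * drops the extra dma-buf reference taken by the caller, since the
 * cached entry already holds one.
 */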
static struct msm_cvp_smem *msm_cvp_session_find_smem(struct msm_cvp_inst *inst,
		struct dma_buf *dma_buf,
		u32 pkt_type)
{
	struct msm_cvp_smem *smem;
	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef;
	struct cvp_internal_buf *buf = (struct cvp_internal_buf *)0xdeadbeef;
	int i;

	if (inst->dma_cache.nr > MAX_DMABUF_NUMS)
		return NULL;

	mutex_lock(&inst->dma_cache.lock);
	for (i = 0; i < inst->dma_cache.nr; i++)
		if (inst->dma_cache.entries[i]->dma_buf == dma_buf) {
			SET_USE_BITMAP(i, inst);
			smem = inst->dma_cache.entries[i];
			smem->bitmap_index = i;
			smem->pkt_type = pkt_type;
			atomic_inc(&smem->refcount);
			/*
			 * If we find it, it means we already increased
			 * refcount before, so we put it to avoid double
			 * incremental.
			 */
			msm_cvp_smem_put_dma_buf(smem->dma_buf);
			mutex_unlock(&inst->dma_cache.lock);
			print_smem(CVP_MEM, "found in cache", inst, smem);
			return smem;
		}
	mutex_unlock(&inst->dma_cache.lock);

	/* Search persist list */
	mutex_lock(&inst->persistbufs.lock);
	list_for_each_entry(buf, &inst->persistbufs.list, list) {
		smem = buf->smem;
		if (smem && smem->dma_buf == dma_buf) {
			atomic_inc(&smem->refcount);
			mutex_unlock(&inst->persistbufs.lock);
			print_smem(CVP_MEM, "found in persist", inst, smem);
			return smem;
		}
	}
	mutex_unlock(&inst->persistbufs.lock);

	/* Search frame list */
	mutex_lock(&inst->frames.lock);
	list_for_each_entry(frame, &inst->frames.list, list) {
		for (i = 0; i < frame->nr; i++) {
			smem = frame->bufs[i].smem;
			if (smem && smem->dma_buf == dma_buf) {
				atomic_inc(&smem->refcount);
				mutex_unlock(&inst->frames.lock);
				print_smem(CVP_MEM, "found in frame",
					inst, smem);
				return smem;
			}
		}
	}
	mutex_unlock(&inst->frames.lock);

	return NULL;
}
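
/*
 * msm_cvp_session_add_smem() - install a new smem in the dma_cache.
 * Appends while the cache has room; once full, it evicts the first
 * entry whose usage bit is clear. If every entry is in use, the smem is
 * refcounted but left to the regular buffer-mapping lists, signalled by
 * an -ENOMEM return that callers treat as non-fatal.
 */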
static int msm_cvp_session_add_smem(struct msm_cvp_inst *inst,
		struct msm_cvp_smem *smem)
{
	unsigned int i;
	struct msm_cvp_smem *smem2;

	mutex_lock(&inst->dma_cache.lock);
	if (inst->dma_cache.nr < MAX_DMABUF_NUMS) {
		inst->dma_cache.entries[inst->dma_cache.nr] = smem;
		SET_USE_BITMAP(inst->dma_cache.nr, inst);
		smem->bitmap_index = inst->dma_cache.nr;
		inst->dma_cache.nr++;
		i = smem->bitmap_index;
	} else {
		i = find_first_zero_bit(&inst->dma_cache.usage_bitmap,
				MAX_DMABUF_NUMS);
		if (i < MAX_DMABUF_NUMS) {
			smem2 = inst->dma_cache.entries[i];
			msm_cvp_unmap_smem(inst, smem2, "unmap cpu");
			msm_cvp_smem_put_dma_buf(smem2->dma_buf);
			cvp_kmem_cache_free(&cvp_driver->smem_cache, smem2);

			inst->dma_cache.entries[i] = smem;
			smem->bitmap_index = i;
			SET_USE_BITMAP(i, inst);
		} else {
			dprintk(CVP_WARN,
				"%s: reached limit, fallback to buf mapping list\n",
				__func__);
			atomic_inc(&smem->refcount);
			mutex_unlock(&inst->dma_cache.lock);
			return -ENOMEM;
		}
	}
	atomic_inc(&smem->refcount);
	mutex_unlock(&inst->dma_cache.lock);
	dprintk(CVP_MEM, "Add entry %d into cache\n", i);

	return 0;
}

static struct msm_cvp_smem *msm_cvp_session_get_smem(struct msm_cvp_inst *inst,
		struct cvp_buf_type *buf,
		bool is_persist,
		u32 pkt_type)
{
	int rc = 0, found = 1;
	struct msm_cvp_smem *smem = NULL;
	struct dma_buf *dma_buf = NULL;

	if (buf->fd < 0) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return NULL;
	}

	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
	if (!dma_buf) {
		dprintk(CVP_ERR, "%s: Invalid fd = %d", __func__, buf->fd);
		return NULL;
	}

	if (is_persist) {
		smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
		if (!smem) {
			msm_cvp_smem_put_dma_buf(dma_buf);
			return NULL;
		}

		smem->dma_buf = dma_buf;
		smem->bitmap_index = MAX_DMABUF_NUMS;
		smem->pkt_type = pkt_type;
		smem->flags |= SMEM_PERSIST;
		smem->fd = buf->fd;
		atomic_inc(&smem->refcount);

		rc = msm_cvp_map_smem(inst, smem, "map cpu");
		if (rc)
			goto exit;
		if (!IS_CVP_BUF_VALID(buf, smem)) {
			dprintk(CVP_ERR,
				"%s: invalid offset %d or size %d persist\n",
				__func__, buf->offset, buf->size);
			goto exit2;
		}
		return smem;
	}

	smem = msm_cvp_session_find_smem(inst, dma_buf, pkt_type);
	if (!smem) {
		found = 0;
		smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
		if (!smem) {
			msm_cvp_smem_put_dma_buf(dma_buf);
			return NULL;
		}

		smem->dma_buf = dma_buf;
		smem->bitmap_index = MAX_DMABUF_NUMS;
		smem->pkt_type = pkt_type;
		smem->fd = buf->fd;
		if (is_params_pkt(pkt_type))
			smem->flags |= SMEM_PERSIST;

		rc = msm_cvp_map_smem(inst, smem, "map cpu");
		if (rc)
			goto exit;
		if (!IS_CVP_BUF_VALID(buf, smem)) {
			dprintk(CVP_ERR,
				"%s: invalid buf %d %d fd %d dma 0x%llx %s %d type %#x\n",
				__func__, buf->offset, buf->size, buf->fd,
				dma_buf, dma_buf->name, dma_buf->size, pkt_type);
			goto exit2;
		}

		rc = msm_cvp_session_add_smem(inst, smem);
		if (rc && rc != -ENOMEM)
			goto exit2;
		return smem;
	}

	if (!IS_CVP_BUF_VALID(buf, smem)) {
		dprintk(CVP_ERR, "%s: invalid offset %d or size %d found\n",
			__func__, buf->offset, buf->size);
		if (found) {
			mutex_lock(&inst->dma_cache.lock);
			atomic_dec(&smem->refcount);
			mutex_unlock(&inst->dma_cache.lock);
			return NULL;
		}
		goto exit2;
	}

	return smem;

exit2:
	msm_cvp_unmap_smem(inst, smem, "unmap cpu");
exit:
	msm_cvp_smem_put_dma_buf(dma_buf);
	cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
	smem = NULL;
	return smem;
}
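
/*
 * msm_cvp_unmap_user_persist_buf() - drop a client-owned persist mapping.
 * Matches the buffer by dma-buf on the persistbufs list, reports its
 * iova through @iova, and unmaps it when still mapped.
 */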
  1298. static int msm_cvp_unmap_user_persist_buf(struct msm_cvp_inst *inst,
  1299. struct cvp_buf_type *buf,
  1300. u32 pkt_type, u32 buf_idx, u32 *iova)
  1301. {
  1302. struct msm_cvp_smem *smem = NULL;
  1303. struct list_head *ptr;
  1304. struct list_head *next;
  1305. struct cvp_internal_buf *pbuf;
  1306. struct dma_buf *dma_buf;
  1307. if (!inst) {
  1308. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  1309. return -EINVAL;
  1310. }
  1311. dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
  1312. if (!dma_buf)
  1313. return -EINVAL;
  1314. mutex_lock(&inst->persistbufs.lock);
  1315. list_for_each_safe(ptr, next, &inst->persistbufs.list) {
  1316. if (!ptr) {
  1317. mutex_unlock(&inst->persistbufs.lock);
  1318. return -EINVAL;
  1319. }
  1320. pbuf = list_entry(ptr, struct cvp_internal_buf, list);
  1321. if (dma_buf == pbuf->smem->dma_buf && (pbuf->smem->flags & SMEM_PERSIST)) {
  1322. *iova = pbuf->smem->device_addr;
  1323. dprintk(CVP_MEM,
  1324. "Unmap persist fd %d, dma_buf %#llx iova %#x\n",
  1325. pbuf->fd, pbuf->smem->dma_buf, *iova);
  1326. list_del(&pbuf->list);
  1327. if (*iova) {
  1328. msm_cvp_unmap_smem(inst, pbuf->smem, "unmap user persist");
  1329. msm_cvp_smem_put_dma_buf(pbuf->smem->dma_buf);
  1330. pbuf->smem->device_addr = 0;
  1331. }
  1332. cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
  1333. pbuf->smem = NULL;
  1334. cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
  1335. mutex_unlock(&inst->persistbufs.lock);
  1336. dma_buf_put(dma_buf);
  1337. return 0;
  1338. }
  1339. }
  1340. mutex_unlock(&inst->persistbufs.lock);
  1341. dma_buf_put(dma_buf);
  1342. return -EINVAL;
  1343. }
static int msm_cvp_map_user_persist_buf(struct msm_cvp_inst *inst,
			struct cvp_buf_type *buf,
			u32 pkt_type, u32 buf_idx, u32 *iova)
{
	struct msm_cvp_smem *smem = NULL;
	struct list_head *ptr;
	struct list_head *next;
	struct cvp_internal_buf *pbuf;
	struct dma_buf *dma_buf;
	int ret;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	dma_buf = msm_cvp_smem_get_dma_buf(buf->fd);
	if (!dma_buf)
		return -EINVAL;

	mutex_lock(&inst->persistbufs.lock);
	if (!inst->persistbufs.list.next) {
		mutex_unlock(&inst->persistbufs.lock);
		dma_buf_put(dma_buf);
		return -EINVAL;
	}

	/* Reuse an existing persist mapping of the same dma_buf, if any */
	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
		if (!ptr) {
			mutex_unlock(&inst->persistbufs.lock);
			dma_buf_put(dma_buf);
			return -EINVAL;
		}
		pbuf = list_entry(ptr, struct cvp_internal_buf, list);
		if (dma_buf == pbuf->smem->dma_buf) {
			pbuf->size = (pbuf->size >= buf->size) ?
					pbuf->size : buf->size;
			*iova = pbuf->smem->device_addr + buf->offset;
			mutex_unlock(&inst->persistbufs.lock);
			atomic_inc(&pbuf->smem->refcount);
			dma_buf_put(dma_buf);
			dprintk(CVP_MEM,
				"map persist Reuse fd %d, dma_buf %#llx\n",
				pbuf->fd, pbuf->smem->dma_buf);
			return 0;
		}
	}
	mutex_unlock(&inst->persistbufs.lock);
	dma_buf_put(dma_buf);

	pbuf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
	if (!pbuf) {
		dprintk(CVP_ERR, "%s failed to allocate kmem obj\n",
			__func__);
		return -ENOMEM;
	}

	if (is_params_pkt(pkt_type))
		smem = msm_cvp_session_get_smem(inst, buf, false, pkt_type);
	else
		smem = msm_cvp_session_get_smem(inst, buf, true, pkt_type);
	if (!smem) {
		ret = -ENOMEM;
		goto exit;
	}

	smem->pkt_type = pkt_type;
	smem->buf_idx = buf_idx;
	smem->fd = buf->fd;
	pbuf->smem = smem;
	pbuf->fd = buf->fd;
	pbuf->size = buf->size;
	pbuf->offset = buf->offset;
	pbuf->ownership = CLIENT;

	mutex_lock(&inst->persistbufs.lock);
	list_add_tail(&pbuf->list, &inst->persistbufs.list);
	mutex_unlock(&inst->persistbufs.lock);

	print_internal_buffer(CVP_MEM, "map persist", inst, pbuf);

	*iova = smem->device_addr + buf->offset;
	return 0;

exit:
	cvp_kmem_cache_free(&cvp_driver->buf_cache, pbuf);
	return ret;
}
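
/*
 * Map a single frame buffer and record it in @frame->bufs. Returns the
 * device address on success, or 0 on failure; callers treat a zero iova
 * as a mapping error.
 */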
static u32 msm_cvp_map_frame_buf(struct msm_cvp_inst *inst,
			struct cvp_buf_type *buf,
			struct msm_cvp_frame *frame,
			u32 pkt_type, u32 buf_idx)
{
	u32 iova = 0;
	struct msm_cvp_smem *smem = NULL;
	u32 nr;
	u32 type;

	if (!inst || !frame) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return 0;
	}

	nr = frame->nr;
	if (nr == MAX_FRAME_BUFFER_NUMS) {
		dprintk(CVP_ERR, "%s: max frame buffer reached\n", __func__);
		return 0;
	}

	smem = msm_cvp_session_get_smem(inst, buf, false, pkt_type);
	if (!smem)
		return 0;

	smem->buf_idx = buf_idx;
	frame->bufs[nr].fd = buf->fd;
	frame->bufs[nr].smem = smem;
	frame->bufs[nr].size = buf->size;
	frame->bufs[nr].offset = buf->offset;
	print_internal_buffer(CVP_MEM, "map cpu", inst, &frame->bufs[nr]);
	frame->nr++;

	type = EVA_KMD_BUFTYPE_INPUT | EVA_KMD_BUFTYPE_OUTPUT;
	msm_cvp_cache_operations(smem, type, buf->offset, buf->size);

	iova = smem->device_addr + buf->offset;
	return iova;
}
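
/*
 * Drop every buffer attached to @frame. Buffers outside the dmamap cache
 * are unmapped and freed once their refcount hits zero; cached entries
 * only clear their use-bitmap slot. buf_idx is tagged with a marker
 * pattern to help spot stale references in dumps.
 */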
static void msm_cvp_unmap_frame_buf(struct msm_cvp_inst *inst,
			struct msm_cvp_frame *frame)
{
	u32 i;
	u32 type;
	struct msm_cvp_smem *smem = NULL;
	struct cvp_internal_buf *buf;

	type = EVA_KMD_BUFTYPE_OUTPUT;

	for (i = 0; i < frame->nr; ++i) {
		buf = &frame->bufs[i];
		smem = buf->smem;
		msm_cvp_cache_operations(smem, type, buf->offset, buf->size);

		if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
			/* smem not in dmamap cache */
			if (atomic_dec_and_test(&smem->refcount)) {
				msm_cvp_unmap_smem(inst, smem, "unmap cpu");
				dma_heap_buffer_free(smem->dma_buf);
				/* Poison buf_idx to flag the freed smem */
				smem->buf_idx |= 0xdead0000;
				cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
				buf->smem = NULL;
			}
		} else {
			mutex_lock(&inst->dma_cache.lock);
			if (atomic_dec_and_test(&smem->refcount)) {
				CLEAR_USE_BITMAP(smem->bitmap_index, inst);
				print_smem(CVP_MEM, "Map dereference",
					inst, smem);
				/* Mark the cached entry as dereferenced */
				smem->buf_idx |= 0x10000000;
			}
			mutex_unlock(&inst->dma_cache.lock);
		}
	}

	cvp_kmem_cache_free(&cvp_driver->frame_cache, frame);
}
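
/*
 * Snapshot the smem descriptors of @frame into inst->last_frame before the
 * frame is unmapped, so the most recent mappings survive for debug dumps.
 * Skipped when any buffer still lives in the dma_cache table.
 */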
static void backup_frame_buffers(struct msm_cvp_inst *inst,
			struct msm_cvp_frame *frame)
{
	/* Save frame buffers before unmapping them */
	int i = frame->nr;

	if (i == 0 || i > MAX_FRAME_BUFFER_NUMS)
		return;

	inst->last_frame.ktid = frame->ktid;
	inst->last_frame.nr = frame->nr;

	do {
		i--;
		if (frame->bufs[i].smem->bitmap_index < MAX_DMABUF_NUMS) {
			/*
			 * Frame buffer info can be found in the dma_cache
			 * table; skip saving.
			 */
			inst->last_frame.nr = 0;
			return;
		}
		inst->last_frame.smem[i] = *(frame->bufs[i].smem);
	} while (i);
}
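
/*
 * Find the frame identified by @ktid on inst->frames, back up its mappings
 * for debug, and unmap all of its buffers.
 */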
void msm_cvp_unmap_frame(struct msm_cvp_inst *inst, u64 ktid)
{
	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef, *dummy1;
	bool found;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return;
	}

	ktid &= (FENCE_BIT - 1);
	dprintk(CVP_MEM, "%s: (%#x) unmap frame %llu\n",
			__func__, hash32_ptr(inst->session), ktid);

	found = false;
	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		if (frame->ktid == ktid) {
			found = true;
			list_del(&frame->list);
			dprintk(CVP_CMD, "%s: "
				"pkt_type %08x sess_id %08x trans_id <> ktid %llu\n",
				__func__, frame->pkt_type,
				hash32_ptr(inst->session),
				frame->ktid);
			/* Save the previous frame mappings for debug */
			backup_frame_buffers(inst, frame);
			msm_cvp_unmap_frame_buf(inst, frame);
			break;
		}
	}
	mutex_unlock(&inst->frames.lock);

	if (!found)
		dprintk(CVP_WARN, "%s frame %llu not found!\n", __func__, ktid);
}

/*
 * Unmap persistent buffers before sending RELEASE_PERSIST_BUFFERS to FW.
 * This packet is sent after SESSION_STOP. The assumption is that FW/HW
 * will NOT access any of the 3 persist buffers anymore.
 */
int msm_cvp_unmap_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_buf_type *buf;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	int i, ret;
	u32 iova;

	if (!offset || !buf_num)
		return 0;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		ret = msm_cvp_unmap_user_persist_buf(inst, buf,
				cmd_hdr->packet_type, i, &iova);
		if (ret) {
			dprintk(CVP_ERR,
				"%s: buf %d unmap failed.\n",
				__func__, i);
			return ret;
		}
		buf->fd = iova;
	}
	return 0;
}
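
/*
 * Map each cvp_buf_type entry embedded in @in_pkt (starting at @offset, in
 * 32-bit words) as a user persistent buffer. On success the fd field of
 * each entry is rewritten with the mapped device address.
 */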
int msm_cvp_map_user_persist(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_buf_type *buf;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	int i, ret;
	u32 iova;

	if (!offset || !buf_num)
		return 0;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size)
			continue;

		ret = msm_cvp_map_user_persist_buf(inst, buf,
				cmd_hdr->packet_type, i, &iova);
		if (ret) {
			dprintk(CVP_ERR,
				"%s: buf %d map failed.\n",
				__func__, i);
			return ret;
		}
		buf->fd = iova;
	}
	return 0;
}
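
/*
 * Build a msm_cvp_frame for this packet: assign a kernel transaction id
 * (ktid), map every buffer the packet references, and queue the frame on
 * inst->frames. On a mapping failure the buffers of all instances are
 * dumped and the partially built frame is unmapped again.
 */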
int msm_cvp_map_frame(struct msm_cvp_inst *inst,
		struct eva_kmd_hfi_packet *in_pkt,
		unsigned int offset, unsigned int buf_num)
{
	struct cvp_buf_type *buf;
	int i;
	u32 iova;
	u64 ktid;
	struct msm_cvp_frame *frame;
	struct cvp_hfi_cmd_session_hdr *cmd_hdr;
	struct msm_cvp_inst *instance = (struct msm_cvp_inst *)0xdeadbeef;
	struct msm_cvp_core *core = NULL;

	core = cvp_driver->cvp_core;
	if (!core)
		return -EINVAL;

	if (!offset || !buf_num)
		return 0;

	cmd_hdr = (struct cvp_hfi_cmd_session_hdr *)in_pkt;
	ktid = atomic64_inc_return(&inst->core->kernel_trans_id);
	ktid &= (FENCE_BIT - 1);
	cmd_hdr->client_data.kdata = ktid;
	dprintk(CVP_CMD, "%s: "
		"pkt_type %08x sess_id %08x trans_id %u ktid %llu\n",
		__func__, cmd_hdr->packet_type,
		cmd_hdr->session_id,
		cmd_hdr->client_data.transaction_id,
		cmd_hdr->client_data.kdata & (FENCE_BIT - 1));

	frame = cvp_kmem_cache_zalloc(&cvp_driver->frame_cache, GFP_KERNEL);
	if (!frame)
		return -ENOMEM;

	frame->ktid = ktid;
	frame->nr = 0;
	frame->pkt_type = cmd_hdr->packet_type;

	for (i = 0; i < buf_num; i++) {
		buf = (struct cvp_buf_type *)&in_pkt->pkt_data[offset];
		offset += sizeof(*buf) >> 2;

		if (buf->fd < 0 || !buf->size) {
			buf->fd = 0;
			buf->size = 0;
			continue;
		}

		iova = msm_cvp_map_frame_buf(inst, buf, frame, cmd_hdr->packet_type, i);
		if (!iova) {
			dprintk(CVP_ERR,
				"%s: buf %d register failed.\n",
				__func__, i);
			dprintk(CVP_ERR, "smem_leak_count %d\n", core->smem_leak_count);
			mutex_lock(&core->lock);
			list_for_each_entry(instance, &core->instances, list) {
				msm_cvp_print_inst_bufs(instance, false);
			}
			mutex_unlock(&core->lock);
			msm_cvp_unmap_frame_buf(inst, frame);
			return -EINVAL;
		}
		buf->fd = iova;
	}

	mutex_lock(&inst->frames.lock);
	list_add_tail(&frame->list, &inst->frames.list);
	mutex_unlock(&inst->frames.lock);

	dprintk(CVP_MEM, "%s: map frame %llu\n", __func__, ktid);

	return 0;
}
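
/*
 * Tear down all buffer state of a session: outstanding frames, user
 * persistent buffers, the dmamap cache, DSP buffers, and WNCC buffers.
 * DRIVER-owned (ARP) persist buffers are left to cvp_release_arp_buffers().
 */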
int msm_cvp_session_deinit_buffers(struct msm_cvp_inst *inst)
{
	int rc = 0, i;
	struct cvp_internal_buf *cbuf, *dummy;
	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef, *dummy1;
	struct msm_cvp_smem *smem;
	struct cvp_hal_session *session;
	struct eva_kmd_buffer buf;
	struct list_head *ptr = (struct list_head *)0xdead;
	struct list_head *next = (struct list_head *)0xdead;

	session = (struct cvp_hal_session *)inst->session;

	mutex_lock(&inst->frames.lock);
	list_for_each_entry_safe(frame, dummy1, &inst->frames.list, list) {
		list_del(&frame->list);
		msm_cvp_unmap_frame_buf(inst, frame);
	}
	mutex_unlock(&inst->frames.lock);

	mutex_lock(&inst->persistbufs.lock);
	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
		if (!ptr) {
			mutex_unlock(&inst->persistbufs.lock);
			return -EINVAL;
		}
		cbuf = list_entry(ptr, struct cvp_internal_buf, list);
		smem = cbuf->smem;
		if (!smem) {
			dprintk(CVP_ERR, "%s invalid persist smem\n", __func__);
			mutex_unlock(&inst->persistbufs.lock);
			return -EINVAL;
		}
		if (cbuf->ownership != DRIVER) {
			dprintk(CVP_MEM,
				"%s: %x : fd %d %pK size %d",
				"free user persistent", hash32_ptr(inst->session), cbuf->fd,
				smem->dma_buf, cbuf->size);
			list_del(&cbuf->list);
			if (smem->bitmap_index >= MAX_DMABUF_NUMS) {
				/*
				 * Refcount does not matter here; the mapping
				 * has to be removed because this is a user
				 * persistent buffer.
				 */
				if (smem->device_addr) {
					msm_cvp_unmap_smem(inst, smem,
						"unmap persist");
					msm_cvp_smem_put_dma_buf(
						cbuf->smem->dma_buf);
					smem->device_addr = 0;
				}
				cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
				cbuf->smem = NULL;
				cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
			} else {
				/*
				 * DMM_PARAMS and WAP_NCC_PARAMS cases:
				 * leave dma_cache cleanup to unmap.
				 */
				cbuf->smem = NULL;
				cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
			}
		}
	}
	mutex_unlock(&inst->persistbufs.lock);

	mutex_lock(&inst->dma_cache.lock);
	for (i = 0; i < inst->dma_cache.nr; i++) {
		smem = inst->dma_cache.entries[i];
		if (atomic_read(&smem->refcount) == 0) {
			print_smem(CVP_MEM, "free", inst, smem);
		} else if (!(smem->flags & SMEM_PERSIST)) {
			print_smem(CVP_WARN, "in use", inst, smem);
		}
		msm_cvp_unmap_smem(inst, smem, "unmap cpu");
		msm_cvp_smem_put_dma_buf(smem->dma_buf);
		cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
		inst->dma_cache.entries[i] = NULL;
	}
	mutex_unlock(&inst->dma_cache.lock);

	cbuf = (struct cvp_internal_buf *)0xdeadbeef;
	mutex_lock(&inst->cvpdspbufs.lock);
	list_for_each_entry_safe(cbuf, dummy, &inst->cvpdspbufs.list, list) {
		print_internal_buffer(CVP_MEM, "remove dspbufs", inst, cbuf);
		if (cbuf->ownership == CLIENT) {
			msm_cvp_unmap_smem(inst, cbuf->smem, "unmap dsp");
			msm_cvp_smem_put_dma_buf(cbuf->smem->dma_buf);
		} else if (cbuf->ownership == DSP) {
			rc = cvp_dsp_fastrpc_unmap(inst->dsp_handle, cbuf);
			if (rc)
				dprintk(CVP_ERR,
					"%s: failed to unmap buf from DSP\n",
					__func__);
			rc = cvp_release_dsp_buffers(inst, cbuf);
			if (rc)
				dprintk(CVP_ERR,
					"%s Fail to free buffer 0x%x\n",
					__func__, rc);
		}
		list_del(&cbuf->list);
		cvp_kmem_cache_free(&cvp_driver->buf_cache, cbuf);
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	mutex_lock(&inst->cvpwnccbufs.lock);
	if (inst->cvpwnccbufs_num != 0)
		dprintk(CVP_WARN, "%s: cvpwnccbufs not empty, contains %d bufs",
			__func__, inst->cvpwnccbufs_num);
	list_for_each_entry_safe(cbuf, dummy, &inst->cvpwnccbufs.list, list) {
		print_internal_buffer(CVP_MEM, "remove wnccbufs", inst, cbuf);
		buf.fd = cbuf->fd;
		buf.reserved[0] = cbuf->ktid;

		mutex_unlock(&inst->cvpwnccbufs.lock);
		msm_cvp_unmap_buf_wncc(inst, &buf);
		mutex_lock(&inst->cvpwnccbufs.lock);
	}
	mutex_unlock(&inst->cvpwnccbufs.lock);

	return rc;
}
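
/*
 * Match @buf against the DSP debug-queue trace for @session_id and, when
 * its iova is found there, copy buf_idx, pkt_type, and fd back into the
 * kernel smem so dumps show the DSP-side identity of the buffer.
 */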
void msm_cvp_populate_dsp_buf_info(struct cvp_internal_buf *buf,
			struct cvp_hal_session *session,
			u32 session_id,
			struct msm_cvp_core *core)
{
	struct cvp_hfi_ops *dev_ops = (struct cvp_hfi_ops *) core->dev_ops;
	struct iris_hfi_device *cvp_device = (struct iris_hfi_device *) dev_ops->hfi_device_data;
	struct cvp_iface_q_info dsp_debugQ_info = cvp_device->dsp_iface_queues[DEBUG_Q];
	struct cvp_dsp_trace_buf *trace_buf;
	struct cvp_dsp_trace *dsp_debug_trace;

	dsp_debug_trace = (struct cvp_dsp_trace *) dsp_debugQ_info.q_array.align_virtual_addr;
	if (!dsp_debug_trace) {
		dprintk(CVP_ERR, "dsp trace is NULL\n");
		return;
	}

	for (int session_idx = 0; session_idx < EVA_TRACE_MAX_SESSION_NUM; session_idx++) {
		if (dsp_debug_trace->sessions[session_idx].session_id == session_id) {
			u32 buf_cnt = dsp_debug_trace->sessions[session_idx].buf_cnt;

			for (int buf_idx = 0; buf_idx < buf_cnt; buf_idx++) {
				trace_buf = &dsp_debug_trace->sessions[session_idx].buf[buf_idx];
				if (buf->smem->device_addr == trace_buf->iova) {
					buf->smem->buf_idx = trace_buf->buf_idx;
					buf->smem->pkt_type = trace_buf->pkt_type;
					buf->smem->fd = trace_buf->fd;
					return;
				}
			}
		}
	}
}

#define MAX_NUM_FRAMES_DUMP 4
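
/*
 * Dump every buffer owned by @inst: the dmamap cache, frame, DSP, WNCC,
 * and persist lists, plus the saved last-frame and unmapped-buffer backups.
 * When @log is true the data is also captured into the core log snapshot.
 */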
void msm_cvp_print_inst_bufs(struct msm_cvp_inst *inst, bool log)
{
	struct cvp_internal_buf *buf = (struct cvp_internal_buf *)0xdeadbeef;
	struct msm_cvp_frame *frame = (struct msm_cvp_frame *)0xdeadbeef;
	struct msm_cvp_core *core;
	struct inst_snapshot *snap = NULL;
	int i = 0, c = 0;
	/* DSP trace related variables */
	struct cvp_hal_session *session;
	u32 session_id;

	if (!inst) {
		dprintk(CVP_ERR, "%s - invalid param %pK\n",
			__func__, inst);
		return;
	}

	session = (struct cvp_hal_session *)inst->session;
	session_id = hash32_ptr(session);
	core = cvp_driver->cvp_core;

	if (log && core->log.snapshot_index < 16) {
		snap = &core->log.snapshot[core->log.snapshot_index];
		snap->session = inst->session;
		core->log.snapshot_index++;
	}

	dprintk(CVP_ERR,
		"---Buffer details for inst: %pK %s of type: %d---\n",
		inst, inst->proc_name, inst->session_type);
	dprintk(CVP_ERR, "dma_cache entries %d\n", inst->dma_cache.nr);

	mutex_lock(&inst->dma_cache.lock);
	if (inst->dma_cache.nr <= MAX_DMABUF_NUMS)
		for (i = 0; i < inst->dma_cache.nr; i++)
			_log_smem(snap, inst, inst->dma_cache.entries[i], log);
	mutex_unlock(&inst->dma_cache.lock);

	i = 0;
	dprintk(CVP_ERR, "frame buffer list\n");
	mutex_lock(&inst->frames.lock);
	list_for_each_entry(frame, &inst->frames.list, list) {
		i++;
		if (i <= MAX_NUM_FRAMES_DUMP) {
			dprintk(CVP_ERR, "frame no %d tid %llx bufs\n",
				i, frame->ktid);
			for (c = 0; c < frame->nr; c++)
				_log_smem(snap, inst, frame->bufs[c].smem,
					log);
		}
	}
	if (i > MAX_NUM_FRAMES_DUMP)
		dprintk(CVP_ERR, "Skipped %d frames' buffers\n",
			(i - MAX_NUM_FRAMES_DUMP));
	mutex_unlock(&inst->frames.lock);

	mutex_lock(&inst->cvpdspbufs.lock);
	dprintk(CVP_ERR, "dsp buffer list:\n");
	list_for_each_entry(buf, &inst->cvpdspbufs.list, list) {
		/* Populate DSP buffer info from debug queue to kernel instance */
		msm_cvp_populate_dsp_buf_info(buf, session, session_id, core);
		/* Log print buffer info */
		_log_buf(snap, SMEM_CDSP, inst, buf, log);
	}
	mutex_unlock(&inst->cvpdspbufs.lock);

	mutex_lock(&inst->cvpwnccbufs.lock);
	dprintk(CVP_ERR, "wncc buffer list:\n");
	list_for_each_entry(buf, &inst->cvpwnccbufs.list, list)
		print_cvp_buffer(CVP_ERR, "bufdump", inst, buf);
	mutex_unlock(&inst->cvpwnccbufs.lock);

	mutex_lock(&inst->persistbufs.lock);
	dprintk(CVP_ERR, "persist buffer list:\n");
	list_for_each_entry(buf, &inst->persistbufs.list, list)
		_log_buf(snap, SMEM_PERSIST, inst, buf, log);
	mutex_unlock(&inst->persistbufs.lock);

	dprintk(CVP_ERR, "last frame ktid %llx\n", inst->last_frame.ktid);
	for (i = 0; i < inst->last_frame.nr; i++)
		_log_smem(snap, inst, &inst->last_frame.smem[i], log);

	dprintk(CVP_ERR, "unmapped wncc bufs\n");
	for (i = 0; i < inst->unused_wncc_bufs.nr; i++)
		_log_smem(snap, inst, &inst->unused_wncc_bufs.smem[i], log);

	dprintk(CVP_ERR, "unmapped dsp bufs\n");
	for (i = 0; i < inst->unused_dsp_bufs.nr; i++)
		_log_smem(snap, inst, &inst->unused_dsp_bufs.smem[i], log);
}
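
/*
 * Allocate a DRIVER-owned ARP (persist) buffer of @buffer_size bytes, take
 * a refcount on its smem, and queue it on inst->persistbufs. Returns the
 * internal buffer on success, NULL on failure.
 */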
struct cvp_internal_buf *cvp_allocate_arp_bufs(struct msm_cvp_inst *inst,
			u32 buffer_size)
{
	struct cvp_internal_buf *buf;
	struct msm_cvp_list *buf_list;
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return NULL;
	}

	buf_list = &inst->persistbufs;

	if (!buffer_size)
		return NULL;

	/* If PERSIST buffer requires secure mapping, uncomment
	 * below flags setting
	 * smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
	 */
	buf = cvp_kmem_cache_zalloc(&cvp_driver->buf_cache, GFP_KERNEL);
	if (!buf) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc;
	}

	buf->smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		goto fail_kzalloc_smem;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0, /* 0: no mapping in kernel space */
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate ARP memory\n");
		goto err_no_mem;
	}

	buf->smem->pkt_type = buf->smem->buf_idx = 0;
	atomic_inc(&buf->smem->refcount);
	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DRIVER;

	mutex_lock(&buf_list->lock);
	list_add_tail(&buf->list, &buf_list->list);
	mutex_unlock(&buf_list->lock);

	return buf;

err_no_mem:
	cvp_kmem_cache_free(&cvp_driver->smem_cache, buf->smem);
fail_kzalloc_smem:
	cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
fail_kzalloc:
	return NULL;
}
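
/*
 * Ask firmware to release its persist buffers (per the workaround below,
 * one release request means release-all), then free every DRIVER-owned
 * entry remaining on inst->persistbufs.
 */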
int cvp_release_arp_buffers(struct msm_cvp_inst *inst)
{
	struct msm_cvp_smem *smem;
	struct list_head *ptr = (struct list_head *)0xdead;
	struct list_head *next = (struct list_head *)0xdead;
	struct cvp_internal_buf *buf;
	int rc = 0;
	struct msm_cvp_core *core;
	struct cvp_hfi_ops *ops_tbl;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	core = inst->core;
	if (!core) {
		dprintk(CVP_ERR, "Invalid core pointer = %pK\n", core);
		return -EINVAL;
	}
	ops_tbl = core->dev_ops;
	if (!ops_tbl) {
		dprintk(CVP_ERR, "Invalid device pointer = %pK\n", ops_tbl);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "release persist buffer!\n");

	mutex_lock(&inst->persistbufs.lock);
	/* Workaround for FW: release buffer means release all */
	if (inst->state > MSM_CVP_CORE_INIT_DONE && inst->state <= MSM_CVP_CLOSE_DONE) {
		rc = call_hfi_op(ops_tbl, session_release_buffers,
				(void *)inst->session);
		if (!rc) {
			mutex_unlock(&inst->persistbufs.lock);
			rc = wait_for_sess_signal_receipt(inst,
				HAL_SESSION_RELEASE_BUFFER_DONE);
			if (rc)
				dprintk(CVP_WARN,
					"%s: wait release_arp signal failed, rc %d\n",
					__func__, rc);
			mutex_lock(&inst->persistbufs.lock);
		} else {
			dprintk_rl(CVP_WARN, "Fail to send Rel prst buf\n");
		}
	}

	list_for_each_safe(ptr, next, &inst->persistbufs.list) {
		if (!ptr) {
			mutex_unlock(&inst->persistbufs.lock);
			return -EINVAL;
		}
		buf = list_entry(ptr, struct cvp_internal_buf, list);
		smem = buf->smem;
		if (!smem) {
			dprintk(CVP_ERR, "%s invalid smem\n", __func__);
			mutex_unlock(&inst->persistbufs.lock);
			return -EINVAL;
		}
		if (buf->ownership == DRIVER) {
			dprintk(CVP_MEM,
				"%s: %x : fd %d %pK size %d",
				"free arp", hash32_ptr(inst->session), buf->fd,
				smem->dma_buf, buf->size);
			list_del(&buf->list);
			atomic_dec(&smem->refcount);
			msm_cvp_smem_free(smem);
			cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
			buf->smem = NULL;
			cvp_kmem_cache_free(&cvp_driver->buf_cache, buf);
		}
	}
	mutex_unlock(&inst->persistbufs.lock);

	return rc;
}
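
/*
 * Allocate a DSP-owned buffer of @buffer_size bytes. @secure_type selects
 * the mapping flavor: 0 non-secure, 1 secure pixel, 2 secure non-pixel.
 */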
int cvp_allocate_dsp_bufs(struct msm_cvp_inst *inst,
			struct cvp_internal_buf *buf,
			u32 buffer_size,
			u32 secure_type)
{
	u32 smem_flags = SMEM_UNCACHED;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s Invalid input\n", __func__);
		return -EINVAL;
	}

	if (!buf)
		return -EINVAL;

	if (!buffer_size)
		return -EINVAL;

	switch (secure_type) {
	case 0:
		break;
	case 1:
		smem_flags |= SMEM_SECURE | SMEM_PIXEL;
		break;
	case 2:
		smem_flags |= SMEM_SECURE | SMEM_NON_PIXEL;
		break;
	default:
		dprintk(CVP_ERR, "%s Invalid secure_type %d\n",
			__func__, secure_type);
		return -EINVAL;
	}

	dprintk(CVP_MEM, "%s smem_flags 0x%x\n", __func__, smem_flags);

	buf->smem = cvp_kmem_cache_zalloc(&cvp_driver->smem_cache, GFP_KERNEL);
	if (!buf->smem) {
		dprintk(CVP_ERR, "%s Out of memory\n", __func__);
		rc = -ENOMEM;
		goto fail_kzalloc_smem_cache;
	}

	buf->smem->flags = smem_flags;
	rc = msm_cvp_smem_alloc(buffer_size, 1, 0,
			&(inst->core->resources), buf->smem);
	if (rc) {
		dprintk(CVP_ERR, "Failed to allocate DSP buf\n");
		goto err_no_mem;
	}

	buf->smem->pkt_type = buf->smem->buf_idx = 0;
	atomic_inc(&buf->smem->refcount);
	dprintk(CVP_MEM, "%s dma_buf %pK\n", __func__, buf->smem->dma_buf);
	buf->size = buf->smem->size;
	buf->type = HFI_BUFFER_INTERNAL_PERSIST_1;
	buf->ownership = DSP;

	return rc;

err_no_mem:
	cvp_kmem_cache_free(&cvp_driver->smem_cache, buf->smem);
fail_kzalloc_smem_cache:
	return rc;
}
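
/*
 * Free a DSP-owned buffer previously set up by cvp_allocate_dsp_bufs().
 * Only the backing smem is released here; freeing the cvp_internal_buf
 * itself is left to the caller.
 */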
int cvp_release_dsp_buffers(struct msm_cvp_inst *inst,
			struct cvp_internal_buf *buf)
{
	struct msm_cvp_smem *smem;
	int rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "Invalid instance pointer = %pK\n", inst);
		return -EINVAL;
	}

	if (!buf) {
		dprintk(CVP_ERR, "Invalid buffer pointer = %pK\n", buf);
		return -EINVAL;
	}

	smem = buf->smem;
	if (!smem) {
		dprintk(CVP_ERR, "%s invalid smem\n", __func__);
		return -EINVAL;
	}

	if (buf->ownership == DSP) {
		dprintk(CVP_MEM,
			"%s: %x : fd %x %s size %d",
			__func__, hash32_ptr(inst->session), buf->fd,
			smem->dma_buf->name, buf->size);
		atomic_dec(&smem->refcount);
		msm_cvp_smem_free(smem);
		cvp_kmem_cache_free(&cvp_driver->smem_cache, smem);
	} else {
		dprintk(CVP_ERR,
			"%s: wrong owner %d %x : fd %x %s size %d",
			__func__, buf->ownership, hash32_ptr(inst->session),
			buf->fd, smem->dma_buf->name, buf->size);
	}
	return rc;
}
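
/*
 * Register a client buffer with the session. A non-zero buf->index maps
 * the buffer for the DSP; index zero maps it for WNCC. The mapped iova is
 * logged alongside the fd.
 */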
int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct cvp_hal_session *session;
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	session = (struct cvp_hal_session *)inst->session;
	if (!session) {
		dprintk(CVP_ERR, "%s: invalid session\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	print_client_buffer(CVP_HFI, "register", inst, buf);
	if (buf->index)
		rc = msm_cvp_map_buf_dsp(inst, buf);
	else
		rc = msm_cvp_map_buf_wncc(inst, buf);
	dprintk(CVP_DSP, "%s: fd %d, iova 0x%x\n", __func__,
			buf->fd, buf->reserved[0]);

exit:
	cvp_put_inst(s);
	return rc;
}
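
/*
 * Unregister a client buffer; the DSP/WNCC split mirrors
 * msm_cvp_register_buffer().
 */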
int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
		struct eva_kmd_buffer *buf)
{
	struct msm_cvp_inst *s;
	int rc = 0;

	if (!inst || !inst->core || !buf) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	print_client_buffer(CVP_HFI, "unregister", inst, buf);

	if (buf->index)
		rc = msm_cvp_unmap_buf_dsp(inst, buf);
	else
		rc = msm_cvp_unmap_buf_wncc(inst, buf);

	cvp_put_inst(s);
	return rc;
}