msm_vidc_driver.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <media/msm_vidc_utils.h>
#include <media/msm_media_info.h>

#include "msm_vidc_driver.h"
#include "msm_vidc_platform.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "venus_hfi.h"
#include "msm_vidc.h"

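/*
 * COUNT_BITS() - accumulate the number of set bits of @a into @out.
 * Note: the macro consumes @a while counting, so callers must pass a
 * scratch copy (as msm_vidc_init_instance_caps() does below).
 */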
#define COUNT_BITS(a, out) ({ \
	while ((a) >= 1) { \
		(out) += (a) & (1); \
		(a) >>= (1); \
	} \
})

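/* log a driver buffer, including its meta buffer when one is attached */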
void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
		struct msm_vidc_buffer *vbuf)
{
	struct msm_vidc_buffer *mbuf;

	if (!(tag & msm_vidc_debug) || !inst || !vbuf)
		return;

	mbuf = get_meta_buffer(inst, vbuf);
	if (!mbuf)
		dprintk(tag, inst->sid,
			"%s: %s: idx %2d fd %3d off %d daddr %#x size %d filled %d flags %#x ts %lld attr %#x\n",
			str, vbuf->type == MSM_VIDC_BUF_INPUT ? "INPUT" : "OUTPUT",
			vbuf->index, vbuf->fd, vbuf->data_offset,
			vbuf->device_addr, vbuf->buffer_size, vbuf->data_size,
			vbuf->flags, vbuf->timestamp, vbuf->attr);
	else
		dprintk(tag, inst->sid,
			"%s: %s: idx %2d fd %3d off %d daddr %#x size %d filled %d flags %#x ts %lld attr %#x meta: fd %3d daddr %#x size %d\n",
			str, vbuf->type == MSM_VIDC_BUF_INPUT ? "INPUT" : "OUTPUT",
			vbuf->index, vbuf->fd, vbuf->data_offset,
			vbuf->device_addr, vbuf->buffer_size, vbuf->data_size,
			vbuf->flags, vbuf->timestamp, vbuf->attr,
			mbuf->fd, mbuf->device_addr, mbuf->buffer_size);
}

void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst,
		struct vb2_buffer *vb2)
{
	if (!inst || !vb2)
		return;

	s_vpr_e(inst->sid,
		"%s: %s: idx %2d fd %d off %d size %d filled %d\n",
		str, vb2->type == INPUT_PLANE ? "INPUT" : "OUTPUT",
		vb2->index, vb2->planes[0].m.fd,
		vb2->planes[0].data_offset, vb2->planes[0].length,
		vb2->planes[0].bytesused);
}

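/*
 * v4l2 <-> driver converters: translate v4l2 buffer/codec/colorformat
 * identifiers to and from the internal msm_vidc enums. They log and
 * return 0 for an unknown input (v4l2_colorformat_to_media() instead
 * falls back to NV12), so callers treat 0 as "not found".
 */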
enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type)
{
	enum msm_vidc_buffer_type buffer_type = 0;

	switch (type) {
	case INPUT_PLANE:
		buffer_type = MSM_VIDC_BUF_INPUT;
		break;
	case OUTPUT_PLANE:
		buffer_type = MSM_VIDC_BUF_OUTPUT;
		break;
	case INPUT_META_PLANE:
		buffer_type = MSM_VIDC_BUF_INPUT_META;
		break;
	case OUTPUT_META_PLANE:
		buffer_type = MSM_VIDC_BUF_OUTPUT_META;
		break;
	default:
		d_vpr_e("%s: vidc buffer type not found for %#x\n",
			__func__, type);
		break;
	}
	return buffer_type;
}

u32 v4l2_type_from_driver(enum msm_vidc_buffer_type buffer_type)
{
	u32 type = 0;

	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		type = INPUT_PLANE;
		break;
	case MSM_VIDC_BUF_OUTPUT:
		type = OUTPUT_PLANE;
		break;
	case MSM_VIDC_BUF_INPUT_META:
		type = INPUT_META_PLANE;
		break;
	case MSM_VIDC_BUF_OUTPUT_META:
		type = OUTPUT_META_PLANE;
		break;
	default:
		d_vpr_e("%s: v4l2 type not found for %#x\n",
			__func__, buffer_type);
		break;
	}
	/* return the computed v4l2 type, not the driver enum passed in */
	return type;
}

enum msm_vidc_codec_type v4l2_codec_to_driver(u32 v4l2_codec)
{
	enum msm_vidc_codec_type codec = 0;

	switch (v4l2_codec) {
	case V4L2_PIX_FMT_H264:
		codec = MSM_VIDC_H264;
		break;
	case V4L2_PIX_FMT_HEVC:
		codec = MSM_VIDC_HEVC;
		break;
	case V4L2_PIX_FMT_VP9:
		codec = MSM_VIDC_VP9;
		break;
	case V4L2_PIX_FMT_MPEG2:
		codec = MSM_VIDC_MPEG2;
		break;
	default:
		d_vpr_e("%s: vidc codec not found for %#x\n", __func__, v4l2_codec);
		break;
	}
	return codec;
}

u32 v4l2_codec_from_driver(enum msm_vidc_codec_type codec)
{
	u32 v4l2_codec = 0;

	switch (codec) {
	case MSM_VIDC_H264:
		v4l2_codec = V4L2_PIX_FMT_H264;
		break;
	case MSM_VIDC_HEVC:
		v4l2_codec = V4L2_PIX_FMT_HEVC;
		break;
	case MSM_VIDC_VP9:
		v4l2_codec = V4L2_PIX_FMT_VP9;
		break;
	case MSM_VIDC_MPEG2:
		v4l2_codec = V4L2_PIX_FMT_MPEG2;
		break;
	default:
		d_vpr_e("%s: v4l2 codec not found for %#x\n", __func__, codec);
		break;
	}
	return v4l2_codec;
}

enum msm_vidc_colorformat_type v4l2_colorformat_to_driver(u32 v4l2_colorformat)
{
	enum msm_vidc_colorformat_type colorformat = 0;

	switch (v4l2_colorformat) {
	case V4L2_PIX_FMT_NV12:
		colorformat = MSM_VIDC_FMT_NV12;
		break;
	case V4L2_PIX_FMT_NV21:
		colorformat = MSM_VIDC_FMT_NV21;
		break;
	case V4L2_PIX_FMT_NV12_UBWC:
		colorformat = MSM_VIDC_FMT_NV12_UBWC;
		break;
	case V4L2_PIX_FMT_NV12_TP10_UBWC:
		colorformat = MSM_VIDC_FMT_NV12_TP10_UBWC;
		break;
	case V4L2_PIX_FMT_RGBA8888_UBWC:
		colorformat = MSM_VIDC_FMT_RGBA8888_UBWC;
		break;
	case V4L2_PIX_FMT_NV12_P010_UBWC:
	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS:
		colorformat = MSM_VIDC_FMT_SDE_Y_CBCR_H2V2_P010_VENUS;
		break;
	default:
		d_vpr_e("%s: vidc format not found for %#x\n", __func__, v4l2_colorformat);
		break;
	}
	return colorformat;
}

u32 v4l2_colorformat_from_driver(enum msm_vidc_colorformat_type colorformat)
{
	u32 v4l2_colorformat = 0;

	switch (colorformat) {
	case MSM_VIDC_FMT_NV12:
		v4l2_colorformat = V4L2_PIX_FMT_NV12;
		break;
	case MSM_VIDC_FMT_NV21:
		v4l2_colorformat = V4L2_PIX_FMT_NV21;
		break;
	case MSM_VIDC_FMT_NV12_UBWC:
		v4l2_colorformat = V4L2_PIX_FMT_NV12_UBWC;
		break;
	case MSM_VIDC_FMT_NV12_TP10_UBWC:
		v4l2_colorformat = V4L2_PIX_FMT_NV12_TP10_UBWC;
		break;
	case MSM_VIDC_FMT_RGBA8888_UBWC:
		v4l2_colorformat = V4L2_PIX_FMT_RGBA8888_UBWC;
		break;
	case MSM_VIDC_FMT_NV12_P010_UBWC:
	case MSM_VIDC_FMT_SDE_Y_CBCR_H2V2_P010_VENUS:
		v4l2_colorformat = V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS;
		break;
	default:
		d_vpr_e("%s: v4l2 format not found for %#x\n", __func__, colorformat);
		break;
	}
	return v4l2_colorformat;
}

u32 v4l2_colorformat_to_media(u32 v4l2_fmt)
{
	switch (v4l2_fmt) {
	case V4L2_PIX_FMT_NV12:
		return COLOR_FMT_NV12;
	case V4L2_PIX_FMT_NV21:
		return COLOR_FMT_NV21;
	case V4L2_PIX_FMT_NV12_512:
		return COLOR_FMT_NV12_512;
	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS:
		return COLOR_FMT_P010;
	case V4L2_PIX_FMT_NV12_UBWC:
		return COLOR_FMT_NV12_UBWC;
	case V4L2_PIX_FMT_NV12_TP10_UBWC:
		return COLOR_FMT_NV12_BPP10_UBWC;
	case V4L2_PIX_FMT_RGBA8888_UBWC:
		return COLOR_FMT_RGBA8888_UBWC;
	default:
		d_vpr_e("%s: invalid v4l2 color fmt %#x, using default NV12\n",
			__func__, v4l2_fmt);
		return COLOR_FMT_NV12;
	}
}

int v4l2_type_to_driver_port(struct msm_vidc_inst *inst, u32 type,
		const char *func)
{
	int port;

	if (type == INPUT_PLANE) {
		port = INPUT_PORT;
	} else if (type == INPUT_META_PLANE) {
		port = INPUT_META_PORT;
	} else if (type == OUTPUT_PLANE) {
		port = OUTPUT_PORT;
	} else if (type == OUTPUT_META_PLANE) {
		port = OUTPUT_META_PORT;
	} else {
		s_vpr_e(inst->sid, "%s: invalid type %d\n", func, type);
		port = -EINVAL;
	}
	return port;
}

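/*
 * For secure sessions, choose the secure region a buffer must be mapped
 * into: compressed bitstream buffers go to the secure-bitstream region,
 * raw YUV/RGB buffers to the secure-pixel region and internal non-pixel
 * data to the secure-nonpixel region. Meta buffers and all buffers of
 * non-secure sessions stay in MSM_VIDC_NON_SECURE.
 */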
u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type, const char *func)
{
	u32 region = MSM_VIDC_NON_SECURE;

	if (!is_secure_session(inst))
		return region;

	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_PIXEL;
		else
			region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_OUTPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_BITSTREAM;
		else
			region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_INPUT_META:
	case MSM_VIDC_BUF_OUTPUT_META:
		region = MSM_VIDC_NON_SECURE;
		break;
	case MSM_VIDC_BUF_SCRATCH:
		region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_SCRATCH_1:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	case MSM_VIDC_BUF_SCRATCH_2:
		region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_PERSIST:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_NONPIXEL;
		else
			region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_PERSIST_1:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			func, buffer_type);
	}
	return region;
}

struct msm_vidc_buffers *msm_vidc_get_buffers(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->buffers.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->buffers.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->buffers.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->buffers.output_meta;
	case MSM_VIDC_BUF_SCRATCH:
		return &inst->buffers.scratch;
	case MSM_VIDC_BUF_SCRATCH_1:
		return &inst->buffers.scratch_1;
	case MSM_VIDC_BUF_SCRATCH_2:
		return &inst->buffers.scratch_2;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->buffers.persist;
	case MSM_VIDC_BUF_PERSIST_1:
		return &inst->buffers.persist_1;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}

struct msm_vidc_mappings *msm_vidc_get_mappings(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->mappings.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->mappings.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->mappings.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->mappings.output_meta;
	case MSM_VIDC_BUF_SCRATCH:
		return &inst->mappings.scratch;
	case MSM_VIDC_BUF_SCRATCH_1:
		return &inst->mappings.scratch_1;
	case MSM_VIDC_BUF_SCRATCH_2:
		return &inst->mappings.scratch_2;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->mappings.persist;
	case MSM_VIDC_BUF_PERSIST_1:
		return &inst->mappings.persist_1;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}

struct msm_vidc_allocations *msm_vidc_get_allocations(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_SCRATCH:
		return &inst->allocations.scratch;
	case MSM_VIDC_BUF_SCRATCH_1:
		return &inst->allocations.scratch_1;
	case MSM_VIDC_BUF_SCRATCH_2:
		return &inst->allocations.scratch_2;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->allocations.persist;
	case MSM_VIDC_BUF_PERSIST_1:
		return &inst->allocations.persist_1;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}

int msm_vidc_change_inst_state(struct msm_vidc_inst *inst,
		enum msm_vidc_inst_state request_state, const char *func)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!request_state) {
		s_vpr_e(inst->sid, "%s: invalid request state\n", func);
		return -EINVAL;
	}

	if (inst->state == MSM_VIDC_ERROR) {
		s_vpr_h(inst->sid,
			"%s: inst is in bad state, cannot change state to %d\n",
			func, request_state);
		return 0;
	}

	s_vpr_h(inst->sid, "%s: state changed from %d to %d\n",
		func, inst->state, request_state);
	inst->state = request_state;
	return 0;
}

int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
{
	int rc = 0;

	if (!inst || !ctrl) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		ctrl->val = inst->buffers.output.min_count +
			inst->buffers.output.extra_count;
		s_vpr_h(inst->sid, "g_min: output buffers %d\n", ctrl->val);
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
		ctrl->val = inst->buffers.input.min_count +
			inst->buffers.input.extra_count;
		s_vpr_h(inst->sid, "g_min: input buffers %d\n", ctrl->val);
		break;
	default:
		break;
	}
	return rc;
}

static int vb2_buffer_to_driver(struct vb2_buffer *vb2,
		struct msm_vidc_buffer *buf)
{
	int rc = 0;

	if (!vb2 || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	buf->valid = true;
	buf->type = v4l2_type_to_driver(vb2->type);
	if (!buf->type)
		return -EINVAL;
	buf->index = vb2->index;
	buf->fd = vb2->planes[0].m.fd;
	buf->data_offset = vb2->planes[0].data_offset;
	buf->data_size = vb2->planes[0].bytesused;
	buf->buffer_size = vb2->planes[0].length;
	buf->timestamp = vb2->timestamp;

	return rc;
}

int msm_vidc_unmap_driver_buf(struct msm_vidc_inst *inst,
		struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;

	/* sanity check to see if it was not removed */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "no buf in mappings", inst, buf);
		return -EINVAL;
	}

	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc) {
		print_vidc_buffer(VIDC_ERR, "unmap failed", inst, buf);
		return -EINVAL;
	}

	/* finally delete if refcount is zero */
	if (!map->refcount) {
		list_del(&map->list);
		kfree(map);
	}

	return 0;
}

int msm_vidc_put_driver_buf(struct msm_vidc_inst *inst,
		struct msm_vidc_buffer *buf)
{
	int rc = 0;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* do not unmap / delete a read-only buffer */
	if (buf->attr & MSM_VIDC_ATTR_READ_ONLY)
		return 0;

	rc = msm_vidc_unmap_driver_buf(inst, buf);
	if (rc)
		return rc;

	/* delete the buffer from buffers->list */
	list_del(&buf->list);
	kfree(buf);

	return 0;
}

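/*
 * Map a driver buffer into the device address space. An existing map of
 * the same dmabuf is reused: read-only buffers keep the current refcount,
 * all others take an extra mapping reference. Otherwise a new map entry
 * is created, mapped and added to the mappings list.
 */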
int msm_vidc_map_driver_buf(struct msm_vidc_inst *inst,
		struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;

	/* check if it is an existing one */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (found) {
		/* skip mapping for RO buffer */
		if (!(buf->attr & MSM_VIDC_ATTR_READ_ONLY)) {
			rc = msm_vidc_memory_map(inst->core, map);
			if (rc)
				return -ENOMEM;
			buf->device_addr = map->device_addr;
		}
		return 0;
	}

	map = kzalloc(sizeof(struct msm_vidc_map), GFP_KERNEL);
	if (!map) {
		s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&map->list);
	map->type = buf->type;
	map->dmabuf = buf->dmabuf;
	map->region = msm_vidc_get_buffer_region(inst, buf->type, __func__);
	rc = msm_vidc_memory_map(inst->core, map);
	if (rc) {
		kfree(map);
		return -ENOMEM;
	}
	buf->device_addr = map->device_addr;
	list_add_tail(&map->list, &mappings->list);

	return 0;
}

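/*
 * Translate a vb2 buffer into a driver buffer, reusing the tracked entry
 * when the same dmabuf/data_offset is queued again. Note: the dmabuf
 * reference taken here is dropped immediately; the pointer is used only
 * as a lookup key, since queued buffers are expected to be pinned by
 * the mapping taken in msm_vidc_map_driver_buf().
 */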
struct msm_vidc_buffer *msm_vidc_get_driver_buf(struct msm_vidc_inst *inst,
		struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf = NULL;
	struct msm_vidc_buffers *buffers;
	struct dma_buf *dmabuf;
	enum msm_vidc_buffer_type buf_type;
	bool found = false;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	buf_type = v4l2_type_to_driver(vb2->type);
	if (!buf_type)
		return NULL;

	buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
	if (!buffers)
		return NULL;

	dmabuf = msm_vidc_memory_get_dmabuf(vb2->planes[0].m.fd);
	if (!dmabuf)
		return NULL;
	msm_vidc_memory_put_dmabuf(dmabuf);

	/* check if it is an existing buffer */
	list_for_each_entry(buf, &buffers->list, list) {
		if (buf->dmabuf == dmabuf &&
		    buf->data_offset == vb2->planes[0].data_offset) {
			found = true;
			break;
		}
	}
	if (found) {
		/* only YUV buffers are allowed to repeat */
		if ((is_decode_session(inst) && vb2->type != OUTPUT_PLANE) ||
		    (is_encode_session(inst) && vb2->type != INPUT_PLANE)) {
			print_vidc_buffer(VIDC_ERR,
				"existing buffer", inst, buf);
			goto error;
		}
		/* for decoder, YUV with RO flag are allowed to repeat */
		if (is_decode_session(inst) &&
		    !(buf->attr & MSM_VIDC_ATTR_READ_ONLY)) {
			print_vidc_buffer(VIDC_ERR,
				"existing buffer without RO flag", inst, buf);
			goto error;
		}
		/* for encoder, treat the repeated buffer as a new buffer */
		if (is_encode_session(inst) && vb2->type == INPUT_PLANE)
			found = false;
	}
	/*
	 * allocate whenever no reusable entry exists, including the
	 * encoder repeat case above which re-adds the buffer as new
	 */
	if (!found) {
		buf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
		if (!buf) {
			s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
			goto error;
		}
		buf->dmabuf = dmabuf;
	}
	rc = vb2_buffer_to_driver(vb2, buf);
	if (rc)
		goto error;
	rc = msm_vidc_map_driver_buf(inst, buf);
	if (rc)
		goto error;
	/* add to the tracking list only after the buffer is fully set up */
	if (!found)
		list_add_tail(&buf->list, &buffers->list);

	return buf;

error:
	if (!found)
		kfree(buf);
	return NULL;
}

struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
		struct msm_vidc_buffer *buf)
{
	struct msm_vidc_buffer *mbuf;
	struct msm_vidc_buffers *meta;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}
	if (buf->type == MSM_VIDC_BUF_INPUT) {
		meta = &inst->buffers.input_meta;
	} else if (buf->type == MSM_VIDC_BUF_OUTPUT) {
		meta = &inst->buffers.output_meta;
	} else {
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			__func__, buf->type);
		return NULL;
	}
	list_for_each_entry(mbuf, &meta->list, list) {
		if (!mbuf->valid)
			continue;
		/*
		 * the meta list holds only meta buffers (mbuf->type is the
		 * meta counterpart of buf->type, never equal to it), so
		 * matching on index alone is sufficient
		 */
		if (mbuf->index == buf->index) {
			found = true;
			break;
		}
	}
	if (!found)
		return NULL;

	return mbuf;
}

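/*
 * Queue a vb2 buffer to firmware. Meta buffers are not sent on their
 * own; they ride along with the input/output buffer they describe. A
 * buffer arriving before streamon completes is only marked
 * MSM_VIDC_ATTR_DEFERRED, to be queued once streaming starts.
 */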
int msm_vidc_queue_buffer(struct msm_vidc_inst *inst, struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf;
	int port;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	buf = msm_vidc_get_driver_buf(inst, vb2);
	if (!buf)
		return -EINVAL;

	/* meta buffer will be queued along with actual buffer */
	if (buf->type == MSM_VIDC_BUF_INPUT_META ||
	    buf->type == MSM_VIDC_BUF_OUTPUT_META)
		return 0;

	/* skip queuing if streamon not completed */
	port = v4l2_type_to_driver_port(inst, vb2->type, __func__);
	if (port < 0)
		return -EINVAL;
	if (!inst->vb2q[port].streaming) {
		buf->attr |= MSM_VIDC_ATTR_DEFERRED;
		print_vidc_buffer(VIDC_HIGH, "qbuf deferred", inst, buf);
		return 0;
	}

	print_vidc_buffer(VIDC_HIGH, "qbuf", inst, buf);
	rc = venus_hfi_queue_buffer(inst, buf, get_meta_buffer(inst, buf));
	return rc;
}

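/*
 * Allocate a session's internal (scratch/persist) buffers: for each of
 * the min_count buffers, create a tracking node, allocate the backing
 * memory and map it, threading each object onto its respective list.
 */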
int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
		enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_allocations *allocations;
	struct msm_vidc_mappings *mappings;
	int i;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}

	buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
	if (!buffers)
		return -EINVAL;
	allocations = msm_vidc_get_allocations(inst, buffer_type, __func__);
	if (!allocations)
		return -EINVAL;
	mappings = msm_vidc_get_mappings(inst, buffer_type, __func__);
	if (!mappings)
		return -EINVAL;

	for (i = 0; i < buffers->min_count; i++) {
		struct msm_vidc_buffer *buffer;
		struct msm_vidc_alloc *alloc;
		struct msm_vidc_map *map;

		if (!buffers->size) {
			s_vpr_e(inst->sid, "%s: invalid buffer %#x\n",
				__func__, buffer_type);
			return -EINVAL;
		}
		buffer = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
		if (!buffer) {
			s_vpr_e(inst->sid, "%s: buf alloc failed\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&buffer->list);
		buffer->valid = true;
		buffer->type = buffer_type;
		buffer->index = i;
		buffer->buffer_size = buffers->size;
		list_add_tail(&buffer->list, &buffers->list);

		alloc = kzalloc(sizeof(struct msm_vidc_alloc), GFP_KERNEL);
		if (!alloc) {
			s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&alloc->list);
		alloc->type = buffer_type;
		alloc->region = msm_vidc_get_buffer_region(inst,
			buffer_type, __func__);
		alloc->size = buffer->buffer_size;
		rc = msm_vidc_memory_alloc(inst->core, alloc);
		if (rc)
			return -ENOMEM;
		list_add_tail(&alloc->list, &allocations->list);

		map = kzalloc(sizeof(struct msm_vidc_map), GFP_KERNEL);
		if (!map) {
			s_vpr_e(inst->sid, "%s: map alloc failed\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&map->list);
		map->type = alloc->type;
		map->region = alloc->region;
		map->dmabuf = alloc->dmabuf;
		rc = msm_vidc_memory_map(inst->core, map);
		if (rc)
			return -ENOMEM;
		list_add_tail(&map->list, &mappings->list);

		s_vpr_h(inst->sid, "%s: created buffer_type %#x, size %d\n",
			__func__, buffer_type, buffers->size);
	}

	return 0;
}

int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
		enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buffer, *dummy;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}

	buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
	if (!buffers)
		return -EINVAL;

	list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
		/* do not queue buffers that are pending release */
		if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
			continue;
		/* do not queue already queued buffers */
		if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
			continue;
		rc = venus_hfi_queue_buffer(inst, buffer, NULL);
		if (rc)
			return rc;
		/* mark queued */
		buffer->attr |= MSM_VIDC_ATTR_QUEUED;

		s_vpr_h(inst->sid, "%s: queued buffer_type %#x, size %d\n",
			__func__, buffer_type, buffers->size);
	}

	return 0;
}

int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
		enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buffer, *dummy;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}

	buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
	if (!buffers)
		return -EINVAL;

	list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
		/* do not release buffers that are already pending release */
		if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
			continue;
		/* release only queued buffers */
		if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
			continue;
		rc = venus_hfi_release_buffer(inst, buffer);
		if (rc)
			return rc;
		/* mark pending release */
		buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;

		s_vpr_h(inst->sid, "%s: released buffer_type %#x, size %d\n",
			__func__, buffer_type, buffers->size);
	}

	return 0;
}

int msm_vidc_setup_event_queue(struct msm_vidc_inst *inst)
{
	int rc = 0;
	int index;
	struct msm_vidc_core *core;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	// TODO: check decode is index = 0 and encode is index 1
	if (is_decode_session(inst))
		index = 0;
	else if (is_encode_session(inst))
		index = 1;
	else
		return -EINVAL;

	v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
	v4l2_fh_add(&inst->event_handler);

	return rc;
}

static int vb2q_init(struct msm_vidc_inst *inst,
		struct vb2_queue *q, enum v4l2_buf_type type)
{
	struct msm_vidc_core *core;

	if (!inst || !q || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	q->type = type;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	q->ops = core->vb2_ops;
	q->mem_ops = core->vb2_mem_ops;
	q->drv_priv = inst;
	q->allow_zero_bytesused = 1;
	q->copy_timestamp = 1;

	return vb2_queue_init(q);
}

int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
{
	int rc = 0;

	d_vpr_h("%s()\n", __func__);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = vb2q_init(inst, &inst->vb2q[INPUT_PORT], INPUT_PLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[OUTPUT_PORT], OUTPUT_PLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[INPUT_META_PORT], INPUT_META_PLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[OUTPUT_META_PORT], OUTPUT_META_PLANE);
	return rc;
}

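/* register the instance with the core and derive its session id */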
int msm_vidc_add_session(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_inst *i;
	struct msm_vidc_core *core;
	u32 count = 0;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	mutex_lock(&core->lock);
	list_for_each_entry(i, &core->instances, list)
		count++;

	if (count < 0xffffff /* TODO: MAX_SUPPORTED_INSTANCES */) {
		list_add_tail(&inst->list, &core->instances);
	} else {
		d_vpr_e("%s: total sessions %d exceeded max limit %d\n",
			__func__, count, MAX_SUPPORTED_INSTANCES);
		rc = -EINVAL;
	}
	mutex_unlock(&core->lock);

	/* assign session_id */
	inst->session_id = count + 1;
	inst->sid = inst->session_id;

	return rc;
}

int msm_vidc_session_open(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_open(inst);
	if (rc)
		return rc;
	inst->session_created = true;

	return 0;
}

int msm_vidc_get_inst_capability(struct msm_vidc_inst *inst)
{
	int rc = 0;
	int i;
	bool found = false;
	struct msm_vidc_core *core;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	for (i = 0; i < core->codecs_count; i++) {
		if (core->inst_caps[i].domain == inst->domain &&
		    core->inst_caps[i].codec == inst->codec) {
			s_vpr_h(inst->sid,
				"%s: copied capabilities with %#x codec\n",
				__func__, inst->codec);
			memcpy(inst->capabilities, &core->inst_caps[i],
				sizeof(struct msm_vidc_inst_capability));
			found = true;
			break;
		}
	}
	/*
	 * track a successful match explicitly: inst->capabilities is
	 * always non-NULL here, so re-checking the pointer cannot detect
	 * a missing (domain, codec) entry
	 */
	if (!found) {
		s_vpr_e(inst->sid, "%s: capabilities not found\n", __func__);
		return -EINVAL;
	}
	return rc;
}

static int msm_vidc_init_core_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	int i, num_platform_caps;
	struct msm_platform_core_capability *platform_data;

	if (!core || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	platform_data = core->platform->data.core_data;
	if (!platform_data) {
		d_vpr_e("%s: platform core data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto exit;
	}

	if (!core->capabilities) {
		core->capabilities = kcalloc(CORE_CAP_MAX,
			sizeof(struct msm_vidc_core_capability), GFP_KERNEL);
		if (!core->capabilities) {
			d_vpr_e("%s: failed to allocate core capabilities\n",
				__func__);
			rc = -ENOMEM;
			goto exit;
		}
	} else {
		d_vpr_e("%s: capabilities memory is expected to be freed\n",
			__func__);
	}

	num_platform_caps = core->platform->data.core_data_size;

	/* loop over platform caps */
	for (i = 0; i < num_platform_caps; i++) {
		core->capabilities[platform_data[i].type].type = platform_data[i].type;
		core->capabilities[platform_data[i].type].value = platform_data[i].value;
	}

exit:
	return rc;
}

static void update_inst_capability(struct msm_platform_inst_capability *in,
		struct msm_vidc_inst_capability *capability)
{
	if (!in || !capability) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, in, capability);
		return;
	}
	if (in->cap < INST_CAP_MAX) {
		capability->cap[in->cap].cap = in->cap;
		capability->cap[in->cap].min = in->min;
		capability->cap[in->cap].max = in->max;
		capability->cap[in->cap].step_or_mask = in->step_or_mask;
		capability->cap[in->cap].value = in->value;
		capability->cap[in->cap].flags = in->flags;
		capability->cap[in->cap].v4l2_id = in->v4l2_id;
		capability->cap[in->cap].hfi_id = in->hfi_id;
		memcpy(capability->cap[in->cap].parents, in->parents,
			sizeof(capability->cap[in->cap].parents));
		memcpy(capability->cap[in->cap].children, in->children,
			sizeof(capability->cap[in->cap].children));
		capability->cap[in->cap].adjust = in->adjust;
		capability->cap[in->cap].set = in->set;
	} else {
		d_vpr_e("%s: invalid cap %d\n",
			__func__, in->cap);
	}
}

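/*
 * Build the per-codec instance capability table: count the encoder and
 * decoder codec bits advertised by the platform, assign one inst_caps
 * slot per (domain, codec) pair, then overlay every matching platform
 * capability entry onto those slots.
 */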
static int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	u8 enc_valid_codecs, dec_valid_codecs;
	u8 count_bits, enc_codec_count;
	u8 codecs_count = 0;
	int i, j, check_bit, num_platform_caps;
	struct msm_platform_inst_capability *platform_data = NULL;

	if (!core || !core->platform || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	platform_data = core->platform->data.instance_data;
	if (!platform_data) {
		d_vpr_e("%s: platform instance data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto exit;
	}

	enc_valid_codecs = core->capabilities[ENC_CODECS].value;
	count_bits = enc_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	enc_codec_count = codecs_count;

	dec_valid_codecs = core->capabilities[DEC_CODECS].value;
	count_bits = dec_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	core->codecs_count = codecs_count;

	if (!core->inst_caps) {
		core->inst_caps = kcalloc(codecs_count,
			sizeof(struct msm_vidc_inst_capability),
			GFP_KERNEL);
		if (!core->inst_caps) {
			d_vpr_e("%s: failed to allocate instance capabilities\n",
				__func__);
			rc = -ENOMEM;
			goto exit;
		}
	} else {
		d_vpr_e("%s: capabilities memory is expected to be freed\n",
			__func__);
	}

	check_bit = 0;
	/* determine codecs for enc domain */
	for (i = 0; i < enc_codec_count; i++) {
		while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
			if (enc_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_ENCODER;
				core->inst_caps[i].codec = enc_valid_codecs &
					BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	/* reset check_bit to rescan from bit 0 of the decoder codec bits */
	check_bit = 0;
	/* determine codecs for dec domain */
	for (; i < codecs_count; i++) {
		while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
			if (dec_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_DECODER;
				core->inst_caps[i].codec = dec_valid_codecs &
					BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	num_platform_caps = core->platform->data.instance_data_size;
	d_vpr_h("%s: num caps %d\n", __func__, num_platform_caps);
	/* loop over each platform capability */
	for (i = 0; i < num_platform_caps; i++) {
		/* select matching core codec and update it */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_data[i].domain &
				core->inst_caps[j].domain) &&
				(platform_data[i].codec &
				core->inst_caps[j].codec)) {
				/* update core capability */
				update_inst_capability(&platform_data[i],
					&core->inst_caps[j]);
			}
		}
	}

exit:
	return rc;
}

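/*
 * One-time core bring-up: initialize the core and instance capability
 * tables, move the core to INIT state, boot the firmware through
 * venus_hfi_core_init() and wait (with a timeout) for the sys-init
 * response from firmware.
 */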
int msm_vidc_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	d_vpr_h("%s()\n", __func__);
	if (!core || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&core->lock);
	if (core->state == MSM_VIDC_CORE_ERROR) {
		d_vpr_e("%s: core invalid state\n", __func__);
		rc = -EINVAL;
		goto unlock;
	}
	if (core->state == MSM_VIDC_CORE_INIT) {
		rc = 0;
		goto unlock;
	}

	rc = msm_vidc_init_core_caps(core);
	if (rc)
		goto unlock;
	rc = msm_vidc_init_instance_caps(core);
	if (rc)
		goto unlock;

	core->state = MSM_VIDC_CORE_INIT;
	init_completion(&core->init_done);
	core->smmu_fault_handled = false;
	core->ssr.trigger = false;

	rc = venus_hfi_core_init(core);
	if (rc) {
		d_vpr_e("%s: core init failed\n", __func__);
		core->state = MSM_VIDC_CORE_DEINIT;
		goto unlock;
	}
	mutex_unlock(&core->lock);

	/* TODO: acquire lock or not */
	d_vpr_h("%s(): waiting for sys init done, %d ms\n", __func__,
		core->platform->data.core_data[HW_RESPONSE_TIMEOUT].value);
	rc = wait_for_completion_timeout(&core->init_done, msecs_to_jiffies(
		core->platform->data.core_data[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		d_vpr_e("%s: system init timed out\n", __func__);
		//msm_comm_kill_session(inst);
		//rc = -EIO;
	} else {
		d_vpr_h("%s: system init wait completed\n", __func__);
		rc = 0;
	}
	mutex_lock(&core->lock);

unlock:
	mutex_unlock(&core->lock);
	return rc;
}

int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags, void *data)
{
	return -EINVAL;
}

int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
		enum msm_vidc_ssr_trigger_type type)
{
	return 0;
}

void msm_vidc_ssr_handler(struct work_struct *work)
{
}

void msm_vidc_pm_work_handler(struct work_struct *work)
{
}

void msm_vidc_fw_unload_handler(struct work_struct *work)
{
}

void msm_vidc_batch_handler(struct work_struct *work)
{
}

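/*
 * Look up an instance by session id under the core lock. On a match a
 * kref is taken, which the caller must drop with put_inst().
 */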
struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
		u32 session_id)
{
	struct msm_vidc_inst *inst = NULL;
	bool matches = false;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list) {
		if (inst->session_id == session_id) {
			matches = true;
			break;
		}
	}
	inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
	mutex_unlock(&core->lock);
	return inst;
}

static void put_inst_helper(struct kref *kref)
{
	struct msm_vidc_inst *inst = container_of(kref,
		struct msm_vidc_inst, kref);

	msm_vidc_close(inst);
}

void put_inst(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	kref_put(&inst->kref, put_inst_helper);
}

void core_lock(struct msm_vidc_core *core, const char *function)
{
	mutex_lock(&core->lock);
}

void core_unlock(struct msm_vidc_core *core, const char *function)
{
	mutex_unlock(&core->lock);
}

void inst_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->lock);
}

void inst_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->lock);
}