/* msm_vidc_vb2.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. /* Copyright (c) 2022-2023. Qualcomm Innovation Center, Inc. All rights reserved. */
  6. #include "msm_vidc_vb2.h"
  7. #include "msm_vidc_core.h"
  8. #include "msm_vidc_inst.h"
  9. #include "msm_vidc_internal.h"
  10. #include "msm_vidc_driver.h"
  11. #include "msm_vidc_power.h"
  12. #include "msm_vidc_debug.h"
  13. #include "msm_vdec.h"
  14. #include "msm_venc.h"
  15. #include "msm_vidc_control.h"
  16. #include "msm_vidc_platform.h"
  17. extern struct msm_vidc_core *g_core;
  18. struct vb2_queue *msm_vidc_get_vb2q(struct msm_vidc_inst *inst,
  19. u32 type, const char *func)
  20. {
  21. struct vb2_queue *q = NULL;
  22. if (!inst) {
  23. d_vpr_e("%s: invalid params\n", func);
  24. return NULL;
  25. }
  26. if (type == INPUT_MPLANE) {
  27. q = inst->bufq[INPUT_PORT].vb2q;
  28. } else if (type == OUTPUT_MPLANE) {
  29. q = inst->bufq[OUTPUT_PORT].vb2q;
  30. } else if (type == INPUT_META_PLANE) {
  31. q = inst->bufq[INPUT_META_PORT].vb2q;
  32. } else if (type == OUTPUT_META_PLANE) {
  33. q = inst->bufq[OUTPUT_META_PORT].vb2q;
  34. } else {
  35. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  36. __func__, type);
  37. }
  38. return q;
  39. }
  40. #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
/*
 * vb2 mem_ops .alloc hook (pre-5.15 kernel signature).
 * Buffer memory is managed by the driver itself, so vb2 only needs a
 * non-NULL cookie here; the dummy value is never dereferenced.
 */
void *msm_vb2_alloc(struct device *dev, unsigned long attrs,
	unsigned long size, enum dma_data_direction dma_dir,
	gfp_t gfp_flags)
{
	return (void *)0xdeadbeef;
}
/*
 * vb2 mem_ops .attach_dmabuf hook (pre-5.15 kernel signature).
 * Attachment is handled by the driver's own buffer management, so vb2
 * receives only a non-NULL dummy cookie; it is never dereferenced.
 */
void *msm_vb2_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	return (void *)0xdeadbeef;
}
  52. #else
/*
 * vb2 mem_ops .alloc hook (5.15+ kernel signature).
 * Buffer memory is managed by the driver itself, so vb2 only needs a
 * non-NULL cookie here; the dummy value is never dereferenced.
 */
void *msm_vb2_alloc(struct vb2_buffer *vb, struct device *dev,
	unsigned long size)
{
	return (void *)0xdeadbeef;
}
  58. void *msm_vb2_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
  59. struct dma_buf *dbuf, unsigned long size)
  60. {
  61. struct msm_vidc_inst *inst;
  62. struct msm_vidc_core *core;
  63. struct msm_vidc_buffer *buf = NULL;
  64. if (!vb || !dev || !dbuf || !vb->vb2_queue) {
  65. d_vpr_e("%s: invalid params\n", __func__);
  66. return NULL;
  67. }
  68. inst = vb->vb2_queue->drv_priv;
  69. inst = get_inst_ref(g_core, inst);
  70. if (!inst || !inst->core) {
  71. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  72. return NULL;
  73. }
  74. core = inst->core;
  75. buf = msm_vidc_fetch_buffer(inst, vb);
  76. if (!buf) {
  77. i_vpr_e(inst, "%s: failed to fetch buffer\n", __func__);
  78. buf = NULL;
  79. goto exit;
  80. }
  81. buf->inst = inst;
  82. buf->attach = call_mem_op(core, dma_buf_attach, core, dbuf, dev);
  83. if (!buf->attach) {
  84. buf->attach = NULL;
  85. buf = NULL;
  86. goto exit;
  87. }
  88. buf->dmabuf = dbuf;
  89. print_vidc_buffer(VIDC_LOW, "low ", "attach", inst, buf);
  90. exit:
  91. if (!buf)
  92. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  93. put_inst(inst);
  94. return buf;
  95. }
  96. #endif
/*
 * vb2 mem_ops .put hook: intentionally empty. Buffer lifetime is managed
 * by the driver (see detach/unmap hooks), not by vb2.
 */
void msm_vb2_put(void *buf_priv)
{
}
/*
 * vb2 mem_ops .mmap hook: reports success without mapping anything.
 * Userspace mapping is not performed through vb2 for this driver.
 */
int msm_vb2_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	return 0;
}
  104. void msm_vb2_detach_dmabuf(void *buf_priv)
  105. {
  106. struct msm_vidc_buffer *vbuf = buf_priv;
  107. struct msm_vidc_buffer *ro_buf, *dummy;
  108. struct msm_vidc_core *core;
  109. struct msm_vidc_inst *inst;
  110. if (!vbuf || !vbuf->inst) {
  111. d_vpr_e("%s: invalid params\n", __func__);
  112. return;
  113. }
  114. inst = vbuf->inst;
  115. if (!inst || !inst->core) {
  116. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  117. return;
  118. }
  119. core = inst->core;
  120. if (is_decode_session(inst) && is_output_buffer(vbuf->type)) {
  121. list_for_each_entry_safe(ro_buf, dummy, &inst->buffers.read_only.list, list) {
  122. if (ro_buf->dmabuf == vbuf->dmabuf) {
  123. print_vidc_buffer(VIDC_LOW, "low ", "detach: found ro buf", inst, ro_buf);
  124. ro_buf->attach = vbuf->attach;
  125. vbuf->attach = NULL;
  126. goto exit;
  127. }
  128. }
  129. }
  130. print_vidc_buffer(VIDC_LOW, "low ", "detach", inst, vbuf);
  131. if (vbuf->attach && vbuf->dmabuf) {
  132. call_mem_op(core, dma_buf_detach, core, vbuf->dmabuf, vbuf->attach);
  133. vbuf->attach = NULL;
  134. vbuf->dmabuf = NULL;
  135. vbuf->inst = NULL;
  136. }
  137. vbuf->inst = NULL;
  138. exit:
  139. return;
  140. }
  141. int msm_vb2_map_dmabuf(void *buf_priv)
  142. {
  143. int rc = 0;
  144. struct msm_vidc_buffer *buf = buf_priv;
  145. struct msm_vidc_core *core;
  146. struct msm_vidc_inst *inst;
  147. if (!buf || !buf->inst) {
  148. d_vpr_e("%s: invalid params\n", __func__);
  149. return -EINVAL;
  150. }
  151. inst = buf->inst;
  152. inst = get_inst_ref(g_core, inst);
  153. if (!inst || !inst->core) {
  154. d_vpr_e("%s: invalid params\n", __func__);
  155. return -EINVAL;
  156. }
  157. core = inst->core;
  158. buf->sg_table = call_mem_op(core, dma_buf_map_attachment, core, buf->attach);
  159. if (!buf->sg_table || !buf->sg_table->sgl) {
  160. buf->sg_table = NULL;
  161. rc = -ENOMEM;
  162. goto exit;
  163. }
  164. buf->device_addr = sg_dma_address(buf->sg_table->sgl);
  165. print_vidc_buffer(VIDC_HIGH, "high", "map", inst, buf);
  166. exit:
  167. if (rc)
  168. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  169. put_inst(inst);
  170. return rc;
  171. }
/*
 * vb2 mem_ops .unmap_dmabuf hook.
 *
 * For decoder output buffers still held read-only by firmware, the
 * sg_table and attachment are handed over to the matching entry on the
 * instance's read_only list (to be unmapped later, when the hold is
 * released) instead of being unmapped here. Otherwise the attachment
 * mapping is torn down and the device address cleared.
 */
void msm_vb2_unmap_dmabuf(void *buf_priv)
{
	struct msm_vidc_buffer *vbuf = buf_priv;
	struct msm_vidc_buffer *ro_buf, *dummy;
	struct msm_vidc_core *core;
	struct msm_vidc_inst *inst;

	if (!vbuf || !vbuf->inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	inst = vbuf->inst;
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return;
	}
	core = inst->core;

	if (is_decode_session(inst) && is_output_buffer(vbuf->type)) {
		list_for_each_entry_safe(ro_buf, dummy, &inst->buffers.read_only.list, list) {
			if (ro_buf->dmabuf == vbuf->dmabuf) {
				print_vidc_buffer(VIDC_LOW, "low ", "unmap: found ro buf", inst, ro_buf);
				/* transfer mapping ownership to the ro entry */
				ro_buf->sg_table = vbuf->sg_table;
				ro_buf->attach = vbuf->attach;
				vbuf->sg_table = NULL;
				vbuf->device_addr = 0x0;
				goto exit;
			}
		}
	}

	print_vidc_buffer(VIDC_HIGH, "high", "unmap", inst, vbuf);
	if (vbuf->attach && vbuf->sg_table) {
		call_mem_op(core, dma_buf_unmap_attachment, core, vbuf->attach, vbuf->sg_table);
		vbuf->sg_table = NULL;
		vbuf->device_addr = 0x0;
	}

exit:
	return;
}
/*
 * vb2 .queue_setup hook: negotiate plane count, buffer count and plane
 * sizes for @q, (re)allocate driver-side buffer tracking, and point the
 * queue at the context-bank device used for dma-buf attachment.
 *
 * When the caller passes a non-zero *num_planes (VIDIOC_CREATE_BUFS
 * path), the requested planes/sizes are validated against the current
 * format instead of being overwritten.
 *
 * Returns 0 on success or a negative errno.
 */
int msm_vb2_queue_setup(struct vb2_queue *q,
	unsigned int *num_buffers, unsigned int *num_planes,
	unsigned int sizes[], struct device *alloc_devs[])
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	struct msm_vidc_core *core;
	int port;
	struct v4l2_format *f;
	enum msm_vidc_buffer_type buffer_type = 0;
	enum msm_vidc_buffer_region region = MSM_VIDC_REGION_NONE;
	struct context_bank_info *cb = NULL;

	if (!q || !num_buffers || !num_planes
		|| !sizes || !q->drv_priv) {
		d_vpr_e("%s: invalid params, q = %pK, %pK, %pK\n",
			__func__, q, num_buffers, num_planes);
		return -EINVAL;
	}
	inst = q->drv_priv;
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	core = inst->core;

	/* queue geometry must not change while streaming */
	if (is_state(inst, MSM_VIDC_STREAMING)) {
		i_vpr_e(inst, "%s: invalid state %d\n", __func__, inst->state);
		return -EINVAL;
	}
	port = v4l2_type_to_driver_port(inst, q->type, __func__);
	if (port < 0)
		return -EINVAL;

	/* CREATE_BUFS path: validate caller's planes/sizes against format */
	if (*num_planes && (port == INPUT_PORT || port == OUTPUT_PORT)) {
		f = &inst->fmts[port];
		if (*num_planes != f->fmt.pix_mp.num_planes) {
			i_vpr_e(inst, "%s: requested num_planes %d not supported %d\n",
				__func__, *num_planes, f->fmt.pix_mp.num_planes);
			return -EINVAL;
		}
		if (sizes[0] < inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage) {
			i_vpr_e(inst, "%s: requested size %d not acceptable\n",
				__func__, sizes[0]);
			return -EINVAL;
		}
	}

	buffer_type = v4l2_type_to_driver(q->type, __func__);
	if (!buffer_type)
		return -EINVAL;

	/* drop any stale driver-side tracking before re-allocating below */
	rc = msm_vidc_free_buffers(inst, buffer_type);
	if (rc) {
		i_vpr_e(inst, "%s: failed to free buffers, type %s\n",
			__func__, v4l2_type_name(q->type));
		return rc;
	}

	/* enforce at least min + extra buffers per port; record actual count */
	if (port == INPUT_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.input.min_count +
			inst->buffers.input.extra_count)
			*num_buffers = inst->buffers.input.min_count +
				inst->buffers.input.extra_count;
		inst->buffers.input.actual_count = *num_buffers;
	} else if (port == INPUT_META_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.input_meta.min_count +
			inst->buffers.input_meta.extra_count)
			*num_buffers = inst->buffers.input_meta.min_count +
				inst->buffers.input_meta.extra_count;
		inst->buffers.input_meta.actual_count = *num_buffers;
	} else if (port == OUTPUT_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.output.min_count +
			inst->buffers.output.extra_count)
			*num_buffers = inst->buffers.output.min_count +
				inst->buffers.output.extra_count;
		inst->buffers.output.actual_count = *num_buffers;
	} else if (port == OUTPUT_META_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.output_meta.min_count +
			inst->buffers.output_meta.extra_count)
			*num_buffers = inst->buffers.output_meta.min_count +
				inst->buffers.output_meta.extra_count;
		inst->buffers.output_meta.actual_count = *num_buffers;
	}

	if (port == INPUT_PORT || port == OUTPUT_PORT) {
		sizes[0] = inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage;
	} else if (port == OUTPUT_META_PORT) {
		sizes[0] = inst->fmts[port].fmt.meta.buffersize;
	} else if (port == INPUT_META_PORT) {
		/*
		 * super-frame batching carries metadata for each sub-frame,
		 * so the input meta buffer scales with the batch factor.
		 * NOTE(review): inst->capabilities is dereferenced without a
		 * NULL check here, unlike msm_vb2_start_streaming — confirm
		 * it is guaranteed non-NULL on this path.
		 */
		if (inst->capabilities->cap[SUPER_FRAME].value)
			sizes[0] = inst->capabilities->cap[SUPER_FRAME].value *
				inst->fmts[port].fmt.meta.buffersize;
		else
			sizes[0] = inst->fmts[port].fmt.meta.buffersize;
	}

	rc = msm_vidc_allocate_buffers(inst, buffer_type, *num_buffers);
	if (rc) {
		i_vpr_e(inst, "%s: failed to allocate buffers, type %s\n",
			__func__, v4l2_type_name(q->type));
		return rc;
	}

	/* attach via the context bank matching this buffer's memory region */
	region = call_mem_op(core, buffer_region, inst, buffer_type);
	cb = msm_vidc_get_context_bank_for_region(core, region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n",
			__func__);
		return -EIO;
	}
	q->dev = cb->dev;

	i_vpr_h(inst,
		"queue_setup: type %s num_buffers %d sizes[0] %d cb %s\n",
		v4l2_type_name(q->type), *num_buffers, sizes[0], cb->name);
	return rc;
}
/*
 * vb2 .start_streaming hook: forwards a MSM_VIDC_STREAMON event to the
 * instance's state machine under the client and inst locks.
 *
 * An instance reference is taken before locking and released after
 * unlocking; any failure moves the instance to MSM_VIDC_ERROR.
 * Returns 0 on success or a negative errno.
 */
int msm_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
{
	int rc = 0;
	struct msm_vidc_inst *inst;

	if (!q || !q->drv_priv) {
		d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
		return -EINVAL;
	}
	inst = q->drv_priv;
	inst = get_inst_ref(g_core, inst);
	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* lock order: client lock first, then inst lock */
	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	if (is_session_error(inst)) {
		i_vpr_e(inst, "%s: inst in error state\n", __func__);
		rc = -EBUSY;
		goto unlock;
	}
	rc = inst->event_handle(inst, MSM_VIDC_STREAMON, q);
	if (rc) {
		i_vpr_e(inst, "Streamon: %s failed\n", v4l2_type_name(q->type));
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
		goto unlock;
	}

unlock:
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
	return rc;
}
/*
 * Core streamon sequence for a bitstream/raw port.
 *
 * Meta planes have no firmware-side streamon, so they return early.
 * On the first streamon of the session, one-time setup is performed
 * (dependency list, codec, secure mode, session internal buffers);
 * then the port-specific vdec/venc streamon runs, deferred buffers are
 * queued, and statistics collection is started. Once both ports are
 * streaming, properties are read back from firmware.
 *
 * Returns 0 on success or a negative errno.
 */
int msm_vidc_start_streaming(struct msm_vidc_inst *inst, struct vb2_queue *q)
{
	enum msm_vidc_buffer_type buf_type;
	int rc = 0;

	if (!inst || !q) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
		i_vpr_h(inst, "%s: nothing to start on %s\n",
			__func__, v4l2_type_name(q->type));
		return 0;
	}
	if (q->type != INPUT_MPLANE && q->type != OUTPUT_MPLANE) {
		i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
		return -EINVAL;
	}
	if (!is_decode_session(inst) && !is_encode_session(inst)) {
		i_vpr_e(inst, "%s: invalid session %d\n", __func__, inst->domain);
		return -EINVAL;
	}
	i_vpr_h(inst, "Streamon: %s\n", v4l2_type_name(q->type));

	/* one-time session setup, run on whichever port streams on first */
	if (!inst->once_per_session_set) {
		inst->once_per_session_set = true;

		rc = msm_vidc_prepare_dependency_list(inst);
		if (rc)
			return rc;

		rc = msm_vidc_session_set_codec(inst);
		if (rc)
			return rc;

		rc = msm_vidc_session_set_secure_mode(inst);
		if (rc)
			return rc;

		if (is_encode_session(inst)) {
			rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
				MSM_VIDC_BUF_ARP);
			if (rc)
				return rc;
		} else if (is_decode_session(inst)) {
			rc = msm_vidc_session_set_default_header(inst);
			if (rc)
				return rc;

			rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
				MSM_VIDC_BUF_PERSIST);
			if (rc)
				return rc;
		}
	}

	if (is_decode_session(inst))
		inst->decode_batch.enable = msm_vidc_allow_decode_batch(inst);

	msm_vidc_allow_dcvs(inst);
	msm_vidc_power_data_reset(inst);

	/* dispatch to the codec- and port-specific streamon */
	if (q->type == INPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamon_input(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamon_input(inst);
	} else if (q->type == OUTPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamon_output(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamon_output(inst);
	}
	if (rc)
		return rc;

	/* print final buffer counts & size details */
	msm_vidc_print_buffer_info(inst);

	buf_type = v4l2_type_to_driver(q->type, __func__);
	if (!buf_type)
		return -EINVAL;

	/* queue pending buffers */
	rc = msm_vidc_queue_deferred_buffers(inst, buf_type);
	if (rc)
		return rc;

	/* initialize statistics timer(one time) */
	if (!inst->stats.time_ms)
		inst->stats.time_ms = ktime_get_ns() / 1000 / 1000;

	/* schedule to print buffer statistics */
	rc = schedule_stats_work(inst);
	if (rc)
		return rc;

	/* both ports streaming now: read properties back from firmware */
	if ((q->type == INPUT_MPLANE && inst->bufq[OUTPUT_PORT].vb2q->streaming) ||
		(q->type == OUTPUT_MPLANE && inst->bufq[INPUT_PORT].vb2q->streaming)) {
		rc = msm_vidc_get_properties(inst);
		if (rc)
			return rc;
	}

	i_vpr_h(inst, "Streamon: %s successful\n", v4l2_type_name(q->type));
	return rc;
}
  444. int msm_vidc_stop_streaming(struct msm_vidc_inst *inst, struct vb2_queue *q)
  445. {
  446. int rc = 0;
  447. if (!inst || !q) {
  448. d_vpr_e("%s: invalid params\n", __func__);
  449. return -EINVAL;
  450. }
  451. if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
  452. i_vpr_h(inst, "%s: nothing to stop on %s\n",
  453. __func__, v4l2_type_name(q->type));
  454. return 0;
  455. }
  456. if (q->type != INPUT_MPLANE && q->type != OUTPUT_MPLANE) {
  457. i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
  458. return -EINVAL;
  459. }
  460. if (!is_decode_session(inst) && !is_encode_session(inst)) {
  461. i_vpr_e(inst, "%s: invalid session %d\n", __func__, inst->domain);
  462. return -EINVAL;
  463. }
  464. i_vpr_h(inst, "Streamoff: %s\n", v4l2_type_name(q->type));
  465. if (q->type == INPUT_MPLANE) {
  466. if (is_decode_session(inst))
  467. rc = msm_vdec_streamoff_input(inst);
  468. else if (is_encode_session(inst))
  469. rc = msm_venc_streamoff_input(inst);
  470. } else if (q->type == OUTPUT_MPLANE) {
  471. if (is_decode_session(inst))
  472. rc = msm_vdec_streamoff_output(inst);
  473. else if (is_encode_session(inst))
  474. rc = msm_venc_streamoff_output(inst);
  475. }
  476. if (rc)
  477. return rc;
  478. /* Input port streamoff */
  479. if (q->type == INPUT_MPLANE) {
  480. /* flush timestamps list */
  481. msm_vidc_flush_ts(inst);
  482. /* flush buffer_stats list */
  483. msm_vidc_flush_buffer_stats(inst);
  484. }
  485. i_vpr_h(inst, "Streamoff: %s successful\n", v4l2_type_name(q->type));
  486. return rc;
  487. }
  488. void msm_vb2_stop_streaming(struct vb2_queue *q)
  489. {
  490. struct msm_vidc_inst *inst;
  491. int rc = 0;
  492. if (!q || !q->drv_priv) {
  493. d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
  494. return;
  495. }
  496. inst = q->drv_priv;
  497. if (!inst) {
  498. d_vpr_e("%s: invalid params\n", __func__);
  499. return;
  500. }
  501. rc = inst->event_handle(inst, MSM_VIDC_STREAMOFF, q);
  502. if (rc) {
  503. i_vpr_e(inst, "Streamoff: %s failed\n", v4l2_type_name(q->type));
  504. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  505. }
  506. return;
  507. }
/*
 * vb2 .buf_queue hook: applies any media-request controls attached to
 * the buffer, validates it, and queues it to the codec-specific qbuf
 * path. On failure the buffer is completed with VB2_BUF_STATE_ERROR and
 * the instance is moved to MSM_VIDC_ERROR.
 */
void msm_vb2_buf_queue(struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	u64 timestamp_us = 0;
	u64 ktime_ns = ktime_get_ns();

	inst = vb2_get_drv_priv(vb2->vb2_queue);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	inst = get_inst_ref(g_core, inst);
	if (!inst) {
		d_vpr_e("%s: invalid instance\n", __func__);
		return;
	}

	/*
	 * As part of every qbuf initialise request to true.
	 * If there are any dynamic controls associated with qbuf,
	 * they will set as part s_ctrl() from v4l2_ctrl_request_setup().
	 * Once v4l2_ctrl_request_setup() is done, reset request variable.
	 * If the buffer does not have any requests with it, then
	 * v4l2_ctrl_request_setup() will return 0.
	 */
	inst->request = true;
	rc = v4l2_ctrl_request_setup(vb2->req_obj.req,
			&inst->ctrl_handler);
	inst->request = false;
	v4l2_ctrl_request_complete(vb2->req_obj.req, &inst->ctrl_handler);
	/*
	 * call request_setup and request_complete without acquiring lock
	 * to avoid deadlock issues because request_setup or request_complete
	 * would call .s_ctrl and .g_volatile_ctrl respectively which acquire
	 * lock too.
	 */
	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	if (rc) {
		i_vpr_e(inst, "%s: request setup failed, error %d\n",
			__func__, rc);
		goto unlock;
	}
	if (is_session_error(inst)) {
		i_vpr_e(inst, "%s: inst in error state\n", __func__);
		rc = -EINVAL;
		goto unlock;
	}

	if (!vb2->planes[0].bytesused) {
		if (vb2->type == INPUT_MPLANE) {
			/* Expecting non-zero filledlen on INPUT port */
			i_vpr_e(inst,
				"%s: zero bytesused input buffer not supported\n", __func__);
			rc = -EINVAL;
			goto unlock;
		}
		if ((vb2->type == OUTPUT_META_PLANE && is_any_meta_tx_out_enabled(inst)) ||
			(vb2->type == INPUT_META_PLANE && is_any_meta_tx_inp_enabled(inst))) {
			/*
			 * vb2 is not allowing client to pass data in output meta plane.
			 * adjust the bytesused as client will send buffer tag metadata
			 * in output meta plane if DPB_TAG_LIST, or OUTBUF_FENCE metadata
			 * is enabled.
			 */
			vb2->planes[0].bytesused = vb2->planes[0].length;
		}
	}

	/* encoder input timestamps drive the auto-framerate estimate */
	if (is_encode_session(inst) && vb2->type == INPUT_MPLANE) {
		timestamp_us = div_u64(vb2->timestamp, 1000);
		msm_vidc_set_auto_framerate(inst, timestamp_us);
	}
	inst->last_qbuf_time_ns = ktime_ns;

	if (vb2->type == INPUT_MPLANE) {
		rc = msm_vidc_update_input_rate(inst, div_u64(ktime_ns, 1000));
		if (rc)
			goto unlock;
	}

	if (is_decode_session(inst))
		rc = msm_vdec_qbuf(inst, vb2);
	else if (is_encode_session(inst))
		rc = msm_venc_qbuf(inst, vb2);
	else
		rc = -EINVAL;
	if (rc) {
		print_vb2_buffer("failed vb2-qbuf", inst, vb2);
		goto unlock;
	}

unlock:
	if (rc) {
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
		vb2_buffer_done(vb2, VB2_BUF_STATE_ERROR);
	}
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
}
/*
 * vb2 .buf_cleanup hook: intentionally empty; per-buffer teardown is
 * handled in the unmap/detach hooks above.
 */
void msm_vb2_buf_cleanup(struct vb2_buffer *vb)
{
}
  606. int msm_vb2_buf_out_validate(struct vb2_buffer *vb)
  607. {
  608. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  609. vbuf->field = V4L2_FIELD_NONE;
  610. return 0;
  611. }
  612. void msm_vb2_request_complete(struct vb2_buffer *vb)
  613. {
  614. struct msm_vidc_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
  615. i_vpr_l(inst, "%s: vb type %d, index %d\n",
  616. __func__, vb->type, vb->index);
  617. v4l2_ctrl_request_complete(vb->req_obj.req, &inst->ctrl_handler);
  618. }