/* msm_vidc_vb2.c — web-viewer line-number gutter artifacts removed during review */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. /* Copyright (c) 2022-2023. Qualcomm Innovation Center, Inc. All rights reserved. */
  6. #include "msm_vidc_vb2.h"
  7. #include "msm_vidc_core.h"
  8. #include "msm_vidc_inst.h"
  9. #include "msm_vidc_internal.h"
  10. #include "msm_vidc_driver.h"
  11. #include "msm_vidc_power.h"
  12. #include "msm_vidc_debug.h"
  13. #include "msm_vdec.h"
  14. #include "msm_venc.h"
  15. #include "msm_vidc_control.h"
  16. #include "msm_vidc_platform.h"
  17. extern struct msm_vidc_core *g_core;
  18. struct vb2_queue *msm_vidc_get_vb2q(struct msm_vidc_inst *inst,
  19. u32 type, const char *func)
  20. {
  21. struct vb2_queue *q = NULL;
  22. if (!inst) {
  23. d_vpr_e("%s: invalid params\n", func);
  24. return NULL;
  25. }
  26. if (type == INPUT_MPLANE) {
  27. q = inst->bufq[INPUT_PORT].vb2q;
  28. } else if (type == OUTPUT_MPLANE) {
  29. q = inst->bufq[OUTPUT_PORT].vb2q;
  30. } else if (type == INPUT_META_PLANE) {
  31. q = inst->bufq[INPUT_META_PORT].vb2q;
  32. } else if (type == OUTPUT_META_PLANE) {
  33. q = inst->bufq[OUTPUT_META_PORT].vb2q;
  34. } else {
  35. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  36. __func__, type);
  37. }
  38. return q;
  39. }
  40. #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
/*
 * vb2 mem_ops .alloc callback (pre-5.15 kernel signature).
 *
 * This driver manages memory exclusively through dmabuf attach/map, so
 * nothing is allocated here; a non-NULL dummy cookie is returned because
 * vb2 treats a NULL return as allocation failure.
 */
void *msm_vb2_alloc(struct device *dev, unsigned long attrs,
	unsigned long size, enum dma_data_direction dma_dir,
	gfp_t gfp_flags)
{
	return (void *)0xdeadbeef;
}
/*
 * vb2 mem_ops .attach_dmabuf callback (pre-5.15 kernel signature).
 *
 * Stub: on these kernel versions the real attach is performed elsewhere;
 * return a non-NULL dummy cookie so vb2 does not treat this as failure.
 */
void *msm_vb2_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	return (void *)0xdeadbeef;
}
  52. #else
/*
 * vb2 mem_ops .alloc callback (5.15+ kernel signature).
 *
 * No allocation is done here — buffers arrive as client dmabufs and are
 * attached/mapped in the dmabuf callbacks below. The non-NULL dummy
 * cookie only satisfies vb2's NULL-means-failure convention.
 */
void *msm_vb2_alloc(struct vb2_buffer *vb, struct device *dev,
	unsigned long size)
{
	return (void *)0xdeadbeef;
}
  58. void *msm_vb2_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
  59. struct dma_buf *dbuf, unsigned long size)
  60. {
  61. struct msm_vidc_inst *inst;
  62. struct msm_vidc_core *core;
  63. struct msm_vidc_buffer *buf = NULL;
  64. if (!vb || !dev || !dbuf || !vb->vb2_queue) {
  65. d_vpr_e("%s: invalid params\n", __func__);
  66. return NULL;
  67. }
  68. inst = vb->vb2_queue->drv_priv;
  69. inst = get_inst_ref(g_core, inst);
  70. if (!inst || !inst->core) {
  71. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  72. return NULL;
  73. }
  74. core = inst->core;
  75. buf = msm_vidc_fetch_buffer(inst, vb);
  76. if (!buf) {
  77. i_vpr_e(inst, "%s: failed to fetch buffer\n", __func__);
  78. buf = NULL;
  79. goto exit;
  80. }
  81. buf->inst = inst;
  82. buf->attach = call_mem_op(core, dma_buf_attach, core, dbuf, dev);
  83. if (!buf->attach) {
  84. buf->attach = NULL;
  85. buf = NULL;
  86. goto exit;
  87. }
  88. buf->dmabuf = dbuf;
  89. print_vidc_buffer(VIDC_LOW, "low ", "attach", inst, buf);
  90. exit:
  91. if (!buf)
  92. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  93. put_inst(inst);
  94. return buf;
  95. }
  96. #endif
/*
 * vb2 mem_ops .put callback: intentionally a no-op — msm_vb2_alloc()
 * hands out a dummy cookie and owns no memory to release here.
 */
void msm_vb2_put(void *buf_priv)
{
}
/*
 * vb2 mem_ops .mmap callback: no-op stub that always reports success;
 * no mapping is established here (buffers are client-provided dmabufs).
 */
int msm_vb2_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	return 0;
}
  104. void msm_vb2_detach_dmabuf(void *buf_priv)
  105. {
  106. struct msm_vidc_buffer *vbuf = buf_priv;
  107. struct msm_vidc_buffer *ro_buf, *dummy;
  108. struct msm_vidc_core *core;
  109. struct msm_vidc_inst *inst;
  110. if (!vbuf || !vbuf->inst) {
  111. d_vpr_e("%s: invalid params\n", __func__);
  112. return;
  113. }
  114. inst = vbuf->inst;
  115. inst = get_inst_ref(g_core, inst);
  116. if (!inst || !inst->core) {
  117. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  118. return;
  119. }
  120. core = inst->core;
  121. if (is_decode_session(inst) && is_output_buffer(vbuf->type)) {
  122. list_for_each_entry_safe(ro_buf, dummy, &inst->buffers.read_only.list, list) {
  123. if (ro_buf->dmabuf == vbuf->dmabuf) {
  124. print_vidc_buffer(VIDC_LOW, "low ", "detach: found ro buf", inst, ro_buf);
  125. ro_buf->attach = vbuf->attach;
  126. vbuf->attach = NULL;
  127. goto exit;
  128. }
  129. }
  130. }
  131. print_vidc_buffer(VIDC_LOW, "low ", "detach", inst, vbuf);
  132. if (vbuf->attach && vbuf->dmabuf) {
  133. call_mem_op(core, dma_buf_detach, core, vbuf->dmabuf, vbuf->attach);
  134. vbuf->attach = NULL;
  135. vbuf->dmabuf = NULL;
  136. vbuf->inst = NULL;
  137. }
  138. vbuf->inst = NULL;
  139. exit:
  140. put_inst(inst);
  141. return;
  142. }
  143. int msm_vb2_map_dmabuf(void *buf_priv)
  144. {
  145. int rc = 0;
  146. struct msm_vidc_buffer *buf = buf_priv;
  147. struct msm_vidc_core *core;
  148. struct msm_vidc_inst *inst;
  149. if (!buf || !buf->inst) {
  150. d_vpr_e("%s: invalid params\n", __func__);
  151. return -EINVAL;
  152. }
  153. inst = buf->inst;
  154. inst = get_inst_ref(g_core, inst);
  155. if (!inst || !inst->core) {
  156. d_vpr_e("%s: invalid params\n", __func__);
  157. return -EINVAL;
  158. }
  159. core = inst->core;
  160. buf->sg_table = call_mem_op(core, dma_buf_map_attachment, core, buf->attach);
  161. if (!buf->sg_table || !buf->sg_table->sgl) {
  162. buf->sg_table = NULL;
  163. rc = -ENOMEM;
  164. goto exit;
  165. }
  166. buf->device_addr = sg_dma_address(buf->sg_table->sgl);
  167. print_vidc_buffer(VIDC_HIGH, "high", "map", inst, buf);
  168. exit:
  169. if (rc)
  170. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  171. put_inst(inst);
  172. return rc;
  173. }
  174. void msm_vb2_unmap_dmabuf(void *buf_priv)
  175. {
  176. struct msm_vidc_buffer *vbuf = buf_priv;
  177. struct msm_vidc_buffer *ro_buf, *dummy;
  178. struct msm_vidc_core *core;
  179. struct msm_vidc_inst *inst;
  180. if (!vbuf || !vbuf->inst) {
  181. d_vpr_e("%s: invalid params\n", __func__);
  182. return;
  183. }
  184. inst = vbuf->inst;
  185. inst = get_inst_ref(g_core, inst);
  186. if (!inst || !inst->core) {
  187. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  188. return;
  189. }
  190. core = inst->core;
  191. if (is_decode_session(inst) && is_output_buffer(vbuf->type)) {
  192. list_for_each_entry_safe(ro_buf, dummy, &inst->buffers.read_only.list, list) {
  193. if (ro_buf->dmabuf == vbuf->dmabuf) {
  194. print_vidc_buffer(VIDC_LOW, "low ", "unmap: found ro buf", inst, ro_buf);
  195. ro_buf->sg_table = vbuf->sg_table;
  196. ro_buf->attach = vbuf->attach;
  197. vbuf->sg_table = NULL;
  198. vbuf->device_addr = 0x0;
  199. goto exit;
  200. }
  201. }
  202. }
  203. print_vidc_buffer(VIDC_HIGH, "high", "unmap", inst, vbuf);
  204. if (vbuf->attach && vbuf->sg_table) {
  205. call_mem_op(core, dma_buf_unmap_attachment, core, vbuf->attach, vbuf->sg_table);
  206. vbuf->sg_table = NULL;
  207. vbuf->device_addr = 0x0;
  208. }
  209. exit:
  210. put_inst(inst);
  211. return;
  212. }
  213. int msm_vb2_queue_setup(struct vb2_queue *q,
  214. unsigned int *num_buffers, unsigned int *num_planes,
  215. unsigned int sizes[], struct device *alloc_devs[])
  216. {
  217. int rc = 0;
  218. struct msm_vidc_inst *inst;
  219. struct msm_vidc_core *core;
  220. int port;
  221. struct v4l2_format *f;
  222. enum msm_vidc_buffer_type buffer_type = 0;
  223. enum msm_vidc_buffer_region region = MSM_VIDC_REGION_NONE;
  224. struct context_bank_info *cb = NULL;
  225. if (!q || !num_buffers || !num_planes
  226. || !sizes || !q->drv_priv) {
  227. d_vpr_e("%s: invalid params, q = %pK, %pK, %pK\n",
  228. __func__, q, num_buffers, num_planes);
  229. return -EINVAL;
  230. }
  231. inst = q->drv_priv;
  232. if (!inst || !inst->core) {
  233. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  234. return -EINVAL;
  235. }
  236. core = inst->core;
  237. if (is_state(inst, MSM_VIDC_STREAMING)) {
  238. i_vpr_e(inst, "%s: invalid state %d\n", __func__, inst->state);
  239. return -EINVAL;
  240. }
  241. port = v4l2_type_to_driver_port(inst, q->type, __func__);
  242. if (port < 0)
  243. return -EINVAL;
  244. if (*num_planes && (port == INPUT_PORT || port == OUTPUT_PORT)) {
  245. f = &inst->fmts[port];
  246. if (*num_planes != f->fmt.pix_mp.num_planes) {
  247. i_vpr_e(inst, "%s: requested num_planes %d not supported %d\n",
  248. __func__, *num_planes, f->fmt.pix_mp.num_planes);
  249. return -EINVAL;
  250. }
  251. if (sizes[0] < inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage) {
  252. i_vpr_e(inst, "%s: requested size %d not acceptable\n",
  253. __func__, sizes[0]);
  254. return -EINVAL;
  255. }
  256. }
  257. buffer_type = v4l2_type_to_driver(q->type, __func__);
  258. if (!buffer_type)
  259. return -EINVAL;
  260. rc = msm_vidc_free_buffers(inst, buffer_type);
  261. if (rc) {
  262. i_vpr_e(inst, "%s: failed to free buffers, type %s\n",
  263. __func__, v4l2_type_name(q->type));
  264. return rc;
  265. }
  266. if (port == INPUT_PORT) {
  267. *num_planes = 1;
  268. if (*num_buffers < inst->buffers.input.min_count +
  269. inst->buffers.input.extra_count)
  270. *num_buffers = inst->buffers.input.min_count +
  271. inst->buffers.input.extra_count;
  272. inst->buffers.input.actual_count = *num_buffers;
  273. } else if (port == INPUT_META_PORT) {
  274. *num_planes = 1;
  275. if (*num_buffers < inst->buffers.input_meta.min_count +
  276. inst->buffers.input_meta.extra_count)
  277. *num_buffers = inst->buffers.input_meta.min_count +
  278. inst->buffers.input_meta.extra_count;
  279. inst->buffers.input_meta.actual_count = *num_buffers;
  280. } else if (port == OUTPUT_PORT) {
  281. *num_planes = 1;
  282. if (*num_buffers < inst->buffers.output.min_count +
  283. inst->buffers.output.extra_count)
  284. *num_buffers = inst->buffers.output.min_count +
  285. inst->buffers.output.extra_count;
  286. inst->buffers.output.actual_count = *num_buffers;
  287. } else if (port == OUTPUT_META_PORT) {
  288. *num_planes = 1;
  289. if (*num_buffers < inst->buffers.output_meta.min_count +
  290. inst->buffers.output_meta.extra_count)
  291. *num_buffers = inst->buffers.output_meta.min_count +
  292. inst->buffers.output_meta.extra_count;
  293. inst->buffers.output_meta.actual_count = *num_buffers;
  294. }
  295. if (port == INPUT_PORT || port == OUTPUT_PORT) {
  296. sizes[0] = inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage;
  297. } else if (port == OUTPUT_META_PORT) {
  298. sizes[0] = inst->fmts[port].fmt.meta.buffersize;
  299. } else if (port == INPUT_META_PORT) {
  300. if (inst->capabilities->cap[SUPER_FRAME].value)
  301. sizes[0] = inst->capabilities->cap[SUPER_FRAME].value *
  302. inst->fmts[port].fmt.meta.buffersize;
  303. else
  304. sizes[0] = inst->fmts[port].fmt.meta.buffersize;
  305. }
  306. rc = msm_vidc_allocate_buffers(inst, buffer_type, *num_buffers);
  307. if (rc) {
  308. i_vpr_e(inst, "%s: failed to allocate buffers, type %s\n",
  309. __func__, v4l2_type_name(q->type));
  310. return rc;
  311. }
  312. region = call_mem_op(core, buffer_region, inst, buffer_type);
  313. cb = msm_vidc_get_context_bank_for_region(core, region);
  314. if (!cb) {
  315. d_vpr_e("%s: Failed to get context bank device\n",
  316. __func__);
  317. return -EIO;
  318. }
  319. q->dev = cb->dev;
  320. i_vpr_h(inst,
  321. "queue_setup: type %s num_buffers %d sizes[0] %d cb %s\n",
  322. v4l2_type_name(q->type), *num_buffers, sizes[0], cb->name);
  323. return rc;
  324. }
  325. int msm_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
  326. {
  327. int rc = 0;
  328. struct msm_vidc_inst *inst;
  329. if (!q || !q->drv_priv) {
  330. d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
  331. return -EINVAL;
  332. }
  333. inst = q->drv_priv;
  334. inst = get_inst_ref(g_core, inst);
  335. if (!inst || !inst->core || !inst->capabilities) {
  336. d_vpr_e("%s: invalid params\n", __func__);
  337. return -EINVAL;
  338. }
  339. client_lock(inst, __func__);
  340. inst_lock(inst, __func__);
  341. if (is_session_error(inst)) {
  342. i_vpr_e(inst, "%s: inst in error state\n", __func__);
  343. rc = -EBUSY;
  344. goto unlock;
  345. }
  346. rc = inst->event_handle(inst, MSM_VIDC_STREAMON, q);
  347. if (rc) {
  348. i_vpr_e(inst, "Streamon: %s failed\n", v4l2_type_name(q->type));
  349. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  350. goto unlock;
  351. }
  352. unlock:
  353. inst_unlock(inst, __func__);
  354. client_unlock(inst, __func__);
  355. put_inst(inst);
  356. return rc;
  357. }
/*
 * msm_vidc_start_streaming() - driver-side streamon work for one queue.
 * @inst: video instance
 * @q: vb2 queue being started; meta planes are a no-op here
 *
 * Performs one-time session setup on the first streamon of either port,
 * then starts the firmware port, queues deferred buffers, and arms the
 * statistics work. The order of the one-time steps (dependency list,
 * codec, secure mode, internal buffers) follows the firmware's expected
 * configuration sequence — do not reorder casually.
 *
 * Return: 0 on success, negative errno on failure.
 */
int msm_vidc_start_streaming(struct msm_vidc_inst *inst, struct vb2_queue *q)
{
	enum msm_vidc_buffer_type buf_type;
	int rc = 0;

	if (!inst || !q) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	/* meta planes have no firmware port of their own */
	if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
		i_vpr_h(inst, "%s: nothing to start on %s\n",
			__func__, v4l2_type_name(q->type));
		return 0;
	}
	if (q->type != INPUT_MPLANE && q->type != OUTPUT_MPLANE) {
		i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
		return -EINVAL;
	}
	if (!is_decode_session(inst) && !is_encode_session(inst)) {
		i_vpr_e(inst, "%s: invalid session %d\n", __func__, inst->domain);
		return -EINVAL;
	}
	i_vpr_h(inst, "Streamon: %s\n", v4l2_type_name(q->type));

	/* one-time session configuration, done on the first streamon only */
	if (!inst->once_per_session_set) {
		inst->once_per_session_set = true;
		rc = msm_vidc_prepare_dependency_list(inst);
		if (rc)
			return rc;
		rc = msm_vidc_session_set_codec(inst);
		if (rc)
			return rc;
		rc = msm_vidc_session_set_secure_mode(inst);
		if (rc)
			return rc;
		if (is_encode_session(inst)) {
			/* encoder needs ARP internal buffers before streamon */
			rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
				MSM_VIDC_BUF_ARP);
			if (rc)
				return rc;
		} else if(is_decode_session(inst)) {
			rc = msm_vidc_session_set_default_header(inst);
			if (rc)
				return rc;
			/* decoder needs PERSIST internal buffers before streamon */
			rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
				MSM_VIDC_BUF_PERSIST);
			if (rc)
				return rc;
		}
	}

	/* re-evaluate batching/power heuristics on every streamon */
	if (is_decode_session(inst))
		inst->decode_batch.enable = msm_vidc_allow_decode_batch(inst);

	msm_vidc_allow_dcvs(inst);
	msm_vidc_power_data_reset(inst);

	/* start the firmware port matching this queue */
	if (q->type == INPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamon_input(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamon_input(inst);
	} else if (q->type == OUTPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamon_output(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamon_output(inst);
	}
	if (rc)
		return rc;

	/* print final buffer counts & size details */
	msm_vidc_print_buffer_info(inst);

	buf_type = v4l2_type_to_driver(q->type, __func__);
	if (!buf_type)
		return -EINVAL;

	/* queue pending buffers */
	rc = msm_vidc_queue_deferred_buffers(inst, buf_type);
	if (rc)
		return rc;

	/* initialize statistics timer(one time) */
	if (!inst->stats.time_ms)
		inst->stats.time_ms = ktime_get_ns() / 1000 / 1000;

	/* schedule to print buffer statistics */
	rc = schedule_stats_work(inst);
	if (rc)
		return rc;

	/* both ports streaming now: read back session properties */
	if ((q->type == INPUT_MPLANE && inst->bufq[OUTPUT_PORT].vb2q->streaming) ||
		(q->type == OUTPUT_MPLANE && inst->bufq[INPUT_PORT].vb2q->streaming)) {
		rc = msm_vidc_get_properties(inst);
		if (rc)
			return rc;
	}
	i_vpr_h(inst, "Streamon: %s successful\n", v4l2_type_name(q->type));
	return rc;
}
  448. int msm_vidc_stop_streaming(struct msm_vidc_inst *inst, struct vb2_queue *q)
  449. {
  450. int rc = 0;
  451. if (!inst || !q) {
  452. d_vpr_e("%s: invalid params\n", __func__);
  453. return -EINVAL;
  454. }
  455. if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
  456. i_vpr_h(inst, "%s: nothing to stop on %s\n",
  457. __func__, v4l2_type_name(q->type));
  458. return 0;
  459. }
  460. if (q->type != INPUT_MPLANE && q->type != OUTPUT_MPLANE) {
  461. i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
  462. return -EINVAL;
  463. }
  464. if (!is_decode_session(inst) && !is_encode_session(inst)) {
  465. i_vpr_e(inst, "%s: invalid session %d\n", __func__, inst->domain);
  466. return -EINVAL;
  467. }
  468. i_vpr_h(inst, "Streamoff: %s\n", v4l2_type_name(q->type));
  469. if (q->type == INPUT_MPLANE) {
  470. if (is_decode_session(inst))
  471. rc = msm_vdec_streamoff_input(inst);
  472. else if (is_encode_session(inst))
  473. rc = msm_venc_streamoff_input(inst);
  474. } else if (q->type == OUTPUT_MPLANE) {
  475. if (is_decode_session(inst))
  476. rc = msm_vdec_streamoff_output(inst);
  477. else if (is_encode_session(inst))
  478. rc = msm_venc_streamoff_output(inst);
  479. }
  480. if (rc)
  481. return rc;
  482. /* Input port streamoff */
  483. if (q->type == INPUT_MPLANE) {
  484. /* flush timestamps list */
  485. msm_vidc_flush_ts(inst);
  486. /* flush buffer_stats list */
  487. msm_vidc_flush_buffer_stats(inst);
  488. }
  489. i_vpr_h(inst, "Streamoff: %s successful\n", v4l2_type_name(q->type));
  490. return rc;
  491. }
  492. void msm_vb2_stop_streaming(struct vb2_queue *q)
  493. {
  494. struct msm_vidc_inst *inst;
  495. int rc = 0;
  496. if (!q || !q->drv_priv) {
  497. d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
  498. return;
  499. }
  500. inst = q->drv_priv;
  501. if (!inst) {
  502. d_vpr_e("%s: invalid params\n", __func__);
  503. return;
  504. }
  505. rc = inst->event_handle(inst, MSM_VIDC_STREAMOFF, q);
  506. if (rc) {
  507. i_vpr_e(inst, "Streamoff: %s failed\n", v4l2_type_name(q->type));
  508. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  509. }
  510. return;
  511. }
/*
 * vb2 .buf_queue callback: applies any media-request controls, validates
 * the buffer payload, updates rate/timestamp bookkeeping, and hands the
 * buffer to the decoder/encoder qbuf path. On any error the instance is
 * moved to the error state and the buffer is completed with
 * VB2_BUF_STATE_ERROR.
 */
void msm_vb2_buf_queue(struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	u64 timestamp_us = 0;
	u64 ktime_ns = ktime_get_ns();

	inst = vb2_get_drv_priv(vb2->vb2_queue);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	inst = get_inst_ref(g_core, inst);
	if (!inst) {
		d_vpr_e("%s: invalid instance\n", __func__);
		return;
	}
	/*
	 * As part of every qbuf initalise request to true.
	 * If there are any dynamic controls associated with qbuf,
	 * they will set as part s_ctrl() from v4l2_ctrl_request_setup().
	 * Once v4l2_ctrl_request_setup() is done, reset request variable.
	 * If the buffer does not have any requests with it, then
	 * v4l2_ctrl_request_setup() will return 0.
	 */
	inst->request = true;
	rc = v4l2_ctrl_request_setup(vb2->req_obj.req,
		&inst->ctrl_handler);
	inst->request = false;
	v4l2_ctrl_request_complete(vb2->req_obj.req, &inst->ctrl_handler);
	/*
	 * call request_setup and request_complete without acquiring lock
	 * to avoid deadlock issues because request_setup or request_complete
	 * would call .s_ctrl and .g_volatile_ctrl respectively which acquire
	 * lock too.
	 */
	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	/* report request-setup failure only after taking the locks */
	if (rc) {
		i_vpr_e(inst, "%s: request setup failed, error %d\n",
			__func__, rc);
		goto unlock;
	}
	if (is_session_error(inst)) {
		i_vpr_e(inst, "%s: inst in error state\n", __func__);
		rc = -EINVAL;
		goto unlock;
	}
	if (!vb2->planes[0].bytesused) {
		if (vb2->type == INPUT_MPLANE) {
			/* Expecting non-zero filledlen on INPUT port */
			i_vpr_e(inst,
				"%s: zero bytesused input buffer not supported\n", __func__);
			rc = -EINVAL;
			goto unlock;
		}
		if ((vb2->type == OUTPUT_META_PLANE && is_any_meta_tx_out_enabled(inst)) ||
			(vb2->type == INPUT_META_PLANE && is_any_meta_tx_inp_enabled(inst))) {
			/*
			 * vb2 is not allowing client to pass data in output meta plane.
			 * adjust the bytesused as client will send buffer tag metadata
			 * in output meta plane if DPB_TAG_LIST, or OUTBUF_FENCE metadata
			 * is enabled.
			 */
			vb2->planes[0].bytesused = vb2->planes[0].length;
		}
	}
	/* encoder input timestamps feed the auto-framerate heuristic */
	if (is_encode_session(inst) && vb2->type == INPUT_MPLANE) {
		timestamp_us = div_u64(vb2->timestamp, 1000);
		msm_vidc_set_auto_framerate(inst, timestamp_us);
	}
	inst->last_qbuf_time_ns = ktime_ns;
	if (vb2->type == INPUT_MPLANE) {
		/* track queueing cadence for input-rate estimation */
		rc = msm_vidc_update_input_rate(inst, div_u64(ktime_ns, 1000));
		if (rc)
			goto unlock;
	}
	if (is_decode_session(inst))
		rc = msm_vdec_qbuf(inst, vb2);
	else if (is_encode_session(inst))
		rc = msm_venc_qbuf(inst, vb2);
	else
		rc = -EINVAL;
	if (rc) {
		print_vb2_buffer("failed vb2-qbuf", inst, vb2);
		goto unlock;
	}

unlock:
	if (rc) {
		/* any failure is fatal for the session; fail the buffer too */
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
		vb2_buffer_done(vb2, VB2_BUF_STATE_ERROR);
	}
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
}
/*
 * vb2 .buf_cleanup callback: intentionally empty — per-buffer teardown
 * is handled by the detach/unmap dmabuf callbacks above.
 */
void msm_vb2_buf_cleanup(struct vb2_buffer *vb)
{
}
  610. int msm_vb2_buf_out_validate(struct vb2_buffer *vb)
  611. {
  612. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  613. vbuf->field = V4L2_FIELD_NONE;
  614. return 0;
  615. }
  616. void msm_vb2_request_complete(struct vb2_buffer *vb)
  617. {
  618. struct msm_vidc_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
  619. i_vpr_l(inst, "%s: vb type %d, index %d\n",
  620. __func__, vb->type, vb->index);
  621. v4l2_ctrl_request_complete(vb->req_obj.req, &inst->ctrl_handler);
  622. }