/* NOTE(review): scrape artifact (filename banner and fused line-number residue) removed. */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. /* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. */
  6. #include <linux/qcom-dma-mapping.h>
  7. #include "msm_vidc_vb2.h"
  8. #include "msm_vidc_core.h"
  9. #include "msm_vidc_inst.h"
  10. #include "msm_vidc_internal.h"
  11. #include "msm_vidc_driver.h"
  12. #include "msm_vidc_power.h"
  13. #include "msm_vdec.h"
  14. #include "msm_venc.h"
  15. #include "msm_vidc_debug.h"
  16. #include "msm_vidc_control.h"
  17. #include "msm_vidc_dt.h"
  18. extern struct msm_vidc_core *g_core;
  19. struct vb2_queue *msm_vidc_get_vb2q(struct msm_vidc_inst *inst,
  20. u32 type, const char *func)
  21. {
  22. struct vb2_queue *q = NULL;
  23. if (!inst) {
  24. d_vpr_e("%s: invalid params\n", func);
  25. return NULL;
  26. }
  27. if (type == INPUT_MPLANE) {
  28. q = inst->bufq[INPUT_PORT].vb2q;
  29. } else if (type == OUTPUT_MPLANE) {
  30. q = inst->bufq[OUTPUT_PORT].vb2q;
  31. } else if (type == INPUT_META_PLANE) {
  32. q = inst->bufq[INPUT_META_PORT].vb2q;
  33. } else if (type == OUTPUT_META_PLANE) {
  34. q = inst->bufq[OUTPUT_META_PORT].vb2q;
  35. } else {
  36. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  37. __func__, type);
  38. }
  39. return q;
  40. }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
/*
 * vb2 mem_ops .alloc handler (pre-5.15 callback signature).
 * This driver manages its own buffers, so no real allocation happens here;
 * a dummy non-NULL cookie is returned so vb2 treats the call as successful.
 */
void *msm_vb2_alloc(struct device *dev, unsigned long attrs,
	unsigned long size, enum dma_data_direction dma_dir,
	gfp_t gfp_flags)
{
	return (void *)0xdeadbeef;
}
/*
 * vb2 mem_ops .attach_dmabuf handler (pre-5.15 callback signature).
 * Stub: returns a dummy non-NULL cookie; the real attach is done by the
 * >= 5.15 variant below on kernels where this driver performs attachment.
 */
void *msm_vb2_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	return (void *)0xdeadbeef;
}
#else
/*
 * vb2 mem_ops .alloc handler (5.15+ signature).
 * No real allocation — dummy non-NULL cookie keeps vb2 happy.
 */
void *msm_vb2_alloc(struct vb2_buffer *vb, struct device *dev,
	unsigned long size)
{
	return (void *)0xdeadbeef;
}
/*
 * vb2 mem_ops .attach_dmabuf handler (5.15+ signature).
 *
 * Looks up the driver-side msm_vidc_buffer for @vb, attaches @dbuf to the
 * context-bank device and records the attachment on that buffer. On any
 * failure the session is moved to MSM_VIDC_ERROR.
 *
 * Returns the msm_vidc_buffer pointer (vb2's buf_priv cookie) or NULL.
 */
void *msm_vb2_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
	struct dma_buf *dbuf, unsigned long size)
{
	struct msm_vidc_inst *inst;
	struct msm_vidc_core *core;
	struct msm_vidc_buffer *buf = NULL;
	if (!vb || !dev || !dbuf || !vb->vb2_queue) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}
	inst = vb->vb2_queue->drv_priv;
	/* take a reference so the instance cannot be freed under us */
	inst = get_inst_ref(g_core, inst);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return NULL;
	}
	core = inst->core;
	buf = msm_vidc_fetch_buffer(inst, vb);
	if (!buf) {
		i_vpr_e(inst, "%s: failed to fetch buffer\n", __func__);
		buf = NULL;
		goto exit;
	}
	buf->inst = inst;
	buf->attach = msm_vidc_dma_buf_attach(dbuf, dev);
	if (!buf->attach) {
		buf->attach = NULL;
		buf = NULL; /* NULL return tells vb2 the attach failed */
		goto exit;
	}
	/* driver handles cache maintenance itself */
	buf->attach->dma_map_attrs |= DMA_ATTR_SKIP_CPU_SYNC;
	/* remove below flag when userspace recycles buffers in same index */
	buf->attach->dma_map_attrs |= DMA_ATTR_DELAYED_UNMAP;
	if (core->dt->sys_cache_present)
		buf->attach->dma_map_attrs |=
			DMA_ATTR_IOMMU_USE_UPSTREAM_HINT;
	buf->dmabuf = dbuf;
	print_vidc_buffer(VIDC_LOW, "low ", "attach", inst, buf);
exit:
	/* a NULL buf here means some step above failed — mark session error */
	if (!buf)
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
	put_inst(inst);
	return buf;
}
#endif
/*
 * vb2 mem_ops .put handler.
 * Intentionally empty: buffer release is handled by the driver's own
 * buffer management (detach/unmap paths), not by vb2.
 */
void msm_vb2_put(void *buf_priv)
{
}
/*
 * vb2 mem_ops .mmap handler.
 * Always reports success without mapping anything — presumably userspace
 * accesses these dmabuf-backed buffers directly rather than via vb2 mmap.
 * NOTE(review): confirm no caller relies on a real mapping here.
 */
int msm_vb2_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	return 0;
}
  111. void msm_vb2_detach_dmabuf(void *buf_priv)
  112. {
  113. struct msm_vidc_buffer *vbuf = buf_priv;
  114. struct msm_vidc_buffer *ro_buf, *dummy;
  115. struct msm_vidc_inst *inst;
  116. if (!vbuf || !vbuf->inst) {
  117. d_vpr_e("%s: invalid params\n", __func__);
  118. return;
  119. }
  120. inst = vbuf->inst;
  121. inst = get_inst_ref(g_core, inst);
  122. if (!inst || !inst->core) {
  123. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  124. return;
  125. }
  126. if (is_decode_session(inst) && is_output_buffer(vbuf->type)) {
  127. list_for_each_entry_safe(ro_buf, dummy, &inst->buffers.read_only.list, list) {
  128. if (ro_buf->dmabuf == vbuf->dmabuf) {
  129. print_vidc_buffer(VIDC_LOW, "low ", "detach: found ro buf", inst, ro_buf);
  130. ro_buf->attach = vbuf->attach;
  131. vbuf->attach = NULL;
  132. goto exit;
  133. }
  134. }
  135. }
  136. print_vidc_buffer(VIDC_LOW, "low ", "detach", inst, vbuf);
  137. if (vbuf->attach && vbuf->dmabuf) {
  138. msm_vidc_dma_buf_detach(vbuf->dmabuf, vbuf->attach);
  139. vbuf->attach = NULL;
  140. vbuf->dmabuf = NULL;
  141. vbuf->inst = NULL;
  142. }
  143. vbuf->inst = NULL;
  144. exit:
  145. put_inst(inst);
  146. return;
  147. }
  148. int msm_vb2_map_dmabuf(void *buf_priv)
  149. {
  150. int rc = 0;
  151. struct msm_vidc_buffer *buf = buf_priv;
  152. struct msm_vidc_inst *inst;
  153. if (!buf || !buf->inst) {
  154. d_vpr_e("%s: invalid params\n", __func__);
  155. return -EINVAL;
  156. }
  157. inst = buf->inst;
  158. inst = get_inst_ref(g_core, inst);
  159. if (!inst || !inst->core) {
  160. d_vpr_e("%s: invalid params\n", __func__);
  161. return -EINVAL;
  162. }
  163. buf->sg_table = msm_vidc_dma_buf_map_attachment(buf->attach);
  164. if (!buf->sg_table) {
  165. buf->sg_table = NULL;
  166. rc = -ENOMEM;
  167. goto exit;
  168. }
  169. if (!buf->sg_table->sgl) {
  170. i_vpr_e(inst, "%s: sgl is NULL\n", __func__);
  171. rc = -ENOMEM;
  172. goto exit;
  173. }
  174. buf->device_addr = buf->sg_table->sgl->dma_address;
  175. print_vidc_buffer(VIDC_HIGH, "high", "map", inst, buf);
  176. exit:
  177. if (rc)
  178. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  179. put_inst(inst);
  180. return rc;
  181. }
/*
 * vb2 mem_ops .unmap_dmabuf handler.
 *
 * For decoder output buffers still referenced read-only by firmware, the
 * sg_table and attachment are handed over to the read_only list entry so the
 * mapping stays valid; otherwise the attachment is unmapped here.
 */
void msm_vb2_unmap_dmabuf(void *buf_priv)
{
	struct msm_vidc_buffer *vbuf = buf_priv;
	struct msm_vidc_buffer *ro_buf, *dummy;
	struct msm_vidc_inst *inst;
	if (!vbuf || !vbuf->inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	inst = vbuf->inst;
	/* hold a reference on the instance for the duration of the call */
	inst = get_inst_ref(g_core, inst);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return;
	}
	if (is_decode_session(inst) && is_output_buffer(vbuf->type)) {
		list_for_each_entry_safe(ro_buf, dummy, &inst->buffers.read_only.list, list) {
			if (ro_buf->dmabuf == vbuf->dmabuf) {
				print_vidc_buffer(VIDC_LOW, "low ", "unmap: found ro buf", inst, ro_buf);
				ro_buf->sg_table = vbuf->sg_table;
				ro_buf->attach = vbuf->attach;
				/*
				 * vbuf->attach is intentionally NOT cleared here:
				 * msm_vb2_detach_dmabuf later finds the same ro
				 * entry and transfers/clears the attach pointer.
				 * Clearing it here would make that transfer
				 * overwrite ro_buf->attach with NULL.
				 */
				vbuf->sg_table = NULL;
				vbuf->device_addr = 0x0;
				goto exit;
			}
		}
	}
	print_vidc_buffer(VIDC_HIGH, "high", "unmap", inst, vbuf);
	if (vbuf->attach && vbuf->sg_table) {
		msm_vidc_dma_buf_unmap_attachment(vbuf->attach, vbuf->sg_table);
		vbuf->sg_table = NULL;
		vbuf->device_addr = 0x0;
	}
exit:
	put_inst(inst);
	return;
}
/*
 * vb2 .queue_setup callback.
 *
 * Validates the request against the negotiated format, frees any previously
 * allocated driver buffers of this type, clamps *num_buffers to the driver's
 * minimum + extra count for the port, fills sizes[0] from the format (or
 * meta buffer size), allocates driver-side buffer tracking, and points the
 * queue's allocation device at the context-bank device for the buffer's
 * memory region.
 *
 * Returns 0 on success or a negative errno.
 */
int msm_vidc_queue_setup(struct vb2_queue *q,
	unsigned int *num_buffers, unsigned int *num_planes,
	unsigned int sizes[], struct device *alloc_devs[])
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	struct msm_vidc_core *core;
	int port;
	struct v4l2_format *f;
	enum msm_vidc_buffer_type buffer_type = 0;
	enum msm_vidc_buffer_region region = MSM_VIDC_REGION_NONE;
	struct context_bank_info *cb = NULL;
	if (!q || !num_buffers || !num_planes
		|| !sizes || !q->drv_priv) {
		d_vpr_e("%s: invalid params, q = %pK, %pK, %pK\n",
			__func__, q, num_buffers, num_planes);
		return -EINVAL;
	}
	inst = q->drv_priv;
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params %pK\n", __func__, inst);
		return -EINVAL;
	}
	core = inst->core;
	/* buffer counts cannot be renegotiated while streaming */
	if (is_state(inst, MSM_VIDC_STREAMING)) {
		i_vpr_e(inst, "%s: invalid state %d\n", __func__, inst->state);
		return -EINVAL;
	}
	port = v4l2_type_to_driver_port(inst, q->type, __func__);
	if (port < 0)
		return -EINVAL;
	/*
	 * *num_planes != 0 means userspace passed explicit plane info
	 * (VIDIOC_CREATE_BUFS path) — validate it against the set format.
	 */
	if (*num_planes && (port == INPUT_PORT || port == OUTPUT_PORT)) {
		f = &inst->fmts[port];
		if (*num_planes != f->fmt.pix_mp.num_planes) {
			i_vpr_e(inst, "%s: requested num_planes %d not supported %d\n",
				__func__, *num_planes, f->fmt.pix_mp.num_planes);
			return -EINVAL;
		}
		if (sizes[0] < inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage) {
			i_vpr_e(inst, "%s: requested size %d not acceptable\n",
				__func__, sizes[0]);
			return -EINVAL;
		}
	}
	buffer_type = v4l2_type_to_driver(q->type, __func__);
	if (!buffer_type)
		return -EINVAL;
	/* drop stale driver-side buffer tracking before re-allocating */
	rc = msm_vidc_free_buffers(inst, buffer_type);
	if (rc) {
		i_vpr_e(inst, "%s: failed to free buffers, type %s\n",
			__func__, v4l2_type_name(q->type));
		return rc;
	}
	/* enforce per-port minimum (min + extra) and record the actual count */
	if (port == INPUT_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.input.min_count +
			inst->buffers.input.extra_count)
			*num_buffers = inst->buffers.input.min_count +
				inst->buffers.input.extra_count;
		inst->buffers.input.actual_count = *num_buffers;
	} else if (port == INPUT_META_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.input_meta.min_count +
			inst->buffers.input_meta.extra_count)
			*num_buffers = inst->buffers.input_meta.min_count +
				inst->buffers.input_meta.extra_count;
		inst->buffers.input_meta.actual_count = *num_buffers;
	} else if (port == OUTPUT_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.output.min_count +
			inst->buffers.output.extra_count)
			*num_buffers = inst->buffers.output.min_count +
				inst->buffers.output.extra_count;
		inst->buffers.output.actual_count = *num_buffers;
	} else if (port == OUTPUT_META_PORT) {
		*num_planes = 1;
		if (*num_buffers < inst->buffers.output_meta.min_count +
			inst->buffers.output_meta.extra_count)
			*num_buffers = inst->buffers.output_meta.min_count +
				inst->buffers.output_meta.extra_count;
		inst->buffers.output_meta.actual_count = *num_buffers;
	}
	if (port == INPUT_PORT || port == OUTPUT_PORT) {
		sizes[0] = inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage;
	} else if (port == OUTPUT_META_PORT) {
		sizes[0] = inst->fmts[port].fmt.meta.buffersize;
	} else if (port == INPUT_META_PORT) {
		/* superframe batching multiplies the input meta size */
		if (inst->capabilities->cap[SUPER_FRAME].value)
			sizes[0] = inst->capabilities->cap[SUPER_FRAME].value *
				inst->fmts[port].fmt.meta.buffersize;
		else
			sizes[0] = inst->fmts[port].fmt.meta.buffersize;
	}
	rc = msm_vidc_allocate_buffers(inst, buffer_type, *num_buffers);
	if (rc) {
		i_vpr_e(inst, "%s: failed to allocate buffers, type %s\n",
			__func__, v4l2_type_name(q->type));
		return rc;
	}
	/* route vb2 allocations through the context bank for this region */
	region = msm_vidc_get_buffer_region(inst, buffer_type, __func__);
	cb = msm_vidc_get_context_bank(core, region);
	if (!cb) {
		d_vpr_e("%s: Failed to get context bank device\n",
			__func__);
		return -EIO;
	}
	q->dev = cb->dev;
	i_vpr_h(inst,
		"queue_setup: type %s num_buffers %d sizes[0] %d cb %s\n",
		v4l2_type_name(q->type), *num_buffers, sizes[0], cb->name);
	return rc;
}
  331. int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count)
  332. {
  333. int rc = 0;
  334. struct msm_vidc_inst *inst;
  335. enum msm_vidc_buffer_type buf_type;
  336. if (!q || !q->drv_priv) {
  337. d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
  338. return -EINVAL;
  339. }
  340. inst = q->drv_priv;
  341. inst = get_inst_ref(g_core, inst);
  342. if (!inst || !inst->core || !inst->capabilities) {
  343. d_vpr_e("%s: invalid params\n", __func__);
  344. return -EINVAL;
  345. }
  346. client_lock(inst, __func__);
  347. inst_lock(inst, __func__);
  348. if (is_session_error(inst)) {
  349. i_vpr_e(inst, "%s: inst in error state\n", __func__);
  350. rc = -EBUSY;
  351. goto unlock;
  352. }
  353. if (!msm_vidc_allow_streamon(inst, q->type)) {
  354. rc = -EBUSY;
  355. goto unlock;
  356. }
  357. if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
  358. i_vpr_h(inst, "%s: nothing to start on %s\n",
  359. __func__, v4l2_type_name(q->type));
  360. rc = 0;
  361. goto unlock;
  362. }
  363. if (!is_decode_session(inst) && !is_encode_session(inst)) {
  364. i_vpr_e(inst, "%s: invalid session %d\n",
  365. __func__, inst->domain);
  366. rc = -EINVAL;
  367. goto unlock;
  368. }
  369. i_vpr_h(inst, "Streamon: %s\n", v4l2_type_name(q->type));
  370. if (!inst->once_per_session_set) {
  371. inst->once_per_session_set = true;
  372. rc = msm_vidc_prepare_dependency_list(inst);
  373. if (rc)
  374. goto unlock;
  375. rc = msm_vidc_session_set_codec(inst);
  376. if (rc)
  377. goto unlock;
  378. rc = msm_vidc_session_set_secure_mode(inst);
  379. if (rc)
  380. goto unlock;
  381. if (is_encode_session(inst)) {
  382. rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
  383. MSM_VIDC_BUF_ARP);
  384. if (rc)
  385. goto unlock;
  386. } else if(is_decode_session(inst)) {
  387. rc = msm_vidc_session_set_default_header(inst);
  388. if (rc)
  389. goto unlock;
  390. rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
  391. MSM_VIDC_BUF_PERSIST);
  392. if (rc)
  393. goto unlock;
  394. }
  395. }
  396. if (is_decode_session(inst))
  397. inst->decode_batch.enable = msm_vidc_allow_decode_batch(inst);
  398. msm_vidc_allow_dcvs(inst);
  399. msm_vidc_power_data_reset(inst);
  400. if (q->type == INPUT_MPLANE) {
  401. if (is_decode_session(inst))
  402. rc = msm_vdec_streamon_input(inst);
  403. else if (is_encode_session(inst))
  404. rc = msm_venc_streamon_input(inst);
  405. else
  406. goto unlock;
  407. } else if (q->type == OUTPUT_MPLANE) {
  408. if (is_decode_session(inst))
  409. rc = msm_vdec_streamon_output(inst);
  410. else if (is_encode_session(inst))
  411. rc = msm_venc_streamon_output(inst);
  412. else
  413. goto unlock;
  414. } else {
  415. i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
  416. goto unlock;
  417. }
  418. if (rc)
  419. goto unlock;
  420. /* print final buffer counts & size details */
  421. msm_vidc_print_buffer_info(inst);
  422. buf_type = v4l2_type_to_driver(q->type, __func__);
  423. if (!buf_type)
  424. goto unlock;
  425. /* queue pending buffers */
  426. rc = msm_vidc_queue_deferred_buffers(inst, buf_type);
  427. if (rc)
  428. goto unlock;
  429. /* initialize statistics timer(one time) */
  430. if (!inst->stats.time_ms)
  431. inst->stats.time_ms = ktime_get_ns() / 1000 / 1000;
  432. /* schedule to print buffer statistics */
  433. rc = schedule_stats_work(inst);
  434. if (rc)
  435. goto unlock;
  436. if ((q->type == INPUT_MPLANE && inst->bufq[OUTPUT_PORT].vb2q->streaming) ||
  437. (q->type == OUTPUT_MPLANE && inst->bufq[INPUT_PORT].vb2q->streaming)) {
  438. rc = msm_vidc_get_properties(inst);
  439. if (rc)
  440. goto unlock;
  441. }
  442. i_vpr_h(inst, "Streamon: %s successful\n", v4l2_type_name(q->type));
  443. unlock:
  444. if (rc) {
  445. i_vpr_e(inst, "Streamon: %s failed\n", v4l2_type_name(q->type));
  446. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  447. }
  448. inst_unlock(inst, __func__);
  449. client_unlock(inst, __func__);
  450. put_inst(inst);
  451. return rc;
  452. }
/*
 * vb2 .stop_streaming callback.
 *
 * Issues the port-specific streamoff after consulting the allow-state
 * machine (disallow/ignore/allow). Meta planes are a no-op. On input-port
 * streamoff the timestamp and buffer-stats lists are flushed. Any failure
 * moves the session to MSM_VIDC_ERROR before unlocking.
 */
void msm_vidc_stop_streaming(struct vb2_queue *q)
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	enum msm_vidc_allow allow;
	if (!q || !q->drv_priv) {
		d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
		return;
	}
	inst = q->drv_priv;
	/* hold a reference on the instance for the duration of the call */
	inst = get_inst_ref(g_core, inst);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	/* meta planes carry no hardware state — nothing to stop */
	if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
		i_vpr_h(inst, "%s: nothing to stop on %s\n",
			__func__, v4l2_type_name(q->type));
		rc = 0;
		goto unlock;
	}
	allow = msm_vidc_allow_streamoff(inst, q->type);
	if (allow == MSM_VIDC_DISALLOW) {
		rc = -EBUSY;
		goto unlock;
	} else if (allow == MSM_VIDC_IGNORE) {
		/* already stopped — treat as success */
		rc = 0;
		goto unlock;
	} else if (allow != MSM_VIDC_ALLOW) {
		rc = -EINVAL;
		goto unlock;
	}
	if (!is_decode_session(inst) && !is_encode_session(inst)) {
		i_vpr_e(inst, "%s: invalid session %d\n",
			__func__, inst->domain);
		rc = -EINVAL;
		goto unlock;
	}
	i_vpr_h(inst, "Streamoff: %s\n", v4l2_type_name(q->type));
	if (q->type == INPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamoff_input(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamoff_input(inst);
	} else if (q->type == OUTPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamoff_output(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamoff_output(inst);
	} else {
		i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
		rc = -EINVAL;
		goto unlock;
	}
	if (rc)
		goto unlock;
	/* Input port streamoff */
	if (q->type == INPUT_MPLANE) {
		/* flush timestamps list */
		msm_vidc_flush_ts(inst);
		/* flush buffer_stats list */
		msm_vidc_flush_buffer_stats(inst);
	}
	i_vpr_h(inst, "Streamoff: %s successful\n", v4l2_type_name(q->type));
unlock:
	if (rc) {
		i_vpr_e(inst, "Streamoff: %s failed\n", v4l2_type_name(q->type));
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
	}
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
	return;
}
/*
 * vb2 .buf_queue callback.
 *
 * Applies any media-request controls attached to the buffer, validates the
 * buffer (non-zero bytesused on input; adjusts meta-plane bytesused when
 * tx metadata is enabled), updates rate/framerate bookkeeping, and hands
 * the buffer to the decoder/encoder qbuf path. On failure the session is
 * moved to MSM_VIDC_ERROR and the buffer is completed with
 * VB2_BUF_STATE_ERROR.
 */
void msm_vidc_buf_queue(struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	u64 timestamp_us = 0;
	u64 ktime_ns = ktime_get_ns();
	inst = vb2_get_drv_priv(vb2->vb2_queue);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	/* hold a reference on the instance for the duration of the call */
	inst = get_inst_ref(g_core, inst);
	if (!inst) {
		d_vpr_e("%s: invalid instance\n", __func__);
		return;
	}
	/*
	 * As part of every qbuf initalise request to true.
	 * If there are any dynamic controls associated with qbuf,
	 * they will set as part s_ctrl() from v4l2_ctrl_request_setup().
	 * Once v4l2_ctrl_request_setup() is done, reset request variable.
	 * If the buffer does not have any requests with it, then
	 * v4l2_ctrl_request_setup() will return 0.
	 */
	inst->request = true;
	rc = v4l2_ctrl_request_setup(vb2->req_obj.req,
			&inst->ctrl_handler);
	inst->request = false;
	v4l2_ctrl_request_complete(vb2->req_obj.req, &inst->ctrl_handler);
	/*
	 * call request_setup and request_complete without acquiring lock
	 * to avoid deadlock issues because request_setup or request_complete
	 * would call .s_ctrl and .g_volatile_ctrl respectively which acquire
	 * lock too.
	 */
	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	/* request_setup failure is handled only after taking the locks */
	if (rc) {
		i_vpr_e(inst, "%s: request setup failed, error %d\n",
			__func__, rc);
		goto unlock;
	}
	if (is_session_error(inst)) {
		i_vpr_e(inst, "%s: inst in error state\n", __func__);
		rc = -EINVAL;
		goto unlock;
	}
	if (!vb2->planes[0].bytesused) {
		if (vb2->type == INPUT_MPLANE) {
			/* Expecting non-zero filledlen on INPUT port */
			i_vpr_e(inst,
				"%s: zero bytesused input buffer not supported\n", __func__);
			rc = -EINVAL;
			goto unlock;
		}
		if ((vb2->type == OUTPUT_META_PLANE && is_any_meta_tx_out_enabled(inst)) ||
			(vb2->type == INPUT_META_PLANE && is_any_meta_tx_inp_enabled(inst))) {
			/*
			 * vb2 is not allowing client to pass data in output meta plane.
			 * adjust the bytesused as client will send buffer tag metadata
			 * in output meta plane if DPB_TAG_LIST, or OUTBUF_FENCE metadata
			 * is enabled.
			 */
			vb2->planes[0].bytesused = vb2->planes[0].length;
		}
	}
	/* encoder input: feed the buffer timestamp to auto-framerate tracking */
	if (is_encode_session(inst) && vb2->type == INPUT_MPLANE) {
		timestamp_us = div_u64(vb2->timestamp, 1000);
		msm_vidc_set_auto_framerate(inst, timestamp_us);
	}
	inst->last_qbuf_time_ns = ktime_ns;
	/* decoder input: update observed input (queue) rate */
	if (is_decode_session(inst) && vb2->type == INPUT_MPLANE) {
		rc = msm_vidc_update_input_rate(inst, div_u64(ktime_ns, 1000));
		if (rc)
			goto unlock;
	}
	if (is_decode_session(inst))
		rc = msm_vdec_qbuf(inst, vb2);
	else if (is_encode_session(inst))
		rc = msm_venc_qbuf(inst, vb2);
	else
		rc = -EINVAL;
	if (rc) {
		print_vb2_buffer("failed vb2-qbuf", inst, vb2);
		goto unlock;
	}
unlock:
	if (rc) {
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
		vb2_buffer_done(vb2, VB2_BUF_STATE_ERROR);
	}
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
}
/*
 * vb2 .buf_cleanup callback.
 * Intentionally empty: per-buffer teardown happens in the unmap/detach
 * mem_ops handlers, so there is nothing left to clean up here.
 */
void msm_vidc_buf_cleanup(struct vb2_buffer *vb)
{
}
  627. int msm_vidc_buf_out_validate(struct vb2_buffer *vb)
  628. {
  629. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  630. vbuf->field = V4L2_FIELD_NONE;
  631. return 0;
  632. }
  633. void msm_vidc_buf_request_complete(struct vb2_buffer *vb)
  634. {
  635. struct msm_vidc_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
  636. i_vpr_l(inst, "%s: vb type %d, index %d\n",
  637. __func__, vb->type, vb->index);
  638. v4l2_ctrl_request_complete(vb->req_obj.req, &inst->ctrl_handler);
  639. }