/* msm_vidc_vb2.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. */
  5. /* Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. */
  6. #include "msm_vidc_vb2.h"
  7. #include "msm_vidc_core.h"
  8. #include "msm_vidc_inst.h"
  9. #include "msm_vidc_internal.h"
  10. #include "msm_vidc_driver.h"
  11. #include "msm_vidc_power.h"
  12. #include "msm_vdec.h"
  13. #include "msm_venc.h"
  14. #include "msm_vidc_debug.h"
  15. #include "msm_vidc_control.h"
  16. extern struct msm_vidc_core *g_core;
  17. struct vb2_queue *msm_vidc_get_vb2q(struct msm_vidc_inst *inst,
  18. u32 type, const char *func)
  19. {
  20. struct vb2_queue *q = NULL;
  21. if (!inst) {
  22. d_vpr_e("%s: invalid params\n", func);
  23. return NULL;
  24. }
  25. if (type == INPUT_MPLANE) {
  26. q = inst->bufq[INPUT_PORT].vb2q;
  27. } else if (type == OUTPUT_MPLANE) {
  28. q = inst->bufq[OUTPUT_PORT].vb2q;
  29. } else if (type == INPUT_META_PLANE) {
  30. q = inst->bufq[INPUT_META_PORT].vb2q;
  31. } else if (type == OUTPUT_META_PLANE) {
  32. q = inst->bufq[OUTPUT_META_PORT].vb2q;
  33. } else {
  34. i_vpr_e(inst, "%s: invalid buffer type %d\n",
  35. __func__, type);
  36. }
  37. return q;
  38. }
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 15, 0))
/*
 * Stub vb2 mem_ops for kernels before 5.15, which use the older
 * alloc/attach_dmabuf signatures.  The driver manages buffer memory
 * itself, so these handlers only return a dummy non-NULL cookie --
 * vb2 treats a NULL/ERR return as allocation failure.
 */
void *msm_vb2_alloc(struct device *dev, unsigned long attrs,
	unsigned long size, enum dma_data_direction dma_dir,
	gfp_t gfp_flags)
{
	/* dummy non-NULL token; never dereferenced by this driver */
	return (void *)0xdeadbeef;
}

void *msm_vb2_attach_dmabuf(struct device *dev, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	/* dummy non-NULL token; never dereferenced by this driver */
	return (void *)0xdeadbeef;
}
#else
/*
 * Same stubs built against the 5.15+ vb2 mem_ops signatures, which
 * receive the vb2_buffer instead of allocator attrs/direction.
 */
void *msm_vb2_alloc(struct vb2_buffer *vb, struct device *dev,
	unsigned long size)
{
	/* dummy non-NULL token; never dereferenced by this driver */
	return (void *)0xdeadbeef;
}

void *msm_vb2_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
	struct dma_buf *dbuf, unsigned long size)
{
	/* dummy non-NULL token; never dereferenced by this driver */
	return (void *)0xdeadbeef;
}
#endif
/* vb2 put op: no-op, buffer memory is freed by the driver itself */
void msm_vb2_put(void *buf_priv)
{
}

/* vb2 mmap op: no-op, mapping is handled outside vb2; report success */
int msm_vb2_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	return 0;
}

/* vb2 detach_dmabuf op: no-op, dmabuf lifetime is managed by the driver */
void msm_vb2_detach_dmabuf(void *buf_priv)
{
}

/* vb2 map_dmabuf op: no-op; report success so vb2 proceeds */
int msm_vb2_map_dmabuf(void *buf_priv)
{
	return 0;
}

/* vb2 unmap_dmabuf op: no-op counterpart of map_dmabuf */
void msm_vb2_unmap_dmabuf(void *buf_priv)
{
}
  80. int msm_vidc_queue_setup(struct vb2_queue *q,
  81. unsigned int *num_buffers, unsigned int *num_planes,
  82. unsigned int sizes[], struct device *alloc_devs[])
  83. {
  84. int rc = 0;
  85. struct msm_vidc_inst *inst;
  86. int port;
  87. struct v4l2_format *f;
  88. if (!q || !num_buffers || !num_planes
  89. || !sizes || !q->drv_priv) {
  90. d_vpr_e("%s: invalid params, q = %pK, %pK, %pK\n",
  91. __func__, q, num_buffers, num_planes);
  92. return -EINVAL;
  93. }
  94. inst = q->drv_priv;
  95. if (!inst || !inst->core) {
  96. d_vpr_e("%s: invalid params %pK\n", __func__, inst);
  97. return -EINVAL;
  98. }
  99. if (inst->state == MSM_VIDC_START) {
  100. i_vpr_e(inst, "%s: invalid state %d\n", __func__, inst->state);
  101. return -EINVAL;
  102. }
  103. port = v4l2_type_to_driver_port(inst, q->type, __func__);
  104. if (port < 0)
  105. return -EINVAL;
  106. if (*num_planes && (port == INPUT_PORT || port == OUTPUT_PORT)) {
  107. f = &inst->fmts[port];
  108. if (*num_planes != f->fmt.pix_mp.num_planes) {
  109. i_vpr_e(inst, "%s: requested num_planes %d not supported %d\n",
  110. __func__, *num_planes, f->fmt.pix_mp.num_planes);
  111. return -EINVAL;
  112. }
  113. if (sizes[0] < inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage) {
  114. i_vpr_e(inst, "%s: requested size %d not acceptable\n",
  115. __func__, sizes[0]);
  116. return -EINVAL;
  117. }
  118. }
  119. if (port == INPUT_PORT) {
  120. *num_planes = 1;
  121. if (*num_buffers < inst->buffers.input.min_count +
  122. inst->buffers.input.extra_count)
  123. *num_buffers = inst->buffers.input.min_count +
  124. inst->buffers.input.extra_count;
  125. inst->buffers.input.actual_count = *num_buffers;
  126. } else if (port == INPUT_META_PORT) {
  127. *num_planes = 1;
  128. if (*num_buffers < inst->buffers.input_meta.min_count +
  129. inst->buffers.input_meta.extra_count)
  130. *num_buffers = inst->buffers.input_meta.min_count +
  131. inst->buffers.input_meta.extra_count;
  132. inst->buffers.input_meta.actual_count = *num_buffers;
  133. } else if (port == OUTPUT_PORT) {
  134. *num_planes = 1;
  135. if (*num_buffers < inst->buffers.output.min_count +
  136. inst->buffers.output.extra_count)
  137. *num_buffers = inst->buffers.output.min_count +
  138. inst->buffers.output.extra_count;
  139. inst->buffers.output.actual_count = *num_buffers;
  140. } else if (port == OUTPUT_META_PORT) {
  141. *num_planes = 1;
  142. if (*num_buffers < inst->buffers.output_meta.min_count +
  143. inst->buffers.output_meta.extra_count)
  144. *num_buffers = inst->buffers.output_meta.min_count +
  145. inst->buffers.output_meta.extra_count;
  146. inst->buffers.output_meta.actual_count = *num_buffers;
  147. }
  148. if (port == INPUT_PORT || port == OUTPUT_PORT) {
  149. sizes[0] = inst->fmts[port].fmt.pix_mp.plane_fmt[0].sizeimage;
  150. } else if (port == OUTPUT_META_PORT) {
  151. sizes[0] = inst->fmts[port].fmt.meta.buffersize;
  152. } else if (port == INPUT_META_PORT) {
  153. if (inst->capabilities->cap[SUPER_FRAME].value)
  154. sizes[0] = inst->capabilities->cap[SUPER_FRAME].value *
  155. inst->fmts[port].fmt.meta.buffersize;
  156. else
  157. sizes[0] = inst->fmts[port].fmt.meta.buffersize;
  158. }
  159. i_vpr_h(inst,
  160. "queue_setup: type %s num_buffers %d sizes[0] %d\n",
  161. v4l2_type_name(q->type), *num_buffers, sizes[0]);
  162. return rc;
  163. }
  164. int msm_vidc_start_streaming(struct vb2_queue *q, unsigned int count)
  165. {
  166. int rc = 0;
  167. struct msm_vidc_inst *inst;
  168. enum msm_vidc_buffer_type buf_type;
  169. if (!q || !q->drv_priv) {
  170. d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
  171. return -EINVAL;
  172. }
  173. inst = q->drv_priv;
  174. inst = get_inst_ref(g_core, inst);
  175. if (!inst || !inst->core || !inst->capabilities) {
  176. d_vpr_e("%s: invalid params\n", __func__);
  177. return -EINVAL;
  178. }
  179. client_lock(inst, __func__);
  180. inst_lock(inst, __func__);
  181. if (is_session_error(inst)) {
  182. i_vpr_e(inst, "%s: inst in error state\n", __func__);
  183. rc = -EBUSY;
  184. goto unlock;
  185. }
  186. if (!msm_vidc_allow_streamon(inst, q->type)) {
  187. rc = -EBUSY;
  188. goto unlock;
  189. }
  190. rc = msm_vidc_state_change_streamon(inst, q->type);
  191. if (rc)
  192. goto unlock;
  193. if (q->type == INPUT_META_PLANE &&
  194. inst->capabilities->cap[INPUT_META_VIA_REQUEST].value) {
  195. i_vpr_e(inst,
  196. "%s: invalid input meta port start when request enabled\n",
  197. __func__);
  198. rc = -EINVAL;
  199. goto unlock;
  200. }
  201. if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
  202. i_vpr_h(inst, "%s: nothing to start on %s\n",
  203. __func__, v4l2_type_name(q->type));
  204. rc = 0;
  205. goto unlock;
  206. }
  207. if (!is_decode_session(inst) && !is_encode_session(inst)) {
  208. i_vpr_e(inst, "%s: invalid session %d\n",
  209. __func__, inst->domain);
  210. rc = -EINVAL;
  211. goto unlock;
  212. }
  213. i_vpr_h(inst, "Streamon: %s\n", v4l2_type_name(q->type));
  214. if (!inst->once_per_session_set) {
  215. inst->once_per_session_set = true;
  216. rc = msm_vidc_prepare_dependency_list(inst);
  217. if (rc)
  218. goto unlock;
  219. rc = msm_vidc_session_set_codec(inst);
  220. if (rc)
  221. goto unlock;
  222. rc = msm_vidc_session_set_secure_mode(inst);
  223. if (rc)
  224. goto unlock;
  225. if (is_encode_session(inst)) {
  226. rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
  227. MSM_VIDC_BUF_ARP);
  228. if (rc)
  229. goto unlock;
  230. } else if(is_decode_session(inst)) {
  231. rc = msm_vidc_session_set_default_header(inst);
  232. if (rc)
  233. goto unlock;
  234. rc = msm_vidc_alloc_and_queue_session_internal_buffers(inst,
  235. MSM_VIDC_BUF_PERSIST);
  236. if (rc)
  237. goto unlock;
  238. }
  239. }
  240. if (is_decode_session(inst))
  241. inst->decode_batch.enable = msm_vidc_allow_decode_batch(inst);
  242. msm_vidc_allow_dcvs(inst);
  243. msm_vidc_power_data_reset(inst);
  244. if (q->type == INPUT_MPLANE) {
  245. if (is_decode_session(inst))
  246. rc = msm_vdec_streamon_input(inst);
  247. else if (is_encode_session(inst))
  248. rc = msm_venc_streamon_input(inst);
  249. else
  250. goto unlock;
  251. } else if (q->type == OUTPUT_MPLANE) {
  252. if (is_decode_session(inst))
  253. rc = msm_vdec_streamon_output(inst);
  254. else if (is_encode_session(inst))
  255. rc = msm_venc_streamon_output(inst);
  256. else
  257. goto unlock;
  258. } else {
  259. i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
  260. goto unlock;
  261. }
  262. if (rc)
  263. goto unlock;
  264. /* print final buffer counts & size details */
  265. msm_vidc_print_buffer_info(inst);
  266. buf_type = v4l2_type_to_driver(q->type, __func__);
  267. if (!buf_type)
  268. goto unlock;
  269. /* queue pending buffers */
  270. rc = msm_vidc_queue_deferred_buffers(inst, buf_type);
  271. if (rc)
  272. goto unlock;
  273. /* initialize statistics timer(one time) */
  274. if (!inst->stats.time_ms)
  275. inst->stats.time_ms = ktime_get_ns() / 1000 / 1000;
  276. /* schedule to print buffer statistics */
  277. rc = schedule_stats_work(inst);
  278. if (rc)
  279. goto unlock;
  280. if ((q->type == INPUT_MPLANE && inst->bufq[OUTPUT_PORT].vb2q->streaming) ||
  281. (q->type == OUTPUT_MPLANE && inst->bufq[INPUT_PORT].vb2q->streaming)) {
  282. rc = msm_vidc_get_properties(inst);
  283. if (rc)
  284. goto unlock;
  285. }
  286. i_vpr_h(inst, "Streamon: %s successful\n", v4l2_type_name(q->type));
  287. unlock:
  288. if (rc) {
  289. i_vpr_e(inst, "Streamon: %s failed\n", v4l2_type_name(q->type));
  290. msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
  291. }
  292. inst_unlock(inst, __func__);
  293. client_unlock(inst, __func__);
  294. put_inst(inst);
  295. return rc;
  296. }
/*
 * msm_vidc_stop_streaming() - vb2 stop_streaming op.
 * @q: queue being stopped
 *
 * Issues the port streamoff to the driver core.  Meta planes have no
 * firmware counterpart and return immediately.  Failures move the
 * instance to the ERROR state under lock; vb2's stop_streaming op
 * returns void, so errors are only logged.
 */
void msm_vidc_stop_streaming(struct vb2_queue *q)
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	enum msm_vidc_allow allow;

	if (!q || !q->drv_priv) {
		d_vpr_e("%s: invalid input, q = %pK\n", __func__, q);
		return;
	}
	inst = q->drv_priv;
	/* take a reference so the instance cannot vanish mid-streamoff */
	inst = get_inst_ref(g_core, inst);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	/* meta planes have no firmware streamoff; nothing to do */
	if (q->type == INPUT_META_PLANE || q->type == OUTPUT_META_PLANE) {
		i_vpr_h(inst, "%s: nothing to stop on %s\n",
			__func__, v4l2_type_name(q->type));
		rc = 0;
		goto unlock;
	}
	/* state machine decides: proceed, silently ignore, or reject */
	allow = msm_vidc_allow_streamoff(inst, q->type);
	if (allow == MSM_VIDC_DISALLOW) {
		rc = -EBUSY;
		goto unlock;
	} else if (allow == MSM_VIDC_IGNORE) {
		rc = 0;
		goto unlock;
	} else if (allow != MSM_VIDC_ALLOW) {
		rc = -EINVAL;
		goto unlock;
	}
	rc = msm_vidc_state_change_streamoff(inst, q->type);
	if (rc)
		goto unlock;
	if (!is_decode_session(inst) && !is_encode_session(inst)) {
		i_vpr_e(inst, "%s: invalid session %d\n",
			__func__, inst->domain);
		rc = -EINVAL;
		goto unlock;
	}
	i_vpr_h(inst, "Streamoff: %s\n", v4l2_type_name(q->type));
	/* dispatch to the decoder/encoder port-specific streamoff */
	if (q->type == INPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamoff_input(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamoff_input(inst);
	} else if (q->type == OUTPUT_MPLANE) {
		if (is_decode_session(inst))
			rc = msm_vdec_streamoff_output(inst);
		else if (is_encode_session(inst))
			rc = msm_venc_streamoff_output(inst);
	} else {
		i_vpr_e(inst, "%s: invalid type %d\n", __func__, q->type);
		rc = -EINVAL;
		goto unlock;
	}
	if (rc)
		goto unlock;

	/* Input port streamoff - flush timestamps list*/
	if (q->type == INPUT_MPLANE)
		msm_vidc_flush_ts(inst);

	i_vpr_h(inst, "Streamoff: %s successful\n", v4l2_type_name(q->type));

unlock:
	if (rc) {
		i_vpr_e(inst, "Streamoff: %s failed\n", v4l2_type_name(q->type));
		msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
	}
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
	return;
}
/*
 * msm_vidc_buf_queue() - vb2 buf_queue op; hand one buffer to the driver.
 * @vb2: the buffer being queued
 *
 * Applies any media-request controls attached to the buffer, validates
 * bytesused, updates framerate/input-rate bookkeeping, then forwards the
 * buffer to the decoder/encoder qbuf path.  On failure the instance is
 * moved to ERROR and the buffer completed with VB2_BUF_STATE_ERROR.
 */
void msm_vidc_buf_queue(struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_inst *inst;
	u64 timestamp_us = 0;
	u64 ktime_ns = ktime_get_ns();

	inst = vb2_get_drv_priv(vb2->vb2_queue);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	/* take a reference so the instance cannot vanish while queueing */
	inst = get_inst_ref(g_core, inst);
	if (!inst) {
		d_vpr_e("%s: invalid instance\n", __func__);
		return;
	}
	/*
	 * As part of every qbuf, initialize inst->request to true.
	 * If there are any dynamic controls associated with this qbuf,
	 * they will be set via s_ctrl() from v4l2_ctrl_request_setup().
	 * Once v4l2_ctrl_request_setup() is done, reset the request flag.
	 * If the buffer does not have any request attached,
	 * v4l2_ctrl_request_setup() simply returns 0.
	 */
	inst->request = true;
	rc = v4l2_ctrl_request_setup(vb2->req_obj.req,
		&inst->ctrl_handler);
	inst->request = false;
	v4l2_ctrl_request_complete(vb2->req_obj.req, &inst->ctrl_handler);
	/*
	 * request_setup and request_complete are called above, before
	 * acquiring the locks, to avoid deadlock: they invoke .s_ctrl
	 * and .g_volatile_ctrl respectively, which take the same locks.
	 */
	client_lock(inst, __func__);
	inst_lock(inst, __func__);
	if (rc) {
		i_vpr_e(inst, "%s: request setup failed, error %d\n",
			__func__, rc);
		goto unlock;
	}
	if (is_session_error(inst)) {
		i_vpr_e(inst, "%s: inst in error state\n", __func__);
		rc = -EINVAL;
		goto unlock;
	}
	if (!vb2->planes[0].bytesused) {
		if (vb2->type == INPUT_MPLANE) {
			/* Expecting non-zero filledlen on INPUT port */
			i_vpr_e(inst,
				"%s: zero bytesused input buffer not supported\n", __func__);
			rc = -EINVAL;
			goto unlock;
		}
		if ((vb2->type == OUTPUT_META_PLANE && is_any_meta_tx_out_enabled(inst)) ||
			(vb2->type == INPUT_META_PLANE && is_any_meta_tx_inp_enabled(inst))) {
			/*
			 * vb2 does not allow the client to pass data in the output
			 * meta plane.  Adjust bytesused because the client will send
			 * buffer-tag metadata in the output meta plane when
			 * DPB_TAG_LIST or OUTBUF_FENCE metadata is enabled.
			 */
			vb2->planes[0].bytesused = vb2->planes[0].length;
		}
	}
	/* encoder input: derive auto framerate from buffer timestamps (us) */
	if (is_encode_session(inst) && vb2->type == INPUT_MPLANE) {
		timestamp_us = div_u64(vb2->timestamp, 1000);
		msm_vidc_set_auto_framerate(inst, timestamp_us);
	}
	inst->last_qbuf_time_ns = ktime_ns;
	/* decoder input: track queue rate for power/dcvs decisions */
	if (is_decode_session(inst) && vb2->type == INPUT_MPLANE) {
		rc = msm_vidc_update_input_rate(inst, div_u64(ktime_ns, 1000));
		if (rc)
			goto unlock;
	}
	/* request mode: pair input meta buffers with their data buffers */
	if (inst->capabilities->cap[INPUT_META_VIA_REQUEST].value) {
		rc = msm_vidc_update_input_meta_buffer_index(inst, vb2);
		if (rc)
			goto unlock;
	}
	if (is_decode_session(inst))
		rc = msm_vdec_qbuf(inst, vb2);
	else if (is_encode_session(inst))
		rc = msm_venc_qbuf(inst, vb2);
	else
		rc = -EINVAL;
	if (rc) {
		print_vb2_buffer("failed vb2-qbuf", inst, vb2);
		goto unlock;
	}

unlock:
	if (rc) {
		/* mark session errored and return the buffer to vb2 as failed */
		msm_vidc_change_inst_state(inst, MSM_VIDC_ERROR, __func__);
		vb2_buffer_done(vb2, VB2_BUF_STATE_ERROR);
	}
	inst_unlock(inst, __func__);
	client_unlock(inst, __func__);
	put_inst(inst);
}
/* vb2 buf_cleanup op: no-op, per-buffer state is torn down elsewhere */
void msm_vidc_buf_cleanup(struct vb2_buffer *vb)
{
}
  475. int msm_vidc_buf_out_validate(struct vb2_buffer *vb)
  476. {
  477. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  478. vbuf->field = V4L2_FIELD_NONE;
  479. return 0;
  480. }
  481. void msm_vidc_buf_request_complete(struct vb2_buffer *vb)
  482. {
  483. struct msm_vidc_inst *inst = vb2_get_drv_priv(vb->vb2_queue);
  484. i_vpr_l(inst, "%s: vb type %d, index %d\n",
  485. __func__, vb->type, vb->index);
  486. v4l2_ctrl_request_complete(vb->req_obj.req, &inst->ctrl_handler);
  487. }