// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <media/msm_vidc_utils.h>
#include <media/msm_media_info.h>

#include "msm_vidc_driver.h"
#include "msm_vidc_platform.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "venus_hfi.h"
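
/*
 * COUNT_BITS() counts the set bits in @a (a population count). It is
 * destructive: @a is shifted down to zero and the count is accumulated
 * into @out, so callers must pass a scratch copy and pre-initialize @out.
 * A minimal usage sketch (values are illustrative only):
 *
 *	u8 scratch = 0x13, bits = 0;
 *
 *	COUNT_BITS(scratch, bits);	// bits == 3, scratch == 0
 */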
#define COUNT_BITS(a, out) ({ \
	while ((a) >= 1) { \
		(out) += (a) & (1); \
		(a) >>= (1); \
	} \
})

void print_vidc_buffer(struct msm_vidc_inst *inst, struct msm_vidc_buffer *b)
{
}

int msm_vidc_get_port_from_v4l2_type(u32 type)
{
	int port;

	if (type == INPUT_PLANE) {
		port = INPUT_PORT;
	} else if (type == INPUT_META_PLANE) {
		port = INPUT_META_PORT;
	} else if (type == OUTPUT_PLANE) {
		port = OUTPUT_PORT;
	} else if (type == OUTPUT_META_PLANE) {
		port = OUTPUT_META_PORT;
	} else {
		d_vpr_e("%s: invalid type %d\n", __func__, type);
		port = -EINVAL;
	}
	return port;
}
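
/*
 * Map a buffer type to the CP (content-protection) region it must be
 * allocated from for secure sessions. The pixel/bitstream split is
 * mirrored between encoder and decoder: an encoder's input is raw
 * pixels while a decoder's input is compressed bitstream, and vice
 * versa on the output port. Non-secure sessions always use
 * MSM_VIDC_NON_SECURE.
 */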
u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	u32 region = MSM_VIDC_NON_SECURE;

	if (!is_secure_session(inst))
		return region;

	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_PIXEL;
		else
			region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_OUTPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_BITSTREAM;
		else
			region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_INPUT_META:
	case MSM_VIDC_BUF_OUTPUT_META:
		region = MSM_VIDC_NON_SECURE;
		break;
	case MSM_VIDC_BUF_SCRATCH:
		region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_SCRATCH_1:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	case MSM_VIDC_BUF_SCRATCH_2:
		region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_PERSIST:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_NONPIXEL;
		else
			region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_PERSIST_1:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			__func__, buffer_type);
	}
	return region;
}
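
/*
 * The three lookup helpers below resolve a buffer type to the matching
 * bookkeeping list inside the instance: buffer info (counts/sizes),
 * map info (dma-buf mappings) and alloc info (driver-owned memory).
 * Only internal (scratch/persist) buffer types have alloc entries,
 * since input/output buffers are allocated by the client.
 */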
struct msm_vidc_buffer_info *msm_vidc_get_buffer_info(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->buffers.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->buffers.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->buffers.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->buffers.output_meta;
	case MSM_VIDC_BUF_SCRATCH:
		return &inst->buffers.scratch;
	case MSM_VIDC_BUF_SCRATCH_1:
		return &inst->buffers.scratch_1;
	case MSM_VIDC_BUF_SCRATCH_2:
		return &inst->buffers.scratch_2;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->buffers.persist;
	case MSM_VIDC_BUF_PERSIST_1:
		return &inst->buffers.persist_1;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			__func__, buffer_type);
		return NULL;
	}
}

struct msm_vidc_map_info *msm_vidc_get_map_info(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->maps.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->maps.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->maps.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->maps.output_meta;
	case MSM_VIDC_BUF_SCRATCH:
		return &inst->maps.scratch;
	case MSM_VIDC_BUF_SCRATCH_1:
		return &inst->maps.scratch_1;
	case MSM_VIDC_BUF_SCRATCH_2:
		return &inst->maps.scratch_2;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->maps.persist;
	case MSM_VIDC_BUF_PERSIST_1:
		return &inst->maps.persist_1;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			__func__, buffer_type);
		return NULL;
	}
}

struct msm_vidc_alloc_info *msm_vidc_get_alloc_info(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_SCRATCH:
		return &inst->allocations.scratch;
	case MSM_VIDC_BUF_SCRATCH_1:
		return &inst->allocations.scratch_1;
	case MSM_VIDC_BUF_SCRATCH_2:
		return &inst->allocations.scratch_2;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->allocations.persist;
	case MSM_VIDC_BUF_PERSIST_1:
		return &inst->allocations.persist_1;
	default:
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			__func__, buffer_type);
		return NULL;
	}
}

int msm_vidc_change_inst_state(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_state request_state)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!request_state) {
		d_vpr_e("%s: invalid request state\n", __func__);
		return -EINVAL;
	}

	if (inst->state == MSM_VIDC_ERROR) {
		s_vpr_h(inst->sid,
			"inst is in bad state, cannot change state to %d\n",
			request_state);
		return 0;
	}

	s_vpr_h(inst->sid, "state changed from %d to %d\n",
		inst->state, request_state);
	inst->state = request_state;
	return 0;
}

int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
{
	int rc = 0;

	if (!inst || !ctrl) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		ctrl->val = inst->buffers.output.min_count +
			inst->buffers.output.extra_count;
		s_vpr_h(inst->sid, "g_min: output buffers %d\n", ctrl->val);
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
		ctrl->val = inst->buffers.input.min_count +
			inst->buffers.input.extra_count;
		s_vpr_h(inst->sid, "g_min: input buffers %d\n", ctrl->val);
		break;
	default:
		break;
	}
	return rc;
}

u32 msm_vidc_convert_color_fmt(u32 v4l2_fmt)
{
	switch (v4l2_fmt) {
	case V4L2_PIX_FMT_NV12:
		return COLOR_FMT_NV12;
	case V4L2_PIX_FMT_NV21:
		return COLOR_FMT_NV21;
	case V4L2_PIX_FMT_NV12_512:
		return COLOR_FMT_NV12_512;
	case V4L2_PIX_FMT_SDE_Y_CBCR_H2V2_P010_VENUS:
		return COLOR_FMT_P010;
	case V4L2_PIX_FMT_NV12_UBWC:
		return COLOR_FMT_NV12_UBWC;
	case V4L2_PIX_FMT_NV12_TP10_UBWC:
		return COLOR_FMT_NV12_BPP10_UBWC;
	case V4L2_PIX_FMT_RGBA8888_UBWC:
		return COLOR_FMT_RGBA8888_UBWC;
	default:
		d_vpr_e("invalid v4l2 color fmt %#x, defaulting to NV12\n",
			v4l2_fmt);
		return COLOR_FMT_NV12;
	}
}
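
/*
 * Internal (scratch/persist) buffers are tracked with three parallel
 * objects per buffer: an msm_vidc_buffer descriptor, an msm_vidc_alloc
 * holding the dma-buf, and an msm_vidc_map for the IOMMU mapping. Each
 * object is linked onto its list only once fully initialized, so the
 * teardown path (assumed to walk these lists) can release whatever was
 * created before a failure.
 */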
int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffer_info *buffer_info;
	struct msm_vidc_alloc_info *alloc_info;
	struct msm_vidc_map_info *map_info;
	int i;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}

	buffer_info = msm_vidc_get_buffer_info(inst, buffer_type);
	if (!buffer_info)
		return -EINVAL;
	alloc_info = msm_vidc_get_alloc_info(inst, buffer_type);
	if (!alloc_info)
		return -EINVAL;
	map_info = msm_vidc_get_map_info(inst, buffer_type);
	if (!map_info)
		return -EINVAL;

	for (i = 0; i < buffer_info->min_count; i++) {
		struct msm_vidc_buffer *buffer;
		struct msm_vidc_alloc *alloc;
		struct msm_vidc_map *map;

		if (!buffer_info->size) {
			d_vpr_e("%s: invalid buffer %#x\n", __func__, buffer_type);
			return -EINVAL;
		}
		buffer = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
		if (!buffer) {
			s_vpr_e(inst->sid, "%s: msm_vidc_buffer alloc failed\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&buffer->list);
		buffer->valid = true;
		buffer->type = buffer_type;
		buffer->index = i;
		buffer->buffer_size = buffer_info->size;
		list_add_tail(&buffer->list, &buffer_info->list);

		alloc = kzalloc(sizeof(struct msm_vidc_alloc), GFP_KERNEL);
		if (!alloc) {
			s_vpr_e(inst->sid, "%s: msm_vidc_alloc alloc failed\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&alloc->list);
		alloc->buffer_type = buffer_type;
		alloc->region = msm_vidc_get_buffer_region(inst, buffer_type);
		alloc->size = buffer->buffer_size;
		rc = msm_vidc_memory_alloc(inst->core, alloc);
		if (rc) {
			kfree(alloc);
			return -ENOMEM;
		}
		list_add_tail(&alloc->list, &alloc_info->list);

		map = kzalloc(sizeof(struct msm_vidc_map), GFP_KERNEL);
		if (!map) {
			s_vpr_e(inst->sid, "%s: msm_vidc_map alloc failed\n", __func__);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(&map->list);
		map->buffer_type = alloc->buffer_type;
		map->region = alloc->region;
		map->dmabuf = alloc->dmabuf;
		rc = msm_vidc_memory_map(inst->core, map);
		if (rc) {
			kfree(map);
			return -ENOMEM;
		}
		list_add_tail(&map->list, &map_info->list);

		s_vpr_h(inst->sid, "%s: created buffer_type %d, size %d\n",
			__func__, buffer_type, buffer_info->size);
	}
	return 0;
}
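
/*
 * Internal buffers move through a small attribute lifecycle: freshly
 * created -> MSM_VIDC_ATTR_QUEUED once handed to firmware ->
 * MSM_VIDC_ATTR_PENDING_RELEASE once a release has been requested.
 * The queue and release helpers below check these flags so that they
 * stay idempotent across repeated calls.
 */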
int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffer_info *buffer_info;
	struct msm_vidc_buffer *buffer, *dummy;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}

	buffer_info = msm_vidc_get_buffer_info(inst, buffer_type);
	if (!buffer_info)
		return -EINVAL;

	list_for_each_entry_safe(buffer, dummy, &buffer_info->list, list) {
		/* do not queue pending release buffers */
		if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
			continue;
		/* do not queue already queued buffers */
		if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
			continue;
		rc = venus_hfi_queue_buffer(inst, buffer, NULL);
		if (rc)
			return rc;
		/* mark queued */
		buffer->attr |= MSM_VIDC_ATTR_QUEUED;
		s_vpr_h(inst->sid, "%s: queued buffer_type %d, size %d\n",
			__func__, buffer_type, buffer_info->size);
	}
	return 0;
}

int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffer_info *buffer_info;
	struct msm_vidc_buffer *buffer, *dummy;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}

	buffer_info = msm_vidc_get_buffer_info(inst, buffer_type);
	if (!buffer_info)
		return -EINVAL;

	list_for_each_entry_safe(buffer, dummy, &buffer_info->list, list) {
		/* do not release buffers that are already pending release */
		if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
			continue;
		/* release only queued buffers */
		if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
			continue;
		rc = venus_hfi_release_buffer(inst, buffer);
		if (rc)
			return rc;
		/* mark pending release */
		buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;
		s_vpr_h(inst->sid, "%s: released buffer_type %d, size %d\n",
			__func__, buffer_type, buffer_info->size);
	}
	return 0;
}

int msm_vidc_setup_event_queue(struct msm_vidc_inst *inst)
{
	int rc = 0;
	int index;
	struct msm_vidc_core *core;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	// TODO: check decode is index = 0 and encode is index 1
	if (is_decode_session(inst))
		index = 0;
	else if (is_encode_session(inst))
		index = 1;
	else
		return -EINVAL;

	v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
	v4l2_fh_add(&inst->event_handler);
	return rc;
}
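
/*
 * Common vb2_queue setup shared by all four ports. The ops and mem_ops
 * tables come from the core so that platform code can swap in its own
 * implementations; timestamps are copied from the client
 * (V4L2_BUF_FLAG_TIMESTAMP_COPY) rather than generated by the driver.
 */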
static int vb2q_init(struct msm_vidc_inst *inst,
	struct vb2_queue *q, enum v4l2_buf_type type)
{
	struct msm_vidc_core *core;

	if (!inst || !q || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	q->type = type;
	q->io_modes = VB2_MMAP | VB2_USERPTR;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	q->ops = core->vb2_ops;
	q->mem_ops = core->vb2_mem_ops;
	q->drv_priv = inst;
	q->allow_zero_bytesused = 1;
	q->copy_timestamp = 1;
	return vb2_queue_init(q);
}

int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
{
	int rc = 0;

	d_vpr_h("%s()\n", __func__);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = vb2q_init(inst, &inst->vb2q[INPUT_PORT], INPUT_PLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[OUTPUT_PORT], OUTPUT_PLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[INPUT_META_PORT], INPUT_META_PLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[OUTPUT_META_PORT], OUTPUT_META_PLANE);
	return rc;
}
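
/*
 * Register a new instance with the core, enforcing the global instance
 * limit. The session_id is simply derived from the instance count at
 * registration time; it is assumed to serve only as a debug/log tag
 * (sid), not as a unique firmware handle.
 */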
int msm_vidc_add_session(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_inst *i;
	struct msm_vidc_core *core;
	u32 count = 0;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	mutex_lock(&core->lock);
	list_for_each_entry(i, &core->instances, list)
		count++;
	if (count < MAX_SUPPORTED_INSTANCES) {
		list_add_tail(&inst->list, &core->instances);
	} else {
		d_vpr_e("%s: total sessions %d exceeded max limit %d\n",
			__func__, count, MAX_SUPPORTED_INSTANCES);
		rc = -EINVAL;
	}
	mutex_unlock(&core->lock);

	/* assign session_id */
	inst->session_id = count + 1;
	inst->sid = inst->session_id;
	return rc;
}

int msm_vidc_session_open(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_open(inst);
	if (rc)
		return rc;
	inst->session_created = true;
	return 0;
}

static int msm_vidc_init_core_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	int i, num_platform_caps;
	struct msm_platform_core_capability *platform_data;

	if (!core || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	platform_data = core->platform->data.core_data;
	if (!platform_data) {
		d_vpr_e("%s: platform core data is NULL\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	if (!core->capabilities) {
		core->capabilities = kcalloc(CORE_CAP_MAX,
			sizeof(struct msm_vidc_core_capability), GFP_KERNEL);
		if (!core->capabilities) {
			d_vpr_e("%s: failed to allocate core capabilities\n",
				__func__);
			rc = -ENOMEM;
			goto exit;
		}
	} else {
		d_vpr_e("%s: capabilities memory is expected to be freed\n",
			__func__);
	}

	num_platform_caps = core->platform->data.core_data_size;
	/* loop over platform caps and store them indexed by type */
	for (i = 0; i < num_platform_caps; i++) {
		core->capabilities[platform_data[i].type].type = platform_data[i].type;
		core->capabilities[platform_data[i].type].value = platform_data[i].value;
	}
exit:
	return rc;
}

static void update_inst_capability(struct msm_platform_inst_capability *in,
	struct msm_vidc_inst_capability *capability)
{
	if (!in || !capability) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, in, capability);
		return;
	}
	if (in->cap < INST_CAP_MAX) {
		capability->cap[in->cap].cap = in->cap;
		capability->cap[in->cap].min = in->min;
		capability->cap[in->cap].max = in->max;
		capability->cap[in->cap].step_or_menu = in->step_or_menu;
		capability->cap[in->cap].value = in->value;
		capability->cap[in->cap].flags = in->flags;
		capability->cap[in->cap].v4l2_id = in->v4l2_id;
		capability->cap[in->cap].hfi_id = in->hfi_id;
		memcpy(capability->cap[in->cap].parents, in->parents,
			sizeof(capability->cap[in->cap].parents));
		memcpy(capability->cap[in->cap].children, in->children,
			sizeof(capability->cap[in->cap].children));
		capability->cap[in->cap].adjust = in->adjust;
		capability->cap[in->cap].set = in->set;
	} else {
		d_vpr_e("%s: invalid cap %d\n", __func__, in->cap);
	}
}
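
/*
 * One msm_vidc_inst_capability entry is allocated per (domain, codec)
 * pair advertised by the platform. Worked example (illustrative masks):
 * with ENC_CODECS = 0x3 (two codec bits) and DEC_CODECS = 0x7 (three
 * codec bits), COUNT_BITS() yields enc_codec_count = 2 and a running
 * codecs_count = 5, so five capability slots are allocated: slots 0-1
 * for the encoder codecs and slots 2-4 for the decoder codecs, each
 * tagged with the single codec bit it represents.
 */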
static int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	u8 enc_valid_codecs, dec_valid_codecs;
	u8 count_bits, enc_codec_count;
	u8 codecs_count = 0;
	int i, j, check_bit, num_platform_caps;
	struct msm_platform_inst_capability *platform_data = NULL;

	if (!core || !core->platform || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	platform_data = core->platform->data.instance_data;
	if (!platform_data) {
		d_vpr_e("%s: platform instance data is NULL\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	enc_valid_codecs = core->capabilities[ENC_CODECS].value;
	count_bits = enc_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	enc_codec_count = codecs_count;

	dec_valid_codecs = core->capabilities[DEC_CODECS].value;
	count_bits = dec_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);

	if (!core->inst_caps) {
		core->inst_caps = kcalloc(codecs_count,
			sizeof(struct msm_vidc_inst_capability), GFP_KERNEL);
		if (!core->inst_caps) {
			d_vpr_e("%s: failed to allocate instance capabilities\n",
				__func__);
			rc = -ENOMEM;
			goto exit;
		}
	} else {
		d_vpr_e("%s: capabilities memory is expected to be freed\n",
			__func__);
	}

	check_bit = 0;
	/* determine codecs for enc domain */
	for (i = 0; i < enc_codec_count; i++) {
		while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
			if (enc_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_ENCODER;
				core->inst_caps[i].codec = enc_valid_codecs &
					BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	/* reset check_bit to scan decoder codec bits from bit 0 */
	check_bit = 0;
	/* determine codecs for dec domain */
	for (; i < codecs_count; i++) {
		while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
			if (dec_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_DECODER;
				core->inst_caps[i].codec = dec_valid_codecs &
					BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	num_platform_caps = core->platform->data.instance_data_size;
	d_vpr_h("%s: num caps %d\n", __func__, num_platform_caps);
	/* loop over each platform capability */
	for (i = 0; i < num_platform_caps; i++) {
		/* select every matching (domain, codec) entry and update it */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_data[i].domain &
					core->inst_caps[j].domain) &&
				(platform_data[i].codec &
					core->inst_caps[j].codec)) {
				update_inst_capability(&platform_data[i],
					&core->inst_caps[j]);
			}
		}
	}
exit:
	return rc;
}
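
/*
 * One-time core bring-up: populate the core and per-instance capability
 * tables from platform data, then initialize the firmware through the
 * HFI layer. Safe to call repeatedly; a core already in
 * MSM_VIDC_CORE_INIT returns success immediately, and a core in error
 * state is rejected.
 */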
int msm_vidc_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	d_vpr_h("%s()\n", __func__);
	if (!core || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&core->lock);
	if (core->state == MSM_VIDC_CORE_ERROR) {
		d_vpr_e("%s: core invalid state\n", __func__);
		rc = -EINVAL;
		goto unlock;
	}
	if (core->state == MSM_VIDC_CORE_INIT) {
		rc = 0;
		goto unlock;
	}

	rc = msm_vidc_init_core_caps(core);
	if (rc)
		goto unlock;
	rc = msm_vidc_init_instance_caps(core);
	if (rc)
		goto unlock;

	rc = venus_hfi_core_init(core);
	if (rc) {
		d_vpr_e("%s: core init failed\n", __func__);
		core->state = MSM_VIDC_CORE_DEINIT;
		goto unlock;
	}

	core->state = MSM_VIDC_CORE_INIT;
	core->smmu_fault_handled = false;
	core->ssr.trigger = false;
unlock:
	mutex_unlock(&core->lock);
	return rc;
}

int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long iova, int flags, void *data)
{
	return -EINVAL;
}

int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
	enum msm_vidc_ssr_trigger_type type)
{
	return 0;
}

void msm_vidc_ssr_handler(struct work_struct *work)
{
}

void msm_vidc_pm_work_handler(struct work_struct *work)
{
}

void msm_vidc_fw_unload_handler(struct work_struct *work)
{
}

void msm_vidc_batch_handler(struct work_struct *work)
{
}