msm_vidc_driver.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2020, The Linux Foundation. All rights reserved.
 */

#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <media/v4l2_vidc_extensions.h>
#include <media/msm_media_info.h>

#include "msm_vidc_driver.h"
#include "msm_vidc_platform.h"
#include "msm_vidc_internal.h"
#include "msm_vidc_memory.h"
#include "msm_vidc_debug.h"
#include "venus_hfi.h"
#include "msm_vidc.h"

#define COUNT_BITS(a, out) {		\
	while ((a) >= 1) {		\
		(out) += (a) & (1);	\
		(a) >>= (1);		\
	}				\
}
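
/*
 * COUNT_BITS() accumulates the number of set bits of 'a' into 'out' and
 * clobbers 'a' in the process, so callers pass a scratch copy. Illustrative
 * use (variable names hypothetical):
 *
 *	u8 mask = 0x5, bits = 0;
 *	COUNT_BITS(mask, bits);	// bits += 2, mask ends up 0
 */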
void print_vidc_buffer(u32 tag, const char *str, struct msm_vidc_inst *inst,
		struct msm_vidc_buffer *vbuf)
{
	if (!(tag & msm_vidc_debug) || !inst || !vbuf)
		return;

	if (vbuf->type == MSM_VIDC_BUF_INPUT || vbuf->type == MSM_VIDC_BUF_OUTPUT) {
		dprintk(tag, inst->sid,
			"%s: %s: idx %2d fd %3d off %d daddr %#llx size %d filled %d flags %#x ts %lld attr %#x\n",
			str, vbuf->type == MSM_VIDC_BUF_INPUT ? "INPUT" : "OUTPUT",
			vbuf->index, vbuf->fd, vbuf->data_offset,
			vbuf->device_addr, vbuf->buffer_size, vbuf->data_size,
			vbuf->flags, vbuf->timestamp, vbuf->attr);
	} else if (vbuf->type == MSM_VIDC_BUF_INPUT_META ||
		   vbuf->type == MSM_VIDC_BUF_OUTPUT_META) {
		dprintk(tag, inst->sid,
			"%s: %s: idx %2d fd %3d off %d daddr %#llx size %d filled %d flags %#x ts %lld attr %#x\n",
			str, vbuf->type == MSM_VIDC_BUF_INPUT_META ? "INPUT_META" : "OUTPUT_META",
			vbuf->index, vbuf->fd, vbuf->data_offset,
			vbuf->device_addr, vbuf->buffer_size, vbuf->data_size,
			vbuf->flags, vbuf->timestamp, vbuf->attr);
	}
}

void print_vb2_buffer(const char *str, struct msm_vidc_inst *inst,
		struct vb2_buffer *vb2)
{
	if (!inst || !vb2)
		return;

	s_vpr_e(inst->sid,
		"%s: %s: idx %2d fd %d off %d size %d filled %d\n",
		str, vb2->type == INPUT_MPLANE ? "INPUT" : "OUTPUT",
		vb2->index, vb2->planes[0].m.fd,
		vb2->planes[0].data_offset, vb2->planes[0].length,
		vb2->planes[0].bytesused);
}
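
/*
 * The helpers below translate between V4L2 identifiers (buffer types, codecs,
 * pixel formats) and the driver-internal enums, in both directions. Unknown
 * values log an error and return 0, except v4l2_colorformat_to_media(), which
 * falls back to COLOR_FMT_NV12.
 */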
enum msm_vidc_buffer_type v4l2_type_to_driver(u32 type, const char *func)
{
	enum msm_vidc_buffer_type buffer_type = 0;

	switch (type) {
	case INPUT_MPLANE:
		buffer_type = MSM_VIDC_BUF_INPUT;
		break;
	case OUTPUT_MPLANE:
		buffer_type = MSM_VIDC_BUF_OUTPUT;
		break;
	case INPUT_META_PLANE:
		buffer_type = MSM_VIDC_BUF_INPUT_META;
		break;
	case OUTPUT_META_PLANE:
		buffer_type = MSM_VIDC_BUF_OUTPUT_META;
		break;
	default:
		d_vpr_e("%s: invalid v4l2 buffer type %#x\n", func, type);
		break;
	}
	return buffer_type;
}

u32 v4l2_type_from_driver(enum msm_vidc_buffer_type buffer_type,
		const char *func)
{
	u32 type = 0;

	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		type = INPUT_MPLANE;
		break;
	case MSM_VIDC_BUF_OUTPUT:
		type = OUTPUT_MPLANE;
		break;
	case MSM_VIDC_BUF_INPUT_META:
		type = INPUT_META_PLANE;
		break;
	case MSM_VIDC_BUF_OUTPUT_META:
		type = OUTPUT_META_PLANE;
		break;
	default:
		d_vpr_e("%s: invalid driver buffer type %d\n",
			func, buffer_type);
		break;
	}
	return type;
}

enum msm_vidc_codec_type v4l2_codec_to_driver(u32 v4l2_codec, const char *func)
{
	enum msm_vidc_codec_type codec = 0;

	switch (v4l2_codec) {
	case V4L2_PIX_FMT_H264:
		codec = MSM_VIDC_H264;
		break;
	case V4L2_PIX_FMT_HEVC:
		codec = MSM_VIDC_HEVC;
		break;
	case V4L2_PIX_FMT_VP9:
		codec = MSM_VIDC_VP9;
		break;
	case V4L2_PIX_FMT_MPEG2:
		codec = MSM_VIDC_MPEG2;
		break;
	default:
		d_vpr_e("%s: invalid v4l2 codec %#x\n", func, v4l2_codec);
		break;
	}
	return codec;
}

u32 v4l2_codec_from_driver(enum msm_vidc_codec_type codec, const char *func)
{
	u32 v4l2_codec = 0;

	switch (codec) {
	case MSM_VIDC_H264:
		v4l2_codec = V4L2_PIX_FMT_H264;
		break;
	case MSM_VIDC_HEVC:
		v4l2_codec = V4L2_PIX_FMT_HEVC;
		break;
	case MSM_VIDC_VP9:
		v4l2_codec = V4L2_PIX_FMT_VP9;
		break;
	case MSM_VIDC_MPEG2:
		v4l2_codec = V4L2_PIX_FMT_MPEG2;
		break;
	default:
		d_vpr_e("%s: invalid driver codec %#x\n", func, codec);
		break;
	}
	return v4l2_codec;
}

enum msm_vidc_colorformat_type v4l2_colorformat_to_driver(u32 v4l2_colorformat,
		const char *func)
{
	enum msm_vidc_colorformat_type colorformat = 0;

	switch (v4l2_colorformat) {
	case V4L2_PIX_FMT_NV12:
		colorformat = MSM_VIDC_FMT_NV12;
		break;
	case V4L2_PIX_FMT_NV21:
		colorformat = MSM_VIDC_FMT_NV21;
		break;
	case V4L2_PIX_FMT_VIDC_NV12C:
		colorformat = MSM_VIDC_FMT_NV12_UBWC;
		break;
	case V4L2_PIX_FMT_VIDC_TP10C:
		colorformat = MSM_VIDC_FMT_NV12_TP10_UBWC;
		break;
	case V4L2_PIX_FMT_VIDC_ARGB32C:
		colorformat = MSM_VIDC_FMT_RGBA8888_UBWC;
		break;
	case V4L2_PIX_FMT_VIDC_P010:
		colorformat = MSM_VIDC_FMT_NV12_P010;
		break;
	default:
		d_vpr_e("%s: invalid v4l2 color format %#x\n",
			func, v4l2_colorformat);
		break;
	}
	return colorformat;
}

u32 v4l2_colorformat_from_driver(enum msm_vidc_colorformat_type colorformat,
		const char *func)
{
	u32 v4l2_colorformat = 0;

	switch (colorformat) {
	case MSM_VIDC_FMT_NV12:
		v4l2_colorformat = V4L2_PIX_FMT_NV12;
		break;
	case MSM_VIDC_FMT_NV21:
		v4l2_colorformat = V4L2_PIX_FMT_NV21;
		break;
	case MSM_VIDC_FMT_NV12_UBWC:
		v4l2_colorformat = V4L2_PIX_FMT_VIDC_NV12C;
		break;
	case MSM_VIDC_FMT_NV12_TP10_UBWC:
		v4l2_colorformat = V4L2_PIX_FMT_VIDC_TP10C;
		break;
	case MSM_VIDC_FMT_RGBA8888_UBWC:
		v4l2_colorformat = V4L2_PIX_FMT_VIDC_ARGB32C;
		break;
	case MSM_VIDC_FMT_NV12_P010:
		v4l2_colorformat = V4L2_PIX_FMT_VIDC_P010;
		break;
	default:
		d_vpr_e("%s: invalid driver color format %#x\n",
			func, colorformat);
		break;
	}
	return v4l2_colorformat;
}

u32 v4l2_colorformat_to_media(u32 v4l2_fmt, const char *func)
{
	switch (v4l2_fmt) {
	case V4L2_PIX_FMT_NV12:
		return COLOR_FMT_NV12;
	case V4L2_PIX_FMT_NV21:
		return COLOR_FMT_NV21;
	case V4L2_PIX_FMT_VIDC_P010:
		return COLOR_FMT_P010;
	case V4L2_PIX_FMT_VIDC_NV12C:
		return COLOR_FMT_NV12_UBWC;
	case V4L2_PIX_FMT_VIDC_TP10C:
		return COLOR_FMT_NV12_BPP10_UBWC;
	case V4L2_PIX_FMT_VIDC_ARGB32C:
		return COLOR_FMT_RGBA8888_UBWC;
	default:
		d_vpr_e("%s: invalid v4l2 color fmt: %#x, set default (NV12)",
			func, v4l2_fmt);
		return COLOR_FMT_NV12;
	}
}

int v4l2_type_to_driver_port(struct msm_vidc_inst *inst, u32 type,
		const char *func)
{
	int port;

	if (type == INPUT_MPLANE) {
		port = INPUT_PORT;
	} else if (type == INPUT_META_PLANE) {
		port = INPUT_META_PORT;
	} else if (type == OUTPUT_MPLANE) {
		port = OUTPUT_PORT;
	} else if (type == OUTPUT_META_PLANE) {
		port = OUTPUT_META_PORT;
	} else {
		s_vpr_e(inst->sid, "%s: port not found for v4l2 type %d\n",
			func, type);
		port = -EINVAL;
	}
	return port;
}
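
/*
 * For secure sessions, select the protected region a buffer must be mapped
 * into: bitstream vs. pixel flips between encode and decode for INPUT/OUTPUT,
 * metadata stays non-secure, and internal buffers land in the bitstream,
 * pixel or non-pixel region depending on their type.
 */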
u32 msm_vidc_get_buffer_region(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type, const char *func)
{
	u32 region = MSM_VIDC_NON_SECURE;

	if (!is_secure_session(inst))
		return region;

	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_PIXEL;
		else
			region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_OUTPUT:
		if (is_encode_session(inst))
			region = MSM_VIDC_SECURE_BITSTREAM;
		else
			region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_INPUT_META:
	case MSM_VIDC_BUF_OUTPUT_META:
		region = MSM_VIDC_NON_SECURE;
		break;
	case MSM_VIDC_BUF_BIN:
		region = MSM_VIDC_SECURE_BITSTREAM;
		break;
	case MSM_VIDC_BUF_COMV:
	case MSM_VIDC_BUF_NON_COMV:
	case MSM_VIDC_BUF_LINE:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	case MSM_VIDC_BUF_DPB:
		region = MSM_VIDC_SECURE_PIXEL;
		break;
	case MSM_VIDC_BUF_PERSIST:
		// TODO: Need to revisit for ARP
	case MSM_VIDC_BUF_ARP:
		region = MSM_VIDC_SECURE_NONPIXEL;
		break;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
	}
	return region;
}
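
/*
 * Per-type accessors for the bookkeeping lists kept in the instance: queued
 * buffers, dma-buf mappings and driver-owned allocations (an allocation list
 * exists only for internal buffer types).
 */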
struct msm_vidc_buffers *msm_vidc_get_buffers(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->buffers.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->buffers.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->buffers.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->buffers.output_meta;
	case MSM_VIDC_BUF_BIN:
		return &inst->buffers.bin;
	case MSM_VIDC_BUF_ARP:
		return &inst->buffers.arp;
	case MSM_VIDC_BUF_COMV:
		return &inst->buffers.comv;
	case MSM_VIDC_BUF_NON_COMV:
		return &inst->buffers.non_comv;
	case MSM_VIDC_BUF_LINE:
		return &inst->buffers.line;
	case MSM_VIDC_BUF_DPB:
		return &inst->buffers.dpb;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->buffers.persist;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}

struct msm_vidc_mappings *msm_vidc_get_mappings(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_INPUT:
		return &inst->mappings.input;
	case MSM_VIDC_BUF_INPUT_META:
		return &inst->mappings.input_meta;
	case MSM_VIDC_BUF_OUTPUT:
		return &inst->mappings.output;
	case MSM_VIDC_BUF_OUTPUT_META:
		return &inst->mappings.output_meta;
	case MSM_VIDC_BUF_BIN:
		return &inst->mappings.bin;
	case MSM_VIDC_BUF_ARP:
		return &inst->mappings.arp;
	case MSM_VIDC_BUF_COMV:
		return &inst->mappings.comv;
	case MSM_VIDC_BUF_NON_COMV:
		return &inst->mappings.non_comv;
	case MSM_VIDC_BUF_LINE:
		return &inst->mappings.line;
	case MSM_VIDC_BUF_DPB:
		return &inst->mappings.dpb;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->mappings.persist;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}

struct msm_vidc_allocations *msm_vidc_get_allocations(
	struct msm_vidc_inst *inst, enum msm_vidc_buffer_type buffer_type,
	const char *func)
{
	switch (buffer_type) {
	case MSM_VIDC_BUF_BIN:
		return &inst->allocations.bin;
	case MSM_VIDC_BUF_ARP:
		return &inst->allocations.arp;
	case MSM_VIDC_BUF_COMV:
		return &inst->allocations.comv;
	case MSM_VIDC_BUF_NON_COMV:
		return &inst->allocations.non_comv;
	case MSM_VIDC_BUF_LINE:
		return &inst->allocations.line;
	case MSM_VIDC_BUF_DPB:
		return &inst->allocations.dpb;
	case MSM_VIDC_BUF_PERSIST:
		return &inst->allocations.persist;
	default:
		s_vpr_e(inst->sid, "%s: invalid driver buffer type %d\n",
			func, buffer_type);
		return NULL;
	}
}

int msm_vidc_change_inst_state(struct msm_vidc_inst *inst,
	enum msm_vidc_inst_state request_state, const char *func)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!request_state) {
		s_vpr_e(inst->sid, "%s: invalid request state\n", func);
		return -EINVAL;
	}

	if (inst->state == MSM_VIDC_ERROR) {
		s_vpr_h(inst->sid,
			"%s: inst is in bad state, can not change state to %d\n",
			func, request_state);
		return 0;
	}

	s_vpr_h(inst->sid, "%s: state changed from %d to %d\n",
		func, inst->state, request_state);
	inst->state = request_state;
	return 0;
}

int msm_vidc_get_control(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
{
	int rc = 0;

	if (!inst || !ctrl) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	switch (ctrl->id) {
	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
		ctrl->val = inst->buffers.output.min_count +
			inst->buffers.output.extra_count;
		s_vpr_h(inst->sid, "g_min: output buffers %d\n", ctrl->val);
		break;
	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
		ctrl->val = inst->buffers.input.min_count +
			inst->buffers.input.extra_count;
		s_vpr_h(inst->sid, "g_min: input buffers %d\n", ctrl->val);
		break;
	default:
		break;
	}
	return rc;
}
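
/*
 * Copy the plane-0 bookkeeping of a vb2_buffer (fd, offset, sizes, timestamp)
 * into the driver buffer; only plane 0 is used by this driver.
 */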
static int vb2_buffer_to_driver(struct vb2_buffer *vb2,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;

	if (!vb2 || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	buf->valid = true;
	buf->type = v4l2_type_to_driver(vb2->type, __func__);
	if (!buf->type)
		return -EINVAL;
	buf->index = vb2->index;
	buf->fd = vb2->planes[0].m.fd;
	buf->data_offset = vb2->planes[0].data_offset;
	buf->data_size = vb2->planes[0].bytesused;
	buf->buffer_size = vb2->planes[0].length;
	buf->timestamp = vb2->timestamp;

	return rc;
}

int msm_vidc_unmap_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;

	/* sanity check to see if it was not removed */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "no buf in mappings", inst, buf);
		return -EINVAL;
	}

	rc = msm_vidc_memory_unmap(inst->core, map);
	if (rc) {
		print_vidc_buffer(VIDC_ERR, "unmap failed", inst, buf);
		return -EINVAL;
	}

	/* finally delete if refcount is zero */
	if (!map->refcount) {
		list_del(&map->list);
		kfree(map);
	}

	return 0;
}

int msm_vidc_put_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* do not unmap / delete read only buffer */
	if (buf->attr & MSM_VIDC_ATTR_READ_ONLY)
		return 0;

	rc = msm_vidc_unmap_driver_buf(inst, buf);
	if (rc)
		return rc;

	msm_vidc_memory_put_dmabuf(buf->dmabuf);

	/* delete the buffer from buffers->list */
	list_del(&buf->list);
	kfree(buf);

	return 0;
}
int msm_vidc_map_driver_buf(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int rc = 0;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_map *map = NULL;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mappings = msm_vidc_get_mappings(inst, buf->type, __func__);
	if (!mappings)
		return -EINVAL;

	/* check if it is an existing one */
	list_for_each_entry(map, &mappings->list, list) {
		if (map->dmabuf == buf->dmabuf) {
			found = true;
			break;
		}
	}
	if (found) {
		/* skip mapping for RO buffer */
		if (!(buf->attr & MSM_VIDC_ATTR_READ_ONLY)) {
			rc = msm_vidc_memory_map(inst->core, map);
			if (rc)
				return -ENOMEM;
			buf->device_addr = map->device_addr;
		}
		return 0;
	}

	map = kzalloc(sizeof(struct msm_vidc_map), GFP_KERNEL);
	if (!map) {
		s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&map->list);
	map->type = buf->type;
	map->dmabuf = buf->dmabuf;
	map->region = msm_vidc_get_buffer_region(inst, buf->type, __func__);
	rc = msm_vidc_memory_map(inst->core, map);
	if (rc) {
		kfree(map);
		return -ENOMEM;
	}
	buf->device_addr = map->device_addr;
	list_add_tail(&map->list, &mappings->list);

	return 0;
}
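
/*
 * Find the driver buffer backing a vb2_buffer, or allocate a new one. A
 * repeat of a buffer that is still listed is legal only for YUV: decoder
 * output held read-only by firmware, or encoder input, which is deliberately
 * treated as a brand-new buffer.
 */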
struct msm_vidc_buffer *msm_vidc_get_driver_buf(struct msm_vidc_inst *inst,
	struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf = NULL;
	struct msm_vidc_buffers *buffers;
	struct dma_buf *dmabuf;
	enum msm_vidc_buffer_type buf_type;
	bool found = false;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	buf_type = v4l2_type_to_driver(vb2->type, __func__);
	if (!buf_type)
		return NULL;

	buffers = msm_vidc_get_buffers(inst, buf_type, __func__);
	if (!buffers)
		return NULL;

	dmabuf = msm_vidc_memory_get_dmabuf(vb2->planes[0].m.fd);
	if (!dmabuf)
		return NULL;

	/* check if it is an existing buffer */
	list_for_each_entry(buf, &buffers->list, list) {
		if (buf->dmabuf == dmabuf &&
		    buf->data_offset == vb2->planes[0].data_offset) {
			found = true;
			break;
		}
	}
	if (found) {
		/* only YUV buffers are allowed to repeat */
		if ((is_decode_session(inst) && vb2->type != OUTPUT_MPLANE) ||
		    (is_encode_session(inst) && vb2->type != INPUT_MPLANE)) {
			print_vidc_buffer(VIDC_ERR,
				"existing buffer", inst, buf);
			goto error;
		}
		/* for decoder, YUV with RO flag are allowed to repeat */
		if (is_decode_session(inst) &&
		    !(buf->attr & MSM_VIDC_ATTR_READ_ONLY)) {
			print_vidc_buffer(VIDC_ERR,
				"existing buffer without RO flag", inst, buf);
			goto error;
		}
		/* for encoder, treat the repeated buffer as new buffer */
		if (is_encode_session(inst) && vb2->type == INPUT_MPLANE)
			found = false;
	}
	/* allocate a fresh entry for new buffers and encoder repeats */
	if (!found) {
		buf = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
		if (!buf) {
			s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
			goto error;
		}
		INIT_LIST_HEAD(&buf->list);
		buf->dmabuf = dmabuf;
	}

	rc = vb2_buffer_to_driver(vb2, buf);
	if (rc)
		goto error;

	if (!found)
		list_add_tail(&buf->list, &buffers->list);

	rc = msm_vidc_map_driver_buf(inst, buf);
	if (rc)
		goto error;

	return buf;

error:
	msm_vidc_memory_put_dmabuf(dmabuf);
	if (!found)
		kfree(buf);
	return NULL;
}

struct msm_vidc_buffer *get_meta_buffer(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	struct msm_vidc_buffer *mbuf;
	struct msm_vidc_buffers *buffers;
	bool found = false;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	if (buf->type == MSM_VIDC_BUF_INPUT) {
		buffers = &inst->buffers.input_meta;
	} else if (buf->type == MSM_VIDC_BUF_OUTPUT) {
		buffers = &inst->buffers.output_meta;
	} else {
		s_vpr_e(inst->sid, "%s: invalid buffer type %d\n",
			__func__, buf->type);
		return NULL;
	}
	list_for_each_entry(mbuf, &buffers->list, list) {
		if (!mbuf->valid)
			continue;
		if (mbuf->index == buf->index) {
			found = true;
			break;
		}
	}
	if (!found)
		return NULL;

	return mbuf;
}
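
/*
 * Queue a buffer to firmware. Meta buffers are only marked DEFERRED here and
 * ride along when the paired INPUT/OUTPUT buffer is queued; queuing is
 * likewise deferred until streamon has completed on the port.
 */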
int msm_vidc_queue_buffer(struct msm_vidc_inst *inst, struct vb2_buffer *vb2)
{
	int rc = 0;
	struct msm_vidc_buffer *buf;
	struct msm_vidc_buffer *meta;
	int port;

	if (!inst || !vb2) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	buf = msm_vidc_get_driver_buf(inst, vb2);
	if (!buf)
		return -EINVAL;

	/* meta buffer will be queued along with actual buffer */
	if (buf->type == MSM_VIDC_BUF_INPUT_META ||
	    buf->type == MSM_VIDC_BUF_OUTPUT_META) {
		buf->attr |= MSM_VIDC_ATTR_DEFERRED;
		s_vpr_l(inst->sid, "metabuf fd %3d daddr %#x deferred\n",
			buf->fd, buf->device_addr);
		return 0;
	}

	/* skip queuing if streamon not completed */
	port = v4l2_type_to_driver_port(inst, vb2->type, __func__);
	if (port < 0)
		return -EINVAL;
	if (!inst->vb2q[port].streaming) {
		buf->attr |= MSM_VIDC_ATTR_DEFERRED;
		print_vidc_buffer(VIDC_HIGH, "qbuf deferred", inst, buf);
		return 0;
	}

	print_vidc_buffer(VIDC_HIGH, "qbuf", inst, buf);
	meta = get_meta_buffer(inst, buf);
	rc = venus_hfi_queue_buffer(inst, buf, meta);
	if (rc)
		return rc;

	buf->attr &= ~MSM_VIDC_ATTR_DEFERRED;
	buf->attr |= MSM_VIDC_ATTR_QUEUED;
	if (meta) {
		meta->attr &= ~MSM_VIDC_ATTR_DEFERRED;
		meta->attr |= MSM_VIDC_ATTR_QUEUED;
	}

	return rc;
}
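
/*
 * Internal (firmware-owned) buffers keep three parallel records per dma-buf:
 * a buffer entry, an allocation and a mapping. Destroy walks all three lists
 * and frees whatever matches the dma-buf; create builds the triple and fills
 * in the device address.
 */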
int msm_vidc_destroy_internal_buffer(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buffer)
{
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_allocations *allocations;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_alloc *alloc, *alloc_dummy;
	struct msm_vidc_map *map, *map_dummy;
	struct msm_vidc_buffer *buf, *dummy;

	if (!inst || !inst->core || !buffer) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer->type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer->type);
		return 0;
	}

	s_vpr_h(inst->sid,
		"%s: destroy buffer_type %#x, size %d device_addr %#x\n",
		__func__, buffer->type, buffer->buffer_size,
		buffer->device_addr);

	buffers = msm_vidc_get_buffers(inst, buffer->type, __func__);
	if (!buffers)
		return -EINVAL;
	allocations = msm_vidc_get_allocations(inst, buffer->type, __func__);
	if (!allocations)
		return -EINVAL;
	mappings = msm_vidc_get_mappings(inst, buffer->type, __func__);
	if (!mappings)
		return -EINVAL;

	list_for_each_entry_safe(map, map_dummy, &mappings->list, list) {
		if (map->dmabuf == buffer->dmabuf) {
			msm_vidc_memory_unmap(inst->core, map);
			list_del(&map->list);
			kfree(map);
		}
	}

	list_for_each_entry_safe(alloc, alloc_dummy, &allocations->list, list) {
		if (alloc->dmabuf == buffer->dmabuf) {
			msm_vidc_memory_free(inst->core, alloc);
			list_del(&alloc->list);
			kfree(alloc);
		}
	}

	list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
		if (buf->dmabuf == buffer->dmabuf) {
			list_del(&buf->list);
			kfree(buf);
		}
	}

	return 0;
}
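
/*
 * Create one internal buffer of buffers->size bytes; the size and counts are
 * expected to have been decided before this is called (a zero size fails
 * with -EINVAL).
 */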
int msm_vidc_create_internal_buffer(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type, u32 index)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_allocations *allocations;
	struct msm_vidc_mappings *mappings;
	struct msm_vidc_buffer *buffer;
	struct msm_vidc_alloc *alloc;
	struct msm_vidc_map *map;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}

	buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
	if (!buffers)
		return -EINVAL;
	allocations = msm_vidc_get_allocations(inst, buffer_type, __func__);
	if (!allocations)
		return -EINVAL;
	mappings = msm_vidc_get_mappings(inst, buffer_type, __func__);
	if (!mappings)
		return -EINVAL;

	if (!buffers->size) {
		s_vpr_e(inst->sid, "%s: invalid buffer %#x\n",
			__func__, buffer_type);
		return -EINVAL;
	}
	buffer = kzalloc(sizeof(struct msm_vidc_buffer), GFP_KERNEL);
	if (!buffer) {
		s_vpr_e(inst->sid, "%s: buf alloc failed\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&buffer->list);
	buffer->valid = true;
	buffer->type = buffer_type;
	buffer->index = index;
	buffer->buffer_size = buffers->size;
	list_add_tail(&buffer->list, &buffers->list);

	alloc = kzalloc(sizeof(struct msm_vidc_alloc), GFP_KERNEL);
	if (!alloc) {
		s_vpr_e(inst->sid, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&alloc->list);
	alloc->type = buffer_type;
	alloc->region = msm_vidc_get_buffer_region(inst,
		buffer_type, __func__);
	alloc->size = buffer->buffer_size;
	rc = msm_vidc_memory_alloc(inst->core, alloc);
	if (rc)
		return -ENOMEM;
	list_add_tail(&alloc->list, &allocations->list);

	map = kzalloc(sizeof(struct msm_vidc_map), GFP_KERNEL);
	if (!map) {
		s_vpr_e(inst->sid, "%s: map alloc failed\n", __func__);
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&map->list);
	map->type = alloc->type;
	map->region = alloc->region;
	map->dmabuf = alloc->dmabuf;
	rc = msm_vidc_memory_map(inst->core, map);
	if (rc)
		return -ENOMEM;
	list_add_tail(&map->list, &mappings->list);

	buffer->dmabuf = alloc->dmabuf;
	buffer->device_addr = map->device_addr;
	s_vpr_h(inst->sid,
		"%s: created buffer_type %#x, size %d device_addr %#x\n",
		__func__, buffer_type, buffers->size,
		buffer->device_addr);

	return 0;
}

int msm_vidc_create_internal_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	int i;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
	if (!buffers)
		return -EINVAL;

	for (i = 0; i < buffers->min_count; i++) {
		rc = msm_vidc_create_internal_buffer(inst, buffer_type, i);
		if (rc)
			return rc;
	}

	return rc;
}

int msm_vidc_queue_internal_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buffer, *dummy;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}
	buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
	if (!buffers)
		return -EINVAL;

	list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
		/* do not queue pending release buffers */
		if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
			continue;
		/* do not queue already queued buffers */
		if (buffer->attr & MSM_VIDC_ATTR_QUEUED)
			continue;
		rc = venus_hfi_queue_buffer(inst, buffer, NULL);
		if (rc)
			return rc;
		/* mark queued */
		buffer->attr |= MSM_VIDC_ATTR_QUEUED;

		s_vpr_h(inst->sid, "%s: queued buffer_type %#x, size %d\n",
			__func__, buffer_type, buffers->size);
	}
	return 0;
}

int msm_vidc_release_internal_buffers(struct msm_vidc_inst *inst,
	enum msm_vidc_buffer_type buffer_type)
{
	int rc = 0;
	struct msm_vidc_buffers *buffers;
	struct msm_vidc_buffer *buffer, *dummy;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	if (!is_internal_buffer(buffer_type)) {
		s_vpr_e(inst->sid, "%s: buffer type %#x is not internal\n",
			__func__, buffer_type);
		return 0;
	}
	buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
	if (!buffers)
		return -EINVAL;

	list_for_each_entry_safe(buffer, dummy, &buffers->list, list) {
		/* do not release already pending release buffers */
		if (buffer->attr & MSM_VIDC_ATTR_PENDING_RELEASE)
			continue;
		/* release only queued buffers */
		if (!(buffer->attr & MSM_VIDC_ATTR_QUEUED))
			continue;
		rc = venus_hfi_release_buffer(inst, buffer);
		if (rc)
			return rc;
		/* mark pending release */
		buffer->attr |= MSM_VIDC_ATTR_PENDING_RELEASE;

		s_vpr_h(inst->sid, "%s: released buffer_type %#x, size %d\n",
			__func__, buffer_type, buffers->size);
	}
	return 0;
}
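
/*
 * Complete a firmware-returned buffer back to vb2: locate the matching
 * active vb2_buffer on the still-streaming queue by index, propagate flags,
 * timestamp and filled length, then signal DONE.
 */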
int msm_vidc_vb2_buffer_done(struct msm_vidc_inst *inst,
	struct msm_vidc_buffer *buf)
{
	int type, port;
	struct vb2_queue *q;
	struct vb2_buffer *vb2;
	struct vb2_v4l2_buffer *vbuf;
	bool found;

	if (!inst || !buf) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	type = v4l2_type_from_driver(buf->type, __func__);
	if (!type)
		return -EINVAL;
	port = v4l2_type_to_driver_port(inst, type, __func__);
	if (port < 0)
		return -EINVAL;

	q = &inst->vb2q[port];
	if (!q->streaming) {
		s_vpr_e(inst->sid, "%s: port %d is not streaming\n",
			__func__, port);
		return -EINVAL;
	}

	found = false;
	list_for_each_entry(vb2, &q->queued_list, queued_entry) {
		if (vb2->state != VB2_BUF_STATE_ACTIVE)
			continue;
		if (vb2->index == buf->index) {
			found = true;
			break;
		}
	}
	if (!found) {
		print_vidc_buffer(VIDC_ERR, "vb2 not found for", inst, buf);
		return -EINVAL;
	}
	vbuf = to_vb2_v4l2_buffer(vb2);
	vbuf->flags = buf->flags;
	vb2->timestamp = buf->timestamp;
	vb2->planes[0].bytesused = buf->data_size;
	vb2_buffer_done(vb2, VB2_BUF_STATE_DONE);

	return 0;
}

int msm_vidc_setup_event_queue(struct msm_vidc_inst *inst)
{
	int rc = 0;
	int index;
	struct msm_vidc_core *core;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	// TODO: check decode is index = 0 and encode is index 1
	if (is_decode_session(inst))
		index = 0;
	else if (is_encode_session(inst))
		index = 1;
	else
		return -EINVAL;

	v4l2_fh_init(&inst->event_handler, &core->vdev[index].vdev);
	v4l2_fh_add(&inst->event_handler);

	return rc;
}

static int vb2q_init(struct msm_vidc_inst *inst,
	struct vb2_queue *q, enum v4l2_buf_type type)
{
	struct msm_vidc_core *core;

	if (!inst || !q || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	q->type = type;
	q->io_modes = VB2_DMABUF;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	q->ops = core->vb2_ops;
	q->mem_ops = core->vb2_mem_ops;
	q->drv_priv = inst;
	q->allow_zero_bytesused = 1;
	q->copy_timestamp = 1;
	return vb2_queue_init(q);
}

int msm_vidc_vb2_queue_init(struct msm_vidc_inst *inst)
{
	int rc = 0;

	d_vpr_h("%s()\n", __func__);
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = vb2q_init(inst, &inst->vb2q[INPUT_PORT], INPUT_MPLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[OUTPUT_PORT], OUTPUT_MPLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[INPUT_META_PORT], INPUT_META_PLANE);
	if (rc)
		return rc;
	rc = vb2q_init(inst, &inst->vb2q[OUTPUT_META_PORT], OUTPUT_META_PLANE);
	if (rc)
		return rc;

	return rc;
}
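
/*
 * Sessions are tracked on core->instances under core->lock. The 1-based
 * session_id is derived from the instance count at add time and doubles as
 * the logging sid.
 */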
int msm_vidc_add_session(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_inst *i;
	struct msm_vidc_core *core;
	u32 count = 0;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	mutex_lock(&core->lock);
	list_for_each_entry(i, &core->instances, list)
		count++;
	if (count < 0xffffff /* TODO: MAX_SUPPORTED_INSTANCES */) {
		list_add_tail(&inst->list, &core->instances);
	} else {
		d_vpr_e("%s: total sessions %d exceeded max limit %d\n",
			__func__, count, MAX_SUPPORTED_INSTANCES);
		rc = -EINVAL;
	}
	mutex_unlock(&core->lock);

	/* assign session_id */
	inst->session_id = count + 1;
	inst->sid = inst->session_id;

	return rc;
}

int msm_vidc_remove_session(struct msm_vidc_inst *inst)
{
	struct msm_vidc_inst *i, *temp;
	struct msm_vidc_core *core;
	u32 count = 0;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	mutex_lock(&core->lock);
	list_for_each_entry_safe(i, temp, &core->instances, list) {
		if (i->session_id == inst->session_id) {
			list_del_init(&i->list);
			d_vpr_h("%s: removed session %d\n",
				__func__, i->session_id);
			inst->sid = 0;
		}
	}
	list_for_each_entry(i, &core->instances, list)
		count++;
	d_vpr_h("%s: remaining sessions %d\n", __func__, count);
	mutex_unlock(&core->lock);

	return 0;
}

int msm_vidc_session_open(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_open(inst);

	return rc;
}

int msm_vidc_session_set_codec(struct msm_vidc_inst *inst)
{
	int rc = 0;

	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_set_codec(inst);
	if (rc)
		return rc;

	return 0;
}

int msm_vidc_session_stop(struct msm_vidc_inst *inst,
	enum msm_vidc_port_type port)
{
	int rc = 0;
	struct msm_vidc_core *core;
	enum signal_session_response signal_type;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	if (port == INPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_INPUT;
	} else if (port == OUTPUT_PORT) {
		signal_type = SIGNAL_CMD_STOP_OUTPUT;
	} else {
		s_vpr_e(inst->sid, "%s: invalid port: %d\n", __func__, port);
		return -EINVAL;
	}

	rc = venus_hfi_stop(inst, port);
	if (rc)
		return rc;

	core = inst->core;
	mutex_unlock(&inst->lock);
	s_vpr_h(inst->sid, "%s: wait on port: %d for time: %d ms\n",
		__func__, port, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	rc = wait_for_completion_timeout(
			&inst->completions[signal_type],
			msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	mutex_lock(&inst->lock);
	if (!rc) {
		s_vpr_e(inst->sid, "%s: session stop timed out for port: %d\n",
			__func__, port);
		//msm_comm_kill_session(inst);
		rc = -EIO;
	} else {
		rc = 0;
		s_vpr_h(inst->sid, "%s: stop successful on port: %d\n",
			__func__, port);
	}

	return rc;
}

int msm_vidc_session_close(struct msm_vidc_inst *inst)
{
	int rc = 0;
	struct msm_vidc_core *core;

	if (!inst || !inst->core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	rc = venus_hfi_session_close(inst);
	if (rc)
		return rc;

	core = inst->core;
	s_vpr_h(inst->sid, "%s: wait on close for time: %d ms\n",
		__func__, core->capabilities[HW_RESPONSE_TIMEOUT].value);
	rc = wait_for_completion_timeout(
			&inst->completions[SIGNAL_CMD_CLOSE],
			msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		s_vpr_e(inst->sid, "%s: session close timed out\n", __func__);
		//msm_comm_kill_session(inst);
		rc = -EIO;
	} else {
		rc = 0;
		s_vpr_h(inst->sid, "%s: close successful\n", __func__);
	}

	return rc;
}

int msm_vidc_get_inst_capability(struct msm_vidc_inst *inst)
{
	int rc = 0;
	int i;
	bool found = false;
	struct msm_vidc_core *core;

	d_vpr_h("%s()\n", __func__);
	if (!inst || !inst->core || !inst->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}
	core = inst->core;

	for (i = 0; i < core->codecs_count; i++) {
		if (core->inst_caps[i].domain == inst->domain &&
		    core->inst_caps[i].codec == inst->codec) {
			s_vpr_h(inst->sid,
				"%s: copied capabilities with %#x codec, %#x domain\n",
				__func__, inst->codec, inst->domain);
			memcpy(inst->capabilities, &core->inst_caps[i],
				sizeof(struct msm_vidc_inst_capability));
			found = true;
		}
	}
	if (!found) {
		s_vpr_e(inst->sid, "%s: capabilities not found\n", __func__);
		return -EINVAL;
	}

	return rc;
}

static int msm_vidc_init_core_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	int i, num_platform_caps;
	struct msm_platform_core_capability *platform_data;

	if (!core || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	platform_data = core->platform->data.core_data;
	if (!platform_data) {
		d_vpr_e("%s: platform core data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto exit;
	}

	if (!core->capabilities) {
		core->capabilities = kcalloc(1,
			(sizeof(struct msm_vidc_core_capability) *
			CORE_CAP_MAX), GFP_KERNEL);
		if (!core->capabilities) {
			d_vpr_e("%s: failed to allocate core capabilities\n",
				__func__);
			rc = -ENOMEM;
			goto exit;
		}
	} else {
		d_vpr_e("%s: capabilities memory is expected to be freed\n",
			__func__);
	}

	num_platform_caps = core->platform->data.core_data_size;

	/* loop over platform caps */
	for (i = 0; i < num_platform_caps; i++) {
		core->capabilities[platform_data[i].type].type = platform_data[i].type;
		core->capabilities[platform_data[i].type].value = platform_data[i].value;
	}

exit:
	return rc;
}

static void update_inst_capability(struct msm_platform_inst_capability *in,
	struct msm_vidc_inst_capability *capability)
{
	if (!in || !capability) {
		d_vpr_e("%s: invalid params %pK %pK\n",
			__func__, in, capability);
		return;
	}
	if (in->cap < INST_CAP_MAX) {
		capability->cap[in->cap].cap = in->cap;
		capability->cap[in->cap].min = in->min;
		capability->cap[in->cap].max = in->max;
		capability->cap[in->cap].step_or_mask = in->step_or_mask;
		capability->cap[in->cap].value = in->value;
		capability->cap[in->cap].flags = in->flags;
		capability->cap[in->cap].v4l2_id = in->v4l2_id;
		capability->cap[in->cap].hfi_id = in->hfi_id;
		memcpy(capability->cap[in->cap].parents, in->parents,
			sizeof(capability->cap[in->cap].parents));
		memcpy(capability->cap[in->cap].children, in->children,
			sizeof(capability->cap[in->cap].children));
		capability->cap[in->cap].adjust = in->adjust;
		capability->cap[in->cap].set = in->set;
	} else {
		d_vpr_e("%s: invalid cap %d\n",
			__func__, in->cap);
	}
}
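
/*
 * Instance capability tables are built per (domain, codec) pair; the number
 * of pairs comes from counting the set bits of the ENC_CODECS/DEC_CODECS
 * masks. Sketch of the enumeration, assuming a hypothetical layout where
 * bit 0 is H264 and bit 1 is HEVC:
 *
 *	enc_valid_codecs = 0x3 -> inst_caps[0] = { ENCODER, BIT(0) }
 *	                          inst_caps[1] = { ENCODER, BIT(1) }
 *	dec_valid_codecs = 0x1 -> inst_caps[2] = { DECODER, BIT(0) }
 */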
static int msm_vidc_init_instance_caps(struct msm_vidc_core *core)
{
	int rc = 0;
	u8 enc_valid_codecs, dec_valid_codecs;
	u8 count_bits, enc_codec_count;
	u8 codecs_count = 0;
	int i, j, check_bit, num_platform_caps;
	struct msm_platform_inst_capability *platform_data = NULL;

	if (!core || !core->platform || !core->capabilities) {
		d_vpr_e("%s: invalid params\n", __func__);
		rc = -EINVAL;
		goto exit;
	}

	platform_data = core->platform->data.instance_data;
	if (!platform_data) {
		d_vpr_e("%s: platform instance data is NULL\n",
			__func__);
		rc = -EINVAL;
		goto exit;
	}

	enc_valid_codecs = core->capabilities[ENC_CODECS].value;
	count_bits = enc_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	enc_codec_count = codecs_count;

	dec_valid_codecs = core->capabilities[DEC_CODECS].value;
	count_bits = dec_valid_codecs;
	COUNT_BITS(count_bits, codecs_count);
	core->codecs_count = codecs_count;

	if (!core->inst_caps) {
		core->inst_caps = kcalloc(codecs_count,
			sizeof(struct msm_vidc_inst_capability),
			GFP_KERNEL);
		if (!core->inst_caps) {
			d_vpr_e("%s: failed to allocate core capabilities\n",
				__func__);
			rc = -ENOMEM;
			goto exit;
		}
	} else {
		d_vpr_e("%s: capabilities memory is expected to be freed\n",
			__func__);
	}

	check_bit = 0;
	/* determine codecs for enc domain */
	for (i = 0; i < enc_codec_count; i++) {
		while (check_bit < (sizeof(enc_valid_codecs) * 8)) {
			if (enc_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_ENCODER;
				core->inst_caps[i].codec = enc_valid_codecs &
					BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	/* reset check_bit to scan the decoder codec bits from bit 0 again */
	check_bit = 0;
	/* determine codecs for dec domain */
	for (; i < codecs_count; i++) {
		while (check_bit < (sizeof(dec_valid_codecs) * 8)) {
			if (dec_valid_codecs & BIT(check_bit)) {
				core->inst_caps[i].domain = MSM_VIDC_DECODER;
				core->inst_caps[i].codec = dec_valid_codecs &
					BIT(check_bit);
				check_bit++;
				break;
			}
			check_bit++;
		}
	}

	num_platform_caps = core->platform->data.instance_data_size;
	d_vpr_h("%s: num caps %d\n", __func__, num_platform_caps);
	/* loop over each platform capability */
	for (i = 0; i < num_platform_caps; i++) {
		/* select matching core codec and update it */
		for (j = 0; j < codecs_count; j++) {
			if ((platform_data[i].domain &
				core->inst_caps[j].domain) &&
				(platform_data[i].codec &
				core->inst_caps[j].codec)) {
				/* update core capability */
				update_inst_capability(&platform_data[i],
					&core->inst_caps[j]);
			}
		}
	}

exit:
	return rc;
}

int msm_vidc_core_init(struct msm_vidc_core *core)
{
	int rc = 0;

	d_vpr_h("%s()\n", __func__);
	if (!core || !core->platform) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&core->lock);
	if (core->state == MSM_VIDC_CORE_ERROR) {
		d_vpr_e("%s: core invalid state\n", __func__);
		rc = -EINVAL;
		goto unlock;
	}
	if (core->state == MSM_VIDC_CORE_INIT) {
		rc = 0;
		goto unlock;
	}

	rc = msm_vidc_init_core_caps(core);
	if (rc)
		goto unlock;
	rc = msm_vidc_init_instance_caps(core);
	if (rc)
		goto unlock;

	core->state = MSM_VIDC_CORE_INIT;
	init_completion(&core->init_done);
	core->smmu_fault_handled = false;
	core->ssr.trigger = false;

	rc = venus_hfi_core_init(core);
	if (rc) {
		d_vpr_e("%s: core init failed\n", __func__);
		core->state = MSM_VIDC_CORE_DEINIT;
		goto unlock;
	}

	mutex_unlock(&core->lock);
	/* TODO: acquire lock or not */
	d_vpr_h("%s(): waiting for sys init done, %d ms\n", __func__,
		core->capabilities[HW_RESPONSE_TIMEOUT].value);
	rc = wait_for_completion_timeout(&core->init_done, msecs_to_jiffies(
			core->capabilities[HW_RESPONSE_TIMEOUT].value));
	if (!rc) {
		d_vpr_e("%s: system init timed out\n", __func__);
		//msm_comm_kill_session(inst);
		//rc = -EIO;
	} else {
		d_vpr_h("%s: system init wait completed\n", __func__);
		rc = 0;
	}
	mutex_lock(&core->lock);

unlock:
	mutex_unlock(&core->lock);
	return rc;
}

int msm_vidc_smmu_fault_handler(struct iommu_domain *domain,
	struct device *dev, unsigned long iova, int flags, void *data)
{
	return -EINVAL;
}

int msm_vidc_trigger_ssr(struct msm_vidc_core *core,
	enum msm_vidc_ssr_trigger_type type)
{
	return 0;
}

void msm_vidc_ssr_handler(struct work_struct *work)
{
}

void msm_vidc_pm_work_handler(struct work_struct *work)
{
}

void msm_vidc_fw_unload_handler(struct work_struct *work)
{
}

void msm_vidc_batch_handler(struct work_struct *work)
{
}
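
/*
 * Instance lookup takes a kref under core->lock; every successful get_inst()
 * must be balanced with put_inst(), whose final reference drop closes the
 * instance via msm_vidc_close().
 */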
struct msm_vidc_inst *get_inst(struct msm_vidc_core *core,
		u32 session_id)
{
	struct msm_vidc_inst *inst = NULL;
	bool matches = false;

	if (!core) {
		d_vpr_e("%s: invalid params\n", __func__);
		return NULL;
	}

	mutex_lock(&core->lock);
	list_for_each_entry(inst, &core->instances, list) {
		if (inst->session_id == session_id) {
			matches = true;
			break;
		}
	}
	inst = (matches && kref_get_unless_zero(&inst->kref)) ? inst : NULL;
	mutex_unlock(&core->lock);
	return inst;
}

static void put_inst_helper(struct kref *kref)
{
	struct msm_vidc_inst *inst = container_of(kref,
		struct msm_vidc_inst, kref);

	msm_vidc_close(inst);
}

void put_inst(struct msm_vidc_inst *inst)
{
	if (!inst) {
		d_vpr_e("%s: invalid params\n", __func__);
		return;
	}
	kref_put(&inst->kref, put_inst_helper);
}

void core_lock(struct msm_vidc_core *core, const char *function)
{
	mutex_lock(&core->lock);
}

void core_unlock(struct msm_vidc_core *core, const char *function)
{
	mutex_unlock(&core->lock);
}

void inst_lock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_lock(&inst->lock);
}

void inst_unlock(struct msm_vidc_inst *inst, const char *function)
{
	mutex_unlock(&inst->lock);
}