venus_hfi_response.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/devcoredump.h>
  7. #include <linux/of_address.h>
  8. #include "hfi_packet.h"
  9. #include "venus_hfi.h"
  10. #include "venus_hfi_response.h"
  11. #include "msm_vidc_debug.h"
  12. #include "msm_vidc_driver.h"
  13. #include "msm_vdec.h"
  14. #include "msm_vidc_memory.h"
  15. #include "msm_vidc_fence.h"
  16. #include "msm_vidc_platform.h"
  17. #define in_range(range, val) (((range.begin) < (val)) && ((range.end) > (val)))
  18. extern struct msm_vidc_core *g_core;
  19. struct msm_vidc_core_hfi_range {
  20. u32 begin;
  21. u32 end;
  22. int (*handle)(struct msm_vidc_core *core, struct hfi_packet *pkt);
  23. };
  24. struct msm_vidc_inst_hfi_range {
  25. u32 begin;
  26. u32 end;
  27. int (*handle)(struct msm_vidc_inst *inst, struct hfi_packet *pkt);
  28. };
  29. struct msm_vidc_hfi_buffer_handle {
  30. enum hfi_buffer_type type;
  31. int (*handle)(struct msm_vidc_inst *inst, struct hfi_buffer *buffer);
  32. };
  33. struct msm_vidc_hfi_packet_handle {
  34. enum hfi_buffer_type type;
  35. int (*handle)(struct msm_vidc_inst *inst, struct hfi_packet *pkt);
  36. };
  37. void print_psc_properties(const char *str, struct msm_vidc_inst *inst,
  38. struct msm_vidc_subscription_params subsc_params)
  39. {
  40. if (!inst || !str)
  41. return;
  42. i_vpr_h(inst,
  43. "%s: width %d, height %d, crop offsets[0] %#x, crop offsets[1] %#x, bit depth %#x, coded frames %d "
  44. "fw min count %d, poc %d, color info %d, profile %d, level %d, tier %d, fg present %d, sb enabled %d\n",
  45. str, (subsc_params.bitstream_resolution & HFI_BITMASK_BITSTREAM_WIDTH) >> 16,
  46. (subsc_params.bitstream_resolution & HFI_BITMASK_BITSTREAM_HEIGHT),
  47. subsc_params.crop_offsets[0], subsc_params.crop_offsets[1],
  48. subsc_params.bit_depth, subsc_params.coded_frames,
  49. subsc_params.fw_min_count, subsc_params.pic_order_cnt,
  50. subsc_params.color_info, subsc_params.profile, subsc_params.level,
  51. subsc_params.tier, subsc_params.av1_film_grain_present,
  52. subsc_params.av1_super_block_enabled);
  53. }
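/*
 * Dump the firmware SFR (subsystem failure reason) string, if any was
 * written. The reported buffer size is validated against
 * core->sfr.mem_size and the string is forcibly NUL-terminated, since
 * the firmware does not guarantee termination.
 */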
  54. static void print_sfr_message(struct msm_vidc_core *core)
  55. {
  56. struct msm_vidc_sfr *vsfr = NULL;
  57. u32 vsfr_size = 0;
  58. void *p = NULL;
  59. vsfr = (struct msm_vidc_sfr *)core->sfr.align_virtual_addr;
  60. if (vsfr) {
  61. if (vsfr->bufSize != core->sfr.mem_size) {
  62. d_vpr_e("Invalid SFR buf size %d actual %d\n",
  63. vsfr->bufSize, core->sfr.mem_size);
  64. return;
  65. }
  66. vsfr_size = vsfr->bufSize - sizeof(u32);
  67. p = memchr(vsfr->rg_data, '\0', vsfr_size);
  68. /* SFR isn't guaranteed to be NULL terminated */
  69. if (p == NULL)
  70. vsfr->rg_data[vsfr_size - 1] = '\0';
  71. d_vpr_e(FMT_STRING_MSG_SFR, vsfr->rg_data);
  72. }
  73. }
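/*
 * Map an HFI port to the driver's port enum. The mapping depends on the
 * session direction: for decode sessions BITSTREAM is the input port and
 * RAW is the output port; for encode sessions it is the reverse. Returns
 * MAX_PORT for an unrecognized port or domain.
 */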
  74. u32 vidc_port_from_hfi(struct msm_vidc_inst *inst,
  75. enum hfi_packet_port_type hfi_port)
  76. {
  77. enum msm_vidc_port_type port = MAX_PORT;
  78. if (is_decode_session(inst)) {
  79. switch (hfi_port) {
  80. case HFI_PORT_BITSTREAM:
  81. port = INPUT_PORT;
  82. break;
  83. case HFI_PORT_RAW:
  84. port = OUTPUT_PORT;
  85. break;
  86. case HFI_PORT_NONE:
  87. port = PORT_NONE;
  88. break;
  89. default:
  90. i_vpr_e(inst, "%s: invalid hfi port type %d\n",
  91. __func__, hfi_port);
  92. break;
  93. }
  94. } else if (is_encode_session(inst)) {
  95. switch (hfi_port) {
  96. case HFI_PORT_RAW:
  97. port = INPUT_PORT;
  98. break;
  99. case HFI_PORT_BITSTREAM:
  100. port = OUTPUT_PORT;
  101. break;
  102. case HFI_PORT_NONE:
  103. port = PORT_NONE;
  104. break;
  105. default:
  106. i_vpr_e(inst, "%s: invalid hfi port type %d\n",
  107. __func__, hfi_port);
  108. break;
  109. }
  110. } else {
  111. i_vpr_e(inst, "%s: invalid domain %#x\n",
  112. __func__, inst->domain);
  113. }
  114. return port;
  115. }
  116. bool is_valid_hfi_port(struct msm_vidc_inst *inst, u32 port,
  117. u32 buffer_type, const char *func)
  118. {
  119. if (!inst) {
  120. i_vpr_e(inst, "%s: invalid params\n", func);
  121. return false;
  122. }
  123. if (port == HFI_PORT_NONE &&
  124. buffer_type != HFI_BUFFER_ARP &&
  125. buffer_type != HFI_BUFFER_PERSIST)
  126. goto invalid;
  127. if (port != HFI_PORT_BITSTREAM && port != HFI_PORT_RAW)
  128. goto invalid;
  129. return true;
  130. invalid:
  131. i_vpr_e(inst, "%s: invalid port %#x buffer_type %u\n",
  132. func, port, buffer_type);
  133. return false;
  134. }
  135. bool is_valid_hfi_buffer_type(struct msm_vidc_inst *inst,
  136. u32 buffer_type, const char *func)
  137. {
  138. if (!inst) {
  139. i_vpr_e(inst, "%s: invalid params\n", func);
  140. return false;
  141. }
  142. if (buffer_type != HFI_BUFFER_BITSTREAM &&
  143. buffer_type != HFI_BUFFER_RAW &&
  144. buffer_type != HFI_BUFFER_METADATA &&
  145. buffer_type != HFI_BUFFER_BIN &&
  146. buffer_type != HFI_BUFFER_ARP &&
  147. buffer_type != HFI_BUFFER_COMV &&
  148. buffer_type != HFI_BUFFER_NON_COMV &&
  149. buffer_type != HFI_BUFFER_LINE &&
  150. buffer_type != HFI_BUFFER_DPB &&
  151. buffer_type != HFI_BUFFER_PERSIST &&
  152. buffer_type != HFI_BUFFER_VPSS &&
  153. buffer_type != HFI_BUFFER_PARTIAL_DATA) {
  154. i_vpr_e(inst, "%s: invalid buffer type %#x\n",
  155. func, buffer_type);
  156. return false;
  157. }
  158. return true;
  159. }
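/*
 * Bounds-check a single response packet against the shared response
 * buffer: the packet must start inside the buffer, report a non-zero
 * size of at least sizeof(struct hfi_packet), and must not extend past
 * the end of the buffer.
 */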
  160. int validate_packet(u8 *response_pkt, u8 *core_resp_pkt,
  161. u32 core_resp_pkt_size, const char *func)
  162. {
  163. u8 *response_limit;
  164. u32 response_pkt_size = 0;
  165. if (!response_pkt || !core_resp_pkt || !core_resp_pkt_size) {
  166. d_vpr_e("%s: invalid params\n", func);
  167. return -EINVAL;
  168. }
  169. response_limit = core_resp_pkt + core_resp_pkt_size;
  170. if (response_pkt < core_resp_pkt || response_pkt > response_limit) {
  171. d_vpr_e("%s: invalid packet address\n", func);
  172. return -EINVAL;
  173. }
  174. response_pkt_size = *(u32 *)response_pkt;
  175. if (!response_pkt_size) {
  176. d_vpr_e("%s: response packet size cannot be zero\n", func);
  177. return -EINVAL;
  178. }
  179. if (response_pkt_size < sizeof(struct hfi_packet)) {
  180. d_vpr_e("%s: invalid packet size %d\n",
  181. func, response_pkt_size);
  182. return -EINVAL;
  183. }
  184. if (response_pkt + response_pkt_size > response_limit) {
  185. d_vpr_e("%s: invalid packet size %d\n",
  186. func, response_pkt_size);
  187. return -EINVAL;
  188. }
  189. return 0;
  190. }
  191. static int validate_hdr_packet(struct msm_vidc_core *core,
  192. struct hfi_header *hdr, const char *function)
  193. {
  194. struct hfi_packet *packet;
  195. u8 *pkt;
  196. int i, rc = 0;
  197. if (!core || !hdr || !function) {
  198. d_vpr_e("%s: invalid params\n", __func__);
  199. return -EINVAL;
  200. }
  201. if (hdr->size < sizeof(struct hfi_header) + sizeof(struct hfi_packet)) {
  202. d_vpr_e("%s: invalid header size %d\n", __func__, hdr->size);
  203. return -EINVAL;
  204. }
  205. pkt = (u8 *)((u8 *)hdr + sizeof(struct hfi_header));
  206. /* validate all packets */
  207. for (i = 0; i < hdr->num_packets; i++) {
  208. packet = (struct hfi_packet *)pkt;
  209. rc = validate_packet(pkt, core->response_packet, core->packet_size, function);
  210. if (rc)
  211. return rc;
  212. pkt += packet->size;
  213. }
  214. return 0;
  215. }
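/*
 * Verify that pkt->size is large enough for the payload implied by
 * pkt->payload_info: 4 bytes for 32-bit payloads, 8 bytes for 64-bit
 * payloads, and sizeof(struct hfi_buffer) for HFI_CMD_BUFFER structure
 * payloads.
 */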
  216. static bool check_for_packet_payload(struct msm_vidc_inst *inst,
  217. struct hfi_packet *pkt, const char *func)
  218. {
  219. u32 payload_size = 0;
  220. if (!inst || !pkt) {
  221. d_vpr_e("%s: invalid params\n", __func__);
  222. return false;
  223. }
  224. if (pkt->payload_info == HFI_PAYLOAD_NONE) {
  225. i_vpr_h(inst, "%s: no payload available for packet %#x\n",
  226. func, pkt->type);
  227. return false;
  228. }
  229. switch (pkt->payload_info) {
  230. case HFI_PAYLOAD_U32:
  231. case HFI_PAYLOAD_S32:
  232. case HFI_PAYLOAD_Q16:
  233. case HFI_PAYLOAD_U32_ENUM:
  234. case HFI_PAYLOAD_32_PACKED:
  235. payload_size = 4;
  236. break;
  237. case HFI_PAYLOAD_U64:
  238. case HFI_PAYLOAD_S64:
  239. case HFI_PAYLOAD_64_PACKED:
  240. payload_size = 8;
  241. break;
  242. case HFI_PAYLOAD_STRUCTURE:
  243. if (pkt->type == HFI_CMD_BUFFER)
  244. payload_size = sizeof(struct hfi_buffer);
  245. break;
  246. default:
  247. payload_size = 0;
  248. break;
  249. }
  250. if (pkt->size < sizeof(struct hfi_packet) + payload_size) {
  251. i_vpr_e(inst,
  252. "%s: invalid payload size %u payload type %#x for packet %#x\n",
  253. func, pkt->size, pkt->payload_info, pkt->type);
  254. return false;
  255. }
  256. return true;
  257. }
  258. static int handle_session_last_flag_info(struct msm_vidc_inst *inst,
  259. struct hfi_packet *pkt)
  260. {
  261. int rc = 0;
  262. if (pkt->type == HFI_INFO_HFI_FLAG_PSC_LAST) {
  263. if (msm_vidc_allow_psc_last_flag(inst))
  264. rc = msm_vidc_process_psc_last_flag(inst);
  265. else
  266. rc = -EINVAL;
  267. } else if (pkt->type == HFI_INFO_HFI_FLAG_DRAIN_LAST) {
  268. if (msm_vidc_allow_drain_last_flag(inst))
  269. rc = msm_vidc_process_drain_last_flag(inst);
  270. else
  271. rc = -EINVAL;
  272. } else {
  273. i_vpr_e(inst, "%s: invalid packet type %#x\n", __func__,
  274. pkt->type);
  275. }
  276. if (rc)
  277. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  278. return rc;
  279. }
  280. static int handle_session_info(struct msm_vidc_inst *inst,
  281. struct hfi_packet *pkt)
  282. {
  283. int rc = 0;
  284. char *info;
  285. switch (pkt->type) {
  286. case HFI_INFO_UNSUPPORTED:
  287. info = "unsupported";
  288. break;
  289. case HFI_INFO_DATA_CORRUPT:
  290. info = "data corrupt";
  291. inst->hfi_frame_info.data_corrupt = 1;
  292. break;
  293. case HFI_INFO_BUFFER_OVERFLOW:
  294. info = "buffer overflow";
  295. inst->hfi_frame_info.overflow = 1;
  296. break;
  297. case HFI_INFO_HFI_FLAG_DRAIN_LAST:
  298. info = "drain last flag";
  299. rc = handle_session_last_flag_info(inst, pkt);
  300. break;
  301. case HFI_INFO_HFI_FLAG_PSC_LAST:
  302. info = "drc last flag";
  303. rc = handle_session_last_flag_info(inst, pkt);
  304. break;
  305. default:
  306. info = "unknown";
  307. break;
  308. }
  309. i_vpr_h(inst, "session info (%#x): %s\n", pkt->type, info);
  310. return rc;
  311. }
  312. static int handle_session_error(struct msm_vidc_inst *inst,
  313. struct hfi_packet *pkt)
  314. {
  315. int rc = 0;
  316. char *error;
  317. switch (pkt->type) {
  318. case HFI_ERROR_MAX_SESSIONS:
  319. error = "exceeded max sessions";
  320. break;
  321. case HFI_ERROR_UNKNOWN_SESSION:
  322. error = "unknown session id";
  323. break;
  324. case HFI_ERROR_INVALID_STATE:
  325. error = "invalid operation for current state";
  326. break;
  327. case HFI_ERROR_INSUFFICIENT_RESOURCES:
  328. error = "insufficient resources";
  329. break;
  330. case HFI_ERROR_BUFFER_NOT_SET:
  331. error = "internal buffers not set";
  332. break;
  333. case HFI_ERROR_FATAL:
  334. error = "fatal error";
  335. break;
  336. default:
  337. error = "unknown";
  338. break;
  339. }
  340. i_vpr_e(inst, "%s: session error received %#x: %s\n",
  341. __func__, pkt->type, error);
  342. rc = msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  343. return rc;
  344. }
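/*
 * Capture a firmware coredump: remap the "memory-region" carve-out,
 * append the interface queues (cmd/msg/dbg, including headers) and the
 * SFR region, and hand the combined blob to dev_coredumpv(), which
 * takes ownership of the vmalloc'd buffer.
 */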
  345. void fw_coredump(struct msm_vidc_core *core)
  346. {
  347. int rc = 0;
  348. struct platform_device *pdev;
  349. struct device_node *node = NULL;
  350. struct resource res = {0};
  351. phys_addr_t mem_phys = 0;
  352. size_t res_size = 0;
  353. void *mem_va = NULL;
  354. char *data = NULL, *dump = NULL;
  355. u64 total_size;
  356. if (!core) {
  357. d_vpr_e("%s: invalid params\n", __func__);
  358. return;
  359. }
  360. pdev = core->pdev;
  361. node = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
  362. if (!node) {
  363. d_vpr_e("%s: DT error getting \"memory-region\" property\n",
  364. __func__);
  365. return;
  366. }
  367. rc = of_address_to_resource(node, 0, &res);
  368. if (rc) {
  369. d_vpr_e("%s: error %d while getting \"memory-region\" resource\n",
  370. __func__, rc);
  371. return;
  372. }
  373. mem_phys = res.start;
  374. res_size = (size_t)resource_size(&res);
  375. mem_va = memremap(mem_phys, res_size, MEMREMAP_WC);
  376. if (!mem_va) {
  377. d_vpr_e("%s: unable to remap firmware memory\n", __func__);
  378. return;
  379. }
  380. total_size = res_size + TOTAL_QSIZE + ALIGNED_SFR_SIZE;
  381. data = vmalloc(total_size);
  382. if (!data) {
  383. memunmap(mem_va);
  384. return;
  385. }
  386. dump = data;
  387. /* copy firmware dump */
  388. memcpy(data, mem_va, res_size);
  389. memunmap(mem_va);
  390. /* copy queues(cmd, msg, dbg) dump(along with headers) */
  391. data += res_size;
  392. memcpy(data, (char *)core->iface_q_table.align_virtual_addr, TOTAL_QSIZE);
  393. /* copy sfr dump */
  394. data += TOTAL_QSIZE;
  395. memcpy(data, (char *)core->sfr.align_virtual_addr, ALIGNED_SFR_SIZE);
  396. dev_coredumpv(&pdev->dev, dump, total_size, GFP_KERNEL);
  397. }
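/*
 * Fatal system error path: log the SFR message and NOC error info,
 * optionally force a BUG_ON when the matching bit is set in the
 * msm_vidc_enable_bugon debug mask, then tear the core down via
 * msm_vidc_core_deinit().
 */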
  398. int handle_system_error(struct msm_vidc_core *core,
  399. struct hfi_packet *pkt)
  400. {
  401. bool bug_on = false;
  402. d_vpr_e("%s: system error received\n", __func__);
  403. print_sfr_message(core);
  404. venus_hfi_noc_error_info(core);
  405. if (pkt) {
  406. /* force a BUG_ON for the requested error type, if enabled via the debug mask */
  407. if (pkt->type == HFI_SYS_ERROR_FATAL)
  408. bug_on = !!(msm_vidc_enable_bugon & MSM_VIDC_BUG_ON_FATAL);
  409. else if (pkt->type == HFI_SYS_ERROR_NOC)
  410. bug_on = !!(msm_vidc_enable_bugon & MSM_VIDC_BUG_ON_NOC);
  411. else if (pkt->type == HFI_SYS_ERROR_WD_TIMEOUT)
  412. bug_on = !!(msm_vidc_enable_bugon & MSM_VIDC_BUG_ON_WD_TIMEOUT);
  413. if (bug_on) {
  414. d_vpr_e("%s: force bugon for type %#x\n", __func__, pkt->type);
  415. MSM_VIDC_FATAL(true);
  416. }
  417. }
  418. msm_vidc_core_deinit(core, true);
  419. return 0;
  420. }
  421. static int handle_system_init(struct msm_vidc_core *core,
  422. struct hfi_packet *pkt)
  423. {
  424. if (!(pkt->flags & HFI_FW_FLAGS_SUCCESS)) {
  425. d_vpr_h("%s: unhandled. flags=%d\n", __func__, pkt->flags);
  426. return 0;
  427. }
  428. core_lock(core, __func__);
  429. if (pkt->packet_id != core->sys_init_id) {
  430. d_vpr_e("%s: invalid pkt id %u, expected %u\n", __func__,
  431. pkt->packet_id, core->sys_init_id);
  432. goto unlock;
  433. }
  434. msm_vidc_change_core_state(core, MSM_VIDC_CORE_INIT, __func__);
  435. d_vpr_h("%s: successful\n", __func__);
  436. unlock:
  437. core_unlock(core, __func__);
  438. return 0;
  439. }
  440. static int handle_session_open(struct msm_vidc_inst *inst,
  441. struct hfi_packet *pkt)
  442. {
  443. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  444. i_vpr_h(inst, "%s: successful\n", __func__);
  445. return 0;
  446. }
  447. static int handle_session_close(struct msm_vidc_inst *inst,
  448. struct hfi_packet *pkt)
  449. {
  450. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  451. i_vpr_h(inst, "%s: successful\n", __func__);
  452. signal_session_msg_receipt(inst, SIGNAL_CMD_CLOSE);
  453. return 0;
  454. }
  455. static int handle_session_start(struct msm_vidc_inst *inst,
  456. struct hfi_packet *pkt)
  457. {
  458. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  459. i_vpr_h(inst, "%s: successful for port %d\n",
  460. __func__, pkt->port);
  461. return 0;
  462. }
  463. static int handle_session_stop(struct msm_vidc_inst *inst,
  464. struct hfi_packet *pkt)
  465. {
  466. int rc = 0;
  467. enum signal_session_response signal_type = -1;
  468. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  469. i_vpr_h(inst, "%s: successful for port %d\n",
  470. __func__, pkt->port);
  471. if (is_encode_session(inst)) {
  472. if (pkt->port == HFI_PORT_RAW) {
  473. signal_type = SIGNAL_CMD_STOP_INPUT;
  474. } else if (pkt->port == HFI_PORT_BITSTREAM) {
  475. signal_type = SIGNAL_CMD_STOP_OUTPUT;
  476. } else {
  477. i_vpr_e(inst, "%s: invalid port: %d\n",
  478. __func__, pkt->port);
  479. return -EINVAL;
  480. }
  481. } else if (is_decode_session(inst)) {
  482. if (pkt->port == HFI_PORT_RAW) {
  483. signal_type = SIGNAL_CMD_STOP_OUTPUT;
  484. } else if (pkt->port == HFI_PORT_BITSTREAM) {
  485. signal_type = SIGNAL_CMD_STOP_INPUT;
  486. } else {
  487. i_vpr_e(inst, "%s: invalid port: %d\n",
  488. __func__, pkt->port);
  489. return -EINVAL;
  490. }
  491. } else {
  492. i_vpr_e(inst, "%s: invalid session\n", __func__);
  493. return -EINVAL;
  494. }
  495. if (signal_type != -1) {
  496. rc = msm_vidc_process_stop_done(inst, signal_type);
  497. if (rc)
  498. return rc;
  499. }
  500. return 0;
  501. }
  502. static int handle_session_drain(struct msm_vidc_inst *inst,
  503. struct hfi_packet *pkt)
  504. {
  505. int rc = 0;
  506. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  507. i_vpr_h(inst, "%s: successful\n", __func__);
  508. rc = msm_vidc_process_drain_done(inst);
  509. if (rc)
  510. return rc;
  511. return rc;
  512. }
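/*
 * Translate firmware frame info and HFI buffer flags into driver buffer
 * flags: picture type maps to KEYFRAME/PFRAME/BFRAME, corruption and
 * overflow map to ERROR, and the last/PSC-last flags map to LAST
 * depending on session type and the LAST_FLAG_EVENT_ENABLE control.
 */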
  513. static int get_driver_buffer_flags(struct msm_vidc_inst *inst, u32 hfi_flags)
  514. {
  515. u32 driver_flags = 0;
  516. if (inst->hfi_frame_info.picture_type & HFI_PICTURE_IDR) {
  517. driver_flags |= MSM_VIDC_BUF_FLAG_KEYFRAME;
  518. } else if (inst->hfi_frame_info.picture_type & HFI_PICTURE_P) {
  519. driver_flags |= MSM_VIDC_BUF_FLAG_PFRAME;
  520. } else if (inst->hfi_frame_info.picture_type & HFI_PICTURE_B) {
  521. driver_flags |= MSM_VIDC_BUF_FLAG_BFRAME;
  522. } else if (inst->hfi_frame_info.picture_type & HFI_PICTURE_I) {
  523. driver_flags |= MSM_VIDC_BUF_FLAG_KEYFRAME;
  524. } else if (inst->hfi_frame_info.picture_type & HFI_PICTURE_CRA) {
  525. driver_flags |= MSM_VIDC_BUF_FLAG_KEYFRAME;
  526. } else if (inst->hfi_frame_info.picture_type & HFI_PICTURE_BLA) {
  527. driver_flags |= MSM_VIDC_BUF_FLAG_KEYFRAME;
  528. }
  529. if (inst->hfi_frame_info.data_corrupt)
  530. driver_flags |= MSM_VIDC_BUF_FLAG_ERROR;
  531. if (inst->hfi_frame_info.overflow)
  532. driver_flags |= MSM_VIDC_BUF_FLAG_ERROR;
  533. if (inst->hfi_frame_info.no_output) {
  534. if (inst->capabilities->cap[META_BUF_TAG].value &&
  535. !(hfi_flags & HFI_BUF_FW_FLAG_CODEC_CONFIG))
  536. driver_flags |= MSM_VIDC_BUF_FLAG_ERROR;
  537. }
  538. if (inst->hfi_frame_info.subframe_input)
  539. if (inst->capabilities->cap[META_BUF_TAG].value)
  540. driver_flags |= MSM_VIDC_BUF_FLAG_ERROR;
  541. if (hfi_flags & HFI_BUF_FW_FLAG_CODEC_CONFIG)
  542. driver_flags |= MSM_VIDC_BUF_FLAG_CODECCONFIG;
  543. /*
  544. * Attach the last flag to the buffer for an encode session.
  545. * For a decode session, attach it only if the LAST_FLAG_EVENT_ENABLE
  546. * control is not set by the client; if that control is enabled, the
  547. * last flag is delivered to the client via a V4L2_EVENT_EOS event instead.
  548. */
  549. if ((is_encode_session(inst) &&
  550. (hfi_flags & HFI_BUF_FW_FLAG_LAST)) ||
  551. (is_decode_session(inst) &&
  552. !inst->capabilities->cap[LAST_FLAG_EVENT_ENABLE].value &&
  553. ((hfi_flags & HFI_BUF_FW_FLAG_LAST) ||
  554. (hfi_flags & HFI_BUF_FW_FLAG_PSC_LAST))))
  555. driver_flags |= MSM_VIDC_BUF_FLAG_LAST;
  556. return driver_flags;
  557. }
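/*
 * Track a decoder output buffer returned with the read-only attribute:
 * clone it into the read_only list (via the buffer pool) if it is not
 * already there, transferring the dma-buf reference, and mark it
 * MSM_VIDC_ATTR_READ_ONLY.
 */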
  558. static int handle_read_only_buffer(struct msm_vidc_inst *inst,
  559. struct msm_vidc_buffer *buf)
  560. {
  561. struct msm_vidc_buffer *ro_buf;
  562. struct msm_vidc_core *core;
  563. bool found = false;
  564. if (!inst || !inst->core || !buf) {
  565. d_vpr_e("%s: invalid params\n", __func__);
  566. return -EINVAL;
  567. }
  568. core = inst->core;
  569. if (!is_decode_session(inst) || !is_output_buffer(buf->type))
  570. return 0;
  571. if (!(buf->attr & MSM_VIDC_ATTR_READ_ONLY))
  572. return 0;
  573. list_for_each_entry(ro_buf, &inst->buffers.read_only.list, list) {
  574. if (ro_buf->device_addr == buf->device_addr) {
  575. found = true;
  576. break;
  577. }
  578. }
  579. /*
  580. * RO flag: add the buffer to the read_only list if it is not
  581. * already present; if it is present, do nothing.
  582. */
  583. if (!found) {
  584. ro_buf = msm_vidc_pool_alloc(inst, MSM_MEM_POOL_BUFFER);
  585. if (!ro_buf) {
  586. i_vpr_e(inst, "%s: buffer alloc failed\n", __func__);
  587. return -ENOMEM;
  588. }
  589. ro_buf->index = -1;
  590. ro_buf->inst = inst;
  591. ro_buf->type = buf->type;
  592. ro_buf->fd = buf->fd;
  593. ro_buf->dmabuf = buf->dmabuf;
  594. ro_buf->device_addr = buf->device_addr;
  595. ro_buf->data_offset = buf->data_offset;
  596. ro_buf->dbuf_get = buf->dbuf_get;
  597. buf->dbuf_get = 0;
  598. INIT_LIST_HEAD(&ro_buf->list);
  599. list_add_tail(&ro_buf->list, &inst->buffers.read_only.list);
  600. print_vidc_buffer(VIDC_LOW, "low ", "ro buf added", inst, ro_buf);
  601. } else {
  602. print_vidc_buffer(VIDC_LOW, "low ", "ro buf found", inst, ro_buf);
  603. }
  604. ro_buf->attr |= MSM_VIDC_ATTR_READ_ONLY;
  605. return 0;
  606. }
  607. static int handle_non_read_only_buffer(struct msm_vidc_inst *inst,
  608. struct hfi_buffer *buffer)
  609. {
  610. struct msm_vidc_buffer *ro_buf;
  611. if (!inst || !buffer) {
  612. d_vpr_e("%s: invalid params\n", __func__);
  613. return -EINVAL;
  614. }
  615. if (!is_decode_session(inst) || buffer->type != HFI_BUFFER_RAW)
  616. return 0;
  617. if (buffer->flags & HFI_BUF_FW_FLAG_READONLY)
  618. return 0;
  619. list_for_each_entry(ro_buf, &inst->buffers.read_only.list, list) {
  620. if (ro_buf->device_addr == buffer->base_address) {
  621. ro_buf->attr &= ~MSM_VIDC_ATTR_READ_ONLY;
  622. break;
  623. }
  624. }
  625. return 0;
  626. }
  627. static int handle_psc_last_flag_buffer(struct msm_vidc_inst *inst,
  628. struct hfi_buffer *buffer)
  629. {
  630. int rc = 0;
  631. if (!(buffer->flags & HFI_BUF_FW_FLAG_PSC_LAST))
  632. return 0;
  633. if (!msm_vidc_allow_psc_last_flag(inst))
  634. return -EINVAL;
  635. rc = msm_vidc_process_psc_last_flag(inst);
  636. if (rc)
  637. return rc;
  638. return rc;
  639. }
  640. static int handle_drain_last_flag_buffer(struct msm_vidc_inst *inst,
  641. struct hfi_buffer *buffer)
  642. {
  643. int rc = 0;
  644. if (!(buffer->flags & HFI_BUF_FW_FLAG_LAST))
  645. return 0;
  646. if (!msm_vidc_allow_drain_last_flag(inst))
  647. return -EINVAL;
  648. if (is_decode_session(inst)) {
  649. rc = msm_vidc_process_drain_last_flag(inst);
  650. if (rc)
  651. return rc;
  652. } else if (is_encode_session(inst)) {
  653. rc = msm_vidc_state_change_drain_last_flag(inst);
  654. if (rc)
  655. return rc;
  656. }
  657. return rc;
  658. }
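/*
 * Handle an input buffer done (EBD): look the buffer up by index, defer
 * completion until the last subframe of a superframe batch, clear
 * QUEUED / set DEQUEUED, derive the driver flags, and update the
 * debugfs stats and buffer-stats entry.
 */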
  659. static int handle_input_buffer(struct msm_vidc_inst *inst,
  660. struct hfi_buffer *buffer)
  661. {
  662. int rc = 0;
  663. struct msm_vidc_buffers *buffers;
  664. struct msm_vidc_buffer *buf;
  665. struct msm_vidc_core *core;
  666. u32 frame_size, batch_size;
  667. bool found;
  668. if (!inst || !buffer || !inst->capabilities || !inst->core) {
  669. d_vpr_e("%s: invalid params\n", __func__);
  670. return -EINVAL;
  671. }
  672. core = inst->core;
  673. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_INPUT, __func__);
  674. if (!buffers)
  675. return -EINVAL;
  676. found = false;
  677. list_for_each_entry(buf, &buffers->list, list) {
  678. if (buf->index == buffer->index) {
  679. found = true;
  680. break;
  681. }
  682. }
  683. if (!found) {
  684. i_vpr_e(inst, "%s: invalid buffer idx %d addr %#llx data_offset %d\n",
  685. __func__, buffer->index, buffer->base_address,
  686. buffer->data_offset);
  687. return -EINVAL;
  688. }
  689. /* attach the dequeued flag only for the last frame in the batch */
  690. if (msm_vidc_is_super_buffer(inst)) {
  691. frame_size = call_session_op(core, buffer_size, inst, MSM_VIDC_BUF_INPUT);
  692. batch_size = inst->capabilities->cap[SUPER_FRAME].value;
  693. if (!frame_size || !batch_size) {
  694. i_vpr_e(inst, "%s: invalid size: frame %u, batch %u\n",
  695. __func__, frame_size, batch_size);
  696. return -EINVAL;
  697. }
  698. if (buffer->addr_offset / frame_size < batch_size - 1) {
  699. i_vpr_l(inst, "%s: superframe last buffer not reached: %u, %u, %u\n",
  700. __func__, buffer->addr_offset, frame_size, batch_size);
  701. /* remove buffer stats for all the subframes in a superframe */
  702. msm_vidc_remove_buffer_stats(inst, buf, buffer->timestamp);
  703. return 0;
  704. }
  705. }
  706. if (!(buf->attr & MSM_VIDC_ATTR_QUEUED)) {
  707. print_vidc_buffer(VIDC_ERR, "err ", "not queued", inst, buf);
  708. return 0;
  709. }
  710. buf->data_size = buffer->data_size;
  711. buf->attr &= ~MSM_VIDC_ATTR_QUEUED;
  712. buf->attr |= MSM_VIDC_ATTR_DEQUEUED;
  713. buf->flags = 0;
  714. buf->flags = get_driver_buffer_flags(inst, buffer->flags);
  715. /* handle ts_reorder for an input buffer tagged with the no_output property */
  716. if (is_ts_reorder_allowed(inst) && inst->hfi_frame_info.no_output) {
  717. i_vpr_h(inst, "%s: received no_output buffer. remove timestamp %lld\n",
  718. __func__, buf->timestamp);
  719. msm_vidc_ts_reorder_remove_timestamp(inst, buf->timestamp);
  720. }
  721. print_vidc_buffer(VIDC_HIGH, "high", "dqbuf", inst, buf);
  722. msm_vidc_update_stats(inst, buf, MSM_VIDC_DEBUGFS_EVENT_EBD);
  723. /* ebd: update end timestamp and flags in stats entry */
  724. msm_vidc_remove_buffer_stats(inst, buf, buffer->timestamp);
  725. return rc;
  726. }
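/*
 * Handle an output buffer done (FBD): process the drain-last, PSC-last,
 * release-done and read-only cases, flag unexpected encoder corruption
 * or overflow as fatal, signal or destroy the associated output fence,
 * and update timestamps, driver flags and stats before dequeue.
 */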
  727. static int handle_output_buffer(struct msm_vidc_inst *inst,
  728. struct hfi_buffer *buffer)
  729. {
  730. int rc = 0;
  731. struct msm_vidc_buffers *buffers;
  732. struct msm_vidc_buffer *buf;
  733. struct msm_vidc_core *core;
  734. bool found, fatal = false;
  735. if (!inst || !inst->core || !inst->capabilities) {
  736. d_vpr_e("%s: invalid params\n", __func__);
  737. return -EINVAL;
  738. }
  739. core = inst->core;
  740. /* handle drain last flag buffer */
  741. if (buffer->flags & HFI_BUF_FW_FLAG_LAST) {
  742. rc = handle_drain_last_flag_buffer(inst, buffer);
  743. if (rc)
  744. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  745. }
  746. if (is_decode_session(inst)) {
  747. /* handle release response for decoder output buffer */
  748. if (buffer->flags & HFI_BUF_FW_FLAG_RELEASE_DONE)
  749. return handle_release_output_buffer(inst, buffer);
  750. /* handle psc last flag buffer */
  751. if (buffer->flags & HFI_BUF_FW_FLAG_PSC_LAST) {
  752. rc = handle_psc_last_flag_buffer(inst, buffer);
  753. if (rc)
  754. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  755. }
  756. /* handle non-read only buffer */
  757. if (!(buffer->flags & HFI_BUF_FW_FLAG_READONLY)) {
  758. rc = handle_non_read_only_buffer(inst, buffer);
  759. if (rc)
  760. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  761. }
  762. }
  763. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_OUTPUT, __func__);
  764. if (!buffers)
  765. return -EINVAL;
  766. found = false;
  767. list_for_each_entry(buf, &buffers->list, list) {
  768. if (!(buf->attr & MSM_VIDC_ATTR_QUEUED))
  769. continue;
  770. if (is_decode_session(inst))
  771. found = (buf->index == buffer->index &&
  772. buf->device_addr == buffer->base_address &&
  773. buf->data_offset == buffer->data_offset);
  774. else
  775. found = (buf->index == buffer->index);
  776. if (found)
  777. break;
  778. }
  779. if (!found) {
  780. i_vpr_l(inst, "%s: invalid idx %d daddr %#llx\n",
  781. __func__, buffer->index, buffer->base_address);
  782. return 0;
  783. }
  784. buf->data_offset = buffer->data_offset;
  785. buf->data_size = buffer->data_size;
  786. buf->timestamp = buffer->timestamp;
  787. buf->attr &= ~MSM_VIDC_ATTR_QUEUED;
  788. buf->attr |= MSM_VIDC_ATTR_DEQUEUED;
  789. if (is_encode_session(inst)) {
  790. /* encoder output is not expected to be corrupted */
  791. if (inst->hfi_frame_info.data_corrupt) {
  792. i_vpr_e(inst, "%s: encode output is corrupted\n", __func__);
  793. fatal = true;
  794. }
  795. if (inst->hfi_frame_info.overflow) {
  796. /* overflow not expected for image session */
  797. if (is_image_session(inst)) {
  798. i_vpr_e(inst, "%s: overflow detected for an image session\n",
  799. __func__);
  800. fatal = true;
  801. }
  802. /* overflow not expected for cbr_cfr session */
  803. if (!buffer->data_size && inst->hfi_rc_type == HFI_RC_CBR_CFR) {
  804. i_vpr_e(inst, "%s: overflow detected for cbr_cfr session\n",
  805. __func__);
  806. fatal = true;
  807. }
  808. }
  809. if (fatal)
  810. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  811. }
  812. /*
  813. * reset data size to zero for last flag buffer.
  814. * reset RO flag for last flag buffer.
  815. */
  816. if ((buffer->flags & HFI_BUF_FW_FLAG_LAST) ||
  817. (buffer->flags & HFI_BUF_FW_FLAG_PSC_LAST)) {
  818. if (buffer->data_size) {
  819. i_vpr_e(inst, "%s: reset data size to zero for last flag buffer\n",
  820. __func__);
  821. buf->data_size = 0;
  822. }
  823. if (buffer->flags & HFI_BUF_FW_FLAG_READONLY) {
  824. i_vpr_e(inst, "%s: reset RO flag for last flag buffer\n",
  825. __func__);
  826. buffer->flags &= ~HFI_BUF_FW_FLAG_READONLY;
  827. }
  828. }
  829. if (is_decode_session(inst)) {
  830. /* RO flag is not expected for linear colorformat */
  831. if (is_linear_colorformat(inst->capabilities->cap[PIX_FMTS].value) &&
  832. (buffer->flags & HFI_BUF_FW_FLAG_READONLY)) {
  833. buffer->flags &= ~HFI_BUF_FW_FLAG_READONLY;
  834. print_vidc_buffer(
  835. VIDC_HIGH, "high", "RO flag in linear colorformat", inst, buf);
  836. }
  837. if (buffer->flags & HFI_BUF_FW_FLAG_READONLY) {
  838. buf->attr |= MSM_VIDC_ATTR_READ_ONLY;
  839. rc = handle_read_only_buffer(inst, buf);
  840. if (rc)
  841. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  842. } else {
  843. buf->attr &= ~MSM_VIDC_ATTR_READ_ONLY;
  844. }
  845. if (buf->dbuf_get) {
  846. call_mem_op(core, dma_buf_put, inst, buf->dmabuf);
  847. buf->dbuf_get = 0;
  848. }
  849. }
  850. buf->flags = 0;
  851. buf->flags = get_driver_buffer_flags(inst, buffer->flags);
  852. /* fence signalling */
  853. if (inst->hfi_frame_info.fence_id) {
  854. if (buf->data_size) {
  855. /* signal fence */
  856. call_fence_op(core, fence_signal, inst,
  857. inst->hfi_frame_info.fence_id);
  858. } else {
  859. /* destroy fence */
  860. call_fence_op(core, fence_destroy, inst,
  861. inst->hfi_frame_info.fence_id);
  862. }
  863. }
  864. if (is_decode_session(inst)) {
  865. inst->power.fw_cr = inst->hfi_frame_info.cr;
  866. inst->power.fw_cf = inst->hfi_frame_info.cf;
  867. } else {
  868. inst->power.fw_cr = inst->hfi_frame_info.cr;
  869. }
  870. if (!is_image_session(inst) && is_decode_session(inst) && buf->data_size)
  871. msm_vidc_update_timestamp_rate(inst, buf->timestamp);
  872. /* update output buffer timestamp, if ts_reorder is enabled */
  873. if (is_ts_reorder_allowed(inst) && buf->data_size)
  874. msm_vidc_ts_reorder_get_first_timestamp(inst, &buf->timestamp);
  875. print_vidc_buffer(VIDC_HIGH, "high", "dqbuf", inst, buf);
  876. msm_vidc_update_stats(inst, buf, MSM_VIDC_DEBUGFS_EVENT_FBD);
  877. /* fbd: print stats and remove entry */
  878. msm_vidc_remove_buffer_stats(inst, buf, buffer->timestamp);
  879. return rc;
  880. }
  881. static int handle_input_metadata_buffer(struct msm_vidc_inst *inst,
  882. struct hfi_buffer *buffer)
  883. {
  884. int rc = 0;
  885. struct msm_vidc_buffers *buffers;
  886. struct msm_vidc_buffer *buf;
  887. struct msm_vidc_core *core;
  888. u32 frame_size, batch_size;
  889. bool found;
  890. if (!inst || !buffer || !inst->capabilities || !inst->core) {
  891. d_vpr_e("%s: invalid params\n", __func__);
  892. return -EINVAL;
  893. }
  894. core = inst->core;
  895. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_INPUT_META, __func__);
  896. if (!buffers)
  897. return -EINVAL;
  898. found = false;
  899. list_for_each_entry(buf, &buffers->list, list) {
  900. if (buf->index == buffer->index) {
  901. found = true;
  902. break;
  903. }
  904. }
  905. if (!found) {
  906. i_vpr_e(inst, "%s: invalid idx %d daddr %#llx data_offset %d\n",
  907. __func__, buffer->index, buffer->base_address,
  908. buffer->data_offset);
  909. return -EINVAL;
  910. }
  911. /* attach the dequeued flag only for the last frame in the batch */
  912. if (msm_vidc_is_super_buffer(inst)) {
  913. frame_size = call_session_op(core, buffer_size, inst, MSM_VIDC_BUF_INPUT_META);
  914. batch_size = inst->capabilities->cap[SUPER_FRAME].value;
  915. if (!frame_size || !batch_size) {
  916. i_vpr_e(inst, "%s: invalid size: frame %u, batch %u\n",
  917. __func__, frame_size, batch_size);
  918. return -EINVAL;
  919. }
  920. if (buffer->addr_offset / frame_size < batch_size - 1) {
  921. i_vpr_l(inst, "%s: superframe last buffer not reached: %u, %u, %u\n",
  922. __func__, buffer->addr_offset, frame_size, batch_size);
  923. return 0;
  924. }
  925. }
  926. if (!(buf->attr & MSM_VIDC_ATTR_QUEUED)) {
  927. print_vidc_buffer(VIDC_ERR, "err ", "not queued", inst, buf);
  928. return 0;
  929. }
  930. buf->data_size = buffer->data_size;
  931. buf->attr &= ~MSM_VIDC_ATTR_QUEUED;
  932. buf->attr |= MSM_VIDC_ATTR_DEQUEUED;
  933. buf->flags = 0;
  934. if ((is_encode_session(inst) &&
  935. (buffer->flags & HFI_BUF_FW_FLAG_LAST)) ||
  936. (is_decode_session(inst) &&
  937. !inst->capabilities->cap[LAST_FLAG_EVENT_ENABLE].value &&
  938. ((buffer->flags & HFI_BUF_FW_FLAG_LAST) ||
  939. (buffer->flags & HFI_BUF_FW_FLAG_PSC_LAST))))
  940. buf->flags |= MSM_VIDC_BUF_FLAG_LAST;
  941. print_vidc_buffer(VIDC_LOW, "low ", "dqbuf", inst, buf);
  942. return rc;
  943. }
  944. static int handle_output_metadata_buffer(struct msm_vidc_inst *inst,
  945. struct hfi_buffer *buffer)
  946. {
  947. int rc = 0;
  948. struct msm_vidc_buffers *buffers;
  949. struct msm_vidc_buffer *buf;
  950. bool found;
  951. if (!inst || !inst->capabilities) {
  952. d_vpr_e("%s: Invalid params\n", __func__);
  953. return -EINVAL;
  954. }
  955. buffers = msm_vidc_get_buffers(inst, MSM_VIDC_BUF_OUTPUT_META, __func__);
  956. if (!buffers)
  957. return -EINVAL;
  958. found = false;
  959. list_for_each_entry(buf, &buffers->list, list) {
  960. if (buf->index == buffer->index) {
  961. found = true;
  962. break;
  963. }
  964. }
  965. if (!found) {
  966. i_vpr_e(inst, "%s: invalid idx %d daddr %#llx data_offset %d\n",
  967. __func__, buffer->index, buffer->base_address,
  968. buffer->data_offset);
  969. return -EINVAL;
  970. }
  971. if (!(buf->attr & MSM_VIDC_ATTR_QUEUED)) {
  972. print_vidc_buffer(VIDC_ERR, "err ", "not queued", inst, buf);
  973. return 0;
  974. }
  975. buf->data_size = buffer->data_size;
  976. buf->attr &= ~MSM_VIDC_ATTR_QUEUED;
  977. buf->attr |= MSM_VIDC_ATTR_DEQUEUED;
  978. buf->flags = 0;
  979. if ((is_encode_session(inst) &&
  980. (buffer->flags & HFI_BUF_FW_FLAG_LAST)) ||
  981. (is_decode_session(inst) &&
  982. !inst->capabilities->cap[LAST_FLAG_EVENT_ENABLE].value &&
  983. ((buffer->flags & HFI_BUF_FW_FLAG_LAST) ||
  984. (buffer->flags & HFI_BUF_FW_FLAG_PSC_LAST))))
  985. buf->flags |= MSM_VIDC_BUF_FLAG_LAST;
  986. print_vidc_buffer(VIDC_LOW, "low ", "dqbuf", inst, buf);
  987. return rc;
  988. }
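/*
 * Return true if the metadata buffer paired with @buf (same index on
 * the corresponding meta port) has already been dequeued, or if
 * metadata is not enabled for that port.
 */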
  989. static bool is_metabuffer_dequeued(struct msm_vidc_inst *inst,
  990. struct msm_vidc_buffer *buf)
  991. {
  992. bool found = false;
  993. struct msm_vidc_buffers *buffers;
  994. struct msm_vidc_buffer *buffer;
  995. enum msm_vidc_buffer_type buffer_type;
  996. if (is_input_buffer(buf->type) && is_input_meta_enabled(inst))
  997. buffer_type = MSM_VIDC_BUF_INPUT_META;
  998. else if (is_output_buffer(buf->type) && is_output_meta_enabled(inst))
  999. buffer_type = MSM_VIDC_BUF_OUTPUT_META;
  1000. else
  1001. return true;
  1002. buffers = msm_vidc_get_buffers(inst, buffer_type, __func__);
  1003. if (!buffers)
  1004. return false;
  1005. list_for_each_entry(buffer, &buffers->list, list) {
  1006. if (buffer->index == buf->index &&
  1007. buffer->attr & MSM_VIDC_ATTR_DEQUEUED) {
  1008. found = true;
  1009. break;
  1010. }
  1011. }
  1012. return found;
  1013. }
  1014. static int msm_vidc_check_meta_buffers(struct msm_vidc_inst *inst)
  1015. {
  1016. int rc = 0;
  1017. int i;
  1018. struct msm_vidc_buffers *buffers;
  1019. struct msm_vidc_buffer *buf;
  1020. static const enum msm_vidc_buffer_type buffer_type[] = {
  1021. MSM_VIDC_BUF_INPUT,
  1022. MSM_VIDC_BUF_OUTPUT,
  1023. };
  1024. for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
  1025. /*
  1026. * skip the input meta buffer check, as the meta buffers were
  1027. * already delivered when the output fence is enabled.
  1028. */
  1029. if (is_meta_rx_inp_enabled(inst, META_OUTBUF_FENCE)) {
  1030. if (buffer_type[i] == MSM_VIDC_BUF_INPUT)
  1031. continue;
  1032. }
  1033. buffers = msm_vidc_get_buffers(inst, buffer_type[i], __func__);
  1034. if (!buffers)
  1035. return -EINVAL;
  1036. list_for_each_entry(buf, &buffers->list, list) {
  1037. if (buf->attr & MSM_VIDC_ATTR_DEQUEUED) {
  1038. if (!is_metabuffer_dequeued(inst, buf)) {
  1039. print_vidc_buffer(VIDC_ERR, "err ",
  1040. "meta not dequeued", inst, buf);
  1041. return -EINVAL;
  1042. }
  1043. }
  1044. }
  1045. }
  1046. return rc;
  1047. }
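/*
 * Complete all buffers marked DEQUEUED by calling vb2_buffer_done(),
 * after confirming their paired metadata buffers were dequeued. Buffers
 * already marked BUFFER_DONE are skipped so the same buffer is never
 * completed twice.
 */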
  1048. static int handle_dequeue_buffers(struct msm_vidc_inst *inst)
  1049. {
  1050. int rc = 0;
  1051. int i;
  1052. struct msm_vidc_buffers *buffers;
  1053. struct msm_vidc_buffer *buf;
  1054. struct msm_vidc_buffer *dummy;
  1055. static const enum msm_vidc_buffer_type buffer_type[] = {
  1056. MSM_VIDC_BUF_INPUT_META,
  1057. MSM_VIDC_BUF_INPUT,
  1058. MSM_VIDC_BUF_OUTPUT_META,
  1059. MSM_VIDC_BUF_OUTPUT,
  1060. };
  1061. /* check metabuffers dequeued before sending vb2_buffer_done() */
  1062. rc = msm_vidc_check_meta_buffers(inst);
  1063. if (rc)
  1064. return rc;
  1065. for (i = 0; i < ARRAY_SIZE(buffer_type); i++) {
  1066. buffers = msm_vidc_get_buffers(inst, buffer_type[i], __func__);
  1067. if (!buffers)
  1068. return -EINVAL;
  1069. list_for_each_entry_safe(buf, dummy, &buffers->list, list) {
  1070. if (buf->attr & MSM_VIDC_ATTR_DEQUEUED) {
  1071. buf->attr &= ~MSM_VIDC_ATTR_DEQUEUED;
  1072. /*
  1073. * do not send vb2_buffer_done() when the fw returns
  1074. * the same buffer again
  1075. */
  1076. if (buf->attr & MSM_VIDC_ATTR_BUFFER_DONE) {
  1077. print_vidc_buffer(VIDC_HIGH, "high",
  1078. "vb2 done already", inst, buf);
  1079. } else {
  1080. buf->attr |= MSM_VIDC_ATTR_BUFFER_DONE;
  1081. rc = msm_vidc_vb2_buffer_done(inst, buf);
  1082. if (rc) {
  1083. print_vidc_buffer(VIDC_HIGH, "err ",
  1084. "vb2 done failed", inst, buf);
  1085. /* ignore the error */
  1086. rc = 0;
  1087. }
  1088. }
  1089. }
  1090. }
  1091. }
  1092. return rc;
  1093. }
  1094. static int handle_release_internal_buffer(struct msm_vidc_inst *inst,
  1095. struct hfi_buffer *buffer)
  1096. {
  1097. int rc = 0;
  1098. struct msm_vidc_buffers *buffers;
  1099. struct msm_vidc_buffer *buf;
  1100. bool found;
  1101. buffers = msm_vidc_get_buffers(inst, hfi_buf_type_to_driver(inst->domain,
  1102. buffer->type, HFI_PORT_NONE), __func__);
  1103. if (!buffers)
  1104. return -EINVAL;
  1105. found = false;
  1106. list_for_each_entry(buf, &buffers->list, list) {
  1107. if (buf->device_addr == buffer->base_address) {
  1108. found = true;
  1109. break;
  1110. }
  1111. }
  1112. if (!is_internal_buffer(buf->type))
  1113. return 0;
  1114. if (found) {
  1115. rc = msm_vidc_destroy_internal_buffer(inst, buf);
  1116. if (rc)
  1117. return rc;
  1118. } else {
  1119. i_vpr_e(inst, "%s: invalid idx %d daddr %#llx\n",
  1120. __func__, buffer->index, buffer->base_address);
  1121. return -EINVAL;
  1122. }
  1123. return rc;
  1124. }
  1125. int handle_release_output_buffer(struct msm_vidc_inst *inst,
  1126. struct hfi_buffer *buffer)
  1127. {
  1128. int rc = 0;
  1129. struct msm_vidc_buffer *buf;
  1130. bool found = false;
  1131. list_for_each_entry(buf, &inst->buffers.read_only.list, list) {
  1132. if (buf->device_addr == buffer->base_address &&
  1133. buf->attr & MSM_VIDC_ATTR_PENDING_RELEASE) {
  1134. found = true;
  1135. break;
  1136. }
  1137. }
  1138. if (!found) {
  1139. i_vpr_e(inst, "%s: invalid idx %d daddr %#llx\n",
  1140. __func__, buffer->index, buffer->base_address);
  1141. return -EINVAL;
  1142. }
  1143. buf->attr &= ~MSM_VIDC_ATTR_READ_ONLY;
  1144. print_vidc_buffer(VIDC_LOW, "low ", "release done", inst, buf);
  1145. return rc;
  1146. }
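/*
 * Dispatch an HFI_CMD_BUFFER response: validate the payload, buffer
 * type and port, then route the embedded struct hfi_buffer to the
 * per-domain, per-port handler table (input/output, metadata, or
 * internal buffer release).
 */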
  1147. static int handle_session_buffer(struct msm_vidc_inst *inst,
  1148. struct hfi_packet *pkt)
  1149. {
  1150. int i, rc = 0;
  1151. struct hfi_buffer *buffer;
  1152. u32 hfi_handle_size = 0;
  1153. const struct msm_vidc_hfi_buffer_handle *hfi_handle_arr = NULL;
  1154. static const struct msm_vidc_hfi_buffer_handle enc_input_hfi_handle[] = {
  1155. {HFI_BUFFER_METADATA, handle_input_metadata_buffer },
  1156. {HFI_BUFFER_RAW, handle_input_buffer },
  1157. {HFI_BUFFER_VPSS, handle_release_internal_buffer },
  1158. };
  1159. static const struct msm_vidc_hfi_buffer_handle enc_output_hfi_handle[] = {
  1160. {HFI_BUFFER_METADATA, handle_output_metadata_buffer },
  1161. {HFI_BUFFER_BITSTREAM, handle_output_buffer },
  1162. {HFI_BUFFER_BIN, handle_release_internal_buffer },
  1163. {HFI_BUFFER_COMV, handle_release_internal_buffer },
  1164. {HFI_BUFFER_NON_COMV, handle_release_internal_buffer },
  1165. {HFI_BUFFER_LINE, handle_release_internal_buffer },
  1166. {HFI_BUFFER_ARP, handle_release_internal_buffer },
  1167. {HFI_BUFFER_DPB, handle_release_internal_buffer },
  1168. };
  1169. static const struct msm_vidc_hfi_buffer_handle dec_input_hfi_handle[] = {
  1170. {HFI_BUFFER_METADATA, handle_input_metadata_buffer },
  1171. {HFI_BUFFER_BITSTREAM, handle_input_buffer },
  1172. {HFI_BUFFER_BIN, handle_release_internal_buffer },
  1173. {HFI_BUFFER_COMV, handle_release_internal_buffer },
  1174. {HFI_BUFFER_NON_COMV, handle_release_internal_buffer },
  1175. {HFI_BUFFER_LINE, handle_release_internal_buffer },
  1176. {HFI_BUFFER_PERSIST, handle_release_internal_buffer },
  1177. {HFI_BUFFER_PARTIAL_DATA, handle_release_internal_buffer },
  1178. };
  1179. static const struct msm_vidc_hfi_buffer_handle dec_output_hfi_handle[] = {
  1180. {HFI_BUFFER_METADATA, handle_output_metadata_buffer },
  1181. {HFI_BUFFER_RAW, handle_output_buffer },
  1182. {HFI_BUFFER_DPB, handle_release_internal_buffer },
  1183. };
  1184. if (pkt->payload_info == HFI_PAYLOAD_NONE) {
  1185. i_vpr_h(inst, "%s: received hfi buffer packet without payload\n",
  1186. __func__);
  1187. return 0;
  1188. }
  1189. if (!check_for_packet_payload(inst, pkt, __func__)) {
  1190. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  1191. return 0;
  1192. }
  1193. buffer = (struct hfi_buffer *)((u8 *)pkt + sizeof(struct hfi_packet));
  1194. if (!is_valid_hfi_buffer_type(inst, buffer->type, __func__)) {
  1195. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  1196. return 0;
  1197. }
  1198. if (!is_valid_hfi_port(inst, pkt->port, buffer->type, __func__)) {
  1199. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  1200. return 0;
  1201. }
  1202. if (is_encode_session(inst)) {
  1203. if (pkt->port == HFI_PORT_RAW) {
  1204. hfi_handle_size = ARRAY_SIZE(enc_input_hfi_handle);
  1205. hfi_handle_arr = enc_input_hfi_handle;
  1206. } else if (pkt->port == HFI_PORT_BITSTREAM) {
  1207. hfi_handle_size = ARRAY_SIZE(enc_output_hfi_handle);
  1208. hfi_handle_arr = enc_output_hfi_handle;
  1209. }
  1210. } else if (is_decode_session(inst)) {
  1211. if (pkt->port == HFI_PORT_BITSTREAM) {
  1212. hfi_handle_size = ARRAY_SIZE(dec_input_hfi_handle);
  1213. hfi_handle_arr = dec_input_hfi_handle;
  1214. } else if (pkt->port == HFI_PORT_RAW) {
  1215. hfi_handle_size = ARRAY_SIZE(dec_output_hfi_handle);
  1216. hfi_handle_arr = dec_output_hfi_handle;
  1217. }
  1218. }
  1219. /* handle invalid session */
  1220. if (!hfi_handle_arr || !hfi_handle_size) {
  1221. i_vpr_e(inst, "%s: invalid session %d\n", __func__, inst->domain);
  1222. return -EINVAL;
  1223. }
  1224. /* handle session buffer */
  1225. for (i = 0; i < hfi_handle_size; i++) {
  1226. if (hfi_handle_arr[i].type == buffer->type) {
  1227. rc = hfi_handle_arr[i].handle(inst, buffer);
  1228. if (rc)
  1229. return rc;
  1230. break;
  1231. }
  1232. }
  1233. /* handle unknown buffer type */
  1234. if (i == hfi_handle_size) {
  1235. i_vpr_e(inst, "%s: port %u, unknown buffer type %#x\n", __func__,
  1236. pkt->port, buffer->type);
  1237. return -EINVAL;
  1238. }
  1239. return rc;
  1240. }
  1241. static int handle_input_port_settings_change(struct msm_vidc_inst *inst)
  1242. {
  1243. int rc = 0;
  1244. enum msm_vidc_allow allow = MSM_VIDC_DISALLOW;
  1245. allow = msm_vidc_allow_input_psc(inst);
  1246. if (allow == MSM_VIDC_DISALLOW) {
  1247. return -EINVAL;
  1248. } else if (allow == MSM_VIDC_ALLOW) {
  1249. rc = msm_vidc_state_change_input_psc(inst);
  1250. if (rc)
  1251. return rc;
  1252. print_psc_properties("INPUT_PSC", inst, inst->subcr_params[INPUT_PORT]);
  1253. rc = msm_vdec_input_port_settings_change(inst);
  1254. if (rc)
  1255. return rc;
  1256. }
  1257. return rc;
  1258. }
  1259. static int handle_output_port_settings_change(struct msm_vidc_inst *inst)
  1260. {
  1261. int rc = 0;
  1262. print_psc_properties("OUTPUT_PSC", inst, inst->subcr_params[OUTPUT_PORT]);
  1263. rc = msm_vdec_output_port_settings_change(inst);
  1264. if (rc)
  1265. return rc;
  1266. return rc;
  1267. }
  1268. static int handle_port_settings_change(struct msm_vidc_inst *inst,
  1269. struct hfi_packet *pkt)
  1270. {
  1271. int rc = 0;
  1272. i_vpr_h(inst, "%s: Received port settings change, type %d\n",
  1273. __func__, pkt->port);
  1274. if (pkt->port == HFI_PORT_RAW) {
  1275. rc = handle_output_port_settings_change(inst);
  1276. if (rc)
  1277. goto exit;
  1278. } else if (pkt->port == HFI_PORT_BITSTREAM) {
  1279. rc = handle_input_port_settings_change(inst);
  1280. if (rc)
  1281. goto exit;
  1282. } else {
  1283. i_vpr_e(inst, "%s: invalid port type: %#x\n",
  1284. __func__, pkt->port);
  1285. rc = -EINVAL;
  1286. goto exit;
  1287. }
  1288. exit:
  1289. if (rc)
  1290. msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
  1291. return rc;
  1292. }
  1293. static int handle_session_subscribe_mode(struct msm_vidc_inst *inst,
  1294. struct hfi_packet *pkt)
  1295. {
  1296. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  1297. i_vpr_h(inst, "%s: successful\n", __func__);
  1298. return 0;
  1299. }
  1300. static int handle_session_delivery_mode(struct msm_vidc_inst *inst,
  1301. struct hfi_packet *pkt)
  1302. {
  1303. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  1304. i_vpr_h(inst, "%s: successful\n", __func__);
  1305. return 0;
  1306. }
  1307. static int handle_session_pause(struct msm_vidc_inst *inst,
  1308. struct hfi_packet *pkt)
  1309. {
  1310. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  1311. i_vpr_h(inst, "%s: successful\n", __func__);
  1312. return 0;
  1313. }
  1314. static int handle_session_resume(struct msm_vidc_inst *inst,
  1315. struct hfi_packet *pkt)
  1316. {
  1317. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  1318. i_vpr_h(inst, "%s: successful\n", __func__);
  1319. return 0;
  1320. }
  1321. static int handle_session_stability(struct msm_vidc_inst *inst,
  1322. struct hfi_packet *pkt)
  1323. {
  1324. if (pkt->flags & HFI_FW_FLAGS_SUCCESS)
  1325. i_vpr_h(inst, "%s: successful\n", __func__);
  1326. return 0;
  1327. }
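/*
 * Dispatch a session command response through the hfi_pkt_handle table;
 * an unrecognized command type is reported and treated as an error.
 */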
  1328. static int handle_session_command(struct msm_vidc_inst *inst,
  1329. struct hfi_packet *pkt)
  1330. {
  1331. int i, rc;
  1332. static const struct msm_vidc_hfi_packet_handle hfi_pkt_handle[] = {
  1333. {HFI_CMD_OPEN, handle_session_open },
  1334. {HFI_CMD_CLOSE, handle_session_close },
  1335. {HFI_CMD_START, handle_session_start },
  1336. {HFI_CMD_STOP, handle_session_stop },
  1337. {HFI_CMD_DRAIN, handle_session_drain },
  1338. {HFI_CMD_BUFFER, handle_session_buffer },
  1339. {HFI_CMD_SETTINGS_CHANGE, handle_port_settings_change },
  1340. {HFI_CMD_SUBSCRIBE_MODE, handle_session_subscribe_mode },
  1341. {HFI_CMD_DELIVERY_MODE, handle_session_delivery_mode },
  1342. {HFI_CMD_PAUSE, handle_session_pause },
  1343. {HFI_CMD_RESUME, handle_session_resume },
  1344. {HFI_CMD_STABILITY, handle_session_stability },
  1345. };
  1346. /* handle session pkt */
  1347. for (i = 0; i < ARRAY_SIZE(hfi_pkt_handle); i++) {
  1348. if (hfi_pkt_handle[i].type == pkt->type) {
  1349. rc = hfi_pkt_handle[i].handle(inst, pkt);
  1350. if (rc)
  1351. return rc;
  1352. break;
  1353. }
  1354. }
  1355. /* handle unknown command type */
  1356. if (i == ARRAY_SIZE(hfi_pkt_handle)) {
  1357. i_vpr_e(inst, "%s: Unsupported command type: %#x\n", __func__, pkt->type);
  1358. return -EINVAL;
  1359. }
  1360. return 0;
  1361. }
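/*
 * Copy the HFI_PROP_DPB_LIST payload into inst->dpb_list_payload,
 * rejecting payloads larger than MAX_DPB_LIST_PAYLOAD_SIZE, and log
 * each four-word DPB entry.
 */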
static int handle_dpb_list_property(struct msm_vidc_inst *inst,
		struct hfi_packet *pkt)
{
	u32 payload_size, num_words_in_payload;
	u8 *payload_start;
	int i = 0;

	payload_size = pkt->size - sizeof(struct hfi_packet);
	num_words_in_payload = payload_size / 4;
	payload_start = (u8 *)((u8 *)pkt + sizeof(struct hfi_packet));
	memset(inst->dpb_list_payload, 0, MAX_DPB_LIST_ARRAY_SIZE);

	if (payload_size > MAX_DPB_LIST_PAYLOAD_SIZE) {
		i_vpr_e(inst,
			"%s: dpb list payload size %d exceeds expected max size %d\n",
			__func__, payload_size, MAX_DPB_LIST_PAYLOAD_SIZE);
		msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
		return -EINVAL;
	}
	memcpy(inst->dpb_list_payload, payload_start, payload_size);

	for (i = 0; (i + 3) < num_words_in_payload; i = i + 4) {
		i_vpr_l(inst,
			"%s: base addr %#x %#x, addr offset %#x, data offset %#x\n",
			__func__, inst->dpb_list_payload[i], inst->dpb_list_payload[i + 1],
			inst->dpb_list_payload[i + 2], inst->dpb_list_payload[i + 3]);
	}

	return 0;
}

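/*
 * Handle a property packet that carries a payload. Most decoder properties
 * update the port's subscription parameters (subcr_params); per-frame
 * properties update inst->hfi_frame_info instead.
 */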
static int handle_property_with_payload(struct msm_vidc_inst *inst,
		struct hfi_packet *pkt, u32 port)
{
	int rc = 0;
	u32 *payload_ptr = NULL;

	payload_ptr = (u32 *)((u8 *)pkt + sizeof(struct hfi_packet));
	if (!payload_ptr) {
		i_vpr_e(inst,
			"%s: payload_ptr cannot be null\n", __func__);
		return -EINVAL;
	}

	switch (pkt->type) {
	case HFI_PROP_BITSTREAM_RESOLUTION:
		inst->subcr_params[port].bitstream_resolution = payload_ptr[0];
		break;
	case HFI_PROP_CROP_OFFSETS:
		inst->subcr_params[port].crop_offsets[0] = payload_ptr[0];
		inst->subcr_params[port].crop_offsets[1] = payload_ptr[1];
		break;
	case HFI_PROP_LUMA_CHROMA_BIT_DEPTH:
		inst->subcr_params[port].bit_depth = payload_ptr[0];
		break;
	case HFI_PROP_CODED_FRAMES:
		inst->subcr_params[port].coded_frames = payload_ptr[0];
		break;
	case HFI_PROP_BUFFER_FW_MIN_OUTPUT_COUNT:
		inst->subcr_params[port].fw_min_count = payload_ptr[0];
		break;
	case HFI_PROP_PIC_ORDER_CNT_TYPE:
		inst->subcr_params[port].pic_order_cnt = payload_ptr[0];
		break;
	case HFI_PROP_SIGNAL_COLOR_INFO:
		inst->subcr_params[port].color_info = payload_ptr[0];
		break;
	case HFI_PROP_PROFILE:
		inst->subcr_params[port].profile = payload_ptr[0];
		break;
	case HFI_PROP_LEVEL:
		inst->subcr_params[port].level = payload_ptr[0];
		break;
	case HFI_PROP_TIER:
		inst->subcr_params[port].tier = payload_ptr[0];
		break;
	case HFI_PROP_AV1_FILM_GRAIN_PRESENT:
		inst->subcr_params[port].av1_film_grain_present = payload_ptr[0];
		break;
	case HFI_PROP_AV1_SUPER_BLOCK_ENABLED:
		inst->subcr_params[port].av1_super_block_enabled = payload_ptr[0];
		break;
	case HFI_PROP_PICTURE_TYPE:
		inst->hfi_frame_info.picture_type = payload_ptr[0];
		if (inst->hfi_frame_info.picture_type & HFI_PICTURE_B)
			inst->has_bframe = true;
		if (inst->hfi_frame_info.picture_type & HFI_PICTURE_IDR)
			inst->iframe = true;
		else
			inst->iframe = false;
		break;
	case HFI_PROP_SUBFRAME_INPUT:
		if (port != INPUT_PORT) {
			i_vpr_e(inst,
				"%s: invalid port: %d for property %#x\n",
				__func__, pkt->port, pkt->type);
			break;
		}
		inst->hfi_frame_info.subframe_input = 1;
		break;
	case HFI_PROP_WORST_COMPRESSION_RATIO:
		inst->hfi_frame_info.cr = payload_ptr[0];
		break;
	case HFI_PROP_WORST_COMPLEXITY_FACTOR:
		inst->hfi_frame_info.cf = payload_ptr[0];
		break;
	case HFI_PROP_CABAC_SESSION:
		if (payload_ptr[0] == 1)
			msm_vidc_update_cap_value(inst, ENTROPY_MODE,
				V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC,
				__func__);
		else
			msm_vidc_update_cap_value(inst, ENTROPY_MODE,
				V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC,
				__func__);
		break;
	case HFI_PROP_DPB_LIST:
		if (is_decode_session(inst) &&
		    inst->capabilities->cap[DPB_LIST].value) {
			rc = handle_dpb_list_property(inst, pkt);
			if (rc)
				break;
		} else {
			i_vpr_e(inst,
				"%s: invalid property %#x for %s port %d dpb cap value %d\n",
				__func__, pkt->type, is_decode_session(inst) ? "decode" : "encode",
				port, inst->capabilities->cap[DPB_LIST].value);
		}
		break;
	case HFI_PROP_QUALITY_MODE:
		if (inst->capabilities->cap[QUALITY_MODE].value != payload_ptr[0])
			i_vpr_e(inst,
				"%s: fw quality mode(%d) not matching the capability value(%d)\n",
				__func__, payload_ptr[0],
				inst->capabilities->cap[QUALITY_MODE].value);
		break;
	case HFI_PROP_STAGE:
		if (inst->capabilities->cap[STAGE].value != payload_ptr[0])
			i_vpr_e(inst,
				"%s: fw stage mode(%d) not matching the capability value(%d)\n",
				__func__, payload_ptr[0], inst->capabilities->cap[STAGE].value);
		break;
	case HFI_PROP_PIPE:
		if (inst->capabilities->cap[PIPE].value != payload_ptr[0])
			i_vpr_e(inst,
				"%s: fw pipe mode(%d) not matching the capability value(%d)\n",
				__func__, payload_ptr[0], inst->capabilities->cap[PIPE].value);
		break;
	case HFI_PROP_FENCE:
		inst->hfi_frame_info.fence_id = payload_ptr[0];
		break;
	default:
		i_vpr_e(inst, "%s: invalid property %#x\n",
			__func__, pkt->type);
		break;
	}

	return rc;
}

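/*
 * Handle a property packet that carries no payload. Only HFI_PROP_NO_OUTPUT
 * (valid on the input port) is expected here.
 */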
static int handle_property_without_payload(struct msm_vidc_inst *inst,
		struct hfi_packet *pkt, u32 port)
{
	switch (pkt->type) {
	case HFI_PROP_NO_OUTPUT:
		if (port != INPUT_PORT) {
			i_vpr_e(inst,
				"%s: invalid port: %d for property %#x\n",
				__func__, pkt->port, pkt->type);
			break;
		}
		i_vpr_h(inst, "received no_output property\n");
		inst->hfi_frame_info.no_output = 1;
		break;
	default:
		i_vpr_e(inst, "%s: invalid property %#x\n",
			__func__, pkt->type);
		break;
	}

	return 0;
}

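/*
 * Validate the port of a session property packet, skip packets that carry
 * only the information flag, and route the rest to the with/without payload
 * handlers above.
 */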
static int handle_session_property(struct msm_vidc_inst *inst,
		struct hfi_packet *pkt)
{
	int rc = 0;
	u32 port;

	if (!inst || !inst->capabilities) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	i_vpr_l(inst, "%s: property type %#x\n", __func__, pkt->type);

	port = vidc_port_from_hfi(inst, pkt->port);
	if (port >= MAX_PORT) {
		i_vpr_e(inst,
			"%s: invalid port: %d for property %#x\n",
			__func__, pkt->port, pkt->type);
		return -EINVAL;
	}

	if (pkt->flags & HFI_FW_FLAGS_INFORMATION) {
		i_vpr_h(inst,
			"%s: information flag received for property %#x packet\n",
			__func__, pkt->type);
		return 0;
	}

	if (check_for_packet_payload(inst, pkt, __func__)) {
		rc = handle_property_with_payload(inst, pkt, port);
		if (rc)
			return rc;
	} else {
		rc = handle_property_without_payload(inst, pkt, port);
		if (rc)
			return rc;
	}

	return rc;
}

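/*
 * Copy the firmware image version string into core->fw_version, replacing
 * embedded null characters with spaces so the whole string can be printed.
 */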
static int handle_image_version_property(struct msm_vidc_core *core,
		struct hfi_packet *pkt)
{
	u32 i = 0;
	u8 *str_image_version;
	u32 req_bytes;

	req_bytes = pkt->size - sizeof(*pkt);
	if (req_bytes < VENUS_VERSION_LENGTH - 1) {
		d_vpr_e("%s: bad_pkt: %d\n", __func__, req_bytes);
		return -EINVAL;
	}
	str_image_version = (u8 *)pkt + sizeof(struct hfi_packet);

	/*
	 * The version string returned by firmware includes null
	 * characters at the start and in between. Replace the null
	 * characters with spaces so the version info can be printed.
	 */
	for (i = 0; i < VENUS_VERSION_LENGTH - 1; i++) {
		if (str_image_version[i] != '\0')
			core->fw_version[i] = str_image_version[i];
		else
			core->fw_version[i] = ' ';
	}
	core->fw_version[i] = '\0';

	d_vpr_h("%s: F/W version: %s\n", __func__, core->fw_version);
	return 0;
}

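/*
 * System (session-less) property handler: only the firmware image version
 * is parsed; every other property type is just logged.
 */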
static int handle_system_property(struct msm_vidc_core *core,
		struct hfi_packet *pkt)
{
	int rc = 0;

	switch (pkt->type) {
	case HFI_PROP_IMAGE_VERSION:
		rc = handle_image_version_property(core, pkt);
		break;
	default:
		d_vpr_h("%s: property type %#x successful\n",
			__func__, pkt->type);
		break;
	}

	return rc;
}

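/*
 * Walk all packets of a system (session-less) response header. Packets are
 * dispatched by type range (error, property, command); a system error packet
 * stops any further processing.
 */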
static int handle_system_response(struct msm_vidc_core *core,
		struct hfi_header *hdr)
{
	int rc = 0;
	struct hfi_packet *packet;
	u8 *pkt, *start_pkt;
	int i, j;
	static const struct msm_vidc_core_hfi_range be[] = {
		{HFI_SYSTEM_ERROR_BEGIN, HFI_SYSTEM_ERROR_END, handle_system_error },
		{HFI_PROP_BEGIN,         HFI_PROP_END,         handle_system_property },
		{HFI_CMD_BEGIN,          HFI_CMD_END,          handle_system_init },
	};

	start_pkt = (u8 *)((u8 *)hdr + sizeof(struct hfi_header));
	for (i = 0; i < ARRAY_SIZE(be); i++) {
		pkt = start_pkt;
		for (j = 0; j < hdr->num_packets; j++) {
			packet = (struct hfi_packet *)pkt;
			/* handle system error */
			if (packet->flags & HFI_FW_FLAGS_SYSTEM_ERROR) {
				d_vpr_e("%s: received system error %#x\n",
					__func__, packet->type);
				rc = handle_system_error(core, packet);
				goto exit;
			}
			if (in_range(be[i], packet->type)) {
				rc = be[i].handle(core, packet);
				if (rc)
					goto exit;
				/* skip processing any more packets after a system error */
				if (!i) {
					d_vpr_e("%s: skip processing any more packets\n", __func__);
					goto exit;
				}
			}
			pkt += packet->size;
		}
	}

exit:
	return rc;
}

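/*
 * Walk all packets of a session response header. Session error packets are
 * reported immediately; the rest are dispatched by type range (error, info,
 * property, command). If any buffer-done packet (HFI_CMD_BUFFER) was handled,
 * the matching driver buffers are dequeued at the end.
 */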
static int __handle_session_response(struct msm_vidc_inst *inst,
		struct hfi_header *hdr)
{
	int rc = 0;
	struct hfi_packet *packet;
	u8 *pkt, *start_pkt;
	bool dequeue = false;
	int i, j;
	static const struct msm_vidc_inst_hfi_range be[] = {
		{HFI_SESSION_ERROR_BEGIN, HFI_SESSION_ERROR_END, handle_session_error },
		{HFI_INFORMATION_BEGIN,   HFI_INFORMATION_END,   handle_session_info },
		{HFI_PROP_BEGIN,          HFI_PROP_END,          handle_session_property },
		{HFI_CMD_BEGIN,           HFI_CMD_END,           handle_session_command },
	};

	memset(&inst->hfi_frame_info, 0, sizeof(struct msm_vidc_hfi_frame_info));
	start_pkt = (u8 *)((u8 *)hdr + sizeof(struct hfi_header));
	for (i = 0; i < ARRAY_SIZE(be); i++) {
		pkt = start_pkt;
		for (j = 0; j < hdr->num_packets; j++) {
			packet = (struct hfi_packet *)pkt;
			/* handle session error */
			if (packet->flags & HFI_FW_FLAGS_SESSION_ERROR) {
				i_vpr_e(inst, "%s: received session error %#x\n",
					__func__, packet->type);
				handle_session_error(inst, packet);
			}
			if (in_range(be[i], packet->type)) {
				dequeue |= (packet->type == HFI_CMD_BUFFER);
				rc = be[i].handle(inst, packet);
				if (rc)
					msm_vidc_change_state(inst, MSM_VIDC_ERROR, __func__);
			}
			pkt += packet->size;
		}
	}

	if (dequeue) {
		rc = handle_dequeue_buffers(inst);
		if (rc)
			return rc;
	}
	memset(&inst->hfi_frame_info, 0, sizeof(struct msm_vidc_hfi_frame_info));

	return rc;
}

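/*
 * Top-level handler for a per-session response header: look up the instance
 * from the session id, pre-scan for an input port settings change
 * (HFI_CMD_SETTINGS_CHANGE on HFI_PORT_BITSTREAM) so the decoder
 * subscription parameters can be initialised first, then process all packets
 * under the instance lock.
 */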
static int handle_session_response(struct msm_vidc_core *core,
		struct hfi_header *hdr)
{
	struct msm_vidc_inst *inst;
	struct hfi_packet *packet;
	u8 *pkt;
	int i, rc = 0;
	bool found_ipsc = false;

	if (!core || !hdr) {
		d_vpr_e("%s: Invalid params\n", __func__);
		return -EINVAL;
	}

	inst = get_inst(core, hdr->session_id);
	if (!inst) {
		d_vpr_e("%s: Invalid inst\n", __func__);
		return -EINVAL;
	}

	inst_lock(inst, __func__);
	/* search for cmd settings change pkt */
	pkt = (u8 *)((u8 *)hdr + sizeof(struct hfi_header));
	for (i = 0; i < hdr->num_packets; i++) {
		packet = (struct hfi_packet *)pkt;
		if (packet->type == HFI_CMD_SETTINGS_CHANGE) {
			if (packet->port == HFI_PORT_BITSTREAM) {
				found_ipsc = true;
				break;
			}
		}
		pkt += packet->size;
	}

	/* if ipsc packet is found, initialise subsc_params */
	if (found_ipsc)
		msm_vdec_init_input_subcr_params(inst);

	rc = __handle_session_response(inst, hdr);
	if (rc)
		goto exit;

exit:
	inst_unlock(inst, __func__);
	put_inst(inst);
	return rc;
}

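/*
 * Entry point for all firmware responses: validate the header, then route
 * it either to the system handler (session_id == 0) or to the session
 * handler. Header validation failure is treated as a system error.
 */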
int handle_response(struct msm_vidc_core *core, void *response)
{
	struct hfi_header *hdr;
	int rc = 0;

	if (!core || !response) {
		d_vpr_e("%s: invalid params\n", __func__);
		return -EINVAL;
	}

	hdr = (struct hfi_header *)response;
	rc = validate_hdr_packet(core, hdr, __func__);
	if (rc) {
		d_vpr_e("%s: hdr pkt validation failed\n", __func__);
		return handle_system_error(core, NULL);
	}

	if (!hdr->session_id)
		return handle_system_response(core, hdr);
	else
		return handle_session_response(core, hdr);

	return 0;
}