msm_cvp.c 38 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
  4. */
  5. #include "msm_cvp.h"
  6. #include "cvp_hfi.h"
  7. #include "cvp_core_hfi.h"
  8. #include "msm_cvp_buf.h"
/*
 * struct cvp_power_level - aggregated power vote for one session class
 * (index 0 = non-realtime, index 1 = realtime in aggregate_power_update()).
 * @core_sum:    summed core clock vote across sessions.
 * @op_core_sum: operational core clock vote (max across sessions).
 * @bw_sum:      summed DDR bandwidth vote across sessions.
 */
struct cvp_power_level {
	unsigned long core_sum;
	unsigned long op_core_sum;
	unsigned long bw_sum;
};
  14. static int msm_cvp_get_session_info(struct msm_cvp_inst *inst,
  15. struct cvp_kmd_session_info *session)
  16. {
  17. int rc = 0;
  18. struct msm_cvp_inst *s;
  19. if (!inst || !inst->core || !session) {
  20. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  21. return -EINVAL;
  22. }
  23. s = cvp_get_inst_validate(inst->core, inst);
  24. if (!s)
  25. return -ECONNRESET;
  26. s->cur_cmd_type = CVP_KMD_GET_SESSION_INFO;
  27. session->session_id = hash32_ptr(inst->session);
  28. dprintk(CVP_SESS, "%s: id 0x%x\n", __func__, session->session_id);
  29. s->cur_cmd_type = 0;
  30. cvp_put_inst(s);
  31. return rc;
  32. }
/*
 * cvp_msg_pending() - wait_event predicate: try to dequeue a session message.
 *
 * With @ktid == NULL the head of @sq->msgs is dequeued; otherwise only the
 * message whose pkt.client_data.kdata matches *@ktid is dequeued.
 *
 * Return: true when the caller should stop waiting — either a message was
 * dequeued (*@msg != NULL) or the queue is no longer active (*@msg == NULL);
 * false when no matching message is available yet.
 */
static bool cvp_msg_pending(struct cvp_session_queue *sq,
		struct cvp_session_msg **msg, u64 *ktid)
{
	struct cvp_session_msg *mptr, *dummy;
	bool result = false;

	mptr = NULL;
	spin_lock(&sq->lock);
	if (sq->state != QUEUE_ACTIVE) {
		/* The session is being deleted */
		spin_unlock(&sq->lock);
		*msg = NULL;
		return true;
	}
	result = list_empty(&sq->msgs);
	if (!result) {
		if (!ktid) {
			/* Unfiltered: take the oldest message */
			mptr =
				list_first_entry(&sq->msgs, struct cvp_session_msg,
					node);
			list_del_init(&mptr->node);
			sq->msg_count--;
		} else {
			/*
			 * Filtered: 'result' doubles as "not found" until a
			 * message with the matching transaction id appears.
			 */
			result = true;
			list_for_each_entry_safe(mptr, dummy, &sq->msgs, node) {
				if (*ktid == mptr->pkt.client_data.kdata) {
					list_del_init(&mptr->node);
					sq->msg_count--;
					result = false;
					break;
				}
			}
			if (result)
				mptr = NULL;
		}
	}
	spin_unlock(&sq->lock);
	*msg = mptr;
	return !result;
}
  72. static int cvp_wait_process_message(struct msm_cvp_inst *inst,
  73. struct cvp_session_queue *sq, u64 *ktid,
  74. unsigned long timeout,
  75. struct cvp_kmd_hfi_packet *out)
  76. {
  77. struct cvp_session_msg *msg = NULL;
  78. struct cvp_hfi_msg_session_hdr *hdr;
  79. int rc = 0;
  80. if (wait_event_timeout(sq->wq,
  81. cvp_msg_pending(sq, &msg, ktid), timeout) == 0) {
  82. dprintk(CVP_WARN, "session queue wait timeout\n");
  83. rc = -ETIMEDOUT;
  84. goto exit;
  85. }
  86. if (msg == NULL) {
  87. dprintk(CVP_WARN, "%s: queue state %d, msg cnt %d\n", __func__,
  88. sq->state, sq->msg_count);
  89. if (inst->state >= MSM_CVP_CLOSE_DONE ||
  90. sq->state != QUEUE_ACTIVE) {
  91. rc = -ECONNRESET;
  92. goto exit;
  93. }
  94. msm_cvp_comm_kill_session(inst);
  95. goto exit;
  96. }
  97. if (out)
  98. memcpy(out, &msg->pkt, sizeof(struct cvp_hfi_msg_session_hdr));
  99. kmem_cache_free(cvp_driver->msg_cache, msg);
  100. hdr = (struct cvp_hfi_msg_session_hdr *)out;
  101. msm_cvp_unmap_frame(inst, hdr->client_data.kdata);
  102. exit:
  103. return rc;
  104. }
  105. static int msm_cvp_session_receive_hfi(struct msm_cvp_inst *inst,
  106. struct cvp_kmd_hfi_packet *out_pkt)
  107. {
  108. unsigned long wait_time;
  109. struct cvp_session_queue *sq;
  110. struct msm_cvp_inst *s;
  111. int rc = 0;
  112. if (!inst) {
  113. dprintk(CVP_ERR, "%s invalid session\n", __func__);
  114. return -EINVAL;
  115. }
  116. s = cvp_get_inst_validate(inst->core, inst);
  117. if (!s)
  118. return -ECONNRESET;
  119. s->cur_cmd_type = CVP_KMD_RECEIVE_MSG_PKT;
  120. wait_time = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
  121. sq = &inst->session_queue;
  122. rc = cvp_wait_process_message(inst, sq, NULL, wait_time, out_pkt);
  123. s->cur_cmd_type = 0;
  124. cvp_put_inst(inst);
  125. return rc;
  126. }
  127. static int msm_cvp_session_process_hfi(
  128. struct msm_cvp_inst *inst,
  129. struct cvp_kmd_hfi_packet *in_pkt,
  130. unsigned int in_offset,
  131. unsigned int in_buf_num)
  132. {
  133. int pkt_idx, pkt_type, rc = 0;
  134. struct cvp_hfi_device *hdev;
  135. unsigned int offset, buf_num, signal;
  136. struct cvp_session_queue *sq;
  137. struct msm_cvp_inst *s;
  138. if (!inst || !inst->core || !in_pkt) {
  139. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  140. return -EINVAL;
  141. }
  142. s = cvp_get_inst_validate(inst->core, inst);
  143. if (!s)
  144. return -ECONNRESET;
  145. inst->cur_cmd_type = CVP_KMD_SEND_CMD_PKT;
  146. hdev = inst->core->device;
  147. pkt_idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)in_pkt);
  148. if (pkt_idx < 0) {
  149. dprintk(CVP_ERR, "%s incorrect packet %d, %x\n", __func__,
  150. in_pkt->pkt_data[0],
  151. in_pkt->pkt_data[1]);
  152. offset = in_offset;
  153. buf_num = in_buf_num;
  154. signal = HAL_NO_RESP;
  155. } else {
  156. offset = cvp_hfi_defs[pkt_idx].buf_offset;
  157. buf_num = cvp_hfi_defs[pkt_idx].buf_num;
  158. signal = cvp_hfi_defs[pkt_idx].resp;
  159. }
  160. if (signal == HAL_NO_RESP) {
  161. /* Frame packets are not allowed before session starts*/
  162. sq = &inst->session_queue;
  163. spin_lock(&sq->lock);
  164. if (sq->state != QUEUE_ACTIVE) {
  165. spin_unlock(&sq->lock);
  166. dprintk(CVP_ERR, "%s: invalid queue state\n", __func__);
  167. rc = -EINVAL;
  168. goto exit;
  169. }
  170. spin_unlock(&sq->lock);
  171. }
  172. if (in_offset && in_buf_num) {
  173. offset = in_offset;
  174. buf_num = in_buf_num;
  175. }
  176. if (!is_buf_param_valid(buf_num, offset)) {
  177. dprintk(CVP_ERR, "Incorrect buffer num and offset in cmd\n");
  178. return -EINVAL;
  179. }
  180. pkt_type = in_pkt->pkt_data[1];
  181. if (pkt_type == HFI_CMD_SESSION_CVP_SET_PERSIST_BUFFERS ||
  182. pkt_type == HFI_CMD_SESSION_CVP_SET_MODEL_BUFFERS)
  183. rc = msm_cvp_map_user_persist(inst, in_pkt, offset, buf_num);
  184. else if (pkt_type == HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS)
  185. rc = msm_cvp_mark_user_persist(inst, in_pkt, offset, buf_num);
  186. else
  187. rc = msm_cvp_map_frame(inst, in_pkt, offset, buf_num);
  188. if (rc)
  189. goto exit;
  190. rc = call_hfi_op(hdev, session_send, (void *)inst->session, in_pkt);
  191. if (rc) {
  192. dprintk(CVP_ERR,
  193. "%s: Failed in call_hfi_op %d, %x\n",
  194. __func__, in_pkt->pkt_data[0], in_pkt->pkt_data[1]);
  195. goto exit;
  196. }
  197. if (signal != HAL_NO_RESP) {
  198. rc = wait_for_sess_signal_receipt(inst, signal);
  199. if (rc) {
  200. dprintk(CVP_ERR,
  201. "%s: wait for signal failed, rc %d %d, %x %d\n",
  202. __func__, rc,
  203. in_pkt->pkt_data[0],
  204. in_pkt->pkt_data[1],
  205. signal);
  206. goto exit;
  207. }
  208. if (pkt_type == HFI_CMD_SESSION_CVP_RELEASE_PERSIST_BUFFERS)
  209. rc = msm_cvp_unmap_user_persist(inst, in_pkt,
  210. offset, buf_num);
  211. }
  212. exit:
  213. inst->cur_cmd_type = 0;
  214. cvp_put_inst(inst);
  215. return rc;
  216. }
  217. static bool cvp_fence_wait(struct cvp_fence_queue *q,
  218. struct cvp_fence_command **fence,
  219. enum queue_state *state)
  220. {
  221. struct cvp_fence_command *f;
  222. *fence = NULL;
  223. mutex_lock(&q->lock);
  224. *state = q->state;
  225. if (*state != QUEUE_ACTIVE) {
  226. mutex_unlock(&q->lock);
  227. return true;
  228. }
  229. if (list_empty(&q->wait_list)) {
  230. mutex_unlock(&q->lock);
  231. return false;
  232. }
  233. f = list_first_entry(&q->wait_list, struct cvp_fence_command, list);
  234. list_del_init(&f->list);
  235. list_add_tail(&q->sched_list, &f->list);
  236. mutex_unlock(&q->lock);
  237. *fence = f;
  238. return true;
  239. }
/*
 * cvp_fence_proc() - execute one fenced HFI command.
 *
 * Waits on the command's input synx fences, submits @pkt to firmware,
 * waits for the matching response on the fence session queue, and finally
 * signals the output synx fences with a state derived from the HFI error
 * code in the response.
 *
 * Return: result of signalling the output synx fences.
 */
static int cvp_fence_proc(struct msm_cvp_inst *inst,
	struct cvp_fence_command *fc,
	struct cvp_hfi_cmd_session_hdr *pkt)
{
	int rc = 0;
	unsigned long timeout;
	u64 ktid;
	int synx_state = SYNX_STATE_SIGNALED_SUCCESS;
	struct cvp_hfi_device *hdev;
	struct cvp_session_queue *sq;
	u32 hfi_err = HFI_ERR_NONE;
	struct cvp_hfi_msg_session_hdr *hdr;

	dprintk(CVP_SYNX, "%s %s\n", current->comm, __func__);

	hdev = inst->core->device;
	sq = &inst->session_queue_fence;
	ktid = pkt->client_data.kdata;

	/* Input fence failed/cancelled: release the frame, skip submission */
	if (cvp_synx_ops(inst, CVP_INPUT_SYNX, fc, &synx_state)) {
		msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
		goto exit;
	}

	rc = call_hfi_op(hdev, session_send, (void *)inst->session,
			(struct cvp_kmd_hfi_packet *)pkt);
	if (rc) {
		dprintk(CVP_ERR, "%s %s: Failed in call_hfi_op %d, %x\n",
			current->comm, __func__, pkt->size, pkt->packet_type);
		synx_state = SYNX_STATE_SIGNALED_ERROR;
		goto exit;
	}

	timeout = msecs_to_jiffies(CVP_MAX_WAIT_TIME);
	/* Response is written back over @pkt; ktid selects this transaction */
	rc = cvp_wait_process_message(inst, sq, &ktid, timeout,
			(struct cvp_kmd_hfi_packet *)pkt);
	hdr = (struct cvp_hfi_msg_session_hdr *)pkt;
	hfi_err = hdr->error_type;
	if (rc) {
		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message rc %d\n",
			current->comm, __func__, rc);
		synx_state = SYNX_STATE_SIGNALED_ERROR;
		goto exit;
	}

	/* Map the firmware error code onto the output synx signal state */
	if (hfi_err == HFI_ERR_SESSION_FLUSHED) {
		dprintk(CVP_SYNX, "%s %s: cvp_wait_process_message flushed\n",
			current->comm, __func__);
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
	} else if (hfi_err == HFI_ERR_SESSION_STREAM_CORRUPT) {
		dprintk(CVP_WARN, "%s %s: cvp_wait_process_msg non-fatal %d\n",
			current->comm, __func__, hfi_err);
		synx_state = SYNX_STATE_SIGNALED_SUCCESS;
	} else if (hfi_err != HFI_ERR_NONE) {
		dprintk(CVP_ERR, "%s %s: cvp_wait_process_message hfi err %d\n",
			current->comm, __func__, hfi_err);
		synx_state = SYNX_STATE_SIGNALED_CANCEL;
	}

exit:
	rc = cvp_synx_ops(inst, CVP_OUTPUT_SYNX, fc, &synx_state);
	return rc;
}
  296. static int cvp_alloc_fence_data(struct cvp_fence_command **f, u32 size)
  297. {
  298. struct cvp_fence_command *fcmd;
  299. fcmd = kzalloc(sizeof(struct cvp_fence_command), GFP_KERNEL);
  300. if (!fcmd)
  301. return -ENOMEM;
  302. fcmd->pkt = kzalloc(size, GFP_KERNEL);
  303. if (!fcmd->pkt) {
  304. kfree(fcmd);
  305. return -ENOMEM;
  306. }
  307. *f = fcmd;
  308. return 0;
  309. }
  310. static void cvp_free_fence_data(struct cvp_fence_command *f)
  311. {
  312. kfree(f->pkt);
  313. f->pkt = NULL;
  314. kfree(f);
  315. f = NULL;
  316. }
/*
 * cvp_fence_thread() - kthread body that drains the instance's fence
 * command queue.
 *
 * Loops: wait for a command via cvp_fence_wait(), run it through
 * cvp_fence_proc(), release its synx handles, unlink it from the
 * scheduled list and free it. Exits when the queue leaves the active
 * state; the instance reference taken by the creator is dropped here.
 */
static int cvp_fence_thread(void *data)
{
	int rc = 0;
	struct msm_cvp_inst *inst;
	struct cvp_fence_queue *q;
	enum queue_state state;
	struct cvp_fence_command *f;
	struct cvp_hfi_cmd_session_hdr *pkt;
	u32 *synx;
	u64 ktid;

	dprintk(CVP_SYNX, "Enter %s\n", current->comm);

	inst = (struct msm_cvp_inst *)data;
	if (!inst || !inst->core || !inst->core->device) {
		dprintk(CVP_ERR, "%s invalid inst %pK\n", current->comm, inst);
		rc = -EINVAL;
		goto exit;
	}

	q = &inst->fence_cmd_queue;

wait:
	dprintk(CVP_SYNX, "%s starts wait\n", current->comm);

	f = NULL;
	wait_event_interruptible(q->wq, cvp_fence_wait(q, &f, &state));
	if (state != QUEUE_ACTIVE)
		goto exit;

	if (!f)
		goto wait;

	pkt = f->pkt;
	synx = (u32 *)f->synx;
	/* Strip the fence marker bit to recover the transaction id */
	ktid = pkt->client_data.kdata & (FENCE_BIT - 1);

	dprintk(CVP_SYNX, "%s pkt type %d on ktid %llu frameID %llu\n",
		current->comm, pkt->packet_type, ktid, f->frame_id);

	rc = cvp_fence_proc(inst, f, pkt);

	mutex_lock(&q->lock);
	cvp_release_synx(inst, f);
	list_del_init(&f->list);
	mutex_unlock(&q->lock);

	dprintk(CVP_SYNX, "%s done with %d ktid %llu frameID %llu rc %d\n",
		current->comm, pkt->packet_type, ktid, f->frame_id, rc);

	cvp_free_fence_data(f);

	goto wait;

exit:
	dprintk(CVP_SYNX, "%s exit\n", current->comm);
	cvp_put_inst(inst);
	do_exit(rc);
}
  362. static int msm_cvp_session_process_hfi_fence(struct msm_cvp_inst *inst,
  363. struct cvp_kmd_arg *arg)
  364. {
  365. int rc = 0;
  366. int idx;
  367. struct cvp_kmd_hfi_fence_packet *fence_pkt;
  368. struct cvp_kmd_hfi_synx_packet *synx_pkt;
  369. struct cvp_kmd_fence_ctrl *kfc;
  370. struct cvp_hfi_cmd_session_hdr *pkt;
  371. unsigned int offset, buf_num, in_offset, in_buf_num;
  372. struct msm_cvp_inst *s;
  373. struct cvp_fence_command *f;
  374. struct cvp_fence_queue *q;
  375. u32 *fence;
  376. enum op_mode mode;
  377. if (!inst || !inst->core || !arg || !inst->core->device) {
  378. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  379. return -EINVAL;
  380. }
  381. s = cvp_get_inst_validate(inst->core, inst);
  382. if (!s)
  383. return -ECONNRESET;
  384. q = &inst->fence_cmd_queue;
  385. mutex_lock(&q->lock);
  386. mode = q->mode;
  387. mutex_unlock(&q->lock);
  388. if (mode == OP_DRAINING) {
  389. dprintk(CVP_SYNX, "%s: flush in progress\n", __func__);
  390. rc = -EBUSY;
  391. goto exit;
  392. }
  393. in_offset = arg->buf_offset;
  394. in_buf_num = arg->buf_num;
  395. fence_pkt = &arg->data.hfi_fence_pkt;
  396. pkt = (struct cvp_hfi_cmd_session_hdr *)&fence_pkt->pkt_data;
  397. idx = get_pkt_index((struct cvp_hal_session_cmd_pkt *)pkt);
  398. if (idx < 0 || pkt->size > MAX_HFI_FENCE_OFFSET) {
  399. dprintk(CVP_ERR, "%s incorrect packet %d %#x\n", __func__,
  400. pkt->size, pkt->packet_type);
  401. goto exit;
  402. }
  403. if (in_offset && in_buf_num) {
  404. offset = in_offset;
  405. buf_num = in_buf_num;
  406. } else {
  407. offset = cvp_hfi_defs[idx].buf_offset;
  408. buf_num = cvp_hfi_defs[idx].buf_num;
  409. }
  410. if (!is_buf_param_valid(buf_num, offset)) {
  411. dprintk(CVP_ERR, "Incorrect buf num and offset in cmd\n");
  412. goto exit;
  413. }
  414. rc = msm_cvp_map_frame(inst, (struct cvp_kmd_hfi_packet *)pkt, offset,
  415. buf_num);
  416. if (rc)
  417. goto exit;
  418. rc = cvp_alloc_fence_data(&f, pkt->size);
  419. if (rc)
  420. goto exit;
  421. f->type = cvp_hfi_defs[idx].type;
  422. f->mode = OP_NORMAL;
  423. synx_pkt = &arg->data.hfi_synx_pkt;
  424. if (synx_pkt->fence_data[0] != 0xFEEDFACE) {
  425. dprintk(CVP_ERR, "%s deprecated synx path\n", __func__);
  426. cvp_free_fence_data(f);
  427. msm_cvp_unmap_frame(inst, pkt->client_data.kdata);
  428. goto exit;
  429. } else {
  430. kfc = &synx_pkt->fc;
  431. fence = (u32 *)&kfc->fences;
  432. f->frame_id = kfc->frame_id;
  433. f->signature = 0xFEEDFACE;
  434. f->num_fences = kfc->num_fences;
  435. f->output_index = kfc->output_index;
  436. }
  437. dprintk(CVP_SYNX, "%s: frameID %llu ktid %llu\n",
  438. __func__, f->frame_id, pkt->client_data.kdata);
  439. memcpy(f->pkt, pkt, pkt->size);
  440. f->pkt->client_data.kdata |= FENCE_BIT;
  441. rc = cvp_import_synx(inst, f, fence);
  442. if (rc) {
  443. kfree(f);
  444. goto exit;
  445. }
  446. mutex_lock(&q->lock);
  447. list_add_tail(&f->list, &inst->fence_cmd_queue.wait_list);
  448. mutex_unlock(&q->lock);
  449. wake_up(&inst->fence_cmd_queue.wq);
  450. exit:
  451. cvp_put_inst(s);
  452. return rc;
  453. }
  454. static inline int div_by_1dot5(unsigned int a)
  455. {
  456. unsigned long i = a << 1;
  457. return (unsigned int) i/3;
  458. }
  459. static inline int max_3(unsigned int a, unsigned int b, unsigned int c)
  460. {
  461. return (a >= b) ? ((a >= c) ? a : c) : ((b >= c) ? b : c);
  462. }
  463. static bool is_subblock_profile_existed(struct msm_cvp_inst *inst)
  464. {
  465. return (inst->prop.od_cycles ||
  466. inst->prop.mpu_cycles ||
  467. inst->prop.fdu_cycles ||
  468. inst->prop.ica_cycles);
  469. }
  470. static void aggregate_power_update(struct msm_cvp_core *core,
  471. struct cvp_power_level *nrt_pwr,
  472. struct cvp_power_level *rt_pwr,
  473. unsigned int max_clk_rate)
  474. {
  475. struct msm_cvp_inst *inst;
  476. int i;
  477. unsigned long fdu_sum[2] = {0}, od_sum[2] = {0}, mpu_sum[2] = {0};
  478. unsigned long ica_sum[2] = {0}, fw_sum[2] = {0};
  479. unsigned long op_fdu_max[2] = {0}, op_od_max[2] = {0};
  480. unsigned long op_mpu_max[2] = {0}, op_ica_max[2] = {0};
  481. unsigned long op_fw_max[2] = {0}, bw_sum[2] = {0}, op_bw_max[2] = {0};
  482. list_for_each_entry(inst, &core->instances, list) {
  483. if (inst->state == MSM_CVP_CORE_INVALID ||
  484. inst->state == MSM_CVP_CORE_UNINIT ||
  485. !is_subblock_profile_existed(inst))
  486. continue;
  487. if (inst->prop.priority <= CVP_RT_PRIO_THRESHOLD) {
  488. /* Non-realtime session use index 0 */
  489. i = 0;
  490. } else {
  491. i = 1;
  492. }
  493. dprintk(CVP_PROF, "pwrUpdate %pK fdu %u od %u mpu %u ica %u\n",
  494. inst->prop.fdu_cycles,
  495. inst->prop.od_cycles,
  496. inst->prop.mpu_cycles,
  497. inst->prop.ica_cycles);
  498. dprintk(CVP_PROF, "pwrUpdate fw %u fdu_o %u od_o %u mpu_o %u\n",
  499. inst->prop.fw_cycles,
  500. inst->prop.fdu_op_cycles,
  501. inst->prop.od_op_cycles,
  502. inst->prop.mpu_op_cycles);
  503. dprintk(CVP_PROF, "pwrUpdate ica_o %u fw_o %u bw %u bw_o %u\n",
  504. inst->prop.ica_op_cycles,
  505. inst->prop.fw_op_cycles,
  506. inst->prop.ddr_bw,
  507. inst->prop.ddr_op_bw);
  508. fdu_sum[i] += inst->prop.fdu_cycles;
  509. od_sum[i] += inst->prop.od_cycles;
  510. mpu_sum[i] += inst->prop.mpu_cycles;
  511. ica_sum[i] += inst->prop.ica_cycles;
  512. fw_sum[i] += inst->prop.fw_cycles;
  513. op_fdu_max[i] =
  514. (op_fdu_max[i] >= inst->prop.fdu_op_cycles) ?
  515. op_fdu_max[i] : inst->prop.fdu_op_cycles;
  516. op_od_max[i] =
  517. (op_od_max[i] >= inst->prop.od_op_cycles) ?
  518. op_od_max[i] : inst->prop.od_op_cycles;
  519. op_mpu_max[i] =
  520. (op_mpu_max[i] >= inst->prop.mpu_op_cycles) ?
  521. op_mpu_max[i] : inst->prop.mpu_op_cycles;
  522. op_ica_max[i] =
  523. (op_ica_max[i] >= inst->prop.ica_op_cycles) ?
  524. op_ica_max[i] : inst->prop.ica_op_cycles;
  525. op_fw_max[i] =
  526. (op_fw_max[i] >= inst->prop.fw_op_cycles) ?
  527. op_fw_max[i] : inst->prop.fw_op_cycles;
  528. bw_sum[i] += inst->prop.ddr_bw;
  529. op_bw_max[i] =
  530. (op_bw_max[i] >= inst->prop.ddr_op_bw) ?
  531. op_bw_max[i] : inst->prop.ddr_op_bw;
  532. }
  533. for (i = 0; i < 2; i++) {
  534. fdu_sum[i] = max_3(fdu_sum[i], od_sum[i], mpu_sum[i]);
  535. fdu_sum[i] = max_3(fdu_sum[i], ica_sum[i], fw_sum[i]);
  536. op_fdu_max[i] = max_3(op_fdu_max[i], op_od_max[i],
  537. op_mpu_max[i]);
  538. op_fdu_max[i] = max_3(op_fdu_max[i],
  539. op_ica_max[i], op_fw_max[i]);
  540. op_fdu_max[i] =
  541. (op_fdu_max[i] > max_clk_rate) ?
  542. max_clk_rate : op_fdu_max[i];
  543. bw_sum[i] = (bw_sum[i] >= op_bw_max[i]) ?
  544. bw_sum[i] : op_bw_max[i];
  545. }
  546. nrt_pwr->core_sum += fdu_sum[0];
  547. nrt_pwr->op_core_sum = (nrt_pwr->op_core_sum >= op_fdu_max[0]) ?
  548. nrt_pwr->op_core_sum : op_fdu_max[0];
  549. nrt_pwr->bw_sum += bw_sum[0];
  550. rt_pwr->core_sum += fdu_sum[1];
  551. rt_pwr->op_core_sum = (rt_pwr->op_core_sum >= op_fdu_max[1]) ?
  552. rt_pwr->op_core_sum : op_fdu_max[1];
  553. rt_pwr->bw_sum += bw_sum[1];
  554. }
  555. /**
  556. * adjust_bw_freqs(): calculate CVP clock freq and bw required to sustain
  557. * required use case.
  558. * Bandwidth vote will be best-effort, not returning error if the request
  559. * b/w exceeds max limit.
  560. * Clock vote from non-realtime sessions will be best effort, not returning
  561. * error if the aggregated session clock request exceeds max limit.
  562. * Clock vote from realtime session will be hard request. If aggregated
  563. * session clock request exceeds max limit, the function will return
  564. * error.
  565. */
  566. static int adjust_bw_freqs(void)
  567. {
  568. struct msm_cvp_core *core;
  569. struct iris_hfi_device *hdev;
  570. struct bus_info *bus;
  571. struct clock_set *clocks;
  572. struct clock_info *cl;
  573. struct allowed_clock_rates_table *tbl = NULL;
  574. unsigned int tbl_size;
  575. unsigned int cvp_min_rate, cvp_max_rate, max_bw, min_bw;
  576. struct cvp_power_level rt_pwr = {0}, nrt_pwr = {0};
  577. unsigned long tmp, core_sum, op_core_sum, bw_sum;
  578. int i, rc = 0;
  579. core = list_first_entry(&cvp_driver->cores, struct msm_cvp_core, list);
  580. hdev = core->device->hfi_device_data;
  581. clocks = &core->resources.clock_set;
  582. cl = &clocks->clock_tbl[clocks->count - 1];
  583. tbl = core->resources.allowed_clks_tbl;
  584. tbl_size = core->resources.allowed_clks_tbl_size;
  585. cvp_min_rate = tbl[0].clock_rate;
  586. cvp_max_rate = tbl[tbl_size - 1].clock_rate;
  587. bus = &core->resources.bus_set.bus_tbl[1];
  588. max_bw = bus->range[1];
  589. min_bw = max_bw/10;
  590. aggregate_power_update(core, &nrt_pwr, &rt_pwr, cvp_max_rate);
  591. dprintk(CVP_PROF, "PwrUpdate nrt %u %u rt %u %u\n",
  592. nrt_pwr.core_sum, nrt_pwr.op_core_sum,
  593. rt_pwr.core_sum, rt_pwr.op_core_sum);
  594. if (rt_pwr.core_sum > cvp_max_rate) {
  595. dprintk(CVP_WARN, "%s clk vote out of range %lld\n",
  596. __func__, rt_pwr.core_sum);
  597. return -ENOTSUPP;
  598. }
  599. core_sum = rt_pwr.core_sum + nrt_pwr.core_sum;
  600. op_core_sum = (rt_pwr.op_core_sum >= nrt_pwr.op_core_sum) ?
  601. rt_pwr.op_core_sum : nrt_pwr.op_core_sum;
  602. core_sum = (core_sum >= op_core_sum) ?
  603. core_sum : op_core_sum;
  604. if (core_sum > cvp_max_rate) {
  605. core_sum = cvp_max_rate;
  606. } else if (core_sum < cvp_min_rate) {
  607. core_sum = cvp_min_rate;
  608. } else {
  609. for (i = 1; i < tbl_size; i++)
  610. if (core_sum <= tbl[i].clock_rate)
  611. break;
  612. core_sum = tbl[i].clock_rate;
  613. }
  614. bw_sum = rt_pwr.bw_sum + nrt_pwr.bw_sum;
  615. bw_sum = bw_sum >> 10;
  616. bw_sum = (bw_sum > max_bw) ? max_bw : bw_sum;
  617. bw_sum = (bw_sum < min_bw) ? min_bw : bw_sum;
  618. dprintk(CVP_PROF, "%s %lld %lld\n", __func__,
  619. core_sum, bw_sum);
  620. if (!cl->has_scaling) {
  621. dprintk(CVP_ERR, "Cannot scale CVP clock\n");
  622. return -EINVAL;
  623. }
  624. tmp = core->curr_freq;
  625. core->curr_freq = core_sum;
  626. rc = msm_cvp_set_clocks(core);
  627. if (rc) {
  628. dprintk(CVP_ERR,
  629. "Failed to set clock rate %u %s: %d %s\n",
  630. core_sum, cl->name, rc, __func__);
  631. core->curr_freq = tmp;
  632. return rc;
  633. }
  634. hdev->clk_freq = core->curr_freq;
  635. rc = icc_set_bw(bus->client, bw_sum, 0);
  636. if (rc)
  637. dprintk(CVP_ERR, "Failed voting bus %s to ab %u\n",
  638. bus->name, bw_sum);
  639. return rc;
  640. }
  641. static int msm_cvp_update_power(struct msm_cvp_inst *inst)
  642. {
  643. int rc = 0;
  644. struct msm_cvp_core *core;
  645. struct msm_cvp_inst *s;
  646. if (!inst) {
  647. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  648. return -EINVAL;
  649. }
  650. s = cvp_get_inst_validate(inst->core, inst);
  651. if (!s)
  652. return -ECONNRESET;
  653. inst->cur_cmd_type = CVP_KMD_UPDATE_POWER;
  654. core = inst->core;
  655. mutex_lock(&core->lock);
  656. rc = adjust_bw_freqs();
  657. mutex_unlock(&core->lock);
  658. inst->cur_cmd_type = 0;
  659. cvp_put_inst(s);
  660. return rc;
  661. }
  662. static int msm_cvp_register_buffer(struct msm_cvp_inst *inst,
  663. struct cvp_kmd_buffer *buf)
  664. {
  665. struct cvp_hfi_device *hdev;
  666. struct cvp_hal_session *session;
  667. struct msm_cvp_inst *s;
  668. int rc = 0;
  669. if (!inst || !inst->core || !buf) {
  670. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  671. return -EINVAL;
  672. }
  673. if (!buf->index)
  674. return 0;
  675. s = cvp_get_inst_validate(inst->core, inst);
  676. if (!s)
  677. return -ECONNRESET;
  678. inst->cur_cmd_type = CVP_KMD_REGISTER_BUFFER;
  679. session = (struct cvp_hal_session *)inst->session;
  680. if (!session) {
  681. dprintk(CVP_ERR, "%s: invalid session\n", __func__);
  682. rc = -EINVAL;
  683. goto exit;
  684. }
  685. hdev = inst->core->device;
  686. print_client_buffer(CVP_HFI, "register", inst, buf);
  687. rc = msm_cvp_map_buf_dsp(inst, buf);
  688. exit:
  689. inst->cur_cmd_type = 0;
  690. cvp_put_inst(s);
  691. return rc;
  692. }
  693. static int msm_cvp_unregister_buffer(struct msm_cvp_inst *inst,
  694. struct cvp_kmd_buffer *buf)
  695. {
  696. struct msm_cvp_inst *s;
  697. int rc = 0;
  698. if (!inst || !inst->core || !buf) {
  699. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  700. return -EINVAL;
  701. }
  702. if (!buf->index)
  703. return 0;
  704. s = cvp_get_inst_validate(inst->core, inst);
  705. if (!s)
  706. return -ECONNRESET;
  707. inst->cur_cmd_type = CVP_KMD_UNREGISTER_BUFFER;
  708. print_client_buffer(CVP_HFI, "unregister", inst, buf);
  709. rc = msm_cvp_unmap_buf_dsp(inst, buf);
  710. inst->cur_cmd_type = 0;
  711. cvp_put_inst(s);
  712. return rc;
  713. }
  714. static int msm_cvp_session_create(struct msm_cvp_inst *inst)
  715. {
  716. int rc = 0;
  717. struct synx_initialization_params params;
  718. if (!inst || !inst->core)
  719. return -EINVAL;
  720. if (inst->state >= MSM_CVP_CLOSE_DONE)
  721. return -ECONNRESET;
  722. if (inst->state != MSM_CVP_CORE_INIT_DONE ||
  723. inst->state > MSM_CVP_OPEN_DONE) {
  724. dprintk(CVP_ERR,
  725. "%s Incorrect CVP state %d to create session\n",
  726. __func__, inst->state);
  727. return -EINVAL;
  728. }
  729. rc = msm_cvp_comm_try_state(inst, MSM_CVP_OPEN_DONE);
  730. if (rc) {
  731. dprintk(CVP_ERR,
  732. "Failed to move instance to open done state\n");
  733. goto fail_init;
  734. }
  735. rc = cvp_comm_set_arp_buffers(inst);
  736. if (rc) {
  737. dprintk(CVP_ERR,
  738. "Failed to set ARP buffers\n");
  739. goto fail_init;
  740. }
  741. params.name = "cvp-kernel-client";
  742. if (synx_initialize(&inst->synx_session_id, &params)) {
  743. dprintk(CVP_ERR, "%s synx_initialize failed\n", __func__);
  744. rc = -EFAULT;
  745. }
  746. fail_init:
  747. return rc;
  748. }
  749. static int session_state_check_init(struct msm_cvp_inst *inst)
  750. {
  751. mutex_lock(&inst->lock);
  752. if (inst->state == MSM_CVP_OPEN || inst->state == MSM_CVP_OPEN_DONE) {
  753. mutex_unlock(&inst->lock);
  754. return 0;
  755. }
  756. mutex_unlock(&inst->lock);
  757. return msm_cvp_session_create(inst);
  758. }
  759. static int cvp_fence_thread_start(struct msm_cvp_inst *inst)
  760. {
  761. u32 tnum = 0;
  762. u32 i = 0;
  763. int rc = 0;
  764. char tname[16];
  765. struct task_struct *thread;
  766. struct cvp_fence_queue *q;
  767. struct cvp_session_queue *sq;
  768. if (!inst->prop.fthread_nr)
  769. return 0;
  770. q = &inst->fence_cmd_queue;
  771. mutex_lock(&q->lock);
  772. q->state = QUEUE_ACTIVE;
  773. mutex_unlock(&q->lock);
  774. for (i = 0; i < inst->prop.fthread_nr; ++i) {
  775. if (!cvp_get_inst_validate(inst->core, inst)) {
  776. rc = -ECONNRESET;
  777. goto exit;
  778. }
  779. snprintf(tname, sizeof(tname), "fthread_%d", tnum++);
  780. thread = kthread_run(cvp_fence_thread, inst, tname);
  781. if (!thread) {
  782. dprintk(CVP_ERR, "%s create %s fail", __func__, tname);
  783. rc = -ECHILD;
  784. goto exit;
  785. }
  786. }
  787. sq = &inst->session_queue_fence;
  788. spin_lock(&sq->lock);
  789. sq->state = QUEUE_ACTIVE;
  790. spin_unlock(&sq->lock);
  791. exit:
  792. if (rc) {
  793. mutex_lock(&q->lock);
  794. q->state = QUEUE_STOP;
  795. mutex_unlock(&q->lock);
  796. wake_up_all(&q->wq);
  797. }
  798. return rc;
  799. }
  800. static int cvp_fence_thread_stop(struct msm_cvp_inst *inst)
  801. {
  802. struct cvp_fence_queue *q;
  803. struct cvp_session_queue *sq;
  804. if (!inst->prop.fthread_nr)
  805. return 0;
  806. q = &inst->fence_cmd_queue;
  807. mutex_lock(&q->lock);
  808. q->state = QUEUE_STOP;
  809. mutex_unlock(&q->lock);
  810. sq = &inst->session_queue_fence;
  811. spin_lock(&sq->lock);
  812. sq->state = QUEUE_STOP;
  813. spin_unlock(&sq->lock);
  814. wake_up_all(&q->wq);
  815. wake_up_all(&sq->wq);
  816. return 0;
  817. }
  818. static int msm_cvp_session_start(struct msm_cvp_inst *inst,
  819. struct cvp_kmd_arg *arg)
  820. {
  821. struct cvp_session_queue *sq;
  822. sq = &inst->session_queue;
  823. spin_lock(&sq->lock);
  824. if (sq->msg_count) {
  825. dprintk(CVP_ERR, "session start failed queue not empty%d\n",
  826. sq->msg_count);
  827. spin_unlock(&sq->lock);
  828. return -EINVAL;
  829. }
  830. sq->state = QUEUE_ACTIVE;
  831. spin_unlock(&sq->lock);
  832. return cvp_fence_thread_start(inst);
  833. }
  834. static int msm_cvp_session_stop(struct msm_cvp_inst *inst,
  835. struct cvp_kmd_arg *arg)
  836. {
  837. struct cvp_session_queue *sq;
  838. struct cvp_kmd_session_control *sc = &arg->data.session_ctrl;
  839. sq = &inst->session_queue;
  840. spin_lock(&sq->lock);
  841. if (sq->msg_count) {
  842. dprintk(CVP_ERR, "session stop incorrect: queue not empty%d\n",
  843. sq->msg_count);
  844. sc->ctrl_data[0] = sq->msg_count;
  845. spin_unlock(&sq->lock);
  846. return -EUCLEAN;
  847. }
  848. sq->state = QUEUE_STOP;
  849. pr_info(CVP_DBG_TAG "Stop session: %pK session_id = %d\n",
  850. "sess", inst, hash32_ptr(inst->session));
  851. spin_unlock(&sq->lock);
  852. wake_up_all(&inst->session_queue.wq);
  853. return cvp_fence_thread_stop(inst);
  854. }
  855. int msm_cvp_session_queue_stop(struct msm_cvp_inst *inst)
  856. {
  857. struct cvp_session_queue *sq;
  858. sq = &inst->session_queue;
  859. spin_lock(&sq->lock);
  860. if (sq->state == QUEUE_STOP) {
  861. spin_unlock(&sq->lock);
  862. return 0;
  863. }
  864. sq->state = QUEUE_STOP;
  865. dprintk(CVP_SESS, "Stop session queue: %pK session_id = %d\n",
  866. inst, hash32_ptr(inst->session));
  867. spin_unlock(&sq->lock);
  868. wake_up_all(&inst->session_queue.wq);
  869. return cvp_fence_thread_stop(inst);
  870. }
  871. static int msm_cvp_session_ctrl(struct msm_cvp_inst *inst,
  872. struct cvp_kmd_arg *arg)
  873. {
  874. struct cvp_kmd_session_control *ctrl = &arg->data.session_ctrl;
  875. int rc = 0;
  876. unsigned int ctrl_type;
  877. ctrl_type = ctrl->ctrl_type;
  878. if (!inst && ctrl_type != SESSION_CREATE) {
  879. dprintk(CVP_ERR, "%s invalid session\n", __func__);
  880. return -EINVAL;
  881. }
  882. switch (ctrl_type) {
  883. case SESSION_STOP:
  884. rc = msm_cvp_session_stop(inst, arg);
  885. break;
  886. case SESSION_START:
  887. rc = msm_cvp_session_start(inst, arg);
  888. break;
  889. case SESSION_CREATE:
  890. rc = msm_cvp_session_create(inst);
  891. case SESSION_DELETE:
  892. break;
  893. case SESSION_INFO:
  894. default:
  895. dprintk(CVP_ERR, "%s Unsupported session ctrl%d\n",
  896. __func__, ctrl->ctrl_type);
  897. rc = -EINVAL;
  898. }
  899. return rc;
  900. }
  901. static int msm_cvp_get_sysprop(struct msm_cvp_inst *inst,
  902. struct cvp_kmd_arg *arg)
  903. {
  904. struct cvp_kmd_sys_properties *props = &arg->data.sys_properties;
  905. struct cvp_hfi_device *hdev;
  906. struct iris_hfi_device *hfi;
  907. int i, rc = 0;
  908. if (!inst || !inst->core || !inst->core->device) {
  909. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  910. return -EINVAL;
  911. }
  912. hdev = inst->core->device;
  913. hfi = hdev->hfi_device_data;
  914. for (i = 0; i < props->prop_num; i++) {
  915. switch (props->prop_data[i].prop_type) {
  916. case CVP_KMD_PROP_HFI_VERSION:
  917. {
  918. props->prop_data[i].data = hfi->version;
  919. break;
  920. }
  921. default:
  922. dprintk(CVP_ERR, "unrecognized sys property %d\n",
  923. props->prop_data[i].prop_type);
  924. rc = -EFAULT;
  925. }
  926. }
  927. return rc;
  928. }
/*
 * Apply a batch of userspace-supplied session/power properties to
 * inst->prop.
 *
 * Returns 0 on success, -EINVAL on invalid instance, -E2BIG if the
 * property count exceeds MAX_KMD_PROP_NUM, -EFAULT for an unrecognized
 * property type (already-applied properties are kept).
 */
static int msm_cvp_set_sysprop(struct msm_cvp_inst *inst,
	struct cvp_kmd_arg *arg)
{
	struct cvp_kmd_sys_properties *props = &arg->data.sys_properties;
	struct cvp_kmd_sys_property *prop_array;
	struct cvp_session_prop *session_prop;
	int i, rc = 0;

	if (!inst) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* prop_num is user-controlled; bound it before indexing. */
	if (props->prop_num >= MAX_KMD_PROP_NUM) {
		dprintk(CVP_ERR, "Too many properties %d to set\n",
			props->prop_num);
		return -E2BIG;
	}

	prop_array = &arg->data.sys_properties.prop_data[0];
	session_prop = &inst->prop;

	for (i = 0; i < props->prop_num; i++) {
		switch (prop_array[i].prop_type) {
		case CVP_KMD_PROP_SESSION_TYPE:
			session_prop->type = prop_array[i].data;
			break;
		case CVP_KMD_PROP_SESSION_KERNELMASK:
			session_prop->kernel_mask = prop_array[i].data;
			break;
		case CVP_KMD_PROP_SESSION_PRIORITY:
			session_prop->priority = prop_array[i].data;
			break;
		case CVP_KMD_PROP_SESSION_SECURITY:
			session_prop->is_secure = prop_array[i].data;
			break;
		case CVP_KMD_PROP_SESSION_DSPMASK:
			session_prop->dsp_mask = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_FDU:
			session_prop->fdu_cycles = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_ICA:
			/*
			 * ICA cycle values are scaled down by 1.5 —
			 * presumably a HW-block frequency ratio; same for
			 * FW and the _OP variants below (TODO confirm).
			 */
			session_prop->ica_cycles =
				div_by_1dot5(prop_array[i].data);
			break;
		case CVP_KMD_PROP_PWR_OD:
			session_prop->od_cycles = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_MPU:
			session_prop->mpu_cycles = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_FW:
			session_prop->fw_cycles =
				div_by_1dot5(prop_array[i].data);
			break;
		case CVP_KMD_PROP_PWR_DDR:
			session_prop->ddr_bw = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_SYSCACHE:
			session_prop->ddr_cache = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_FDU_OP:
			session_prop->fdu_op_cycles = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_ICA_OP:
			session_prop->ica_op_cycles =
				div_by_1dot5(prop_array[i].data);
			break;
		case CVP_KMD_PROP_PWR_OD_OP:
			session_prop->od_op_cycles = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_MPU_OP:
			session_prop->mpu_op_cycles = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_FW_OP:
			session_prop->fw_op_cycles =
				div_by_1dot5(prop_array[i].data);
			break;
		case CVP_KMD_PROP_PWR_DDR_OP:
			session_prop->ddr_op_bw = prop_array[i].data;
			break;
		case CVP_KMD_PROP_PWR_SYSCACHE_OP:
			session_prop->ddr_op_cache = prop_array[i].data;
			break;
		default:
			dprintk(CVP_ERR,
				"unrecognized sys property to set %d\n",
				prop_array[i].prop_type);
			rc = -EFAULT;
		}
	}
	return rc;
}
  1019. static int cvp_drain_fence_cmd_queue_partial(struct msm_cvp_inst *inst)
  1020. {
  1021. unsigned long wait_time;
  1022. struct cvp_fence_queue *q;
  1023. struct cvp_fence_command *f;
  1024. int rc = 0;
  1025. int count = 0, max_count = 0;
  1026. q = &inst->fence_cmd_queue;
  1027. mutex_lock(&q->lock);
  1028. list_for_each_entry(f, &q->sched_list, list) {
  1029. if (f->mode == OP_FLUSH)
  1030. continue;
  1031. ++count;
  1032. }
  1033. list_for_each_entry(f, &q->wait_list, list) {
  1034. if (f->mode == OP_FLUSH)
  1035. continue;
  1036. ++count;
  1037. }
  1038. mutex_unlock(&q->lock);
  1039. wait_time = count * CVP_MAX_WAIT_TIME * 1000;
  1040. dprintk(CVP_SYNX, "%s: wait %d us for %d fence command\n",
  1041. __func__, wait_time, count);
  1042. count = 0;
  1043. max_count = wait_time / 100;
  1044. retry:
  1045. mutex_lock(&q->lock);
  1046. f = list_first_entry(&q->sched_list, struct cvp_fence_command, list);
  1047. /* Wait for all normal frames to finish before return */
  1048. if ((f && f->mode == OP_FLUSH) ||
  1049. (list_empty(&q->sched_list) && list_empty(&q->wait_list))) {
  1050. mutex_unlock(&q->lock);
  1051. return rc;
  1052. }
  1053. mutex_unlock(&q->lock);
  1054. usleep_range(100, 200);
  1055. ++count;
  1056. if (count < max_count) {
  1057. goto retry;
  1058. } else {
  1059. rc = -ETIMEDOUT;
  1060. dprintk(CVP_ERR, "%s: timed out!\n", __func__);
  1061. }
  1062. return rc;
  1063. }
  1064. static int cvp_drain_fence_sched_list(struct msm_cvp_inst *inst)
  1065. {
  1066. unsigned long wait_time;
  1067. struct cvp_fence_queue *q;
  1068. struct cvp_fence_command *f;
  1069. int rc = 0;
  1070. int count = 0, max_count = 0;
  1071. u64 ktid;
  1072. q = &inst->fence_cmd_queue;
  1073. mutex_lock(&q->lock);
  1074. list_for_each_entry(f, &q->sched_list, list) {
  1075. ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);
  1076. dprintk(CVP_SYNX, "%s: frame %llu is in sched_list\n",
  1077. __func__, ktid);
  1078. dprintk(CVP_SYNX, "%s: frameID %llu is in sched_list\n",
  1079. __func__, f->frame_id);
  1080. ++count;
  1081. }
  1082. mutex_unlock(&q->lock);
  1083. wait_time = count * CVP_MAX_WAIT_TIME * 1000;
  1084. dprintk(CVP_SYNX, "%s: wait %d us for %d fence command\n",
  1085. __func__, wait_time, count);
  1086. count = 0;
  1087. max_count = wait_time / 100;
  1088. retry:
  1089. mutex_lock(&q->lock);
  1090. if (list_empty(&q->sched_list)) {
  1091. mutex_unlock(&q->lock);
  1092. return rc;
  1093. }
  1094. mutex_unlock(&q->lock);
  1095. usleep_range(100, 200);
  1096. ++count;
  1097. if (count < max_count) {
  1098. goto retry;
  1099. } else {
  1100. rc = -ETIMEDOUT;
  1101. dprintk(CVP_ERR, "%s: timed out!\n", __func__);
  1102. }
  1103. return rc;
  1104. }
/*
 * Flush every outstanding fence command on @inst.
 *
 * Wait-list entries have never been submitted to firmware, so they are
 * removed here directly: frame unmapped, output synx cancelled and
 * released, command freed.  Sched-list entries are in flight; only
 * their input synx is cancelled, then firmware is told to flush and we
 * wait for the sched list to drain.
 *
 * Returns 0 on success, -EINVAL on bad params, -ECONNRESET if the
 * instance fails validation, or the drain result (e.g. -ETIMEDOUT).
 */
static int cvp_flush_all(struct msm_cvp_inst *inst)
{
	int rc = 0;
	struct msm_cvp_inst *s;
	struct cvp_fence_queue *q;
	struct cvp_fence_command *f, *d;
	struct cvp_hfi_device *hdev;
	u64 ktid;

	if (!inst || !inst->core) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* Hold a validated reference for the duration of the flush. */
	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	q = &inst->fence_cmd_queue;
	hdev = inst->core->device;

	mutex_lock(&q->lock);
	/* DRAINING stops workers from picking up new commands meanwhile. */
	q->mode = OP_DRAINING;

	/* Not yet submitted: safe to tear down completely under the lock. */
	list_for_each_entry_safe(f, d, &q->wait_list, list) {
		/* Low bits of kdata carry the kernel transaction id. */
		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

		dprintk(CVP_SESS, "%s: flush frame %llu from wait_list\n",
			__func__, ktid);
		dprintk(CVP_SESS, "%s: flush frameID %llu from wait_list\n",
			__func__, f->frame_id);

		list_del_init(&f->list);
		msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
		cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f);
		cvp_release_synx(inst, f);
		cvp_free_fence_data(f);
	}

	/* In flight: only cancel inputs; completion path frees them. */
	list_for_each_entry(f, &q->sched_list, list) {
		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

		dprintk(CVP_SESS, "%s: flush frame %llu from sched_list\n",
			__func__, ktid);
		dprintk(CVP_SESS, "%s: flush frameID %llu from sched_list\n",
			__func__, f->frame_id);
		cvp_cancel_synx(inst, CVP_INPUT_SYNX, f);
	}

	mutex_unlock(&q->lock);

	dprintk(CVP_SESS, "%s: send flush to fw\n", __func__);

	/* Send flush to FW */
	rc = call_hfi_op(hdev, session_flush, (void *)inst->session);
	if (rc) {
		/* Best effort: still drain locally even without FW help. */
		dprintk(CVP_WARN, "%s: continue flush without fw. rc %d\n",
			__func__, rc);
		goto exit;
	}

	/* Wait for FW response */
	rc = wait_for_sess_signal_receipt(inst, HAL_SESSION_FLUSH_DONE);
	if (rc)
		dprintk(CVP_WARN, "%s: wait for signal failed, rc %d\n",
			__func__, rc);

	dprintk(CVP_SESS, "%s: received flush from fw\n", __func__);

exit:
	rc = cvp_drain_fence_sched_list(inst);

	mutex_lock(&q->lock);
	q->mode = OP_NORMAL;
	mutex_unlock(&q->lock);

	cvp_put_inst(s);

	return rc;
}
  1167. static void cvp_mark_fence_command(struct msm_cvp_inst *inst, u64 frame_id)
  1168. {
  1169. int found = false;
  1170. struct cvp_fence_queue *q;
  1171. struct cvp_fence_command *f;
  1172. q = &inst->fence_cmd_queue;
  1173. list_for_each_entry(f, &q->sched_list, list) {
  1174. if (found) {
  1175. f->mode = OP_FLUSH;
  1176. continue;
  1177. }
  1178. if (f->frame_id >= frame_id) {
  1179. found = true;
  1180. f->mode = OP_FLUSH;
  1181. }
  1182. }
  1183. list_for_each_entry(f, &q->wait_list, list) {
  1184. if (found) {
  1185. f->mode = OP_FLUSH;
  1186. continue;
  1187. }
  1188. if (f->frame_id >= frame_id) {
  1189. found = true;
  1190. f->mode = OP_FLUSH;
  1191. }
  1192. }
  1193. }
/*
 * Flush fence commands from @frame_id onward, then escalate to a full
 * flush.  OP_FLUSH-marked wait-list entries (never submitted) are torn
 * down here; marked sched-list entries only get their input synx
 * cancelled.  After a partial drain, cvp_flush_all() completes the job.
 *
 * Returns the result of cvp_flush_all(), -EINVAL on bad params, or
 * -ECONNRESET if the instance fails validation.
 */
static int cvp_flush_frame(struct msm_cvp_inst *inst, u64 frame_id)
{
	int rc = 0;
	struct msm_cvp_inst *s;
	struct cvp_fence_queue *q;
	struct cvp_fence_command *f, *d;
	u64 ktid;

	if (!inst || !inst->core) {
		dprintk(CVP_ERR, "%s: invalid params\n", __func__);
		return -EINVAL;
	}

	/* Hold a validated reference for the duration of the flush. */
	s = cvp_get_inst_validate(inst->core, inst);
	if (!s)
		return -ECONNRESET;

	q = &inst->fence_cmd_queue;

	mutex_lock(&q->lock);
	/* DRAINING stops workers from picking up new commands meanwhile. */
	q->mode = OP_DRAINING;

	/* Tag everything from frame_id onward with OP_FLUSH (lock held). */
	cvp_mark_fence_command(inst, frame_id);

	/* Marked wait-list entries were never submitted: free them now. */
	list_for_each_entry_safe(f, d, &q->wait_list, list) {
		if (f->mode != OP_FLUSH)
			continue;

		/* Low bits of kdata carry the kernel transaction id. */
		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

		dprintk(CVP_SESS, "%s: flush frame %llu from wait_list\n",
			__func__, ktid);
		dprintk(CVP_SESS, "%s: flush frameID %llu from wait_list\n",
			__func__, f->frame_id);

		list_del_init(&f->list);
		msm_cvp_unmap_frame(inst, f->pkt->client_data.kdata);
		cvp_cancel_synx(inst, CVP_OUTPUT_SYNX, f);
		cvp_release_synx(inst, f);
		cvp_free_fence_data(f);
	}

	/* Marked sched-list entries are in flight: cancel inputs only. */
	list_for_each_entry(f, &q->sched_list, list) {
		if (f->mode != OP_FLUSH)
			continue;

		ktid = f->pkt->client_data.kdata & (FENCE_BIT - 1);

		dprintk(CVP_SESS, "%s: flush frame %llu from sched_list\n",
			__func__, ktid);
		dprintk(CVP_SESS, "%s: flush frameID %llu from sched_list\n",
			__func__, f->frame_id);
		cvp_cancel_synx(inst, CVP_INPUT_SYNX, f);
	}

	mutex_unlock(&q->lock);

	/* Wait for unmarked (normal) frames ahead of frame_id to finish. */
	rc = cvp_drain_fence_cmd_queue_partial(inst);
	if (rc)
		dprintk(CVP_WARN, "%s: continue flush. rc %d\n",
			__func__, rc);

	rc = cvp_flush_all(inst);

	cvp_put_inst(s);

	return rc;
}
/*
 * Top-level ioctl/syscall dispatcher for CVP kernel-mode requests.
 *
 * For any request other than session control and sysprop get/set, the
 * session is lazily created first via session_state_check_init().
 * Each case extracts the matching member of arg->data and forwards to
 * the dedicated handler.
 *
 * Returns the handler's result, -EINVAL on invalid args or bad session
 * state, -ENOTSUPP for unknown request types.
 */
int msm_cvp_handle_syscall(struct msm_cvp_inst *inst, struct cvp_kmd_arg *arg)
{
	int rc = 0;

	if (!inst || !arg) {
		dprintk(CVP_ERR, "%s: invalid args\n", __func__);
		return -EINVAL;
	}
	dprintk(CVP_HFI, "%s: arg->type = %x", __func__, arg->type);

	/* Session-control and sysprop requests may run pre-session. */
	if (arg->type != CVP_KMD_SESSION_CONTROL &&
		arg->type != CVP_KMD_SET_SYS_PROPERTY &&
		arg->type != CVP_KMD_GET_SYS_PROPERTY) {
		rc = session_state_check_init(inst);
		if (rc) {
			dprintk(CVP_ERR,
				"Incorrect session state %d for command %#x",
				inst->state, arg->type);
			return rc;
		}
	}

	switch (arg->type) {
	case CVP_KMD_GET_SESSION_INFO:
	{
		struct cvp_kmd_session_info *session =
			(struct cvp_kmd_session_info *)&arg->data.session;

		rc = msm_cvp_get_session_info(inst, session);
		break;
	}
	case CVP_KMD_UPDATE_POWER:
	{
		rc = msm_cvp_update_power(inst);
		break;
	}
	case CVP_KMD_REGISTER_BUFFER:
	{
		struct cvp_kmd_buffer *buf =
			(struct cvp_kmd_buffer *)&arg->data.regbuf;

		rc = msm_cvp_register_buffer(inst, buf);
		break;
	}
	case CVP_KMD_UNREGISTER_BUFFER:
	{
		struct cvp_kmd_buffer *buf =
			(struct cvp_kmd_buffer *)&arg->data.unregbuf;

		rc = msm_cvp_unregister_buffer(inst, buf);
		break;
	}
	case CVP_KMD_RECEIVE_MSG_PKT:
	{
		struct cvp_kmd_hfi_packet *out_pkt =
			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;

		rc = msm_cvp_session_receive_hfi(inst, out_pkt);
		break;
	}
	case CVP_KMD_SEND_CMD_PKT:
	{
		struct cvp_kmd_hfi_packet *in_pkt =
			(struct cvp_kmd_hfi_packet *)&arg->data.hfi_pkt;

		rc = msm_cvp_session_process_hfi(inst, in_pkt,
				arg->buf_offset, arg->buf_num);
		break;
	}
	case CVP_KMD_SEND_FENCE_CMD_PKT:
	{
		rc = msm_cvp_session_process_hfi_fence(inst, arg);
		break;
	}
	case CVP_KMD_SESSION_CONTROL:
		rc = msm_cvp_session_ctrl(inst, arg);
		break;
	case CVP_KMD_GET_SYS_PROPERTY:
		rc = msm_cvp_get_sysprop(inst, arg);
		break;
	case CVP_KMD_SET_SYS_PROPERTY:
		rc = msm_cvp_set_sysprop(inst, arg);
		break;
	case CVP_KMD_FLUSH_ALL:
		rc = cvp_flush_all(inst);
		break;
	case CVP_KMD_FLUSH_FRAME:
		rc = cvp_flush_frame(inst, arg->data.frame_id);
		break;
	default:
		dprintk(CVP_HFI, "%s: unknown arg type %#x\n",
				__func__, arg->type);
		rc = -ENOTSUPP;
		break;
	}
	return rc;
}
  1334. int msm_cvp_session_deinit(struct msm_cvp_inst *inst)
  1335. {
  1336. int rc = 0;
  1337. struct cvp_hal_session *session;
  1338. if (!inst || !inst->core) {
  1339. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  1340. return -EINVAL;
  1341. }
  1342. dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
  1343. inst, hash32_ptr(inst->session));
  1344. session = (struct cvp_hal_session *)inst->session;
  1345. if (!session)
  1346. return rc;
  1347. rc = msm_cvp_comm_try_state(inst, MSM_CVP_CLOSE_DONE);
  1348. if (rc)
  1349. dprintk(CVP_ERR, "%s: close failed\n", __func__);
  1350. rc = msm_cvp_session_deinit_buffers(inst);
  1351. return rc;
  1352. }
  1353. int msm_cvp_session_init(struct msm_cvp_inst *inst)
  1354. {
  1355. int rc = 0;
  1356. if (!inst) {
  1357. dprintk(CVP_ERR, "%s: invalid params\n", __func__);
  1358. return -EINVAL;
  1359. }
  1360. dprintk(CVP_SESS, "%s: inst %pK (%#x)\n", __func__,
  1361. inst, hash32_ptr(inst->session));
  1362. /* set default frequency */
  1363. inst->clk_data.core_id = 0;
  1364. inst->clk_data.min_freq = 1000;
  1365. inst->clk_data.ddr_bw = 1000;
  1366. inst->clk_data.sys_cache_bw = 1000;
  1367. inst->prop.type = HFI_SESSION_CV;
  1368. if (inst->session_type == MSM_CVP_KERNEL)
  1369. inst->prop.type = HFI_SESSION_DME;
  1370. inst->prop.kernel_mask = 0xFFFFFFFF;
  1371. inst->prop.priority = 0;
  1372. inst->prop.is_secure = 0;
  1373. inst->prop.dsp_mask = 0;
  1374. inst->prop.fthread_nr = 2;
  1375. return rc;
  1376. }