// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "hab.h"

#define CREATE_TRACE_POINTS
#include "hab_trace_os.h"

#define HAB_DEVICE_CNSTR(__name__, __id__, __num__) { \
	.name = __name__,\
	.id = __id__,\
	.pchannels = LIST_HEAD_INIT(hab_devices[__num__].pchannels),\
	.pchan_lock = __RW_LOCK_UNLOCKED(hab_devices[__num__].pchan_lock),\
	.openq_list = LIST_HEAD_INIT(hab_devices[__num__].openq_list),\
	.openlock = __SPIN_LOCK_UNLOCKED(&hab_devices[__num__].openlock)\
	}

static const char hab_info_str[] = "Change: 17280941 Revision: #81";

/*
 * The following has to match the habmm definitions. The order does not
 * matter as long as the hab config does not depend on it. When no hab
 * config is present, the default is to run as a guest VM with all
 * pchans acting as the pchan opener (FE).
 */
static struct hab_device hab_devices[] = {
	HAB_DEVICE_CNSTR(DEVICE_AUD1_NAME, MM_AUD_1, 0),
	HAB_DEVICE_CNSTR(DEVICE_AUD2_NAME, MM_AUD_2, 1),
	HAB_DEVICE_CNSTR(DEVICE_AUD3_NAME, MM_AUD_3, 2),
	HAB_DEVICE_CNSTR(DEVICE_AUD4_NAME, MM_AUD_4, 3),
	HAB_DEVICE_CNSTR(DEVICE_CAM1_NAME, MM_CAM_1, 4),
	HAB_DEVICE_CNSTR(DEVICE_CAM2_NAME, MM_CAM_2, 5),
	HAB_DEVICE_CNSTR(DEVICE_DISP1_NAME, MM_DISP_1, 6),
	HAB_DEVICE_CNSTR(DEVICE_DISP2_NAME, MM_DISP_2, 7),
	HAB_DEVICE_CNSTR(DEVICE_DISP3_NAME, MM_DISP_3, 8),
	HAB_DEVICE_CNSTR(DEVICE_DISP4_NAME, MM_DISP_4, 9),
	HAB_DEVICE_CNSTR(DEVICE_DISP5_NAME, MM_DISP_5, 10),
	HAB_DEVICE_CNSTR(DEVICE_GFX_NAME, MM_GFX, 11),
	HAB_DEVICE_CNSTR(DEVICE_VID_NAME, MM_VID, 12),
	HAB_DEVICE_CNSTR(DEVICE_VID2_NAME, MM_VID_2, 13),
	HAB_DEVICE_CNSTR(DEVICE_VID3_NAME, MM_VID_3, 14),
	HAB_DEVICE_CNSTR(DEVICE_MISC_NAME, MM_MISC, 15),
	HAB_DEVICE_CNSTR(DEVICE_QCPE1_NAME, MM_QCPE_VM1, 16),
	HAB_DEVICE_CNSTR(DEVICE_CLK1_NAME, MM_CLK_VM1, 17),
	HAB_DEVICE_CNSTR(DEVICE_CLK2_NAME, MM_CLK_VM2, 18),
	HAB_DEVICE_CNSTR(DEVICE_FDE1_NAME, MM_FDE_1, 19),
	HAB_DEVICE_CNSTR(DEVICE_BUFFERQ1_NAME, MM_BUFFERQ_1, 20),
	HAB_DEVICE_CNSTR(DEVICE_DATA1_NAME, MM_DATA_NETWORK_1, 21),
	HAB_DEVICE_CNSTR(DEVICE_DATA2_NAME, MM_DATA_NETWORK_2, 22),
	HAB_DEVICE_CNSTR(DEVICE_HSI2S1_NAME, MM_HSI2S_1, 23),
	HAB_DEVICE_CNSTR(DEVICE_XVM1_NAME, MM_XVM_1, 24),
	HAB_DEVICE_CNSTR(DEVICE_XVM2_NAME, MM_XVM_2, 25),
	HAB_DEVICE_CNSTR(DEVICE_XVM3_NAME, MM_XVM_3, 26),
	HAB_DEVICE_CNSTR(DEVICE_VNW1_NAME, MM_VNW_1, 27),
	HAB_DEVICE_CNSTR(DEVICE_EXT1_NAME, MM_EXT_1, 28),
	HAB_DEVICE_CNSTR(DEVICE_GPCE1_NAME, MM_GPCE_1, 29),
};

struct hab_driver hab_driver = {
	.ndevices = ARRAY_SIZE(hab_devices),
	.devp = hab_devices,
	.uctx_list = LIST_HEAD_INIT(hab_driver.uctx_list),
	.drvlock = __SPIN_LOCK_UNLOCKED(hab_driver.drvlock),
	.imp_list = LIST_HEAD_INIT(hab_driver.imp_list),
	.imp_lock = __SPIN_LOCK_UNLOCKED(hab_driver.imp_lock),
	.hab_init_success = 0,
	.reclaim_list = LIST_HEAD_INIT(hab_driver.reclaim_list),
	.reclaim_lock = __SPIN_LOCK_UNLOCKED(hab_driver.reclaim_lock),
};
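
/*
 * Allocate and initialize a context: its vchannel list, export/import
 * warehouses and rx queues, the associated locks, and the per-context
 * import context, then link it into the driver-wide uctx_list. The
 * context starts with a refcount of one and is released through its
 * kref (see hab_ctx_free()). Returns NULL on allocation failure.
 */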
struct uhab_context *hab_ctx_alloc(int kernel)
{
	struct uhab_context *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return NULL;

	ctx->closing = 0;
	INIT_LIST_HEAD(&ctx->vchannels);
	INIT_LIST_HEAD(&ctx->exp_whse);
	INIT_LIST_HEAD(&ctx->imp_whse);

	INIT_LIST_HEAD(&ctx->exp_rxq);
	init_waitqueue_head(&ctx->exp_wq);
	spin_lock_init(&ctx->expq_lock);

	INIT_LIST_HEAD(&ctx->imp_rxq);
	init_waitqueue_head(&ctx->imp_wq);
	spin_lock_init(&ctx->impq_lock);

	spin_lock_init(&ctx->imp_lock);
	rwlock_init(&ctx->exp_lock);
	rwlock_init(&ctx->ctx_lock);

	INIT_LIST_HEAD(&ctx->pending_open);
	kref_init(&ctx->refcount);

	ctx->import_ctx = habmem_imp_hyp_open();
	if (!ctx->import_ctx) {
		pr_err("habmem_imp_hyp_open failed\n");
		kfree(ctx);
		return NULL;
	}
	ctx->kernel = kernel;

	spin_lock_bh(&hab_driver.drvlock);
	list_add_tail(&ctx->node, &hab_driver.uctx_list);
	hab_driver.ctx_cnt++;
	ctx->lb_be = hab_driver.b_loopback_be; /* loopback only */
	hab_driver.b_loopback_be = ~hab_driver.b_loopback_be; /* loopback only */
	spin_unlock_bh(&hab_driver.drvlock);
	pr_debug("ctx %pK live %d loopback be %d\n",
			ctx, hab_driver.ctx_cnt, ctx->lb_be);

	return ctx;
}

/*
 * This function might sleep. One scenario (applicable to Linux only) is
 * the call chain hab_ctx_free_fn -> habmem_remove_export ->
 * habmem_export_put -> habmem_export_destroy -> habmem_exp_release,
 * where dma_buf_unmap_attachment() and dma_buf_detach() might sleep.
 */
void hab_ctx_free_fn(struct uhab_context *ctx)
{
	struct hab_export_ack_recvd *exp_ack_recvd, *expack_tmp;
	struct hab_import_ack_recvd *imp_ack_recvd, *impack_tmp;
	struct virtual_channel *vchan;
	struct physical_channel *pchan;
	int i;
	struct uhab_context *ctxdel, *ctxtmp;
	struct hab_open_node *open_node;
	struct export_desc *exp = NULL, *exp_tmp = NULL;
	struct export_desc_super *exp_super = NULL;
	int irqs_disabled = irqs_disabled();
	struct hab_header header = HAB_HEADER_INITIALIZER;
	int ret;

	/* garbage-collect exp/imp buffers */
	write_lock(&ctx->exp_lock);
	list_for_each_entry_safe(exp, exp_tmp, &ctx->exp_whse, node) {
		list_del(&exp->node);
		exp_super = container_of(exp, struct export_desc_super, exp);
		if ((exp_super->remote_imported != 0) && (exp->pchan->mem_proto == 1)) {
			pr_warn("exp id %d still imported on remote side on pchan %s\n",
				exp->export_id, exp->pchan->name);
			hab_spin_lock(&hab_driver.reclaim_lock, irqs_disabled);
			list_add_tail(&exp->node, &hab_driver.reclaim_list);
			hab_spin_unlock(&hab_driver.reclaim_lock, irqs_disabled);
			schedule_work(&hab_driver.reclaim_work);
		} else {
			pr_debug("potential leak exp %d vcid %X recovered\n",
				exp->export_id, exp->vcid_local);
			habmem_hyp_revoke(exp->payload, exp->payload_count);
			write_unlock(&ctx->exp_lock);
			pchan = exp->pchan;
			hab_spin_lock(&pchan->expid_lock, irqs_disabled);
			idr_remove(&pchan->expid_idr, exp->export_id);
			hab_spin_unlock(&pchan->expid_lock, irqs_disabled);
			habmem_remove_export(exp);
			write_lock(&ctx->exp_lock);
		}
	}
	write_unlock(&ctx->exp_lock);

	spin_lock_bh(&ctx->imp_lock);
	list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
		list_del(&exp->node);
		ctx->import_total--;
		pr_debug("leaked imp %d vcid %X for ctx is collected total %d\n",
			exp->export_id, exp->vcid_local,
			ctx->import_total);
		ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, ctx->kernel);
		if (exp->pchan->mem_proto == 1) {
			if (!ret) {
				pr_warn("unimp msg sent for exp id %u on %s\n",
					exp->export_id, exp->pchan->name);
				HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_UNIMPORT);
				HAB_HEADER_SET_SIZE(header, sizeof(uint32_t));
				HAB_HEADER_SET_ID(header, HAB_VCID_UNIMPORT);
				HAB_HEADER_SET_SESSION_ID(header, HAB_SESSIONID_UNIMPORT);
				ret = physical_channel_send(exp->pchan, &header, &exp->export_id,
						HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
				if (ret != 0)
					pr_err("failed to send unimp msg %d, vcid %d, exp id %d\n",
						ret, exp->vcid_local, exp->export_id);
			} else
				pr_err("exp id %d unmap fail on vcid %X\n",
					exp->export_id, exp->vcid_local);
		}
		exp_super = container_of(exp, struct export_desc_super, exp);
		kfree(exp_super);
	}
	spin_unlock_bh(&ctx->imp_lock);

	habmem_imp_hyp_close(ctx->import_ctx, ctx->kernel);

	/*
	 * The rx queues below are only used while a vchan is alive. At this
	 * point it is safe to walk them without holding a lock, since all
	 * vchans in this ctx have been freed. Only one of the two queues is
	 * actually in use, selected by the mem protocol, but the protocol
	 * cannot be queried from the pchan gracefully when the two warehouses
	 * above are empty, so both queues are always checked to keep the
	 * code simple.
	 */
	list_for_each_entry_safe(imp_ack_recvd, impack_tmp, &ctx->imp_rxq, node) {
		list_del(&imp_ack_recvd->node);
		kfree(imp_ack_recvd);
	}
	list_for_each_entry_safe(exp_ack_recvd, expack_tmp, &ctx->exp_rxq, node) {
		list_del(&exp_ack_recvd->node);
		kfree(exp_ack_recvd);
	}

	/* remove the ctx from the driver-wide list */
	spin_lock_bh(&hab_driver.drvlock);
	hab_driver.ctx_cnt--;
	list_for_each_entry_safe(ctxdel, ctxtmp, &hab_driver.uctx_list, node) {
		if (ctxdel == ctx)
			list_del(&ctxdel->node);
	}
	spin_unlock_bh(&hab_driver.drvlock);
	pr_debug("live ctx %d refcnt %d kernel %d close %d owner %d\n",
		hab_driver.ctx_cnt, get_refcnt(ctx->refcount),
		ctx->kernel, ctx->closing, ctx->owner);

	/* check for vchans leaked in this ctx */
	read_lock(&ctx->ctx_lock);
	list_for_each_entry(vchan, &ctx->vchannels, node) {
		pr_warn("leak vchan id %X cnt %X remote %d in ctx\n",
			vchan->id, get_refcnt(vchan->refcount),
			vchan->otherend_id);
	}
	read_unlock(&ctx->ctx_lock);

	/* check pending open */
	if (ctx->pending_cnt)
		pr_warn("potential leak of pending_open nodes %d\n",
			ctx->pending_cnt);
	read_lock(&ctx->ctx_lock);
	list_for_each_entry(open_node, &ctx->pending_open, node) {
		pr_warn("leak pending open vcid %X type %d subid %d openid %d\n",
			open_node->request.xdata.vchan_id,
			open_node->request.type,
			open_node->request.xdata.sub_id,
			open_node->request.xdata.open_id);
	}
	read_unlock(&ctx->ctx_lock);

	/* check vchans belonging to this ctx in all hab/mmid devices */
	for (i = 0; i < hab_driver.ndevices; i++) {
		struct hab_device *habdev = &hab_driver.devp[i];

		read_lock_bh(&habdev->pchan_lock);
		list_for_each_entry(pchan, &habdev->pchannels, node) {
			/* check vchan ctx owner */
			read_lock(&pchan->vchans_lock);
			list_for_each_entry(vchan, &pchan->vchannels, pnode) {
				if (vchan->ctx == ctx) {
					pr_warn("leak vcid %X cnt %d pchan %s local %d remote %d\n",
						vchan->id,
						get_refcnt(vchan->refcount),
						pchan->name, pchan->vmid_local,
						pchan->vmid_remote);
				}
			}
			read_unlock(&pchan->vchans_lock);
		}
		read_unlock_bh(&habdev->pchan_lock);
	}

	kfree(ctx);
}
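
/* kref release callback; defers to the OS-specific context teardown */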
void hab_ctx_free(struct kref *ref)
{
	hab_ctx_free_os(ref);
}

/*
 * The caller needs to call hab_vchan_put() afterwards. This is used to
 * refcount the local ioctl access based on the ctx.
 */
struct virtual_channel *hab_get_vchan_fromvcid(int32_t vcid,
		struct uhab_context *ctx, int ignore_remote)
{
	struct virtual_channel *vchan;

	read_lock(&ctx->ctx_lock);
	list_for_each_entry(vchan, &ctx->vchannels, node) {
		if (vcid == vchan->id) {
			if ((ignore_remote ? 0 : vchan->otherend_closed) ||
				vchan->closed ||
				!kref_get_unless_zero(&vchan->refcount)) {
				pr_debug("failed to inc vcid %x remote %x session %d refcnt %d close_flg remote %d local %d\n",
					vchan->id, vchan->otherend_id,
					vchan->session_id,
					get_refcnt(vchan->refcount),
					vchan->otherend_closed, vchan->closed);
				vchan = NULL;
			}
			read_unlock(&ctx->ctx_lock);
			return vchan;
		}
	}
	read_unlock(&ctx->ctx_lock);
	return NULL;
}
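
/* find the hab/mmid device serving the major id of mm_id */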
struct hab_device *find_hab_device(unsigned int mm_id)
{
	int i;

	for (i = 0; i < hab_driver.ndevices; i++) {
		if (hab_driver.devp[i].id == HAB_MMID_GET_MAJOR(mm_id))
			return &hab_driver.devp[i];
	}

	pr_err("%s: id=%d\n", __func__, mm_id);
	return NULL;
}

/*
 * open handshake in FE and BE
 *
 *  frontend                  backend
 *  send(INIT)                wait(INIT)
 *  wait(INIT_ACK)            send(INIT_ACK)
 *  send(INIT_DONE)           wait(INIT_DONE)
 */
struct virtual_channel *frontend_open(struct uhab_context *ctx,
		unsigned int mm_id,
		int dom_id,
		uint32_t flags)
{
	int ret, ret2, open_id = 0;
	struct physical_channel *pchan = NULL;
	struct hab_device *dev;
	struct virtual_channel *vchan = NULL;
	static atomic_t open_id_counter = ATOMIC_INIT(0);
	struct hab_open_request request;
	struct hab_open_request *recv_request;
	int sub_id = HAB_MMID_GET_MINOR(mm_id);
	struct hab_open_node pending_open = { { 0 } };

	dev = find_hab_device(mm_id);
	if (dev == NULL) {
		pr_err("HAB device %d is not initialized\n", mm_id);
		ret = -EINVAL;
		goto err;
	}

	/* guest can find its own id */
	pchan = hab_pchan_find_domid(dev, dom_id);
	if (!pchan) {
		pr_err("hab_pchan_find_domid failed: dom_id=%d\n", dom_id);
		ret = -EINVAL;
		goto err;
	}

	open_id = atomic_inc_return(&open_id_counter);
	vchan = hab_vchan_alloc(ctx, pchan, open_id);
	if (!vchan) {
		pr_err("vchan alloc failed\n");
		ret = -ENOMEM;
		goto err;
	}

	/* Send Init sequence */
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT, pchan,
		vchan->id, sub_id, open_id);
	request.xdata.ver_fe = HAB_API_VER;
	ret = hab_open_request_send(&request);
	if (ret) {
		pr_err("hab_open_request_send failed: %d\n", ret);
		goto err;
	}
	pending_open.request = request;

	/* during the wait, the app could be terminated */
	hab_open_pending_enter(ctx, pchan, &pending_open);

	/* Wait for Init-Ack sequence */
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK, pchan,
		0, sub_id, open_id);
	ret = hab_open_listen(ctx, dev, &request, &recv_request, 0, flags);
	if (!ret && recv_request && ((recv_request->xdata.ver_fe & 0xFFFF0000)
		!= (recv_request->xdata.ver_be & 0xFFFF0000))) {
		/* version check */
		pr_err("hab major version mismatch fe %X be %X on mmid %d\n",
			recv_request->xdata.ver_fe,
			recv_request->xdata.ver_be, mm_id);
		hab_open_pending_exit(ctx, pchan, &pending_open);
		ret = -EPROTO;
		goto err;
	} else if (ret || !recv_request) {
		pr_err("hab_open_listen failed: %d, send cancel vcid %x subid %d openid %d\n",
			ret, vchan->id,
			sub_id, open_id);
		/* send cancel to BE due to FE's local close */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_CANCEL,
			pchan, vchan->id, sub_id, open_id);
		request.xdata.ver_fe = HAB_API_VER;
		ret2 = hab_open_request_send(&request);
		if (ret2)
			pr_err("send init_cancel failed %d on vcid %x\n", ret2,
				vchan->id);
		hab_open_pending_exit(ctx, pchan, &pending_open);
		if (ret != -EINTR)
			ret = -EINVAL;
		goto err;
	}

	/* remove the pending open locally after good pairing */
	hab_open_pending_exit(ctx, pchan, &pending_open);

	pr_debug("hab version fe %X be %X on mmid %d\n",
		recv_request->xdata.ver_fe, recv_request->xdata.ver_be,
		mm_id);

	pchan->mem_proto = (recv_request->xdata.ver_proto == 0) ? 0 : 1;
	pr_info_once("mem proto ver %u\n", pchan->mem_proto);

	vchan->otherend_id = recv_request->xdata.vchan_id;
	hab_open_request_free(recv_request);

	/* Send Init-Done sequence */
	hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_DONE, pchan,
		0, sub_id, open_id);
	request.xdata.ver_fe = HAB_API_VER;
	ret = hab_open_request_send(&request);
	if (ret) {
		pr_err("failed to send init-done vcid %x remote %x openid %d\n",
			vchan->id, vchan->otherend_id, vchan->session_id);
		goto err;
	}

	hab_pchan_put(pchan);

	return vchan;
err:
	if (vchan)
		hab_vchan_put(vchan);
	if (pchan)
		hab_pchan_put(pchan);

	return ERR_PTR(ret);
}
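
/*
 * BE side of the open handshake: wait for INIT from the FE, reply with
 * INIT_ACK, then wait for INIT_DONE, retrying the whole sequence when
 * that wait times out. Returns the new vchan or ERR_PTR() on failure.
 */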
struct virtual_channel *backend_listen(struct uhab_context *ctx,
		unsigned int mm_id, int timeout, uint32_t flags)
{
	int ret, ret2;
	int open_id, ver_fe;
	int sub_id = HAB_MMID_GET_MINOR(mm_id);
	struct physical_channel *pchan = NULL;
	struct hab_device *dev;
	struct virtual_channel *vchan = NULL;
	struct hab_open_request request;
	struct hab_open_request *recv_request;
	uint32_t otherend_vchan_id;
	struct hab_open_node pending_open = { { 0 } };

	dev = find_hab_device(mm_id);
	if (dev == NULL) {
		pr_err("failed to find dev based on id %d\n", mm_id);
		ret = -EINVAL;
		goto err;
	}
	while (1) {
		/* Wait for Init sequence */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT,
			NULL, 0, sub_id, 0);
		/* cancel should not happen at this moment */
		ret = hab_open_listen(ctx, dev, &request, &recv_request,
				timeout, flags);
		if (ret || !recv_request) {
			if (!ret && !recv_request)
				ret = -EINVAL;
			if (-EAGAIN == ret) {
				ret = -ETIMEDOUT;
			} else {
				/* device is closed */
				pr_err("open request wait failed ctx closing %d\n",
					ctx->closing);
			}
			goto err;
		} else if (!ret && recv_request &&
			((recv_request->xdata.ver_fe & 0xFFFF0000) !=
			(HAB_API_VER & 0xFFFF0000))) {
			/* version check */
			pr_err("version mismatch fe %X be %X on mmid %d\n",
				recv_request->xdata.ver_fe, HAB_API_VER, mm_id);
			hab_open_request_init(&request,
				HAB_PAYLOAD_TYPE_INIT_ACK,
				NULL, 0, sub_id, recv_request->xdata.open_id);
			request.xdata.ver_be = HAB_API_VER;
			/* reply to allow FE to bail out */
			ret2 = hab_open_request_send(&request);
			if (ret2)
				pr_err("send FE version mismatch failed mmid %d sub %d\n",
					mm_id, sub_id);
			ret = -EPROTO;
			goto err;
		}
		recv_request->pchan->mem_proto = (recv_request->xdata.ver_proto == 0) ? 0 : 1;
		pr_info_once("mem proto ver %u\n", recv_request->pchan->mem_proto);

		/* guest id from guest */
		otherend_vchan_id = recv_request->xdata.vchan_id;
		open_id = recv_request->xdata.open_id;
		ver_fe = recv_request->xdata.ver_fe;
		pchan = recv_request->pchan;
		hab_pchan_get(pchan);
		hab_open_request_free(recv_request);
		recv_request = NULL;

		vchan = hab_vchan_alloc(ctx, pchan, open_id);
		if (!vchan) {
			ret = -ENOMEM;
			goto err;
		}
		vchan->otherend_id = otherend_vchan_id;

		/* Send Init-Ack sequence */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_ACK,
			pchan, vchan->id, sub_id, open_id);
		request.xdata.ver_fe = ver_fe; /* carry over */
		request.xdata.ver_be = HAB_API_VER;
		ret = hab_open_request_send(&request);
		if (ret)
			goto err;
		pending_open.request = request;
		/* wait only after init-ack is sent */
		hab_open_pending_enter(ctx, pchan, &pending_open);

		/* Wait for Init-Done sequence */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_DONE,
			pchan, 0, sub_id, open_id);
		ret = hab_open_listen(ctx, dev, &request, &recv_request,
				HAB_HS_TIMEOUT, flags);
		hab_open_pending_exit(ctx, pchan, &pending_open);
		if (ret && recv_request &&
			recv_request->type == HAB_PAYLOAD_TYPE_INIT_CANCEL) {
			pr_err("listen cancelled vcid %x subid %d openid %d ret %d\n",
				request.xdata.vchan_id, request.xdata.sub_id,
				request.xdata.open_id, ret);

			/* FE cancels this session.
			 * So BE has to cancel its own too
			 */
			hab_open_request_init(&request,
				HAB_PAYLOAD_TYPE_INIT_CANCEL, pchan,
				vchan->id, sub_id, open_id);
			ret2 = hab_open_request_send(&request);
			if (ret2)
				pr_err("send init_cancel failed %d on vcid %x\n",
					ret2, vchan->id);
			hab_open_pending_exit(ctx, pchan, &pending_open);
			ret = -ENODEV; /* open request cancelled remotely */
			break;
		} else if (ret != -EAGAIN) {
			hab_open_pending_exit(ctx, pchan, &pending_open);
			break; /* received something. good case! */
		}
		/* stay in the loop and retry */
		pr_warn("retry open ret %d vcid %X remote %X sub %d open %d\n",
			ret, vchan->id, vchan->otherend_id, sub_id, open_id);

		/* retry path starts here. free the previous vchan */
		hab_open_request_init(&request, HAB_PAYLOAD_TYPE_INIT_CANCEL,
			pchan, vchan->id, sub_id, open_id);
		request.xdata.ver_fe = ver_fe;
		request.xdata.ver_be = HAB_API_VER;
		ret2 = hab_open_request_send(&request);
		if (ret2)
			pr_err("send init_cancel failed %d on vcid %x\n", ret2,
				vchan->id);
		hab_open_pending_exit(ctx, pchan, &pending_open);
		hab_vchan_put(vchan);
		vchan = NULL;
		hab_pchan_put(pchan);
		pchan = NULL;
	}
	if (ret || !recv_request) {
		pr_err("backend mmid %d listen error %d\n", mm_id, ret);
		ret = -EINVAL;
		goto err;
	}

	hab_open_request_free(recv_request);
	hab_pchan_put(pchan);

	return vchan;
err:
	if (ret != -ETIMEDOUT)
		pr_err("listen on mmid %d failed\n", mm_id);
	if (vchan)
		hab_vchan_put(vchan);
	if (pchan)
		hab_pchan_put(pchan);

	return ERR_PTR(ret);
}
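
/*
 * Send a message over the vchan identified by vcid. Unless
 * HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING is set, the call may block (and
 * therefore sleep) until the underlying physical channel has room; with
 * the flag set, the physical channel's status (e.g. -EAGAIN) is
 * returned to the caller instead of retrying.
 */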
long hab_vchan_send(struct uhab_context *ctx,
		int vcid,
		size_t sizebytes,
		void *data,
		unsigned int flags)
{
	struct virtual_channel *vchan;
	int ret;
	struct hab_header header = HAB_HEADER_INITIALIZER;
	unsigned int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING;

	if (sizebytes > (size_t)HAB_HEADER_SIZE_MAX) {
		pr_err("Message too large, %zu bytes, max is %d\n",
			sizebytes, HAB_HEADER_SIZE_MAX);
		return -EINVAL;
	}

	vchan = hab_get_vchan_fromvcid(vcid, ctx, 0);
	if (!vchan || vchan->otherend_closed) {
		ret = -ENODEV;
		goto err;
	}

	/*
	 * Without non-blocking configured, the current path blocks when the
	 * shared memory (vdev-shmem project) or the vh_buf_header (virtio-hab
	 * project) used by HAB for front-end/back-end messaging is exhausted:
	 * 1. the vdev-shmem project blocks in hab_vchan_send();
	 * 2. the virtio-hab project blocks in hab_physical_send().
	 */
	if (!nonblocking_flag)
		might_sleep();

	/* log msg send timestamp: enter hab_vchan_send */
	trace_hab_vchan_send_start(vchan);

	HAB_HEADER_SET_SIZE(header, sizebytes);
	if (flags & HABMM_SOCKET_SEND_FLAGS_XING_VM_STAT) {
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_PROFILE);
		if (sizebytes < sizeof(struct habmm_xing_vm_stat)) {
			pr_err("wrong profiling buffer size %zd, expect %zd\n",
				sizebytes,
				sizeof(struct habmm_xing_vm_stat));
			ret = -EINVAL;
			goto err;
		}
	} else if (flags & HABMM_SOCKET_XVM_SCHE_TEST) {
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_SCHE_MSG);
	} else if (flags & HABMM_SOCKET_XVM_SCHE_TEST_ACK) {
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_SCHE_MSG_ACK);
	} else if (flags & HABMM_SOCKET_XVM_SCHE_RESULT_REQ) {
		if (sizebytes < sizeof(unsigned long long)) {
			pr_err("Message buffer too small, %zu bytes, expect %zu\n",
				sizebytes,
				sizeof(unsigned long long));
			ret = -EINVAL;
			goto err;
		}
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ);
	} else if (flags & HABMM_SOCKET_XVM_SCHE_RESULT_RSP) {
		if (sizebytes < 3 * sizeof(unsigned long long)) {
			pr_err("Message buffer too small, %zu bytes, expect %zu\n",
				sizebytes,
				3 * sizeof(unsigned long long));
			ret = -EINVAL;
			goto err;
		}
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP);
	} else {
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_MSG);
	}
	HAB_HEADER_SET_ID(header, vchan->otherend_id);
	HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);

	while (1) {
		ret = physical_channel_send(vchan->pchan, &header, data, nonblocking_flag);
		if (vchan->otherend_closed || nonblocking_flag ||
			ret != -EAGAIN)
			break;
		schedule();
	}
	/*
	 * A ret of 0 here indicates the message was already sent out
	 * from hab_vchan_send()'s perspective.
	 */
	if (!ret)
		atomic64_inc(&vchan->tx_cnt);
err:
	if (vchan) {
		/* log msg send timestamp: exit hab_vchan_send */
		trace_hab_vchan_send_done(vchan);
		hab_vchan_put(vchan);
	}

	return ret;
}
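
/*
 * Receive one message from the vchan identified by vcid. With
 * HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING set, any pending ring data is
 * pulled and queued in this context first; the actual dequeue (and any
 * blocking/timeout behavior) is handled by hab_msg_dequeue().
 */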
int hab_vchan_recv(struct uhab_context *ctx,
		struct hab_message **message,
		int vcid,
		int *rsize,
		unsigned int timeout,
		unsigned int flags)
{
	struct virtual_channel *vchan;
	int ret = 0;
	int nonblocking_flag = flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING;

	vchan = hab_get_vchan_fromvcid(vcid, ctx, 1);
	if (!vchan) {
		pr_err("vcid %X vchan 0x%pK ctx %pK\n", vcid, vchan, ctx);
		*message = NULL;
		return -ENODEV;
	}
	vchan->rx_inflight = 1;

	if (nonblocking_flag) {
		/*
		 * Try to pull data from the ring in this context instead of
		 * the IRQ handler. Any available messages will be copied and
		 * queued internally, then fetched by hab_msg_dequeue().
		 */
		physical_channel_rx_dispatch((unsigned long)vchan->pchan);
	}

	ret = hab_msg_dequeue(vchan, message, rsize, timeout, flags);
	if (!ret && *message) {
		/* log msg recv timestamp: exit hab_vchan_recv */
		trace_hab_vchan_recv_done(vchan, *message);
		/*
		 * Here it is certain that a message was received from
		 * hab_vchan_recv()'s view, with ret 0 and *message non-NULL.
		 */
		atomic64_inc(&vchan->rx_cnt);
	}

	vchan->rx_inflight = 0;
	hab_vchan_put(vchan);
	return ret;
}

bool hab_is_loopback(void)
{
	return hab_driver.b_loopback;
}
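
/*
 * Open a vchan on the mmid device, acting as the BE listener or the FE
 * opener depending on the loopback setting or the pchan role. On
 * success the new vchan is added to the ctx and its id is returned
 * through vcid.
 */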
int hab_vchan_open(struct uhab_context *ctx,
		unsigned int mmid,
		int32_t *vcid,
		int32_t timeout,
		uint32_t flags)
{
	struct virtual_channel *vchan = NULL;
	struct hab_device *dev;

	pr_debug("Open mmid=%d, loopback mode=%d, loopback be ctx %d\n",
		mmid, hab_driver.b_loopback, ctx->lb_be);

	if (!vcid)
		return -EINVAL;

	if (hab_is_loopback()) {
		if (ctx->lb_be)
			vchan = backend_listen(ctx, mmid, timeout, flags);
		else
			vchan = frontend_open(ctx, mmid, LOOPBACK_DOM, flags);
	} else {
		dev = find_hab_device(mmid);

		if (dev) {
			struct physical_channel *pchan =
				hab_pchan_find_domid(dev,
					HABCFG_VMID_DONT_CARE);
			if (pchan) {
				if (pchan->kernel_only && !ctx->kernel) {
					pr_err("pchan only serves the kernel: mmid %d\n", mmid);
					return -EPERM;
				}
				if (pchan->is_be)
					vchan = backend_listen(ctx, mmid,
						timeout, flags);
				else
					vchan = frontend_open(ctx, mmid,
						HABCFG_VMID_DONT_CARE, flags);
			} else {
				pr_err("open on nonexistent pchan (mmid %x)\n",
					mmid);
				return -ENODEV;
			}
		} else {
			pr_err("failed to find device, mmid %d\n", mmid);
			return -ENODEV;
		}
	}

	if (IS_ERR(vchan)) {
		if (-ETIMEDOUT != PTR_ERR(vchan) && -EAGAIN != PTR_ERR(vchan))
			pr_err("vchan open failed mmid=%d\n", mmid);
		return PTR_ERR(vchan);
	}

	pr_debug("vchan id %x remote id %x session %d\n", vchan->id,
		vchan->otherend_id, vchan->session_id);

	hab_write_lock(&ctx->ctx_lock, !ctx->kernel);
	list_add_tail(&vchan->node, &ctx->vchannels);
	ctx->vcnt++;
	*vcid = vchan->id;
	hab_write_unlock(&ctx->ctx_lock, !ctx->kernel);

	return 0;
}
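
/* notify the remote end that this vchan is being closed locally */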
void hab_send_close_msg(struct virtual_channel *vchan)
{
	struct hab_header header = HAB_HEADER_INITIALIZER;
	int ret = 0;

	if (vchan && !vchan->otherend_closed) {
		HAB_HEADER_SET_SIZE(header, 0);
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_CLOSE);
		HAB_HEADER_SET_ID(header, vchan->otherend_id);
		HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
		ret = physical_channel_send(vchan->pchan, &header, NULL,
				HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
		if (ret != 0)
			pr_err("failed to send close msg %d, vcid %x\n",
				ret, vchan->id);
	}
}
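
/* tell the remote exporter that export exp_id is no longer imported here */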
void hab_send_unimport_msg(struct virtual_channel *vchan, uint32_t exp_id)
{
	struct hab_header header = HAB_HEADER_INITIALIZER;
	int ret = 0;

	if (vchan) {
		HAB_HEADER_SET_SIZE(header, sizeof(uint32_t));
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_UNIMPORT);
		HAB_HEADER_SET_ID(header, vchan->otherend_id);
		HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
		ret = physical_channel_send(vchan->pchan, &header, &exp_id,
				HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
		if (ret != 0)
			pr_err("failed to send unimp msg %d, vcid %x\n",
				ret, vchan->id);
	}
}
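
/*
 * Detach the vchan matching vcid from the ctx, unblock its waiters, and
 * drop the ctx's reference to it. Returns -ENODEV when vcid is not
 * found in this ctx.
 */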
int hab_vchan_close(struct uhab_context *ctx, int32_t vcid)
{
	struct virtual_channel *vchan = NULL, *tmp = NULL;
	int vchan_found = 0;
	int ret = 0;
	int irqs_disabled = irqs_disabled();

	if (!ctx)
		return -EINVAL;

	hab_write_lock(&ctx->ctx_lock, !ctx->kernel || irqs_disabled);
	list_for_each_entry_safe(vchan, tmp, &ctx->vchannels, node) {
		if (vchan->id == vcid) {
			/* local close starts */
			vchan->closed = 1;

			/* vchan is not in this ctx anymore */
			list_del(&vchan->node);
			ctx->vcnt--;

			pr_debug("vcid %x remote %x session %d refcnt %d\n",
				vchan->id, vchan->otherend_id,
				vchan->session_id, get_refcnt(vchan->refcount));

			hab_write_unlock(&ctx->ctx_lock, !ctx->kernel || irqs_disabled);
			/* unblock blocked in-calls */
			hab_vchan_stop_notify(vchan);
			hab_vchan_put(vchan); /* there is a lock inside */
			hab_write_lock(&ctx->ctx_lock, !ctx->kernel || irqs_disabled);

			vchan_found = 1;
			break;
		}
	}
	hab_write_unlock(&ctx->ctx_lock, !ctx->kernel || irqs_disabled);

	if (!vchan_found)
		ret = -ENODEV;

	return ret;
}

/*
 * To name the pchan - the pchan has two ends, either FE or BE locally.
 * If is_be is true, then this end is the listener (BE) and the pchan
 * name uses the remote FE's vmid from the table.
 * If is_be is false, then the local end is the FE opener and the pchan
 * name uses the local FE's vmid (self).
 */
static int hab_initialize_pchan_entry(struct hab_device *mmid_device,
		int vmid_local, int vmid_remote, int is_be, int kernel_only)
{
	char pchan_name[MAX_VMID_NAME_SIZE];
	struct physical_channel *pchan = NULL;
	int ret;
	int vmid = is_be ? vmid_remote : vmid_local; /* used for naming only */

	if (!mmid_device) {
		pr_err("habdev %pK, vmid local %d, remote %d, is be %d\n",
			mmid_device, vmid_local, vmid_remote, is_be);
		return -EINVAL;
	}

	snprintf(pchan_name, MAX_VMID_NAME_SIZE, "vm%d-", vmid);
	strlcat(pchan_name, mmid_device->name, MAX_VMID_NAME_SIZE);

	ret = habhyp_commdev_alloc((void **)&pchan, is_be, pchan_name,
			vmid_remote, mmid_device);
	if (ret) {
		pr_err("failed %d to allocate pchan %s, vmid local %d, remote %d, is_be %d, total %d\n",
			ret, pchan_name, vmid_local, vmid_remote,
			is_be, mmid_device->pchan_cnt);
	} else {
		/* local/remote id setting should be kept in lower level */
		pchan->vmid_local = vmid_local;
		pchan->vmid_remote = vmid_remote;
		pchan->kernel_only = kernel_only;
		pr_debug("pchan %s mmid %s local %d remote %d role %d, kernel only %d\n",
			pchan_name, mmid_device->name,
			pchan->vmid_local, pchan->vmid_remote,
			pchan->dom_id, pchan->kernel_only);
	}

	return ret;
}
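
/*
 * Create one pchan for each mmid strictly between start and end for
 * this remote VM entry, then create the cdev node for the mmid group.
 */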
static int hab_generate_pchan_group(struct local_vmid *settings,
		int i, int j, int start, int end)
{
	int k, ret = 0;

	for (k = start + 1; k < end; k++) {
		/*
		 * if this local pchan end is BE, then use the remote FE's
		 * vmid. If the local end is FE, then use the self vmid
		 */
		ret += hab_initialize_pchan_entry(
				find_hab_device(k),
				settings->self,
				HABCFG_GET_VMID(settings, i),
				HABCFG_GET_BE(settings, i, j),
				HABCFG_GET_KERNEL(settings, i, j));
	}

	ret += hab_create_cdev_node(HABCFG_GET_MMID(settings, i, j));

	return ret;
}

/*
 * generate the pchan group for one mmid entry of the hab settings table.
 * return status 0: success, otherwise failure
 */
static int hab_generate_pchan(struct local_vmid *settings, int i, int j)
{
	int ret = 0;

	pr_debug("%d as mmid %d in vmid %d\n",
		HABCFG_GET_MMID(settings, i, j), j, i);

	switch (HABCFG_GET_MMID(settings, i, j)) {
	case MM_AUD_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_AUD_START, MM_AUD_END);
		break;
	case MM_CAM_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_CAM_START, MM_CAM_END);
		break;
	case MM_DISP_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_DISP_START, MM_DISP_END);
		break;
	case MM_GFX_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_GFX_START, MM_GFX_END);
		break;
	case MM_VID_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_VID_START, MM_VID_END);
		break;
	case MM_MISC_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_MISC_START, MM_MISC_END);
		break;
	case MM_QCPE_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_QCPE_START, MM_QCPE_END);
		break;
	case MM_CLK_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_CLK_START, MM_CLK_END);
		break;
	case MM_FDE_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_FDE_START, MM_FDE_END);
		break;
	case MM_BUFFERQ_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_BUFFERQ_START, MM_BUFFERQ_END);
		break;
	case MM_DATA_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_DATA_START, MM_DATA_END);
		break;
	case MM_HSI2S_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_HSI2S_START, MM_HSI2S_END);
		break;
	case MM_XVM_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_XVM_START, MM_XVM_END);
		break;
	case MM_VNW_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_VNW_START, MM_VNW_END);
		break;
	case MM_EXT_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_EXT_START, MM_EXT_END);
		break;
	case MM_GPCE_START/100:
		ret = hab_generate_pchan_group(settings, i, j, MM_GPCE_START, MM_GPCE_END);
		break;
	default:
		pr_err("failed to find mmid %d, i %d, j %d\n",
			HABCFG_GET_MMID(settings, i, j), i, j);
		break;
	}
	return ret;
}

/*
 * generate the pchan list based on the hab settings table.
 * return status 0: success, otherwise failure
 */
static int hab_generate_pchan_list(struct local_vmid *settings)
{
	int i, j, ret = 0;

	/* scan by valid VMs, then mmid */
	pr_debug("self vmid is %d\n", settings->self);
	for (i = 0; i < HABCFG_VMID_MAX; i++) {
		if (HABCFG_GET_VMID(settings, i) != HABCFG_VMID_INVALID &&
			HABCFG_GET_VMID(settings, i) != settings->self) {
			pr_debug("create pchans for vm %d\n", i);
			for (j = 1; j <= HABCFG_MMID_AREA_MAX; j++) {
				if (HABCFG_GET_MMID(settings, i, j)
					!= HABCFG_VMID_INVALID)
					ret = hab_generate_pchan(settings,
							i, j);
			}
		}
	}
	return ret;
}

/*
 * do_hab_parse() checks hypervisor plug-in readiness, reads in the hab
 * config, and configures the pchans.
 */
#ifdef CONFIG_MSM_HAB_DEFAULT_VMID
#define DEFAULT_GVMID CONFIG_MSM_HAB_DEFAULT_VMID
#else
#define DEFAULT_GVMID 2
#endif

int do_hab_parse(void)
{
	int result;
	int i;
	struct hab_device *device;
	/* single GVM is 2, multigvm is 2 or 3. GHS LV-GVM 2, LA-GVM 3 */
	int default_gvmid = DEFAULT_GVMID;

	pr_debug("hab parse starts for %s\n", hab_info_str);

	/* first check if hypervisor plug-in is ready */
	result = hab_hypervisor_register();
	if (result) {
		pr_err("register HYP plug-in failed, ret %d\n", result);
		return result;
	}

	/*
	 * Initialize open Q before first pchan starts.
	 * Each is for one pchan list
	 */
	for (i = 0; i < hab_driver.ndevices; i++) {
		device = &hab_driver.devp[i];
		init_waitqueue_head(&device->openq);
	}

	/* read in hab config and create pchans */
	memset(&hab_driver.settings, HABCFG_VMID_INVALID,
		sizeof(hab_driver.settings));
	result = hab_parse(&hab_driver.settings);
	if (result) {
		pr_err("hab config open failed, prepare default gvm %d settings\n",
			default_gvmid);
		fill_default_gvm_settings(&hab_driver.settings, default_gvmid,
			MM_AUD_START, MM_ID_MAX);
	}

	/* now generate hab pchan list */
	result = hab_generate_pchan_list(&hab_driver.settings);
	if (result) {
		pr_err("generate pchan list failed, ret %d\n", result);
	} else {
		int pchan_total = 0;

		for (i = 0; i < hab_driver.ndevices; i++) {
			device = &hab_driver.devp[i];
			pchan_total += device->pchan_cnt;
		}
		pr_debug("ret %d, total %d pchans added, ndevices %d\n",
			result, pchan_total, hab_driver.ndevices);
	}

	return result;
}
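
/*
 * Tear down every pchan on all hab/mmid devices, then scan for user
 * contexts and vchans that are still alive and log them as leaks.
 */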
void hab_hypervisor_unregister_common(void)
{
	int status, i;
	struct uhab_context *ctx;
	struct virtual_channel *vchan;

	for (i = 0; i < hab_driver.ndevices; i++) {
		struct hab_device *habdev = &hab_driver.devp[i];
		struct physical_channel *pchan, *pchan_tmp;

		list_for_each_entry_safe(pchan, pchan_tmp,
				&habdev->pchannels, node) {
			status = habhyp_commdev_dealloc(pchan);
			if (status) {
				pr_err("failed to free pchan %pK, i %d, ret %d\n",
					pchan, i, status);
			}
		}
	}

	/* detect leaking uctx */
	spin_lock_bh(&hab_driver.drvlock);
	list_for_each_entry(ctx, &hab_driver.uctx_list, node) {
		pr_warn("leaking ctx owner %d refcnt %d kernel %d\n",
			ctx->owner, get_refcnt(ctx->refcount), ctx->kernel);
		/* further check vchan leak */
		read_lock(&ctx->ctx_lock);
		list_for_each_entry(vchan, &ctx->vchannels, node) {
			pr_warn("leaking vchan id %X remote %X refcnt %d\n",
				vchan->id, vchan->otherend_id,
				get_refcnt(vchan->refcount));
		}
		read_unlock(&ctx->ctx_lock);
	}
	spin_unlock_bh(&hab_driver.drvlock);
}