/* hab_mimex.c — HAB memory export/import (mimex) implementation */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include "hab.h"
  7. #include "hab_grantable.h"
/*
 * Search the context's import-ack receive queue for an ack matching
 * expect_ack (same export id and local/remote vcid pair).
 *
 * On a match the entry is consumed (unlinked from imp_rxq and freed),
 * *scan_imp_whse is set from the ack's imp_whse_added value, and 1 is
 * returned.  Entries scanned more than Q_AGE_THRESHOLD times without
 * matching are discarded as stale.
 *
 * Also returns 1 (without touching *scan_imp_whse) when the remote end
 * closed the vchan, so a waiter is not left blocked forever.
 *
 * Used as the condition of wait_event_interruptible_timeout() in
 * hab_import_ack_wait(), hence it must be safe to call repeatedly.
 */
static int hab_import_ack_find(struct uhab_context *ctx,
	struct hab_import_ack *expect_ack, struct virtual_channel *vchan, uint32_t *scan_imp_whse)
{
	int ret = 0;
	struct hab_import_ack_recvd *ack_recvd = NULL, *tmp = NULL;

	/* imp_rxq is also touched from bottom-half rx context; _bh lock */
	spin_lock_bh(&ctx->impq_lock);
	list_for_each_entry_safe(ack_recvd, tmp, &ctx->imp_rxq, node) {
		if (ack_recvd->ack.export_id == expect_ack->export_id &&
			ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
			ack_recvd->ack.vcid_remote == expect_ack->vcid_remote) {
			list_del(&ack_recvd->node);
			*scan_imp_whse = ack_recvd->ack.imp_whse_added;
			kfree(ack_recvd);
			ret = 1;
			break;
		}
		/* age out acks nobody claimed after repeated scans */
		ack_recvd->age++;
		if (ack_recvd->age > Q_AGE_THRESHOLD) {
			list_del(&ack_recvd->node);
			kfree(ack_recvd);
		}
	}
	if (!ret && vchan->otherend_closed) {
		pr_info("no expected imp ack, but vchan %x is remotely closed\n", vchan->id);
		ret = 1;
	}
	spin_unlock_bh(&ctx->impq_lock);
	return ret;
}
  37. static int hab_import_ack_wait(struct uhab_context *ctx,
  38. struct hab_import_ack *import_ack, struct virtual_channel *vchan, uint32_t *scan_imp_whse)
  39. {
  40. int ret;
  41. ret = wait_event_interruptible_timeout(ctx->imp_wq,
  42. hab_import_ack_find(ctx, import_ack, vchan, scan_imp_whse),
  43. HAB_HS_TIMEOUT);
  44. if (!ret || (ret == -ERESTARTSYS))
  45. ret = -EAGAIN;
  46. else if (vchan->otherend_closed)
  47. ret = -ENODEV;
  48. else if (ret > 0)
  49. ret = 0;
  50. return ret;
  51. }
  52. /*
  53. * use physical channel to send export parcel
  54. * local remote
  55. * send(export) --> IRQ store to export warehouse
  56. * wait(export ack) <-- send(export ack)
  57. * the actual data consists the following 3 parts listed in order
  58. * 1. header (uint32_t) vcid|type|size
  59. * 2. export parcel (full struct)
  60. * 3. full contents in export->pdata
  61. */
/*
 * Search the context's export-ack receive queue for an ack matching
 * expect_ack (export id plus local/remote vcid pair).
 *
 * A matching entry is consumed (unlinked from exp_rxq and freed) and 1
 * is returned.  Entries scanned more than Q_AGE_THRESHOLD times without
 * matching are dropped as stale.
 *
 * NOTE(review): when vchan->otherend_closed is set, the entry currently
 * under inspection is consumed and 1 returned even when it does not
 * match expect_ack — presumably a deliberate flush so the waiter is
 * released on remote close; confirm before changing.
 *
 * Used as the wait condition in hab_export_ack_wait(); returns 0 when
 * nothing matched so the caller keeps waiting.
 */
static int hab_export_ack_find(struct uhab_context *ctx,
	struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
{
	int ret = 0;
	struct hab_export_ack_recvd *ack_recvd, *tmp;

	/* exp_rxq is also touched from bottom-half rx context; _bh lock */
	spin_lock_bh(&ctx->expq_lock);
	list_for_each_entry_safe(ack_recvd, tmp, &ctx->exp_rxq, node) {
		if ((ack_recvd->ack.export_id == expect_ack->export_id &&
			ack_recvd->ack.vcid_local == expect_ack->vcid_local &&
			ack_recvd->ack.vcid_remote == expect_ack->vcid_remote)
			|| vchan->otherend_closed) {
			list_del(&ack_recvd->node);
			kfree(ack_recvd);
			ret = 1;
			break;
		}
		/* age out acks nobody claimed after repeated scans */
		ack_recvd->age++;
		if (ack_recvd->age > Q_AGE_THRESHOLD) {
			list_del(&ack_recvd->node);
			kfree(ack_recvd);
		}
	}
	spin_unlock_bh(&ctx->expq_lock);
	return ret;
}
  87. static int hab_export_ack_wait(struct uhab_context *ctx,
  88. struct hab_export_ack *expect_ack, struct virtual_channel *vchan)
  89. {
  90. int ret;
  91. ret = wait_event_interruptible_timeout(ctx->exp_wq,
  92. hab_export_ack_find(ctx, expect_ack, vchan),
  93. HAB_HS_TIMEOUT);
  94. if (!ret || (ret == -ERESTARTSYS))
  95. ret = -EAGAIN;
  96. else if (vchan->otherend_closed)
  97. ret = -ENODEV;
  98. else if (ret > 0)
  99. ret = 0;
  100. return ret;
  101. }
  102. /*
  103. * Get id from free list first. if not available, new id is generated.
  104. * Once generated it will not be erased
  105. * assumptions: no handshake or memory map/unmap in this helper function
  106. */
  107. struct export_desc_super *habmem_add_export(
  108. struct virtual_channel *vchan,
  109. int sizebytes,
  110. uint32_t flags)
  111. {
  112. struct export_desc *exp = NULL;
  113. struct export_desc_super *exp_super = NULL;
  114. if (!vchan || !sizebytes)
  115. return NULL;
  116. exp_super = vzalloc(sizebytes);
  117. if (!exp_super)
  118. return NULL;
  119. exp = &exp_super->exp;
  120. idr_preload(GFP_KERNEL);
  121. spin_lock_bh(&vchan->pchan->expid_lock);
  122. /* using cyclic way to match with BE side */
  123. exp->export_id =
  124. idr_alloc_cyclic(&vchan->pchan->expid_idr, exp, 1, 0, GFP_NOWAIT);
  125. spin_unlock_bh(&vchan->pchan->expid_lock);
  126. idr_preload_end();
  127. exp->readonly = flags;
  128. exp->vcid_local = vchan->id;
  129. exp->vcid_remote = vchan->otherend_id;
  130. exp->domid_local = vchan->pchan->vmid_local;
  131. exp->domid_remote = vchan->pchan->vmid_remote;
  132. /*
  133. * In new protocol, exp_desc will not be sent to remote during hab export.
  134. * Below pointers are required for local usage and will be removed before sending.
  135. */
  136. if (vchan->pchan->mem_proto == 1) {
  137. exp->vchan = vchan;
  138. exp->ctx = vchan->ctx;
  139. exp->pchan = vchan->pchan;
  140. }
  141. return exp_super;
  142. }
  143. void habmem_remove_export(struct export_desc *exp)
  144. {
  145. struct uhab_context *ctx = NULL;
  146. struct export_desc_super *exp_super =
  147. container_of(exp,
  148. struct export_desc_super,
  149. exp);
  150. if (!exp || !exp->ctx) {
  151. if (exp)
  152. pr_err("invalid info in exp %pK ctx %pK\n",
  153. exp, exp->ctx);
  154. else
  155. pr_err("invalid exp\n");
  156. return;
  157. }
  158. ctx = exp->ctx;
  159. write_lock(&ctx->exp_lock);
  160. ctx->export_total--;
  161. write_unlock(&ctx->exp_lock);
  162. exp->ctx = NULL;
  163. habmem_export_put(exp_super);
  164. }
  165. static void habmem_export_destroy(struct kref *refcount)
  166. {
  167. struct export_desc_super *exp_super =
  168. container_of(
  169. refcount,
  170. struct export_desc_super,
  171. refcount);
  172. if (!exp_super) {
  173. pr_err("invalid exp_super\n");
  174. return;
  175. }
  176. habmem_exp_release(exp_super);
  177. vfree(exp_super);
  178. }
  179. /*
  180. * store the parcel to the warehouse, then send the parcel to remote side
  181. * both exporter composed export descriptor and the grantrefids are sent
  182. * as one msg to the importer side
  183. */
/*
 * Publish an export on the virtual channel.
 *
 * Looks up the descriptor (created by habmem_add_export()) in the
 * pchan's expid_idr, then:
 *  - legacy protocol (mem_proto == 0): sends the descriptor to the
 *    remote side and blocks until the export ack arrives, then fills
 *    in the local back-pointers (pchan/vchan/ctx);
 *  - new protocol (mem_proto == 1): nothing is sent here; the
 *    back-pointers were already set in habmem_add_export().
 * Finally the export is appended to the context's export warehouse and
 * its state advanced to HAB_EXP_SUCCESS, making it eligible for
 * unexport.
 *
 * @flags is currently unused in this function.
 *
 * Returns 0 on success or a negative errno (oversized message, unknown
 * id, send failure, or ack timeout).
 */
static int habmem_export_vchan(struct uhab_context *ctx,
	struct virtual_channel *vchan,
	int payload_size,
	uint32_t flags,
	uint32_t export_id)
{
	int ret = 0;
	struct export_desc *exp = NULL;
	struct export_desc_super *exp_super = NULL;
	uint32_t sizebytes = sizeof(*exp) + payload_size;
	struct hab_export_ack expected_ack = {0};
	struct hab_header header = HAB_HEADER_INITIALIZER;

	if (sizebytes > (uint32_t)HAB_HEADER_SIZE_MAX) {
		pr_err("exp message too large, %u bytes, max is %d\n",
			sizebytes, HAB_HEADER_SIZE_MAX);
		return -EINVAL;
	}

	spin_lock_bh(&vchan->pchan->expid_lock);
	exp = idr_find(&vchan->pchan->expid_idr, export_id);
	spin_unlock_bh(&vchan->pchan->expid_lock);
	if (!exp) {
		pr_err("export vchan failed: exp_id %d, pchan %s\n",
			export_id, vchan->pchan->name);
		return -EINVAL;
	}

	pr_debug("sizebytes including exp_desc: %u = %zu + %d\n",
		sizebytes, sizeof(*exp), payload_size);

	/* exp_desc will not be sent to remote during export in new protocol */
	if (vchan->pchan->mem_proto == 0) {
		HAB_HEADER_SET_SIZE(header, sizebytes);
		HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT);
		HAB_HEADER_SET_ID(header, vchan->otherend_id);
		HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
		ret = physical_channel_send(vchan->pchan, &header, exp,
			HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
		if (ret != 0) {
			pr_err("failed to send imp msg %d, exp_id %d, vcid %x\n",
				ret, export_id, vchan->id);
			return ret;
		}
		/* block until the remote confirms it stored the descriptor */
		expected_ack.export_id = exp->export_id;
		expected_ack.vcid_local = exp->vcid_local;
		expected_ack.vcid_remote = exp->vcid_remote;
		ret = hab_export_ack_wait(ctx, &expected_ack, vchan);
		if (ret != 0) {
			pr_err("failed to receive remote export ack %d on vc %x\n",
				ret, vchan->id);
			return ret;
		}
		exp->pchan = vchan->pchan;
		exp->vchan = vchan;
		exp->ctx = ctx;
	}

	write_lock(&ctx->exp_lock);
	ctx->export_total++;
	list_add_tail(&exp->node, &ctx->exp_whse);
	write_unlock(&ctx->exp_lock);

	/* WRITE_ONCE pairs with lockless readers of exp_state elsewhere */
	exp_super = container_of(exp, struct export_desc_super, exp);
	WRITE_ONCE(exp_super->exp_state, HAB_EXP_SUCCESS);

	return ret;
}
  245. /*
  246. * This function is a revoke function for habmm_hyp_grant_*(),
  247. * only call this function when habmm_hyp_grant_*() returns
  248. * success but exp hasn't been added to exp_whse.
  249. * hab_hyp_grant_*() do 4 things:
  250. * 1) add 1 to refcount of dma_buf.
  251. * 2) alloc memory for struct export_desc_super.
  252. * 3) alloc memory for struct exp_platform_data.
  253. * 4) alloc idr.
  254. * we revoke these 4 things in this function. we choose to call
  255. * idr_remove before habmem_export_put() to unpublish this
  256. * export desc as early as possible, however the racing between
  257. * habmem_export_put() and other concurrent user is handled by
  258. * state machine mechanism.
  259. */
  260. static int habmem_hyp_grant_undo(struct uhab_context *ctx,
  261. struct virtual_channel *vchan,
  262. uint32_t export_id)
  263. {
  264. struct export_desc *exp = NULL;
  265. struct export_desc_super *exp_super = NULL;
  266. int irqs_disabled = irqs_disabled();
  267. exp = idr_find(&vchan->pchan->expid_idr, export_id);
  268. if (!exp) {
  269. pr_err("export vchan failed: exp_id %d, pchan %s\n",
  270. export_id, vchan->pchan->name);
  271. return -EINVAL;
  272. }
  273. exp_super = container_of(exp,
  274. struct export_desc_super,
  275. exp);
  276. hab_spin_lock(&vchan->pchan->expid_lock, irqs_disabled);
  277. idr_remove(&vchan->pchan->expid_idr, exp->export_id);
  278. hab_spin_unlock(&vchan->pchan->expid_lock, irqs_disabled);
  279. exp->ctx = NULL;
  280. return habmem_export_put(exp_super);
  281. }
/*
 * Take an additional reference on an export container.  Paired with
 * habmem_export_put(); the final put runs habmem_export_destroy().
 */
void habmem_export_get(struct export_desc_super *exp_super)
{
	kref_get(&exp_super->refcount);
}
/*
 * Drop a reference on an export container.  Returns 1 when this was
 * the final reference (habmem_export_destroy() ran and exp_super is
 * gone), 0 otherwise.
 */
int habmem_export_put(struct export_desc_super *exp_super)
{
	return kref_put(&exp_super->refcount, habmem_export_destroy);
}
/*
 * Export a page-aligned buffer over the virtual channel param->vcid.
 *
 * Validates the request (non-zero PAGE_SIZE-multiple size; a buffer
 * pointer unless HABMM_EXPIMP_FLAGS_FD is set), grants the pages to
 * the remote domain via habmem_hyp_grant() (kernel buffer) or
 * habmem_hyp_grant_user() (user buffer), then publishes the export
 * with habmem_export_vchan().  On success param->exportid returns the
 * new id; on publish failure the grant is rolled back with
 * habmem_hyp_grant_undo().
 *
 * @ctx:    owning context
 * @param:  request/response block (vcid, buffer, sizebytes, flags in;
 *          exportid out)
 * @kernel: non-zero when param->buffer is a kernel-space address
 *
 * Returns 0 on success or a negative errno.
 */
int hab_mem_export(struct uhab_context *ctx,
	struct hab_export *param,
	int kernel)
{
	int ret = 0;
	unsigned int payload_size = 0;
	uint32_t export_id = 0;
	struct virtual_channel *vchan;
	int page_count;
	int compressed = 0;

	if (!ctx || !param || !param->sizebytes
		|| ((param->sizebytes % PAGE_SIZE) != 0)
		|| (!param->buffer && !(HABMM_EXPIMP_FLAGS_FD & param->flags))
		)
		return -EINVAL;

	param->exportid = 0;
	vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 0);
	if (!vchan || !vchan->pchan) {
		ret = -ENODEV;
		goto err;
	}

	page_count = param->sizebytes/PAGE_SIZE;
	if (kernel) {
		ret = habmem_hyp_grant(vchan,
			(unsigned long)param->buffer,
			page_count,
			param->flags,
			vchan->pchan->dom_id,
			&compressed,
			&payload_size,
			&export_id);
	} else {
		ret = habmem_hyp_grant_user(vchan,
			(unsigned long)param->buffer,
			page_count,
			param->flags,
			vchan->pchan->dom_id,
			&compressed,
			&payload_size,
			&export_id);
	}
	if (ret < 0) {
		pr_err("habmem_hyp_grant vc %x failed size=%d ret=%d\n",
			param->vcid, payload_size, ret);
		goto err;
	}

	/* publish; on failure revoke the grant so nothing leaks */
	ret = habmem_export_vchan(ctx, vchan, payload_size, param->flags, export_id);
	if (!ret)
		param->exportid = export_id;
	else
		habmem_hyp_grant_undo(ctx, vchan, export_id);
err:
	if (vchan)
		hab_vchan_put(vchan);
	return ret;
}
/*
 * Tear down a local export.
 *
 * The export must be in HAB_EXP_SUCCESS state, belong to this context,
 * and not be currently imported by the remote side; those checks and
 * the idr_remove() happen under the expid lock so the id is validated
 * and unpublished atomically.  The descriptor is then unlinked from
 * the context's export warehouse, the grant revoked, and the creation
 * reference dropped via habmem_remove_export().
 *
 * @kernel is accepted for symmetry with the other entry points; it is
 * not used here.
 *
 * Returns 0 on success, -EBUSY while remotely imported, -EINVAL for a
 * bad id/state/owner, -ENODEV when the vchan is gone.
 */
int hab_mem_unexport(struct uhab_context *ctx,
	struct hab_unexport *param,
	int kernel)
{
	int ret = 0;
	struct export_desc *exp = NULL;
	struct export_desc_super *exp_super = NULL;
	struct virtual_channel *vchan;

	if (!ctx || !param)
		return -EINVAL;

	/* refcnt on the access */
	vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 1);
	if (!vchan || !vchan->pchan) {
		ret = -ENODEV;
		goto err_novchan;
	}

	spin_lock_bh(&vchan->pchan->expid_lock);
	exp = idr_find(&vchan->pchan->expid_idr, param->exportid);
	if (!exp) {
		spin_unlock_bh(&vchan->pchan->expid_lock);
		pr_err("unexp fail, cannot find exp id %d on vc %x\n", param->exportid, vchan->id);
		ret = -EINVAL;
		goto err_novchan;
	}
	exp_super = container_of(exp, struct export_desc_super, exp);
	if (exp_super->exp_state == HAB_EXP_SUCCESS &&
		exp->ctx == ctx &&
		exp_super->remote_imported == 0)
		idr_remove(&vchan->pchan->expid_idr, param->exportid);
	else {
		/* still imported remotely -> -EBUSY; otherwise bad state/owner */
		ret = exp_super->remote_imported == 0 ? -EINVAL : -EBUSY;
		pr_err("unexp expid %d fail on vc %x, state %d, remote imp %d\n",
			param->exportid, vchan->id,
			exp_super->exp_state, exp_super->remote_imported);
		spin_unlock_bh(&vchan->pchan->expid_lock);
		goto err_novchan;
	}
	spin_unlock_bh(&vchan->pchan->expid_lock);

	/* TODO: hab stat is not accurate after idr_remove and before list_del here */
	write_lock(&ctx->exp_lock);
	list_del(&exp->node);
	write_unlock(&ctx->exp_lock);

	ret = habmem_hyp_revoke(exp->payload, exp->payload_count);
	if (ret) {
		/* unrecoverable scenario*/
		pr_err("Error found in revoke grant with ret %d\n", ret);
		goto err_novchan;
	}

	habmem_remove_export(exp);
err_novchan:
	if (vchan)
		hab_vchan_put(vchan);
	return ret;
}
  400. int hab_mem_import(struct uhab_context *ctx,
  401. struct hab_import *param,
  402. int kernel)
  403. {
  404. int ret = 0, found = 0;
  405. struct export_desc *exp = NULL;
  406. struct export_desc_super *exp_super = NULL;
  407. struct virtual_channel *vchan = NULL;
  408. struct hab_header header = HAB_HEADER_INITIALIZER;
  409. struct hab_import_ack expected_ack = {0};
  410. struct hab_import_data imp_data = {0};
  411. uint32_t scan_imp_whse = 0U;
  412. if (!ctx || !param)
  413. return -EINVAL;
  414. if ((param->sizebytes % PAGE_SIZE) != 0) {
  415. pr_err("request imp size %ld is not page aligned on vc %x\n",
  416. param->sizebytes, param->vcid);
  417. return -EINVAL;
  418. }
  419. vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 0);
  420. if (!vchan || !vchan->pchan) {
  421. ret = -ENODEV;
  422. goto err_imp;
  423. }
  424. if (vchan->pchan->mem_proto == 1) {
  425. /* send import sync message to the remote side */
  426. imp_data.exp_id = param->exportid;
  427. imp_data.page_cnt = param->sizebytes >> PAGE_SHIFT;
  428. HAB_HEADER_SET_SIZE(header, sizeof(struct hab_import_data));
  429. HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_IMPORT);
  430. HAB_HEADER_SET_ID(header, vchan->otherend_id);
  431. HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);
  432. ret = physical_channel_send(vchan->pchan, &header, &imp_data,
  433. HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
  434. if (ret != 0) {
  435. pr_err("failed to send imp msg %d, exp_id %d, vcid %x\n",
  436. ret,
  437. param->exportid,
  438. vchan->id);
  439. goto err_imp;
  440. }
  441. expected_ack.export_id = param->exportid;
  442. expected_ack.vcid_local = vchan->id;
  443. expected_ack.vcid_remote = vchan->otherend_id;
  444. ret = hab_import_ack_wait(ctx, &expected_ack, vchan, &scan_imp_whse);
  445. if (ret != 0) {
  446. pr_err("failed to receive remote import ack %d on vc %x\n", ret, vchan->id);
  447. goto err_imp;
  448. }
  449. if (!scan_imp_whse) {
  450. ret = -EINVAL;
  451. pr_err("imp_ack_fail msg recv on vc %x\n", vchan->id);
  452. goto err_imp;
  453. }
  454. }
  455. spin_lock_bh(&ctx->imp_lock);
  456. list_for_each_entry(exp, &ctx->imp_whse, node) {
  457. if ((exp->export_id == param->exportid) &&
  458. (exp->pchan == vchan->pchan)) {
  459. exp_super = container_of(exp, struct export_desc_super, exp);
  460. /* not allowed to import one exp desc more than once */
  461. if (exp_super->import_state == EXP_DESC_IMPORTED
  462. || exp_super->import_state == EXP_DESC_IMPORTING) {
  463. pr_err("vc %x not allowed to import expid %u more than once\n",
  464. vchan->id, exp->export_id);
  465. spin_unlock_bh(&ctx->imp_lock);
  466. ret = -EINVAL;
  467. goto err_imp;
  468. }
  469. /*
  470. * set the flag to avoid another thread getting the exp desc again
  471. * and must be before unlock, otherwise it is no use.
  472. */
  473. exp_super->import_state = EXP_DESC_IMPORTING;
  474. found = 1;
  475. break;
  476. }
  477. }
  478. spin_unlock_bh(&ctx->imp_lock);
  479. if (!found) {
  480. pr_err("vc %x fail to get export descriptor from export id %d\n",
  481. vchan->id, param->exportid);
  482. ret = -ENODEV;
  483. goto err_imp;
  484. }
  485. if ((exp->payload_count << PAGE_SHIFT) != param->sizebytes) {
  486. pr_err("vc %x input size %d don't match buffer size %d\n",
  487. vchan->id, param->sizebytes, exp->payload_count << PAGE_SHIFT);
  488. ret = -EINVAL;
  489. exp_super->import_state = EXP_DESC_INIT;
  490. goto err_imp;
  491. }
  492. ret = habmem_imp_hyp_map(ctx->import_ctx, param, exp, kernel);
  493. if (ret) {
  494. pr_err("Import fail on vc %x ret:%d pcnt:%d rem:%d 1st_ref:0x%X\n",
  495. vchan->id, ret, exp->payload_count,
  496. exp->domid_local, *((uint32_t *)exp->payload));
  497. exp_super->import_state = EXP_DESC_INIT;
  498. goto err_imp;
  499. }
  500. exp->import_index = param->index;
  501. exp->kva = kernel ? (void *)param->kva : NULL;
  502. exp_super->import_state = EXP_DESC_IMPORTED;
  503. err_imp:
  504. if (vchan) {
  505. if ((vchan->pchan != NULL) &&
  506. (vchan->pchan->mem_proto == 1) &&
  507. (found == 1) &&
  508. (ret != 0)) {
  509. /* dma_buf create failure, rollback required */
  510. hab_send_unimport_msg(vchan, exp->export_id);
  511. spin_lock_bh(&ctx->imp_lock);
  512. list_del(&exp->node);
  513. ctx->import_total--;
  514. spin_unlock_bh(&ctx->imp_lock);
  515. kfree(exp_super);
  516. }
  517. hab_vchan_put(vchan);
  518. }
  519. return ret;
  520. }
/*
 * Release a previously imported buffer.
 *
 * Locates the descriptor in the context's import warehouse by export
 * id and pchan; only a descriptor in EXP_DESC_IMPORTED state may be
 * released.  On success the mapping is undone via
 * habmm_imp_hyp_unmap(), the kernel VA (if any) is returned in
 * param->kva, an unimport message is sent to the exporter under the
 * new protocol (mem_proto == 1), and the descriptor memory is freed.
 *
 * @kernel selects kernel vs user unmap semantics.
 *
 * Returns 0 on success, -EINVAL when no releasable descriptor is
 * found, -ENODEV when the vchan is gone.
 */
int hab_mem_unimport(struct uhab_context *ctx,
	struct hab_unimport *param,
	int kernel)
{
	int ret = 0, found = 0;
	struct export_desc *exp = NULL, *exp_tmp;
	struct export_desc_super *exp_super = NULL;
	struct virtual_channel *vchan;

	if (!ctx || !param)
		return -EINVAL;

	vchan = hab_get_vchan_fromvcid(param->vcid, ctx, 1);
	if (!vchan || !vchan->pchan) {
		if (vchan)
			hab_vchan_put(vchan);
		return -ENODEV;
	}

	spin_lock_bh(&ctx->imp_lock);
	list_for_each_entry_safe(exp, exp_tmp, &ctx->imp_whse, node) {
		/* same pchan is expected here */
		if (exp->export_id == param->exportid &&
			exp->pchan == vchan->pchan) {
			exp_super = container_of(exp, struct export_desc_super, exp);
			/* only successfully imported export desc could be found and released */
			if (exp_super->import_state == EXP_DESC_IMPORTED) {
				list_del(&exp->node);
				ctx->import_total--;
				found = 1;
			} else
				pr_err("vc %x exp id:%u status:%d is found, invalid to unimport\n",
					vchan->id, exp->export_id, exp_super->import_state);
			break;
		}
	}
	spin_unlock_bh(&ctx->imp_lock);

	if (!found)
		ret = -EINVAL;
	else {
		ret = habmm_imp_hyp_unmap(ctx->import_ctx, exp, kernel);
		if (ret) {
			/* best effort: log the failure but continue the teardown */
			pr_err("unmap fail id:%d pcnt:%d vcid:%d\n",
				exp->export_id, exp->payload_count, exp->vcid_remote);
		}
		param->kva = (uint64_t)exp->kva;
		if (vchan->pchan->mem_proto == 1)
			hab_send_unimport_msg(vchan, exp->export_id);
		kfree(exp_super);
	}
	if (vchan)
		hab_vchan_put(vchan);
	return ret;
}