hab_msg.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include "hab.h"
#include "hab_grantable.h"
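
/*
 * Check whether the vchan receive list is empty. The result is read
 * under rx_lock so it is coherent with concurrent enqueue/dequeue; the
 * irqs_disabled() flag is passed so hab_spin_lock() can pick the
 * appropriate lock variant.
 */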
static int hab_rx_queue_empty(struct virtual_channel *vchan)
{
	int ret = 0;
	int irqs_disabled = irqs_disabled();

	hab_spin_lock(&vchan->rx_lock, irqs_disabled);
	ret = list_empty(&vchan->rx_list);
	hab_spin_unlock(&vchan->rx_lock, irqs_disabled);

	return ret;
}
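
/*
 * Allocate a receive message as a set of single-page buffers rather than
 * one contiguous buffer, as a fallback for when a large contiguous
 * kzalloc() fails. message->data then holds an array of page pointers:
 * page_num full pages plus one tail fragment when the size is not page
 * aligned.
 */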
static struct hab_message*
hab_scatter_msg_alloc(struct physical_channel *pchan, size_t sizebytes)
{
	struct hab_message *message = NULL;
	int i = 0;
	int allocated = 0;
	bool failed = false;
	void **scatter_buf = NULL;
	uint32_t total_num, page_num = 0U;

	/* The scatter routine is only for messages larger than one page */
	if (sizebytes <= PAGE_SIZE)
		return NULL;

	page_num = sizebytes >> PAGE_SHIFT;
	total_num = (sizebytes % PAGE_SIZE == 0) ? page_num : (page_num + 1);
	message = kzalloc(sizeof(struct hab_message)
			+ (total_num * sizeof(void *)), GFP_ATOMIC);
	if (!message)
		return NULL;
	message->scatter = true;
	scatter_buf = (void **)message->data;

	/*
	 * All receive buffers need to be prepared before the actual receive.
	 * If we received into each page right after allocating it, the next
	 * allocation would not be guaranteed to succeed, and part of the
	 * message would be stuck in the channel if an allocation failed
	 * halfway through.
	 */
	for (i = 0; i < page_num; i++) {
		scatter_buf[i] = kzalloc(PAGE_SIZE, GFP_ATOMIC);
		if (scatter_buf[i] == NULL) {
			failed = true;
			allocated = i;
			break;
		}
	}
	if ((!failed) && (sizebytes % PAGE_SIZE != 0)) {
		scatter_buf[i] = kzalloc(sizebytes % PAGE_SIZE, GFP_ATOMIC);
		if (scatter_buf[i] == NULL) {
			failed = true;
			allocated = i;
		}
	}
	if (!failed) {
		for (i = 0; i < sizebytes / PAGE_SIZE; i++)
			message->sizebytes += physical_channel_read(pchan,
					scatter_buf[i], PAGE_SIZE);
		if (sizebytes % PAGE_SIZE)
			message->sizebytes += physical_channel_read(pchan,
					scatter_buf[i], sizebytes % PAGE_SIZE);
		message->sequence_rx = pchan->sequence_rx;
	} else {
		for (i = 0; i < allocated; i++)
			kfree(scatter_buf[i]);
		kfree(message);
		message = NULL;
	}

	return message;
}
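
/*
 * Allocate a message and read its payload from the physical channel.
 * Falls back to hab_scatter_msg_alloc() when the contiguous allocation
 * fails; returns NULL if both attempts fail or the size exceeds
 * HAB_HEADER_SIZE_MAX.
 */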
static struct hab_message*
hab_msg_alloc(struct physical_channel *pchan, size_t sizebytes)
{
	struct hab_message *message;

	if (sizebytes > HAB_HEADER_SIZE_MAX) {
		pr_err("pchan %s send size too large %zd\n",
			pchan->name, sizebytes);
		return NULL;
	}

	message = kzalloc(sizeof(*message) + sizebytes, GFP_ATOMIC);
	if (!message) {
		/*
		 * A big contiguous allocation may fail when memory is
		 * fragmented. Instead of one big consecutive kmem buffer,
		 * try allocating one page at a time.
		 */
		message = hab_scatter_msg_alloc(pchan, sizebytes);
	} else {
		message->sizebytes =
			physical_channel_read(pchan, message->data, sizebytes);
		message->sequence_rx = pchan->sequence_rx;
	}

	return message;
}
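
/*
 * Free a message allocated by hab_msg_alloc(). For a scatter message the
 * per-page buffers referenced from message->data are freed first.
 */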
void hab_msg_free(struct hab_message *message)
{
	int i = 0;
	uint32_t page_num = 0U;
	void **scatter_buf = NULL;

	if (unlikely(message->scatter)) {
		scatter_buf = (void **)message->data;
		page_num = message->sizebytes >> PAGE_SHIFT;
		if (message->sizebytes % PAGE_SIZE)
			page_num++;
		for (i = 0; i < page_num; i++)
			kfree(scatter_buf[i]);
	}
	kfree(message);
}
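
/*
 * Dequeue one message from the vchan receive list.
 *
 * Returns 0 on success (*msg set, *rsize updated to the message size),
 * -EOVERFLOW if the caller's buffer is too small (*rsize then holds the
 * required size), -ENODEV if the remote end closed with nothing pending,
 * -EINTR if an interruptible wait was interrupted, -ETIMEDOUT on
 * timeout, and -EAGAIN when the queue is empty in the non-blocking case.
 */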
int
hab_msg_dequeue(struct virtual_channel *vchan, struct hab_message **msg,
		int *rsize, unsigned int timeout, unsigned int flags)
{
	struct hab_message *message = NULL;
	/*
	 * ret keeps its default value in two situations:
	 * 1. The caller passed the non-blocking flag and rx_list is empty,
	 *    or rx_list looked non-empty but, racing with another consumer,
	 *    turned out to be empty once the lock was taken.
	 * 2. wait_event_* returned because of a timeout while the condition
	 *    was still unmet, in which case it returns 0.
	 * With a default of 0 these two cases would be hard to tell apart
	 * (or would need redundant code), so the default is -EAGAIN: case 1
	 * keeps -EAGAIN, while case 2 overwrites ret with 0.
	 */
	int ret = -EAGAIN;
	int wait = !(flags & HABMM_SOCKET_RECV_FLAGS_NON_BLOCKING);
	int interruptible = !(flags & HABMM_SOCKET_RECV_FLAGS_UNINTERRUPTIBLE);
	int timeout_flag = flags & HABMM_SOCKET_RECV_FLAGS_TIMEOUT;
	int irqs_disabled = irqs_disabled();

	if (wait) {
		/* wait forever if the timeout flag is not set */
		if (!timeout_flag)
			timeout = UINT_MAX;
		if (hab_rx_queue_empty(vchan)) {
			if (interruptible)
				ret = wait_event_interruptible_timeout(vchan->rx_queue,
					!hab_rx_queue_empty(vchan) ||
					vchan->otherend_closed,
					msecs_to_jiffies(timeout));
			else
				ret = wait_event_timeout(vchan->rx_queue,
					!hab_rx_queue_empty(vchan) ||
					vchan->otherend_closed,
					msecs_to_jiffies(timeout));
		}
	}

	/*
	 * Return all messages received before the remote close. The list
	 * must be checked again under the lock in case another thread
	 * dequeued the last message in the meantime.
	 */
	hab_spin_lock(&vchan->rx_lock, irqs_disabled);
	if (!list_empty(&vchan->rx_list)) {
		message = list_first_entry(&vchan->rx_list,
				struct hab_message, node);
		if (message) {
			if (*rsize >= message->sizebytes) {
				/* msg can be safely retrieved in full */
				list_del(&message->node);
				ret = 0;
				*rsize = message->sizebytes;
			} else {
				pr_err("vcid %x rcv buf too small %d < %zd\n",
					vchan->id, *rsize,
					message->sizebytes);
				/*
				 * Return the actual message size in the RxQ
				 * instead of 0, so that the hab client can
				 * re-receive the message with the correct
				 * buffer size.
				 */
				*rsize = message->sizebytes;
				message = NULL;
				ret = -EOVERFLOW; /* come back again */
			}
		}
	} else {
		/* no message received */
		*rsize = 0;
		if (vchan->otherend_closed)
			ret = -ENODEV;
		else if (ret == -ERESTARTSYS)
			ret = -EINTR;
		else if (ret == 0) {
			pr_debug("timeout! vcid: %x\n", vchan->id);
			ret = -ETIMEDOUT;
		} else {
			pr_debug("EAGAIN: ret = %d, flags = %x\n", ret, flags);
			ret = -EAGAIN;
		}
	}
	hab_spin_unlock(&vchan->rx_lock, irqs_disabled);

	*msg = message;
	return ret;
}
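
/*
 * A minimal sketch of a hab_msg_dequeue() caller (hypothetical, for
 * illustration only; the names below are not part of this driver):
 *
 *	struct hab_message *msg = NULL;
 *	int rsize = bufsize;
 *	int ret = hab_msg_dequeue(vchan, &msg, &rsize, timeout_ms, flags);
 *
 *	if (!ret) {
 *		// consume rsize bytes from msg, then hab_msg_free(msg)
 *	} else if (ret == -EOVERFLOW) {
 *		// rsize now holds the pending message size; retry with
 *		// a buffer at least that large
 *	}
 */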
static void hab_msg_queue(struct virtual_channel *vchan,
		struct hab_message *message)
{
	int irqs_disabled = irqs_disabled();

	hab_spin_lock(&vchan->rx_lock, irqs_disabled);
	list_add_tail(&message->node, &vchan->rx_list);
	hab_spin_unlock(&vchan->rx_lock, irqs_disabled);

	wake_up(&vchan->rx_queue);
}
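
/*
 * Queue a received export descriptor into the context import warehouse
 * (imp_whse), where the import path looks it up later.
 */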
static int hab_export_enqueue(struct virtual_channel *vchan,
		struct export_desc *exp)
{
	struct uhab_context *ctx = vchan->ctx;
	int irqs_disabled = irqs_disabled();

	hab_spin_lock(&ctx->imp_lock, irqs_disabled);
	list_add_tail(&exp->node, &ctx->imp_whse);
	ctx->import_total++;
	hab_spin_unlock(&ctx->imp_lock, irqs_disabled);

	return 0;
}

/*
 * Called when an invalid import request is received from the importer.
 * Without this ack, the importer would hang forever awaiting the
 * import ack message.
 */
static int hab_send_import_ack_fail(struct virtual_channel *vchan,
		uint32_t exp_id)
{
	int ret = 0;
	uint32_t export_id = exp_id;
	struct hab_header header = HAB_HEADER_INITIALIZER;

	HAB_HEADER_SET_SIZE(header, sizeof(uint32_t));
	HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_IMPORT_ACK_FAIL);
	HAB_HEADER_SET_ID(header, vchan->otherend_id);
	HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);

	ret = physical_channel_send(vchan->pchan, &header, &export_id,
			HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
	if (ret != 0)
		pr_err("failed to send imp ack fail msg %d, exp_id %d, vcid %x\n",
			ret,
			export_id,
			vchan->id);

	return ret;
}
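
/*
 * Send the export descriptor back to the importer as the import ack.
 * The local pointers inside the descriptor are cleared for the duration
 * of the send and restored afterwards, so the remote side never sees
 * local kernel addresses.
 */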
static int hab_send_import_ack(struct virtual_channel *vchan,
		struct export_desc *exp)
{
	int ret = 0;
	struct export_desc_super *exp_super = container_of(exp, struct export_desc_super, exp);
	uint32_t sizebytes = sizeof(*exp) + exp_super->payload_size;
	struct hab_header header = HAB_HEADER_INITIALIZER;

	HAB_HEADER_SET_SIZE(header, sizebytes);
	HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_IMPORT_ACK);
	HAB_HEADER_SET_ID(header, vchan->otherend_id);
	HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);

	/*
	 * Local pointers must not leak to the remote side, for security
	 * reasons. The relevant lock should be held while the local
	 * pointers are cleared, as in other places that modify the exp
	 * node; for now this is protected by exp_lock in the caller.
	 */
	exp->pchan = NULL;
	exp->vchan = NULL;
	exp->ctx = NULL;

	ret = physical_channel_send(vchan->pchan, &header, exp,
			HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
	if (ret != 0)
		pr_err("failed to send imp ack msg %d, vcid %x\n",
			ret, vchan->id);

	exp->pchan = vchan->pchan;
	exp->vchan = vchan;
	exp->ctx = vchan->ctx;

	return ret;
}

/* Called on failure while handling an import ack msg, to wake up the local importer */
static void hab_create_invalid_ack(struct virtual_channel *vchan, uint32_t export_id)
{
	int irqs_disabled = irqs_disabled();
	struct hab_import_ack_recvd *ack_recvd = kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);

	if (!ack_recvd)
		return;

	ack_recvd->ack.export_id = export_id;
	ack_recvd->ack.vcid_local = vchan->id;
	ack_recvd->ack.vcid_remote = vchan->otherend_id;
	ack_recvd->ack.imp_whse_added = 0;

	hab_spin_lock(&vchan->ctx->impq_lock, irqs_disabled);
	list_add_tail(&ack_recvd->node, &vchan->ctx->imp_rxq);
	hab_spin_unlock(&vchan->ctx->impq_lock, irqs_disabled);
}
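
/*
 * Handle an import-ack-fail message: read the export id from the channel
 * and queue a failure ack (imp_whse_added == 0) so the waiting importer
 * can be woken up.
 */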
static int hab_receive_import_ack_fail(struct physical_channel *pchan,
		struct virtual_channel *vchan)
{
	struct hab_import_ack_recvd *ack_recvd = NULL;
	int irqs_disabled = irqs_disabled();
	uint32_t exp_id = 0;

	physical_channel_read(pchan, &exp_id, sizeof(uint32_t));

	ack_recvd = kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
	if (!ack_recvd)
		return -ENOMEM;

	ack_recvd->ack.export_id = exp_id;
	ack_recvd->ack.vcid_local = vchan->id;
	ack_recvd->ack.vcid_remote = vchan->otherend_id;
	ack_recvd->ack.imp_whse_added = 0;

	hab_spin_lock(&vchan->ctx->impq_lock, irqs_disabled);
	list_add_tail(&ack_recvd->node, &vchan->ctx->imp_rxq);
	hab_spin_unlock(&vchan->ctx->impq_lock, irqs_disabled);

	return 0;
}

static int hab_send_export_ack(struct virtual_channel *vchan,
		struct physical_channel *pchan,
		struct export_desc *exp)
{
	int ret = 0;
	struct hab_export_ack exp_ack = {
		.export_id = exp->export_id,
		.vcid_local = exp->vcid_local,
		.vcid_remote = exp->vcid_remote
	};
	struct hab_header header = HAB_HEADER_INITIALIZER;

	HAB_HEADER_SET_SIZE(header, sizeof(exp_ack));
	HAB_HEADER_SET_TYPE(header, HAB_PAYLOAD_TYPE_EXPORT_ACK);
	HAB_HEADER_SET_ID(header, exp->vcid_local);
	HAB_HEADER_SET_SESSION_ID(header, vchan->session_id);

	ret = physical_channel_send(pchan, &header, &exp_ack,
			HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING);
	if (ret != 0)
		pr_err("failed to send exp ack msg %d, vcid %x\n",
			ret, vchan->id);

	return ret;
}
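
/*
 * Read an export ack from the channel and queue it on the context export
 * receive queue (exp_rxq). A size mismatch against the local ack layout
 * is tolerated only so the channel can be drained, as explained below.
 */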
static int hab_receive_create_export_ack(struct physical_channel *pchan,
		struct uhab_context *ctx, size_t sizebytes)
{
	struct hab_export_ack_recvd *ack_recvd =
		kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
	int irqs_disabled = irqs_disabled();

	if (!ack_recvd)
		return -ENOMEM;

	if (sizeof(ack_recvd->ack) != sizebytes)
		pr_err("%s exp ack size %zu does not match arrived size %zu\n",
			pchan->name, sizeof(ack_recvd->ack), sizebytes);

	if (sizebytes > sizeof(ack_recvd->ack)) {
		pr_err("pchan %s read size too large %zd %zd\n",
			pchan->name, sizebytes, sizeof(ack_recvd->ack));
		kfree(ack_recvd);
		return -EINVAL;
	}

	/*
	 * If the hab version on the remote side differs from the local
	 * side, the size of the ack structure may differ. Under this
	 * circumstance sizebytes is still trusted, so it must be read out
	 * and the mismatched ack message dropped from the channel.
	 * Dropping such a message avoids a [payload][header][payload]
	 * layout that would make the whole channel unusable.
	 * For security reasons, though, this is not done when sizebytes
	 * is larger than expected.
	 */
	if (physical_channel_read(pchan,
		&ack_recvd->ack,
		sizebytes) != sizebytes) {
		kfree(ack_recvd);
		return -EIO;
	}

	/* add the ack_recvd node into the rx queue only if sizebytes is as expected */
	if (sizeof(ack_recvd->ack) == sizebytes) {
		hab_spin_lock(&ctx->expq_lock, irqs_disabled);
		list_add_tail(&ack_recvd->node, &ctx->exp_rxq);
		hab_spin_unlock(&ctx->expq_lock, irqs_disabled);
	} else {
		kfree(ack_recvd);
		return -EINVAL;
	}

	return 0;
}
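
/*
 * Read and validate an export descriptor (plus its compressed pfn table)
 * from the channel, then queue it into the import warehouse. With the
 * newer memory protocol (mem_proto == 1) an import ack is queued for the
 * local importer; otherwise an export ack is sent back to the exporter.
 */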
static int hab_receive_export_desc(struct physical_channel *pchan,
		struct virtual_channel *vchan,
		size_t sizebytes)
{
	struct hab_import_ack_recvd *ack_recvd = NULL;
	size_t exp_desc_size_expected = 0;
	struct export_desc *exp_desc = NULL;
	struct export_desc_super *exp_desc_super = NULL;
	struct compressed_pfns *pfn_table = NULL;
	int irqs_disabled = irqs_disabled();
	int ret = 0;

	exp_desc_size_expected = sizeof(struct export_desc)
		+ sizeof(struct compressed_pfns);
	if (sizebytes > (size_t)(HAB_HEADER_SIZE_MAX) ||
		sizebytes < exp_desc_size_expected) {
		pr_err("%s exp size too large/small %zu header %zu\n",
			pchan->name, sizebytes, sizeof(*exp_desc));
		return -EINVAL;
	}

	pr_debug("%s exp payload %zu bytes\n", pchan->name, sizebytes);

	exp_desc_super = kzalloc(sizebytes + sizeof(struct export_desc_super)
			- sizeof(struct export_desc), GFP_ATOMIC);
	if (!exp_desc_super)
		return -ENOMEM;

	exp_desc = &exp_desc_super->exp;
	if (physical_channel_read(pchan, exp_desc, sizebytes) != sizebytes) {
		pr_err("%s corrupted exp expect %zd bytes vcid %X remote %X open %d!\n",
			pchan->name, sizebytes, vchan->id,
			vchan->otherend_id, vchan->session_id);
		kfree(exp_desc_super);
		return -EIO;
	}

	if (pchan->vmid_local != exp_desc->domid_remote ||
		pchan->vmid_remote != exp_desc->domid_local)
		pr_err("corrupted vmid %d != %d %d != %d\n",
			pchan->vmid_local, exp_desc->domid_remote,
			pchan->vmid_remote, exp_desc->domid_local);
	exp_desc->domid_remote = pchan->vmid_remote;
	exp_desc->domid_local = pchan->vmid_local;
	exp_desc->pchan = pchan;
	if (pchan->mem_proto == 1) {
		exp_desc->vcid_remote = exp_desc->vcid_local;
		exp_desc->vcid_local = vchan->id;
	}

	/*
	 * All the checks should ideally happen here. To improve
	 * performance, the checks related to exp->payload_count and
	 * pfn_table->region[i].size are placed in pages_list_create.
	 * Any further use of such remote-provided data after the checks
	 * here and before the checks in pages_list_create must add its
	 * own checks if necessary.
	 */
	pfn_table = (struct compressed_pfns *)exp_desc->payload;
	if (pfn_table->nregions <= 0 ||
		(pfn_table->nregions > SIZE_MAX / sizeof(struct region)) ||
		(SIZE_MAX - exp_desc_size_expected <
		pfn_table->nregions * sizeof(struct region))) {
		pr_err("%s nregions is too large or negative, nregions:%d!\n",
			pchan->name, pfn_table->nregions);
		ret = -EINVAL;
		goto err_imp;
	}
	if (pfn_table->nregions > exp_desc->payload_count) {
		pr_err("%s nregions %d greater than payload_count %d\n",
			pchan->name, pfn_table->nregions, exp_desc->payload_count);
		ret = -EINVAL;
		goto err_imp;
	}
	if (exp_desc->payload_count > MAX_EXP_PAYLOAD_COUNT) {
		pr_err("payload_count out of range: %d size overflow\n",
			exp_desc->payload_count);
		ret = -EINVAL;
		goto err_imp;
	}

	exp_desc_size_expected += pfn_table->nregions * sizeof(struct region);
	if (sizebytes != exp_desc_size_expected) {
		pr_err("%s exp size not equal %zu expect %zu\n",
			pchan->name, sizebytes, exp_desc_size_expected);
		ret = -EINVAL;
		goto err_imp;
	}

	if (pchan->mem_proto == 1) {
		ack_recvd = kzalloc(sizeof(*ack_recvd), GFP_ATOMIC);
		if (!ack_recvd) {
			ret = -ENOMEM;
			goto err_imp;
		}
		ack_recvd->ack.export_id = exp_desc->export_id;
		ack_recvd->ack.vcid_local = exp_desc->vcid_local;
		ack_recvd->ack.vcid_remote = exp_desc->vcid_remote;
		ack_recvd->ack.imp_whse_added = 1;
	}

	hab_export_enqueue(vchan, exp_desc);
	if (pchan->mem_proto == 1) {
		hab_spin_lock(&vchan->ctx->impq_lock, irqs_disabled);
		list_add_tail(&ack_recvd->node, &vchan->ctx->imp_rxq);
		hab_spin_unlock(&vchan->ctx->impq_lock, irqs_disabled);
	} else {
		hab_send_export_ack(vchan, pchan, exp_desc);
	}

	return 0;

err_imp:
	if (pchan->mem_proto == 1) {
		hab_create_invalid_ack(vchan, exp_desc->export_id);
		hab_send_unimport_msg(vchan, exp_desc->export_id);
	}
	kfree(exp_desc_super);
	return ret;
}
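
/*
 * Drain and discard sizebytes of payload from the channel so the next
 * header is read from the correct position.
 */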
static void hab_msg_drop(struct physical_channel *pchan, size_t sizebytes)
{
	uint8_t *data = NULL;

	if (sizebytes > HAB_HEADER_SIZE_MAX) {
		pr_err("%s read size too large %zd\n", pchan->name, sizebytes);
		return;
	}

	data = kmalloc(sizebytes, GFP_ATOMIC);
	if (data == NULL)
		return;
	physical_channel_read(pchan, data, sizebytes);
	kfree(data);
}

static void hab_recv_unimport_msg(struct physical_channel *pchan, int vchan_exist)
{
	uint32_t exp_id = 0;
	struct export_desc *exp = NULL;
	struct export_desc_super *exp_super = NULL;
	int irqs_disabled = irqs_disabled();

	physical_channel_read(pchan, &exp_id, sizeof(uint32_t));
	if (!vchan_exist)
		pr_debug("unimp msg recv after vchan closed on %s, exp id %u\n",
			pchan->name, exp_id);

	/*
	 * expid_lock must be held long enough to ensure exp_super stays
	 * accessible until it is freed in habmem_export_destroy, where
	 * expid_lock is held across idr_remove.
	 */
	hab_spin_lock(&pchan->expid_lock, irqs_disabled);
	exp = idr_find(&pchan->expid_idr, exp_id);
	if ((exp != NULL) && (exp_id == exp->export_id) && (exp->pchan == pchan)) {
		exp_super = container_of(exp, struct export_desc_super, exp);
		if (exp_super->remote_imported)
			exp_super->remote_imported = 0;
		else
			pr_warn("invalid unimp msg recv on pchan %s, exp id %u\n",
				pchan->name, exp_id);
	} else {
		pr_err("invalid unimp msg recv on %s, exp id %u\n", pchan->name, exp_id);
	}
	hab_spin_unlock(&pchan->expid_lock, irqs_disabled);

	if (!vchan_exist)
		/* the exp node is not in the reclaim list while the vchan still exists */
		schedule_work(&hab_driver.reclaim_work);
}
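
/*
 * Resolve the local virtual channel for a received header. Open
 * handshake messages (INIT*) carry no vchan; everything else is looked
 * up by vcid and session id on this pchan. Undeliverable payloads are
 * drained via hab_msg_drop() so the channel stays parseable.
 */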
static int hab_try_get_vchan(struct physical_channel *pchan,
		struct hab_header *header,
		struct virtual_channel **vchan_out)
{
	struct virtual_channel *vchan = NULL;
	size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
	uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
	uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
	uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);

	/* get the local virtual channel if it isn't an open message */
	if (payload_type != HAB_PAYLOAD_TYPE_INIT &&
		payload_type != HAB_PAYLOAD_TYPE_INIT_ACK &&
		payload_type != HAB_PAYLOAD_TYPE_INIT_DONE &&
		payload_type != HAB_PAYLOAD_TYPE_INIT_CANCEL) {

		/* sanity check the received message */
		if (payload_type >= HAB_PAYLOAD_TYPE_MAX ||
			vchan_id > (HAB_HEADER_ID_MASK >> HAB_HEADER_ID_SHIFT)
			|| !vchan_id || !session_id) {
			pr_err("@@ %s Invalid msg type %d vcid %x bytes %zx sn %d\n",
				pchan->name, payload_type,
				vchan_id, sizebytes, session_id);
			dump_hab_wq(pchan);
		}

		/*
		 * Both the vcid and session_id must be accurate.
		 * They come from the pchan instead of the ctx.
		 */
		vchan = hab_vchan_get(pchan, header);
		if (!vchan) {
			pr_debug("vchan not found type %d vcid %x sz %zx sesn %d\n",
				payload_type, vchan_id, sizebytes, session_id);

			if (payload_type == HAB_PAYLOAD_TYPE_UNIMPORT) {
				hab_recv_unimport_msg(pchan, 0);
				return 0;
			}

			if (sizebytes) {
				hab_msg_drop(pchan, sizebytes);
				pr_err("%s msg dropped type %d size %zd vcid %X session id %d\n",
					pchan->name, payload_type,
					sizebytes, vchan_id,
					session_id);
			}
			return -EINVAL;
		} else if (vchan->otherend_closed) {
			hab_vchan_put(vchan);
			pr_info("vchan remote closed type %d, vchan id %x, sizebytes %zx, session %d\n",
				payload_type, vchan_id,
				sizebytes, session_id);
			if (sizebytes) {
				hab_msg_drop(pchan, sizebytes);
				pr_err("%s message %d dropped remote close, session id %d\n",
					pchan->name, payload_type,
					session_id);
			}
			return -ENODEV;
		}
	} else {
		if (sizebytes != sizeof(struct hab_open_send_data)) {
			pr_err("%s Invalid open req type %d vcid %x bytes %zx session %d\n",
				pchan->name, payload_type, vchan_id,
				sizebytes, session_id);
			if (sizebytes) {
				hab_msg_drop(pchan, sizebytes);
				pr_err("%s msg %d dropped unknown reason session id %d\n",
					pchan->name,
					payload_type,
					session_id);
				dump_hab_wq(pchan);
			}
			return -ENODEV;
		}
	}

	*vchan_out = vchan;
	return 0;
}
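
/*
 * Top-level receive dispatcher: called with a freshly read header and
 * consumes the corresponding payload from the physical channel according
 * to the payload type.
 */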
int hab_msg_recv(struct physical_channel *pchan,
		struct hab_header *header)
{
	int ret = 0;
	struct hab_message *message;
	struct hab_device *dev = pchan->habdev;
	size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
	uint32_t payload_type = HAB_HEADER_GET_TYPE(*header);
	uint32_t vchan_id = HAB_HEADER_GET_ID(*header);
	uint32_t session_id = HAB_HEADER_GET_SESSION_ID(*header);
	struct virtual_channel *vchan = NULL;
	struct export_desc *exp;
	struct export_desc_super *exp_desc_super = NULL;
	struct timespec64 ts = {0};
	unsigned long long rx_mpm_tv;
	int found = 0;
	struct hab_import_data imp_data = {0};
	int irqs_disabled = irqs_disabled();

	ret = hab_try_get_vchan(pchan, header, &vchan);
	if (ret != 0 || ((vchan == NULL) && (payload_type == HAB_PAYLOAD_TYPE_UNIMPORT)))
		return ret;

	switch (payload_type) {
	case HAB_PAYLOAD_TYPE_MSG:
	case HAB_PAYLOAD_TYPE_SCHE_RESULT_REQ:
	case HAB_PAYLOAD_TYPE_SCHE_RESULT_RSP:
		message = hab_msg_alloc(pchan, sizebytes);
		if (!message)
			break;
		hab_msg_queue(vchan, message);
		break;

	case HAB_PAYLOAD_TYPE_INIT:
	case HAB_PAYLOAD_TYPE_INIT_ACK:
	case HAB_PAYLOAD_TYPE_INIT_DONE:
		ret = hab_open_request_add(pchan, sizebytes, payload_type);
		if (ret) {
			pr_err("%s open request add failed, ret %d, payload type %d, sizebytes %zx\n",
				pchan->name, ret, payload_type, sizebytes);
			break;
		}
		wake_up(&dev->openq);
		break;

	case HAB_PAYLOAD_TYPE_INIT_CANCEL:
		pr_info("remote open cancel header vcid %X session %d local %d remote %d\n",
			vchan_id, session_id, pchan->vmid_local,
			pchan->vmid_remote);
		ret = hab_open_receive_cancel(pchan, sizebytes);
		if (ret)
			pr_err("%s open cancel handling failed ret %d vcid %X session %d\n",
				pchan->name, ret, vchan_id, session_id);
		break;

	case HAB_PAYLOAD_TYPE_EXPORT:
		ret = hab_receive_export_desc(pchan, vchan, sizebytes);
		if (ret)
			pr_err("failed to handle exp msg on vcid %x, ret %d\n",
				vchan->id, ret);
		break;

	case HAB_PAYLOAD_TYPE_EXPORT_ACK:
		ret = hab_receive_create_export_ack(pchan, vchan->ctx,
				sizebytes);
		if (ret) {
			pr_err("%s failed to handle export ack %d\n",
				pchan->name, ret);
			break;
		}
		wake_up_interruptible(&vchan->ctx->exp_wq);
		break;

	case HAB_PAYLOAD_TYPE_CLOSE:
		/* remote requested close */
		pr_debug("remote close vcid %pK %X other id %X session %d refcnt %d\n",
			vchan, vchan->id, vchan->otherend_id,
			session_id, get_refcnt(vchan->refcount));
		hab_vchan_stop(vchan);
		break;

	case HAB_PAYLOAD_TYPE_PROFILE:
		ktime_get_ts64(&ts);
		if (sizebytes < sizeof(struct habmm_xing_vm_stat)) {
			pr_err("%s msg size %zd too small, expect at least %zd\n",
				pchan->name, sizebytes, sizeof(struct habmm_xing_vm_stat));
			break;
		}
		/* pull down the incoming data */
		message = hab_msg_alloc(pchan, sizebytes);
		if (!message) {
			pr_err("%s failed to allocate msg; arrived msg will be lost\n",
				pchan->name);
		} else {
			struct habmm_xing_vm_stat *pstat =
				(struct habmm_xing_vm_stat *)message->data;

			pstat->rx_sec = ts.tv_sec;
			pstat->rx_usec = ts.tv_nsec/NSEC_PER_USEC;
			hab_msg_queue(vchan, message);
		}
		break;

	case HAB_PAYLOAD_TYPE_SCHE_MSG:
	case HAB_PAYLOAD_TYPE_SCHE_MSG_ACK:
		if (sizebytes < sizeof(unsigned long long)) {
			pr_err("%s msg size %zd too small, expect at least %zd\n",
				pchan->name, sizebytes, sizeof(unsigned long long));
			break;
		}
		rx_mpm_tv = msm_timer_get_sclk_ticks();
		/* pull down the incoming data */
		message = hab_msg_alloc(pchan, sizebytes);
		if (!message) {
			pr_err("%s failed to allocate msg; arrived msg will be lost\n",
				pchan->name);
		} else {
			((unsigned long long *)message->data)[0] = rx_mpm_tv;
			hab_msg_queue(vchan, message);
		}
		break;

	case HAB_PAYLOAD_TYPE_IMPORT:
		if (physical_channel_read(pchan, &imp_data, sizeof(struct hab_import_data)) !=
			sizeof(struct hab_import_data)) {
			pr_err("corrupted import request, id %ld page %ld vcid %X on %s\n",
				imp_data.exp_id, imp_data.page_cnt, vchan->id, pchan->name);
			break;
		}
		/* expid_lock is held to ensure the availability of the exp node */
		hab_spin_lock(&pchan->expid_lock, irqs_disabled);
		exp = idr_find(&pchan->expid_idr, imp_data.exp_id);
		if ((exp != NULL) && (imp_data.page_cnt == exp->payload_count)) {
			found = 1;
			exp_desc_super = container_of(exp, struct export_desc_super, exp);
		} else {
			found = 0;
		}
		if (found == 1 && (exp_desc_super->exp_state == HAB_EXP_SUCCESS)) {
			exp_desc_super->remote_imported = 1;
			/* might sleep in Vhost & VirtIO HAB; needs non-blocking send or RT Linux */
			hab_send_import_ack(vchan, exp);
			pr_debug("remote imported exp id %d on vcid %x\n",
				exp->export_id, vchan->id);
		} else {
			pr_err("requested exp id %ld not found %d on %s\n",
				imp_data.exp_id, found, pchan->name);
			/* might sleep in Vhost & VirtIO HAB; needs non-blocking send or RT Linux */
			hab_send_import_ack_fail(vchan, imp_data.exp_id);
		}
		hab_spin_unlock(&pchan->expid_lock, irqs_disabled);
		break;

	case HAB_PAYLOAD_TYPE_IMPORT_ACK:
		ret = hab_receive_export_desc(pchan, vchan, sizebytes);
		if (ret)
			pr_err("%s failed to handle import ack %d\n", pchan->name, ret);
		/* always try to wake up the importer when any failure happens */
		wake_up_interruptible(&vchan->ctx->imp_wq);
		break;

	case HAB_PAYLOAD_TYPE_IMPORT_ACK_FAIL:
		ret = hab_receive_import_ack_fail(pchan, vchan);
		if (ret)
			pr_err("%s failed to handle import ack fail msg %d\n", pchan->name, ret);
		/* always try to wake up the importer when any failure happens */
		wake_up_interruptible(&vchan->ctx->imp_wq);
		break;

	case HAB_PAYLOAD_TYPE_UNIMPORT:
		hab_recv_unimport_msg(pchan, 1);
		break;

	default:
		pr_err("%s unknown msg received, payload type %d, vchan id %x, sizebytes %zx, session %d\n",
			pchan->name, payload_type, vchan_id,
			sizebytes, session_id);
		break;
	}

	if (vchan)
		hab_vchan_put(vchan);

	return ret;
}