// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_ids.h>
#include <linux/version.h>
#include <linux/cma.h>
#include "hab_virtio.h" /* requires hab.h */

#define HAB_VIRTIO_DEVICE_ID_HAB	88
#define HAB_VIRTIO_DEVICE_ID_BUFFERQ	89
#define HAB_VIRTIO_DEVICE_ID_MISC	90
#define HAB_VIRTIO_DEVICE_ID_AUDIO	91
#define HAB_VIRTIO_DEVICE_ID_CAMERA	92
#define HAB_VIRTIO_DEVICE_ID_DISPLAY	93
#define HAB_VIRTIO_DEVICE_ID_GRAPHICS	94
#define HAB_VIRTIO_DEVICE_ID_VIDEO	95

/* all probed virtio_hab instances are stored in this list */
static struct list_head vhab_list = LIST_HEAD_INIT(vhab_list);
static DEFINE_SPINLOCK(vh_lock);

static struct virtio_device_tbl {
	int32_t mmid;
	__u32 device;
	struct virtio_device *vdev;
} vdev_tbl[] = {
	{ HAB_MMID_ALL_AREA, HAB_VIRTIO_DEVICE_ID_HAB, NULL }, /* hab */
	{ MM_BUFFERQ_1, HAB_VIRTIO_DEVICE_ID_BUFFERQ, NULL },
	{ MM_MISC, HAB_VIRTIO_DEVICE_ID_MISC, NULL },
	{ MM_AUD_1, HAB_VIRTIO_DEVICE_ID_AUDIO, NULL },
	{ MM_CAM_1, HAB_VIRTIO_DEVICE_ID_CAMERA, NULL },
	{ MM_DISP_1, HAB_VIRTIO_DEVICE_ID_DISPLAY, NULL },
	{ MM_GFX, HAB_VIRTIO_DEVICE_ID_GRAPHICS, NULL },
	{ MM_VID, HAB_VIRTIO_DEVICE_ID_VIDEO, NULL },
};

enum pool_type_t {
	PT_OUT_SMALL = 0,	/* 512 bytes */
	PT_OUT_MEDIUM,		/* 5120 bytes */
	PT_OUT_LARGE,		/* 51200 bytes */
	PT_IN,			/* 5120 bytes */
	PT_MAX
};

#define GUARD_BAND_SZ 20

struct vh_buf_header {
	char *buf;		/* buffer starting address */
	int size;		/* total buffer size */
	enum pool_type_t pool_type;
	int index;		/* debugging only */
	int payload_size;	/* actual payload used size */
	struct list_head node;
	char padding[GUARD_BAND_SZ];
};

#define IN_BUF_SIZE		5120
#define OUT_SMALL_BUF_SIZE	512
#define OUT_MEDIUM_BUF_SIZE	5120
#define OUT_LARGE_BUF_SIZE	51200

#define IN_BUF_NUM		100 /*64*/
#define OUT_SMALL_BUF_NUM	200 /*64*/
#define OUT_MEDIUM_BUF_NUM	100 /*20*/
#define OUT_LARGE_BUF_NUM	10
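
/*
 * Each pool is one contiguous kmalloc allocation carved into slots laid
 * out back to back as [GUARD_BAND_SZ guard][struct vh_buf_header][buffer],
 * so a slot is the guard band plus the header plus the buffer size.
 */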
#define IN_BUF_POOL_SLOT (GUARD_BAND_SZ + \
		sizeof(struct vh_buf_header) + \
		IN_BUF_SIZE)
#define OUT_SMALL_BUF_SLOT (GUARD_BAND_SZ + \
		sizeof(struct vh_buf_header) + \
		OUT_SMALL_BUF_SIZE)
#define OUT_MEDIUM_BUF_SLOT (GUARD_BAND_SZ + \
		sizeof(struct vh_buf_header) + \
		OUT_MEDIUM_BUF_SIZE)
#define OUT_LARGE_BUF_SLOT (GUARD_BAND_SZ + \
		sizeof(struct vh_buf_header) + \
		OUT_LARGE_BUF_SIZE)

#define IN_POOL_SIZE (IN_BUF_POOL_SLOT * IN_BUF_NUM)
#define OUT_SMALL_POOL_SIZE (OUT_SMALL_BUF_SLOT * OUT_SMALL_BUF_NUM)
#define OUT_MEDIUM_POOL_SIZE (OUT_MEDIUM_BUF_SLOT * OUT_MEDIUM_BUF_NUM)
#define OUT_LARGE_POOL_SIZE (OUT_LARGE_BUF_SLOT * OUT_LARGE_BUF_NUM)

struct virtio_hab *get_vh(struct virtio_device *vdev)
{
	struct virtio_hab *vh = NULL;
	unsigned long flags;

	spin_lock_irqsave(&vh_lock, flags);
	list_for_each_entry(vh, &vhab_list, node) {
		if (vdev == vh->vdev)
			break;
	}
	spin_unlock_irqrestore(&vh_lock, flags);
	return vh;
}

static struct vq_pchan *get_virtio_pchan(struct virtio_hab *vhab,
		struct virtqueue *vq)
{
	int index = vq->index - vhab->vqs_offset;

	/* valid indices run from 0 to ndevices * HAB_PCHAN_VQ_MAX - 1 */
	if (index >= hab_driver.ndevices * HAB_PCHAN_VQ_MAX || index < 0) {
		pr_err("wrong vq index %d total hab device %d\n",
			index, hab_driver.ndevices);
		return NULL;
	}
	/* each pchan owns HAB_PCHAN_VQ_MAX vqs (tx and rx) */
	return &vhab->vqpchans[index / HAB_PCHAN_VQ_MAX];
}
/* vq event callback - send/out buf returns */
static void virthab_recv_txq(struct virtqueue *vq)
{
	struct virtio_hab *vh = get_vh(vq->vdev);
	struct vq_pchan *vpc = get_virtio_pchan(vh, vq);
	struct vh_buf_header *hd = NULL;
	unsigned long flags;
	unsigned int len;

	if (!vpc)
		return;

	spin_lock_irqsave(&vpc->lock[HAB_PCHAN_TX_VQ], flags);
	if (vpc->pchan_ready) {
		if (vq != vpc->vq[HAB_PCHAN_TX_VQ])
			pr_err("failed to match txq %pK expecting %pK\n",
				vq, vpc->vq[HAB_PCHAN_TX_VQ]);

		/* reclaim completed outbufs into their per-size free lists */
		while ((hd = (struct vh_buf_header *)virtqueue_get_buf(vq, &len)) != NULL) {
			if ((hd->index < 0) || (hd->pool_type < 0) ||
				(hd->pool_type > PT_OUT_LARGE))
				pr_err("corrupted outbuf %pK %d %d %d\n",
					hd->buf, hd->size, hd->pool_type,
					hd->index);
			hd->payload_size = 0;
			switch (hd->pool_type) {
			case PT_OUT_SMALL:
				if ((hd->index > OUT_SMALL_BUF_NUM) ||
					(hd->size != OUT_SMALL_BUF_SIZE))
					pr_err("small buf index corrupted %pK %pK %d %d\n",
						hd, hd->buf, hd->index,
						hd->size);
				list_add_tail(&hd->node, &vpc->s_list);
				vpc->s_cnt++;
				break;
			case PT_OUT_MEDIUM:
				if ((hd->index > OUT_MEDIUM_BUF_NUM) ||
					(hd->size != OUT_MEDIUM_BUF_SIZE))
					pr_err("medium buf index corrupted %pK %pK %d %d\n",
						hd, hd->buf, hd->index,
						hd->size);
				list_add_tail(&hd->node, &vpc->m_list);
				vpc->m_cnt++;
				break;
			case PT_OUT_LARGE:
				if ((hd->index > OUT_LARGE_BUF_NUM) ||
					(hd->size != OUT_LARGE_BUF_SIZE))
					pr_err("large buf index corrupted %pK %pK %d %d\n",
						hd, hd->buf, hd->index,
						hd->size);
				list_add_tail(&hd->node, &vpc->l_list);
				vpc->l_cnt++;
				break;
			default:
				pr_err("invalid pool type %d received on txq\n",
					hd->pool_type);
			}
		}
	}
	spin_unlock_irqrestore(&vpc->lock[HAB_PCHAN_TX_VQ], flags);
	wake_up(&vpc->out_wq);
}
/* vq sts callback - recv/in buf returns */
static void virthab_recv_rxq(unsigned long p)
{
	struct virtqueue *vq = (struct virtqueue *)p;
	struct virtio_hab *vh = get_vh(vq->vdev);
	struct vq_pchan *vpc = get_virtio_pchan(vh, vq);
	char *inbuf;
	unsigned int len;
	struct physical_channel *pchan = NULL;
	struct scatterlist sg[1];
	int rc;
	struct vh_buf_header *hd = NULL;

	if (!vpc)
		return;
	pchan = vpc->pchan;

	if (vq != vpc->vq[HAB_PCHAN_RX_VQ])
		pr_err("%s failed to match rxq %pK expecting %pK\n",
			vq->name, vq, vpc->vq[HAB_PCHAN_RX_VQ]);

	spin_lock(&vpc->lock[HAB_PCHAN_RX_VQ]);
	while ((hd = virtqueue_get_buf(vpc->vq[HAB_PCHAN_RX_VQ], &len)) != NULL) {
		vpc->in_cnt--;
		/* sanity check */
		if ((hd->index < 0) || (hd->index > IN_BUF_NUM) ||
			hd->pool_type != PT_IN || hd->size != IN_BUF_SIZE) {
			pr_err("corrupted inbuf %pK %pK %d %d %d\n",
				hd, hd->buf, hd->size, hd->pool_type,
				hd->index);
			break;
		}
		hd->payload_size = 0;
		inbuf = hd->buf;
		/* the inbuf holds one complete hab message; it must be
		 * consumed before the inbuf is kicked back to the PVM
		 */
		vpc->read_data = inbuf;
		vpc->read_size = len;
		vpc->read_offset = 0;

		if (!pchan)
			pr_err("failed to find matching pchan for vq %s %pK\n",
				vq->name, vq);
		else {
			/* parse and handle the input */
			spin_unlock(&vpc->lock[HAB_PCHAN_RX_VQ]);
			rc = hab_msg_recv(pchan, (struct hab_header *)inbuf);
			spin_lock(&vpc->lock[HAB_PCHAN_RX_VQ]);
			if (pchan->sequence_rx + 1 != ((struct hab_header *)inbuf)->sequence)
				pr_err("%s: expected sequence_rx is %u, received is %u\n",
					pchan->name,
					pchan->sequence_rx,
					((struct hab_header *)inbuf)->sequence);
			pchan->sequence_rx = ((struct hab_header *)inbuf)->sequence;
			if (rc && rc != -EINVAL)
				pr_err("%s hab_msg_recv wrong %d\n",
					pchan->name, rc);
		}
		/* return the inbuf to PVM after consuming */
		sg_init_one(sg, hd->buf, IN_BUF_SIZE);
		if (vpc->pchan_ready) {
			rc = virtqueue_add_inbuf(vq, sg, 1, hd, GFP_ATOMIC);
			if (rc)
				pr_err("failed to queue inbuf to PVM %d\n",
					rc);
			/* bundle kick? */
			vpc->in_cnt++;
			rc = virtqueue_kick(vq);
			if (!rc)
				pr_err("failed to kick inbuf to PVM %d\n", rc);
		} else {
			pr_err("vq not ready %d\n", vpc->pchan_ready);
			rc = -ENODEV;
		}
	}
	spin_unlock(&vpc->lock[HAB_PCHAN_RX_VQ]);
}

static void virthab_recv_rxq_task(struct virtqueue *vq)
{
	struct virtio_hab *vh = get_vh(vq->vdev);
	struct vq_pchan *vpc = get_virtio_pchan(vh, vq);

	if (!vpc)
		return;
	tasklet_schedule(&vpc->task);
}
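
/*
 * Carve one contiguous pool into fixed-size slots and string the per-slot
 * headers onto the given free list; *cnt ends up counting the buffers
 * placed on that list.
 */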
static void init_pool_list(void *pool, int buf_size, int buf_num,
		enum pool_type_t pool_type,
		struct list_head *pool_head,
		wait_queue_head_t *wq, int *cnt)
{
	char *ptr;
	struct vh_buf_header *hd;
	int i;

	INIT_LIST_HEAD(pool_head);
	if (wq)
		init_waitqueue_head(wq);

	ptr = pool;
	for (i = 0; i < buf_num; i++) {
		hd = (struct vh_buf_header *)(ptr + GUARD_BAND_SZ);
		hd->buf = ptr + GUARD_BAND_SZ + sizeof(struct vh_buf_header);
		hd->size = buf_size;
		hd->pool_type = pool_type;
		hd->index = i;
		hd->payload_size = 0;
		list_add_tail(&hd->node, pool_head);
		ptr = hd->buf + buf_size;
		(*cnt)++;
	}
}
/* queue all the inbufs on all pchans/vqs */
int virthab_queue_inbufs(struct virtio_hab *vh, int alloc)
{
	int ret, size;
	int i;
	struct scatterlist sg[1];
	struct vh_buf_header *hd, *hd_tmp;

	for (i = 0; i < vh->mmid_range; i++) {
		struct vq_pchan *vpc = &vh->vqpchans[i];

		if (alloc) {
			vpc->in_cnt = 0;
			vpc->s_cnt = 0;
			vpc->m_cnt = 0;
			vpc->l_cnt = 0;
			vpc->in_pool = kmalloc(IN_POOL_SIZE, GFP_KERNEL);
			vpc->s_pool = kmalloc(OUT_SMALL_POOL_SIZE, GFP_KERNEL);
			vpc->m_pool = kmalloc(OUT_MEDIUM_POOL_SIZE, GFP_KERNEL);
			vpc->l_pool = kmalloc(OUT_LARGE_POOL_SIZE, GFP_KERNEL);
			if (!vpc->in_pool || !vpc->s_pool || !vpc->m_pool ||
				!vpc->l_pool) {
				pr_err("failed to alloc buf %d %pK %d %pK %d %pK %d %pK\n",
					IN_POOL_SIZE, vpc->in_pool,
					OUT_SMALL_POOL_SIZE, vpc->s_pool,
					OUT_MEDIUM_POOL_SIZE, vpc->m_pool,
					OUT_LARGE_POOL_SIZE, vpc->l_pool);
				return -ENOMEM;
			}
			init_waitqueue_head(&vpc->out_wq);
			init_pool_list(vpc->in_pool, IN_BUF_SIZE,
					IN_BUF_NUM, PT_IN,
					&vpc->in_list, NULL,
					&vpc->in_cnt);
			init_pool_list(vpc->s_pool, OUT_SMALL_BUF_SIZE,
					OUT_SMALL_BUF_NUM, PT_OUT_SMALL,
					&vpc->s_list, NULL,
					&vpc->s_cnt);
			init_pool_list(vpc->m_pool, OUT_MEDIUM_BUF_SIZE,
					OUT_MEDIUM_BUF_NUM, PT_OUT_MEDIUM,
					&vpc->m_list, NULL,
					&vpc->m_cnt);
			init_pool_list(vpc->l_pool, OUT_LARGE_BUF_SIZE,
					OUT_LARGE_BUF_NUM, PT_OUT_LARGE,
					&vpc->l_list, NULL,
					&vpc->l_cnt);
			pr_debug("VQ buf allocated %s %d %d %d %d %d %d %d %d %pK %pK %pK %pK\n",
				vpc->vq[HAB_PCHAN_RX_VQ]->name,
				IN_POOL_SIZE, OUT_SMALL_POOL_SIZE,
				OUT_MEDIUM_POOL_SIZE, OUT_LARGE_POOL_SIZE,
				vpc->in_cnt, vpc->s_cnt, vpc->m_cnt,
				vpc->l_cnt, &vpc->in_list, &vpc->s_list,
				&vpc->m_list, &vpc->l_list);
		}

		spin_lock(&vpc->lock[HAB_PCHAN_RX_VQ]);
		size = virtqueue_get_vring_size(vpc->vq[HAB_PCHAN_RX_VQ]);
		pr_debug("vq %s vring index %d num %d pchan %s\n",
			vpc->vq[HAB_PCHAN_RX_VQ]->name,
			vpc->vq[HAB_PCHAN_RX_VQ]->index, size,
			vpc->pchan->name);
		list_for_each_entry_safe(hd, hd_tmp, &vpc->in_list, node) {
			list_del(&hd->node);
			sg_init_one(sg, hd->buf, IN_BUF_SIZE);
			ret = virtqueue_add_inbuf(vpc->vq[HAB_PCHAN_RX_VQ], sg,
					1, hd, GFP_ATOMIC);
			if (ret)
				pr_err("failed to queue %s inbuf %d to PVM %d\n",
					vpc->vq[HAB_PCHAN_RX_VQ]->name,
					vpc->in_cnt, ret);
			vpc->in_cnt--;
		}
		ret = virtqueue_kick(vpc->vq[HAB_PCHAN_RX_VQ]);
		if (!ret)
			pr_err("failed to kick %d %s ret %d cnt %d\n", i,
				vpc->vq[HAB_PCHAN_RX_VQ]->name, ret,
				vpc->in_cnt);
		spin_unlock(&vpc->lock[HAB_PCHAN_RX_VQ]);
	}
	return 0;
}
EXPORT_SYMBOL(virthab_queue_inbufs);
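
/*
 * Pre-vq-registration setup: fill in the callback and name arrays for
 * every pchan's txq/rxq pair. Each name is copied from the hab device
 * name with the first three characters overwritten as "txq"/"rxq".
 */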
int virthab_init_vqs_pre(struct virtio_hab *vh)
{
	struct vq_pchan *vpchans = vh->vqpchans;
	vq_callback_t **cbs = vh->cbs;
	char **names = vh->names;
	char *temp;
	int i, idx = 0;
	struct hab_device *habdev = NULL;

	pr_debug("2 callbacks %pK %pK\n", (void *)virthab_recv_txq,
		(void *)virthab_recv_rxq);

	habdev = find_hab_device(vh->mmid_start);
	if (!habdev) {
		pr_err("failed to locate mmid %d range %d\n",
			vh->mmid_start, vh->mmid_range);
		return -ENODEV;
	}
	/* do sanity check */
	for (i = 0; i < hab_driver.ndevices; i++)
		if (habdev == &hab_driver.devp[i])
			break;
	if (i + vh->mmid_range > hab_driver.ndevices) {
		pr_err("invalid mmid %d range %d total %d\n",
			vh->mmid_start, vh->mmid_range,
			hab_driver.ndevices);
		return -EINVAL;
	}
	idx = i;

	for (i = 0; i < vh->mmid_range; i++) {
		habdev = &hab_driver.devp[idx + i];
		/* ToDo: each cb should only apply to one vq */
		cbs[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_TX_VQ] = virthab_recv_txq;
		cbs[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_RX_VQ] = virthab_recv_rxq_task;
		strscpy(names[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_TX_VQ],
			habdev->name, sizeof(habdev->name));
		temp = names[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_TX_VQ];
		temp[0] = 't'; temp[1] = 'x'; temp[2] = 'q';
		strscpy(names[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_RX_VQ],
			habdev->name, sizeof(habdev->name));
		temp = names[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_RX_VQ];
		temp[0] = 'r'; temp[1] = 'x'; temp[2] = 'q';
		vpchans[i].mmid = habdev->id;
		if (list_empty(&habdev->pchannels))
			pr_err("pchan is not initialized %s slot %d mmid %d\n",
				habdev->name, i, habdev->id);
		else
			/* GVM only has one instance of the pchan for each mmid
			 * (no multi VMs)
			 */
			vpchans[i].pchan = list_first_entry(&habdev->pchannels,
					struct physical_channel,
					node);
	}
	return 0;
}
EXPORT_SYMBOL(virthab_init_vqs_pre);
int virthab_init_vqs_post(struct virtio_hab *vh)
{
	struct vq_pchan *vpchans = vh->vqpchans;
	int i;
	struct virtio_pchan_link *link;

	/* map all the vqs to pchans */
	for (i = 0; i < vh->mmid_range; i++) {
		vpchans[i].vhab = vh;
		spin_lock_init(&vpchans[i].lock[HAB_PCHAN_TX_VQ]);
		spin_lock_init(&vpchans[i].lock[HAB_PCHAN_RX_VQ]);
		vpchans[i].vq[HAB_PCHAN_TX_VQ] =
			vh->vqs[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_TX_VQ];
		vpchans[i].vq[HAB_PCHAN_RX_VQ] =
			vh->vqs[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_RX_VQ];
		vpchans[i].index[HAB_PCHAN_TX_VQ] =
			vh->vqs[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_TX_VQ]->index;
		vpchans[i].index[HAB_PCHAN_RX_VQ] =
			vh->vqs[i * HAB_PCHAN_VQ_MAX + HAB_PCHAN_RX_VQ]->index;
		tasklet_init(&vpchans[i].task, virthab_recv_rxq,
			(unsigned long)vpchans[i].vq[HAB_PCHAN_RX_VQ]);
		vpchans[i].pchan_ready = true;
		link = (struct virtio_pchan_link *)vpchans[i].pchan->hyp_data;
		link->vpc = &vpchans[i];
		link->vhab = vh;
		pr_debug("mmid %d slot %d vhab %pK vdev %pK vq tx %pK pchan %pK\n",
			vpchans[i].mmid, i, vpchans[i].vhab,
			vpchans[i].vhab->vdev, vpchans[i].vq[HAB_PCHAN_TX_VQ],
			vpchans[i].pchan);
	}
	return 0;
}
EXPORT_SYMBOL(virthab_init_vqs_post);
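
/* allocate all the vqs of this virtio device in one virtio_find_vqs() call */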
static int virthab_init_vqs(struct virtio_hab *vh)
{
	int ret;
	vq_callback_t **cbs = vh->cbs;
	char **names = vh->names;

	ret = virthab_init_vqs_pre(vh);
	if (ret)
		return ret;

	pr_debug("mmid %d request %d vqs\n", vh->mmid_start,
		vh->mmid_range * HAB_PCHAN_VQ_MAX);
	ret = virtio_find_vqs(vh->vdev, vh->mmid_range * HAB_PCHAN_VQ_MAX,
			vh->vqs, cbs, (const char * const *)names, NULL);
	if (ret) {
		pr_err("failed to find vqs %d\n", ret);
		return ret;
	}
	pr_debug("find vqs OK %d\n", ret);
	vh->vqs_offset = 0; /* this virtio device has all the vqs to itself */

	return virthab_init_vqs_post(vh);
}
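
/*
 * Allocate the per-device bookkeeping arrays: one vq pointer, callback
 * and name slot per vq (HAB_PCHAN_VQ_MAX vqs per mmid), plus one
 * vq_pchan per hab device.
 */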
static int virthab_alloc_mmid_device(struct virtio_hab *vh,
		uint32_t mmid_start, int mmid_range)
{
	int i;

	vh->vqs = kzalloc(sizeof(struct virtqueue *) * mmid_range *
			HAB_PCHAN_VQ_MAX, GFP_KERNEL);
	if (!vh->vqs)
		return -ENOMEM;
	vh->cbs = kzalloc(sizeof(vq_callback_t *) * mmid_range *
			HAB_PCHAN_VQ_MAX, GFP_KERNEL);
	if (!vh->cbs)
		return -ENOMEM;
	vh->names = kzalloc(sizeof(char *) * mmid_range *
			HAB_PCHAN_VQ_MAX, GFP_KERNEL);
	if (!vh->names)
		return -ENOMEM;
	vh->vqpchans = kcalloc(hab_driver.ndevices, sizeof(struct vq_pchan),
			GFP_KERNEL);
	if (!vh->vqpchans)
		return -ENOMEM;

	/* loop through all pchans before vq registration for name creation */
	for (i = 0; i < mmid_range * HAB_PCHAN_VQ_MAX; i++) {
		vh->names[i] = kzalloc(MAX_VMID_NAME_SIZE + 2, GFP_KERNEL);
		if (!vh->names[i])
			return -ENOMEM;
	}
	vh->mmid_start = mmid_start;
	vh->mmid_range = mmid_range;
	return 0;
}
int virthab_alloc(struct virtio_device *vdev, struct virtio_hab **pvh,
		uint32_t mmid_start, int mmid_range)
{
	struct virtio_hab *vh;
	int ret;
	unsigned long flags;

	vh = kzalloc(sizeof(*vh), GFP_KERNEL);
	if (!vh)
		return -ENOMEM;

	ret = virthab_alloc_mmid_device(vh, mmid_start, mmid_range);
	if (ret) {
		kfree(vh);
		return ret;
	}
	pr_debug("alloc done mmid %d range %d\n", mmid_start, mmid_range);

	vh->vdev = vdev; /* store virtio device locally */
	*pvh = vh;

	spin_lock_irqsave(&vh_lock, flags);
	list_add_tail(&vh->node, &vhab_list);
	spin_unlock_irqrestore(&vh_lock, flags);

	spin_lock_init(&vh->mlock);
	pr_debug("start vqs init vh list empty %d\n", list_empty(&vhab_list));
	return 0;
}
EXPORT_SYMBOL(virthab_alloc);
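
/*
 * Compute the mmid range [taken_start, taken_end] already claimed by a
 * probed virtio_hab, so the availability check below can reject
 * overlapping probes.
 */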
static void taken_range_calc(uint32_t mmid_start, int mmid_range,
		uint32_t *taken_start, uint32_t *taken_end)
{
	int i;

	*taken_start = 0;
	*taken_end = 0;
	for (i = 0; i < hab_driver.ndevices; i++) {
		if (mmid_start == hab_driver.devp[i].id) {
			*taken_start = mmid_start;
			*taken_end = hab_driver.devp[i + mmid_range - 1].id;
			pr_debug("taken range %d %d\n", *taken_start, *taken_end);
		}
	}
}

static int virthab_pchan_avail_check(__u32 id, uint32_t mmid_start, int mmid_range)
{
	int avail = 1; /* available */
	struct virtio_hab *vh = NULL;
	uint32_t taken_start = 0, taken_end = 0;

	list_for_each_entry(vh, &vhab_list, node) {
		if (vh->vdev->id.device == id) { /* virtio device id check */
			avail = 0;
			break;
		}
		taken_range_calc(vh->mmid_start, vh->mmid_range,
				&taken_start, &taken_end);
		if (mmid_start >= taken_start && mmid_start <= taken_end) {
			avail = 0;
			break;
		}
	}
	pr_debug("avail check input %d %d %d ret %d\n", id, mmid_start, mmid_range, avail);
	return avail;
}

static void virthab_store_vdev(int32_t mmid, struct virtio_device *vdev)
{
	int i;
	int sz = ARRAY_SIZE(vdev_tbl);

	for (i = 0; i < sz; i++) {
		if (vdev_tbl[i].mmid == mmid) {
			vdev_tbl[i].vdev = vdev;
			break;
		}
	}
}

struct virtio_device *virthab_get_vdev(int32_t mmid)
{
	int i;
	struct virtio_device *ret = NULL;
	int sz = ARRAY_SIZE(vdev_tbl);

	for (i = 0; i < sz; i++) {
		if (vdev_tbl[i].mmid == mmid) {
			ret = vdev_tbl[i].vdev;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(virthab_get_vdev);
/* probe is called when GVM detects virtio device from devtree */
static int virthab_probe(struct virtio_device *vdev)
{
	struct virtio_hab *vh = NULL;
	int err = 0, ret = 0;
	int mmid_range = hab_driver.ndevices;
	uint32_t mmid_start = hab_driver.devp[0].id;
	unsigned long flags;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
		pr_err("virtio VERSION_1 feature missing\n");
		return -ENODEV;
	}
	pr_info("virtio has feature %llX virtio devid %X vid %d empty %d\n",
		vdev->features, vdev->id.device, vdev->id.vendor,
		list_empty(&vhab_list));

	/* find out which virtio device is calling us.
	 * if this is hab's own virtio device, all the pchans are available
	 */
	if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_HAB) {
		/* all MMIDs are taken, cannot co-exist with others */
		mmid_start = hab_driver.devp[0].id;
		mmid_range = hab_driver.ndevices;
		virthab_store_vdev(HAB_MMID_ALL_AREA, vdev);
	} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_BUFFERQ) {
		mmid_start = MM_BUFFERQ_1;
		mmid_range = MM_BUFFERQ_END - MM_BUFFERQ_START - 1;
		virthab_store_vdev(MM_BUFFERQ_1, vdev);
	} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_MISC) {
		mmid_start = MM_MISC;
		mmid_range = MM_MISC_END - MM_MISC_START - 1;
		virthab_store_vdev(MM_MISC, vdev);
	} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_AUDIO) {
		mmid_start = MM_AUD_1;
		mmid_range = MM_AUD_END - MM_AUD_START - 1;
		virthab_store_vdev(MM_AUD_1, vdev);
	} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_CAMERA) {
		mmid_start = MM_CAM_1;
		mmid_range = MM_CAM_END - MM_CAM_START - 1;
		virthab_store_vdev(MM_CAM_1, vdev);
	} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_DISPLAY) {
		mmid_start = MM_DISP_1;
		mmid_range = MM_DISP_END - MM_DISP_START - 1;
		virthab_store_vdev(MM_DISP_1, vdev);
	} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_GRAPHICS) {
		mmid_start = MM_GFX;
		mmid_range = MM_GFX_END - MM_GFX_START - 1;
		virthab_store_vdev(MM_GFX, vdev);
	} else if (vdev->id.device == HAB_VIRTIO_DEVICE_ID_VIDEO) {
		mmid_start = MM_VID;
		mmid_range = MM_VID_END - MM_VID_START - 1;
		virthab_store_vdev(MM_VID, vdev);
	} else {
		pr_err("unknown virtio device detected %d\n",
			vdev->id.device);
		mmid_start = 0;
		mmid_range = 0;
	}
	pr_debug("virtio device id %d mmid %d range %d\n",
		vdev->id.device, mmid_start, mmid_range);

	if (!virthab_pchan_avail_check(vdev->id.device, mmid_start, mmid_range))
		return -EINVAL;

	ret = virthab_alloc(vdev, &vh, mmid_start, mmid_range);
	if (ret) {
		pr_err("probe failed mmid %d range %d\n",
			mmid_start, mmid_range);
		return ret;
	}
	pr_debug("alloc done %d mmid %d range %d\n",
		ret, mmid_start, mmid_range);

	err = virthab_init_vqs(vh);
	if (err)
		goto err_init_vq;

	virtio_device_ready(vdev);
	pr_info("virtio device ready\n");
	vh->ready = true;
	pr_debug("store virtio device %pK empty %d\n", vh, list_empty(&vhab_list));

	ret = virthab_queue_inbufs(vh, 1);
	if (ret)
		return ret;

	return 0;

err_init_vq:
	/* take the half-initialized vh back off the global list */
	spin_lock_irqsave(&vh_lock, flags);
	list_del(&vh->node);
	spin_unlock_irqrestore(&vh_lock, flags);
	kfree(vh);
	pr_err("virtio hab probe failed %d\n", err);
	return err;
}
static void virthab_remove(struct virtio_device *vdev)
{
	struct virtio_hab *vh = get_vh(vdev);
	void *buf;
	unsigned long flags;
	int i, j;
	struct virtio_pchan_link *link;

	spin_lock_irqsave(&vh->mlock, flags);
	vh->ready = false;
	spin_unlock_irqrestore(&vh->mlock, flags);

	vdev->config->reset(vdev);
	for (i = 0; i < vh->mmid_range; i++) {
		struct vq_pchan *vpc = &vh->vqpchans[i];

		j = 0;
		while ((buf =
			virtqueue_detach_unused_buf(vpc->vq[HAB_PCHAN_RX_VQ]))
			!= NULL) {
			pr_debug("free vq-pchan %s %d buf %d %pK\n",
				vpc->vq[HAB_PCHAN_RX_VQ]->name, i, j, buf);
			j++;
		}
		kfree(vpc->in_pool);
		kfree(vpc->s_pool);
		kfree(vpc->m_pool);
		kfree(vpc->l_pool);
		link = vpc->pchan->hyp_data;
		link->vhab = NULL;
		link->vpc = NULL;
	}
	vdev->config->del_vqs(vdev);

	kfree(vh->vqs);
	kfree(vh->cbs);
	for (i = 0; i < vh->mmid_range * HAB_PCHAN_VQ_MAX; i++)
		kfree(vh->names[i]);
	kfree(vh->names);
	kfree(vh->vqpchans);

	spin_lock_irqsave(&vh_lock, flags);
	list_del(&vh->node);
	spin_unlock_irqrestore(&vh_lock, flags);

	pr_info("remove virthab mmid %d range %d empty %d\n",
		vh->mmid_start, vh->mmid_range, list_empty(&vhab_list));
	kfree(vh);
}

#ifdef CONFIG_PM_SLEEP
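/*
 * freeze tears the vqs down; restore rebuilds them and calls
 * virthab_queue_inbufs(vh, 0) so the existing buffer pools are reused
 * rather than reallocated.
 */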
static int virthab_freeze(struct virtio_device *vdev)
{
	struct virtio_hab *vh = get_vh(vdev);
	unsigned long flags;

	spin_lock_irqsave(&vh->mlock, flags);
	vh->ready = false;
	spin_unlock_irqrestore(&vh->mlock, flags);
	vdev->config->del_vqs(vdev);
	return 0;
}

static int virthab_restore(struct virtio_device *vdev)
{
	struct virtio_hab *vh = get_vh(vdev);
	int err;

	err = virthab_init_vqs(vh);
	if (err)
		return err;

	virtio_device_ready(vdev);
	vh->ready = true;
	virthab_queue_inbufs(vh, 0);
	return 0;
}
#endif

static unsigned int features[] = {
	/* none */
};

static struct virtio_device_id id_table[] = {
	{ HAB_VIRTIO_DEVICE_ID_HAB, VIRTIO_DEV_ANY_ID }, /* virtio hab with all mmids */
	{ HAB_VIRTIO_DEVICE_ID_BUFFERQ, VIRTIO_DEV_ANY_ID }, /* virtio bufferq only */
	{ HAB_VIRTIO_DEVICE_ID_MISC, VIRTIO_DEV_ANY_ID }, /* virtio misc */
	{ HAB_VIRTIO_DEVICE_ID_AUDIO, VIRTIO_DEV_ANY_ID }, /* virtio audio */
	{ HAB_VIRTIO_DEVICE_ID_CAMERA, VIRTIO_DEV_ANY_ID }, /* virtio camera */
	{ HAB_VIRTIO_DEVICE_ID_DISPLAY, VIRTIO_DEV_ANY_ID }, /* virtio display */
	{ HAB_VIRTIO_DEVICE_ID_GRAPHICS, VIRTIO_DEV_ANY_ID }, /* virtio graphics */
	{ HAB_VIRTIO_DEVICE_ID_VIDEO, VIRTIO_DEV_ANY_ID }, /* virtio video */
	{ 0 },
};
static struct virtio_driver virtio_hab_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virthab_probe,
	.remove = virthab_remove,
#ifdef CONFIG_PM_SLEEP
	.freeze = virthab_freeze,
	.restore = virthab_restore,
#endif
};

/* register / unregister */
#ifdef HAB_DESKTOP
extern struct cma *dma_contiguous_default_area;
static struct cma *c;
static struct page *cma_pgs;
#endif

int hab_hypervisor_register(void)
{
#ifdef HAB_DESKTOP
	/* just need a device for memory allocation */
	c = dev_get_cma_area(hab_driver.dev[0]);
	cma_pgs = cma_alloc(c, (16 * 1024 * 1024) >> PAGE_SHIFT, 0,
			false); /* better from cmdline parsing */
	if (!c || !cma_pgs)
		pr_err("failed to reserve 16MB cma region base_pfn %lX cnt %lX\n",
			cma_get_base(c), cma_get_size(c));
#endif
	pr_info("alloc virtio_pchan_array of %d devices\n",
		hab_driver.ndevices);
	return 0;
}

void hab_hypervisor_unregister(void)
{
	hab_hypervisor_unregister_common();
	unregister_virtio_driver(&virtio_hab_driver);
#ifdef HAB_DESKTOP
	if (c && cma_pgs)
		cma_release(c, cma_pgs, (16 * 1024 * 1024) >> PAGE_SHIFT);
#endif
}

void hab_pipe_read_dump(struct physical_channel *pchan) {}
void dump_hab_wq(struct physical_channel *pchan) {}
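
/*
 * Take one free outbuf header off the given pool list. In blocking mode
 * the tx lock is dropped while waiting for virthab_recv_txq() to return
 * buffers; in non-blocking mode an empty pool returns -EAGAIN.
 */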
static struct vh_buf_header *get_vh_buf_header(spinlock_t *lock,
		unsigned long *irq_flags, struct list_head *list,
		wait_queue_head_t *wq, int *cnt,
		int nonblocking_flag)
{
	struct vh_buf_header *hd = NULL;
	unsigned long flags = *irq_flags;

	if (list_empty(list) && nonblocking_flag)
		return ERR_PTR(-EAGAIN);

	while (list_empty(list)) {
		spin_unlock_irqrestore(lock, flags);
		wait_event(*wq, !list_empty(list));
		spin_lock_irqsave(lock, flags);
	}
	hd = list_first_entry(list, struct vh_buf_header, node);
	list_del(&hd->node);
	*irq_flags = flags;
	(*cnt)--;
	return hd;
}
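
/*
 * Send one hab message: pick the smallest outbuf class that fits the
 * header plus payload, copy both in, then add and kick on the tx vq.
 */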
int physical_channel_send(struct physical_channel *pchan,
		struct hab_header *header, void *payload,
		unsigned int flags)
{
	size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
	struct virtio_pchan_link *link =
		(struct virtio_pchan_link *)pchan->hyp_data;
	struct vq_pchan *vpc = link->vpc;
	struct scatterlist sgout[1];
	char *outbuf = NULL;
	int rc = 0;
	unsigned long lock_flags;
	struct vh_buf_header *hd = NULL;
	int nonblocking_flag = flags & HABMM_SOCKET_SEND_FLAGS_NON_BLOCKING;

	if (link->vpc == NULL) {
		pr_err("%s: %s link->vpc not ready\n", __func__, pchan->name);
		return -ENODEV;
	}
	if (sizebytes > OUT_LARGE_BUF_SIZE) {
		pr_err("send size %zd overflow %d available %d %d %d\n",
			sizebytes, OUT_LARGE_BUF_SIZE,
			vpc->s_cnt, vpc->m_cnt, vpc->l_cnt);
		return -EINVAL;
	}

	spin_lock_irqsave(&vpc->lock[HAB_PCHAN_TX_VQ], lock_flags);
	if (vpc->pchan_ready) {
		/* pick the smallest available outbuf class that fits */
		if (sizebytes <= OUT_SMALL_BUF_SIZE) {
			hd = get_vh_buf_header(&vpc->lock[HAB_PCHAN_TX_VQ],
					&lock_flags, &vpc->s_list,
					&vpc->out_wq, &vpc->s_cnt,
					nonblocking_flag);
		} else if (sizebytes <= OUT_MEDIUM_BUF_SIZE) {
			hd = get_vh_buf_header(&vpc->lock[HAB_PCHAN_TX_VQ],
					&lock_flags, &vpc->m_list,
					&vpc->out_wq, &vpc->m_cnt,
					nonblocking_flag);
		} else {
			hd = get_vh_buf_header(&vpc->lock[HAB_PCHAN_TX_VQ],
					&lock_flags, &vpc->l_list,
					&vpc->out_wq, &vpc->l_cnt,
					nonblocking_flag);
		}
		if (IS_ERR(hd) && nonblocking_flag) {
			spin_unlock_irqrestore(&vpc->lock[HAB_PCHAN_TX_VQ], lock_flags);
			pr_info("get_vh_buf_header failed in non-blocking mode\n");
			return PTR_ERR(hd);
		}

		if (HAB_HEADER_GET_TYPE(*header) == HAB_PAYLOAD_TYPE_PROFILE) {
			struct habmm_xing_vm_stat *pstat =
				(struct habmm_xing_vm_stat *)payload;
			struct timespec64 ts;

			ktime_get_ts64(&ts);
			pstat->tx_sec = ts.tv_sec;
			pstat->tx_usec = ts.tv_nsec / NSEC_PER_USEC;
		}

		header->sequence = ++pchan->sequence_tx;
		header->signature = HAB_HEAD_SIGNATURE;

		outbuf = hd->buf;
		hd->payload_size = sizebytes;
		memcpy(outbuf, header, sizeof(*header));
		memcpy(&outbuf[sizeof(*header)], payload, sizebytes);

		sg_init_one(sgout, outbuf, sizeof(*header) + sizebytes);
		rc = virtqueue_add_outbuf(vpc->vq[HAB_PCHAN_TX_VQ], sgout, 1,
				hd, GFP_ATOMIC);
		if (!rc) {
			if (!virtqueue_kick(vpc->vq[HAB_PCHAN_TX_VQ]))
				pr_err("failed to kick outbuf to PVM\n");
		} else
			pr_err("failed to add outbuf %d %zd bytes\n",
				rc, sizeof(*header) + sizebytes);
	} else {
		pr_err("%s pchan not ready\n", pchan->name);
		rc = -ENODEV;
	}
	spin_unlock_irqrestore(&vpc->lock[HAB_PCHAN_TX_VQ], lock_flags);
	return rc;
}
/* this read is called by hab_msg_recv from physical_channel_rx_dispatch or cb */
int physical_channel_read(struct physical_channel *pchan,
		void *payload,
		size_t read_size)
{
	struct virtio_pchan_link *link =
		(struct virtio_pchan_link *)pchan->hyp_data;
	struct vq_pchan *vpc = link->vpc;

	if (link->vpc == NULL) {
		pr_info("%s: %s link->vpc not ready\n", __func__, pchan->name);
		return -ENODEV;
	}
	if (!payload || !vpc->read_data) {
		pr_err("%s invalid parameters %pK %pK offset %d read %zd vpc %pK\n",
			pchan->name, payload, vpc->read_data, vpc->read_offset,
			read_size, vpc);
		return 0;
	}
	/* size in header is only for payload excluding the header itself */
	if (vpc->read_size < read_size + sizeof(struct hab_header) +
		vpc->read_offset) {
		pr_warn("%s read %zd is less than requested %zd header %zd offset %d\n",
			pchan->name, vpc->read_size, read_size,
			sizeof(struct hab_header), vpc->read_offset);
		read_size = vpc->read_size - vpc->read_offset -
			sizeof(struct hab_header);
	}
	/* always skip the header */
	memcpy(payload, (unsigned char *)vpc->read_data +
		sizeof(struct hab_header) + vpc->read_offset, read_size);
	vpc->read_offset += (int)read_size;

	return (int)read_size;
}

/* called by hab recv() to act like an ISR to poll msgs from the remote VM */
/* ToDo: need to change the callback to here */
void physical_channel_rx_dispatch(unsigned long data)
{
	struct physical_channel *pchan = (struct physical_channel *)data;
	struct virtio_pchan_link *link =
		(struct virtio_pchan_link *)pchan->hyp_data;
	struct vq_pchan *vpc = link->vpc;

	if (link->vpc == NULL) {
		pr_info("%s: %s link->vpc not ready\n", __func__, pchan->name);
		return;
	}
	virthab_recv_rxq_task(vpc->vq[HAB_PCHAN_RX_VQ]);
}

/* pchan is directly added into the hab_device */
static int habvirtio_pchan_create(struct hab_device *dev, char *pchan_name)
{
	int result = 0;
	struct physical_channel *pchan = NULL;
	struct virtio_pchan_link *link = NULL;

	pchan = hab_pchan_alloc(dev, LOOPBACK_DOM);
	if (!pchan) {
		result = -ENOMEM;
		goto err;
	}
	pchan->closed = 0;
	strscpy(pchan->name, pchan_name, sizeof(pchan->name));

	link = kmalloc(sizeof(*link), GFP_KERNEL);
	if (!link) {
		result = -ENOMEM;
		goto err;
	}
	link->pchan = pchan;
	link->mmid = dev->id;
	link->vpc = NULL;
	link->vhab = NULL;
	pchan->hyp_data = link;

	/* create the pchan first; wait for the virtqs later during probe */
	pr_debug("virtio device has NOT been initialized yet. %s has to wait for probe\n",
		pchan->name);
	return 0;
err:
	kfree(pchan);
	return result;
}
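
/*
 * In this implementation commdev is simply the pchan itself; the pchan
 * is created here and linked to its vqs later when the matching virtio
 * device probes.
 */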
int habhyp_commdev_alloc(void **commdev, int is_be, char *name, int vmid_remote,
		struct hab_device *mmid_device)
{
	struct physical_channel *pchan;
	int ret = habvirtio_pchan_create(mmid_device, name);

	if (ret) {
		pr_err("failed to create %s pchan in mmid device %s, ret %d, pchan cnt %d\n",
			name, mmid_device->name, ret, mmid_device->pchan_cnt);
		*commdev = NULL;
		return ret;
	}
	pr_debug("create virtio pchan on %s return %d, loopback mode(%d), total pchan %d\n",
		name, ret, hab_driver.b_loopback, mmid_device->pchan_cnt);

	pchan = hab_pchan_find_domid(mmid_device, HABCFG_VMID_DONT_CARE);
	/* in this implementation, commdev is the same as pchan */
	*commdev = pchan;
	return ret;
}

int habhyp_commdev_dealloc(void *commdev)
{
	struct virtio_pchan_link *link = commdev;
	struct physical_channel *pchan = link->pchan;

	pr_info("free commdev %s\n", pchan->name);
	link->pchan = NULL;
	kfree(link);
	hab_pchan_put(pchan);
	return 0;
}

int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest,
		int dest_size)
{
	struct virtio_pchan_link *link;
	struct vq_pchan *vpc;
	int i, ret = 0;
	bool tx_pending, rx_pending;
	void *tx_buf, *rx_buf;
	unsigned int tx_len = 0, rx_len = 0;

	for (i = 0; i < pchan_cnt; i++) {
		link = (struct virtio_pchan_link *)pchans[i]->hyp_data;
		vpc = link->vpc;
		if (!vpc) {
			pr_err("%s: %s vpc not ready\n", __func__, pchans[i]->name);
			continue;
		}
		tx_pending = !virtqueue_enable_cb(vpc->vq[HAB_PCHAN_TX_VQ]);
		rx_pending = !virtqueue_enable_cb(vpc->vq[HAB_PCHAN_RX_VQ]);
		tx_buf = virtqueue_get_buf(vpc->vq[HAB_PCHAN_TX_VQ], &tx_len);
		rx_buf = virtqueue_get_buf(vpc->vq[HAB_PCHAN_RX_VQ], &rx_len);
		pr_info("pchan %d tx cnt %d %d %d rx %d txpend %d rxpend %d txlen %d rxlen %d\n",
			i, vpc->s_cnt, vpc->m_cnt, vpc->l_cnt, vpc->in_cnt,
			tx_pending, rx_pending, tx_len, rx_len);
		ret = hab_stat_buffer_print(dest, dest_size,
			"tx cnt %d %d %d rx %d txpend %d rxpend %d txlen %d rxlen %d\n",
			vpc->s_cnt, vpc->m_cnt, vpc->l_cnt, vpc->in_cnt,
			tx_pending, rx_pending, tx_len, rx_len);
		if (ret)
			break;
	}
	return ret;
}

int hab_hypervisor_register_post(void)
{
	/* one virtio driver for all the hab virtio devices */
	register_virtio_driver(&virtio_hab_driver);
	return 0;
}