hab_vhost.c

  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021, The Linux Foundation. All rights reserved.
  4. * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
  5. */
  6. #include <linux/compat.h>
  7. #include <linux/eventfd.h>
  8. #include <linux/file.h>
  9. #include <linux/miscdevice.h>
  10. #include <linux/mmu_context.h>
  11. #include <linux/module.h>
  12. #include <linux/mutex.h>
  13. #include <linux/slab.h>
  14. #include <linux/vhost.h>
  15. #include <linux/workqueue.h>
  16. #include "hab.h"
  17. #include "vhost.h"
  18. /* Max number of bytes transferred before requeueing the job.
  19. * Using this limit prevents one virtqueue from starving others.
  20. */
  21. #define VHOST_HAB_WEIGHT 0x80000
  22. /* Max number of packets transferred before requeueing the job.
  23. * Using this limit prevents one virtqueue from starving others with
  24. * pkts.
  25. */
  26. #define VHOST_HAB_PKT_WEIGHT 256
  27. enum {
  28. VHOST_HAB_PCHAN_TX_VQ = 0, /* receive data from gvm */
  29. VHOST_HAB_PCHAN_RX_VQ, /* send data to gvm */
  30. VHOST_HAB_PCHAN_VQ_MAX,
  31. };
  32. struct vhost_hab_pchannel { /* per pchan */
  33. struct physical_channel *pchan; /* hab physical channel */
  34. struct hab_device *habdev; /* hab device for the mmid */
  35. struct list_head node;
  36. struct vhost_virtqueue vqs[VHOST_HAB_PCHAN_VQ_MAX]; /* vqs for pchan */
  37. struct iov_iter out_iter; /* iter to read data */
  38. struct vhost_work tx_recv_work;
  39. struct list_head send_list; /* list of node to be sent to rxq */
  40. struct mutex send_list_mutex; /* protect send_list */
  41. struct vhost_work rx_send_work;
  42. int tx_empty; /* cached value for out of context access */
  43. int rx_empty; /* ditto */
  44. };
  45. struct vhost_hab_send_node {
  46. struct list_head node;
  47. struct hab_header header;
  48. u8 payload[];
  49. } __packed;
  50. struct vhost_hab_dev { /* per user requested domain */
  51. struct vhost_dev dev; /* vhost base device */
  52. struct vhost_hab_cdev *vh_cdev;
  53. int started;
  54. struct list_head vh_pchan_list; /* pchannels on this vhost device */
  55. };
  56. struct vhost_hab_cdev { /* per domain, per gvm */
  57. struct device *dev;
  58. dev_t dev_no;
  59. struct cdev cdev;
  60. uint32_t domain_id;
  61. struct hab_device *habdevs[HABCFG_MMID_NUM];
  62. };
  63. struct vhost_hab_stat_work {
  64. struct work_struct work;
  65. struct physical_channel **pchans;
  66. int pchan_count;
  67. };
  68. struct vhost_hab { /* global */
  69. dev_t major;
  70. struct class *class;
  71. uint32_t num_cdevs; /* total number of cdevs created */
  72. /*
  73. * all vhost hab char devices on the system.
  74. * mmid area starts from 1. Slot 0 is for
  75. * vhost-hab which controls all the pchannels.
  76. */
  77. struct vhost_hab_cdev *vh_cdevs[HABCFG_MMID_AREA_MAX + 1];
  78. struct list_head vh_pchan_list; /* available pchannels on the system */
  79. struct mutex pchan_mutex;
  80. struct workqueue_struct *wq;
  81. };
  82. static struct vhost_hab g_vh;
  83. #define HAB_AREA_NAME_MAX 32
  84. static char hab_area_names[HABCFG_MMID_AREA_MAX + 1][HAB_AREA_NAME_MAX] = {
  85. [HAB_MMID_ALL_AREA] = "hab",
  86. [MM_AUD_START / 100] = "aud",
  87. [MM_CAM_START / 100] = "cam",
  88. [MM_DISP_START / 100] = "disp",
  89. [MM_GFX_START / 100] = "ogles",
  90. [MM_VID_START / 100] = "vid",
  91. [MM_MISC_START / 100] = "misc",
  92. [MM_QCPE_START / 100] = "qcpe",
  93. [MM_CLK_START / 100] = "clock",
  94. [MM_FDE_START / 100] = "fde",
  95. [MM_BUFFERQ_START / 100] = "bufferq",
  96. [MM_DATA_START / 100] = "network",
  97. [MM_HSI2S_START / 100] = "hsi2s",
  98. [MM_XVM_START / 100] = "xvm"
  99. };
  100. static int rx_worker(struct vhost_hab_pchannel *vh_pchan);
  101. static void stat_worker(struct work_struct *work);
  102. static void do_rx_send_work(struct vhost_work *work)
  103. {
  104. struct vhost_hab_pchannel *vh_pchan = container_of(work,
  105. struct vhost_hab_pchannel, rx_send_work);
  106. rx_worker(vh_pchan);
  107. }
  108. static int rx_send_list_empty(struct vhost_hab_pchannel *vh_pchan)
  109. {
  110. int ret;
  111. mutex_lock(&vh_pchan->send_list_mutex);
  112. ret = list_empty(&vh_pchan->send_list);
  113. mutex_unlock(&vh_pchan->send_list_mutex);
  114. return ret;
  115. }
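/*
 * Drain the TX virtqueue (data sent by the guest): for each out-descriptor,
 * read the hab_header, hand the message to hab_msg_recv() (which pulls the
 * payload via physical_channel_read()), check sequence continuity, then mark
 * the buffer used. Stops when the vq is empty or the byte weight is exceeded.
 */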
  116. static void tx_worker(struct vhost_hab_pchannel *vh_pchan)
  117. {
  118. struct vhost_virtqueue *vq = vh_pchan->vqs + VHOST_HAB_PCHAN_TX_VQ;
  119. struct vhost_hab_dev *vh_dev = container_of(vq->dev,
  120. struct vhost_hab_dev, dev);
  121. unsigned int out_num = 0, in_num = 0;
  122. int head, ret;
  123. size_t out_len, in_len, total_len = 0;
  124. ssize_t copy_size;
  125. struct hab_header header;
  126. mutex_lock(&vq->mutex);
  127. if (!vq->private_data) {
  128. mutex_unlock(&vq->mutex);
  129. return;
  130. }
  131. vhost_disable_notify(&vh_dev->dev, vq);
  132. while (1) {
  133. head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
  134. &out_num, &in_num, NULL, NULL);
  135. if (head == vq->num) {
  136. if (unlikely(vhost_enable_notify(&vh_dev->dev, vq))) {
  137. vhost_disable_notify(&vh_dev->dev, vq);
  138. continue;
  139. }
  140. break; /* no more tx buf wait for next round */
  141. } else if (unlikely(head < 0)) {
  142. pr_err("%s error head %d out %d in %d\n",
  143. vh_pchan->pchan->name, head, out_num, in_num);
  144. break;
  145. }
  146. out_len = iov_length(vq->iov, out_num);
  147. if ((out_num > 0) && (out_len > 0)) {
  148. iov_iter_init(&vh_pchan->out_iter, WRITE, vq->iov,
  149. out_num, out_len);
  150. copy_size = copy_from_iter(&header, sizeof(header),
  151. &vh_pchan->out_iter);
  152. if (unlikely(copy_size != sizeof(header)))
  153. pr_err("fault on copy_from_iter, out_len %lu, ret %lu\n",
  154. out_len, copy_size);
  155. ret = hab_msg_recv(vh_pchan->pchan, &header);
  156. if (ret)
  157. pr_err("hab_msg_recv error %d\n", ret);
  158. total_len += out_len;
  159. if (vh_pchan->pchan->sequence_rx + 1 != header.sequence)
  160. pr_err("%s: expected sequence_rx is %u, received is %u\n",
  161. vh_pchan->pchan->name,
  162. vh_pchan->pchan->sequence_rx,
  163. header.sequence);
  164. vh_pchan->pchan->sequence_rx = header.sequence;
  165. }
  166. if (in_num) {
  167. in_len = iov_length(&vq->iov[out_num], in_num);
  168. total_len += in_len;
  169. pr_warn("unexpected in buf in tx vq, in_num %d, in_len %lu\n",
  170. in_num, in_len);
  171. }
  172. vhost_add_used_and_signal(&vh_dev->dev, vq, head, 0);
  173. if (unlikely(vhost_exceeds_weight(vq, 0, total_len))) {
  174. pr_err("total_len %lu > hab vq weight %d\n",
  175. total_len, VHOST_HAB_WEIGHT);
  176. break;
  177. }
  178. }
  179. mutex_unlock(&vq->mutex);
  180. }
  181. static void do_tx_recv_work(struct vhost_work *work)
  182. {
  183. struct vhost_hab_pchannel *vh_pchan = container_of(work,
  184. struct vhost_hab_pchannel, tx_recv_work);
  185. tx_worker(vh_pchan);
  186. }
  187. static void handle_tx_vq_kick(struct vhost_work *work)
  188. {
  189. struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
  190. poll.work);
  191. struct vhost_hab_pchannel *vh_pchan = container_of(vq,
  192. struct vhost_hab_pchannel, vqs[VHOST_HAB_PCHAN_TX_VQ]);
  193. tx_worker(vh_pchan);
  194. }
  195. static void handle_rx_vq_kick(struct vhost_work *work)
  196. {
  197. struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
  198. poll.work);
  199. struct vhost_hab_pchannel *vh_pchan = container_of(vq,
  200. struct vhost_hab_pchannel, vqs[VHOST_HAB_PCHAN_RX_VQ]);
  201. rx_worker(vh_pchan);
  202. }
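/*
 * Open of a per-domain char device node: claim one free vhost_hab_pchannel
 * from the global list for every hab device attached to this char device,
 * build the vq array (TX + RX per pchannel) and initialize the vhost device.
 * On failure the claimed pchannels are returned to the global list.
 */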
  203. static int vhost_hab_open(struct inode *inode, struct file *f)
  204. {
  205. struct vhost_hab_cdev *vh_cdev = container_of(inode->i_cdev,
  206. struct vhost_hab_cdev, cdev);
  207. struct vhost_hab_dev *vh_dev;
  208. struct vhost_hab_pchannel *vh_pchan, *vh_pchan_t;
  209. struct vhost_virtqueue **vqs;
  210. struct hab_device *habdev;
  211. int num_pchan = 0;
  212. bool vh_pchan_found;
  213. int i, j = 0;
  214. int ret = -ENODEV; /* returned if no free vh_pchan matches a requested mmid */
  215. vh_dev = kzalloc(sizeof(*vh_dev), GFP_KERNEL);
  216. if (!vh_dev)
  217. return -ENOMEM;
  218. INIT_LIST_HEAD(&vh_dev->vh_pchan_list);
  219. mutex_lock(&g_vh.pchan_mutex);
  220. for (i = 0; i < HABCFG_MMID_NUM; i++) {
  221. habdev = vh_cdev->habdevs[i];
  222. if (habdev == NULL)
  223. break;
  224. pr_info("%s: i=%d, mmid=%d\n", __func__, i, habdev->id);
  225. vh_pchan_found = false;
  226. list_for_each_entry_safe(vh_pchan, vh_pchan_t,
  227. &g_vh.vh_pchan_list, node) {
  228. pr_debug("%s: vh-pchan id %d\n", __func__,
  229. vh_pchan->habdev->id);
  230. if (vh_pchan->habdev == habdev) {
  231. pr_debug("%s: find vh_pchan for mmid %d\n",
  232. __func__, habdev->id);
  233. list_move_tail(&vh_pchan->node,
  234. &vh_dev->vh_pchan_list);
  235. vh_pchan_found = true;
  236. pr_debug("%s: num_pchan %d\n", __func__,
  237. num_pchan);
  238. num_pchan++;
  239. break;
  240. }
  241. }
  242. if (!vh_pchan_found) {
  243. pr_err("no vh_pchan is available for mmid %d\n",
  244. habdev->id);
  245. goto err;
  246. }
  247. }
  248. vqs = kmalloc_array(num_pchan * VHOST_HAB_PCHAN_VQ_MAX, sizeof(*vqs),
  249. GFP_KERNEL);
  250. if (!vqs) {
  251. ret = -ENOMEM;
  252. goto err;
  253. }
  254. pr_info("num_pchan=%d\n", num_pchan);
  255. list_for_each_entry(vh_pchan, &vh_dev->vh_pchan_list, node) {
  256. vqs[j++] = &vh_pchan->vqs[VHOST_HAB_PCHAN_TX_VQ];
  257. vqs[j++] = &vh_pchan->vqs[VHOST_HAB_PCHAN_RX_VQ];
  258. }
  259. vhost_dev_init(&vh_dev->dev, vqs, VHOST_HAB_PCHAN_VQ_MAX * num_pchan,
  260. UIO_MAXIOV, VHOST_HAB_PKT_WEIGHT, VHOST_HAB_WEIGHT);
  261. list_for_each_entry(vh_pchan, &vh_dev->vh_pchan_list, node) {
  262. vhost_work_init(&vh_pchan->rx_send_work, do_rx_send_work);
  263. vhost_work_init(&vh_pchan->tx_recv_work, do_tx_recv_work);
  264. }
  265. vh_dev->vh_cdev = vh_cdev;
  266. f->private_data = vh_dev;
  267. mutex_unlock(&g_vh.pchan_mutex);
  268. return 0;
  269. err:
  270. /* return vh_pchans back to system */
  271. list_for_each_entry_safe(vh_pchan, vh_pchan_t,
  272. &vh_dev->vh_pchan_list, node)
  273. list_move_tail(&vh_pchan->node, &g_vh.vh_pchan_list);
  274. kfree(vh_dev);
  275. mutex_unlock(&g_vh.pchan_mutex);
  276. return ret;
  277. }
  278. static void *vhost_hab_stop_vq(struct vhost_hab_dev *vh_dev,
  279. struct vhost_virtqueue *vq)
  280. {
  281. struct vhost_hab_pchannel *vh_pchan = vq->private_data;
  282. mutex_lock(&vq->mutex);
  283. vq->private_data = NULL;
  284. mutex_unlock(&vq->mutex);
  285. return (void *)vh_pchan;
  286. }
  287. static void vhost_hab_stop(struct vhost_hab_dev *vh_dev)
  288. {
  289. struct vhost_hab_pchannel *vh_pchan;
  290. list_for_each_entry(vh_pchan, &vh_dev->vh_pchan_list, node) {
  291. vhost_hab_stop_vq(vh_dev,
  292. vh_pchan->vqs + VHOST_HAB_PCHAN_TX_VQ);
  293. vhost_hab_stop_vq(vh_dev,
  294. vh_pchan->vqs + VHOST_HAB_PCHAN_RX_VQ);
  295. }
  296. vh_dev->started = 0;
  297. }
  298. static void vhost_hab_flush_vq(struct vhost_hab_dev *vh_dev,
  299. struct vhost_virtqueue *vq)
  300. {
  301. vhost_poll_flush(&vq->poll);
  302. }
  303. static void vhost_hab_flush(struct vhost_hab_dev *vh_dev)
  304. {
  305. struct vhost_hab_pchannel *vh_pchan;
  306. list_for_each_entry(vh_pchan, &vh_dev->vh_pchan_list, node) {
  307. vhost_hab_flush_vq(vh_dev,
  308. vh_pchan->vqs + VHOST_HAB_PCHAN_TX_VQ);
  309. vhost_hab_flush_vq(vh_dev,
  310. vh_pchan->vqs + VHOST_HAB_PCHAN_RX_VQ);
  311. vhost_work_flush(&vh_dev->dev, &vh_pchan->rx_send_work);
  312. vhost_work_flush(&vh_dev->dev, &vh_pchan->tx_recv_work);
  313. }
  314. }
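/*
 * Release: stop and flush all vqs, tear down the vhost device, then unlink
 * the hab physical channels (hyp_data) and return the vhost_hab_pchannels
 * to the global free list.
 */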
  315. static int vhost_hab_release(struct inode *inode, struct file *f)
  316. {
  317. struct vhost_hab_dev *vh_dev = f->private_data;
  318. struct vhost_hab_pchannel *vh_pchan, *vh_pchan_t;
  319. vhost_hab_stop(vh_dev);
  320. vhost_hab_flush(vh_dev);
  321. vhost_dev_stop(&vh_dev->dev);
  322. vhost_dev_cleanup(&vh_dev->dev);
  323. /* We do an extra flush before freeing memory,
  324. * since jobs can re-queue themselves.
  325. */
  326. vhost_hab_flush(vh_dev);
  327. kfree(vh_dev->dev.vqs);
  328. /* return pchannel back to the system */
  329. mutex_lock(&g_vh.pchan_mutex);
  330. list_for_each_entry_safe(vh_pchan, vh_pchan_t,
  331. &vh_dev->vh_pchan_list, node) {
  332. if (vh_pchan->pchan) {
  333. vh_pchan->pchan->hyp_data = NULL;
  334. hab_pchan_put(vh_pchan->pchan);
  335. vh_pchan->pchan = NULL;
  336. }
  337. list_move_tail(&vh_pchan->node, &g_vh.vh_pchan_list);
  338. }
  339. mutex_unlock(&g_vh.pchan_mutex);
  340. kfree(vh_dev);
  341. return 0;
  342. }
  343. static long vhost_hab_ready_check(struct vhost_hab_dev *vh_dev)
  344. {
  345. struct vhost_virtqueue *vq;
  346. int r, index;
  347. mutex_lock(&vh_dev->dev.mutex);
  348. r = vhost_dev_check_owner(&vh_dev->dev);
  349. if (r)
  350. goto err;
  351. for (index = 0; index < vh_dev->dev.nvqs; ++index) {
  352. vq = vh_dev->dev.vqs[index];
  353. /* Verify that ring has been setup correctly. */
  354. if (!vhost_vq_access_ok(vq)) {
  355. r = -EFAULT;
  356. goto err;
  357. }
  358. if (vq->kick == NULL) {
  359. r = -EFAULT;
  360. goto err;
  361. }
  362. if (vq->call_ctx == NULL) {
  363. r = -EFAULT;
  364. goto err;
  365. }
  366. }
  367. mutex_unlock(&vh_dev->dev.mutex);
  368. return 0;
  369. err:
  370. mutex_unlock(&vh_dev->dev.mutex);
  371. return r;
  372. }
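/*
 * Attach every pchannel to its vqs and mark the device started once all vqs
 * pass vhost_vq_init_access(); a poll kick is then queued on each vq so any
 * pending work is picked up immediately after start.
 */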
  373. static long vhost_hab_run(struct vhost_hab_dev *vh_dev, int start)
  374. {
  375. struct vhost_virtqueue *vq;
  376. struct vhost_hab_pchannel *vh_pchan;
  377. int r = 0, i, ret = 0, not_started = 0;
  378. pr_info("vh_dev start %d\n", start);
  379. if (start < 0 || start > 1)
  380. return -EINVAL;
  381. mutex_lock(&vh_dev->dev.mutex);
  382. if (vh_dev->started == start) {
  383. pr_info("already started\n");
  384. goto exit;
  385. }
  386. r = vhost_dev_check_owner(&vh_dev->dev);
  387. if (r)
  388. goto exit;
  389. for (i = 0; i < vh_dev->dev.nvqs; ++i) {
  390. vq = vh_dev->dev.vqs[i];
  391. /* Verify that ring has been setup correctly. */
  392. if (!vhost_vq_access_ok(vq)) {
  393. r = -EFAULT;
  394. goto exit;
  395. }
  396. }
  397. /* try to start all the pchan and its vq */
  398. list_for_each_entry(vh_pchan, &vh_dev->vh_pchan_list, node) {
  399. for (i = 0; i < VHOST_HAB_PCHAN_VQ_MAX; i++) {
  400. vq = vh_pchan->vqs + i;
  401. if (vq->private_data)
  402. continue; /* already started */
  403. mutex_lock(&vq->mutex);
  404. vq->private_data = vh_pchan;
  405. r = vhost_vq_init_access(vq); /* poll need retry */
  406. if (r) {
  407. vq->private_data = NULL;
  408. not_started += 1;
  409. pr_warn("%s vq %d not ready %d total %d\n",
  410. vh_pchan->pchan->name, i, ret,
  411. not_started);
  412. mutex_unlock(&vq->mutex);
  413. continue; /* still not ready, try next vq */
  414. }
  415. mutex_unlock(&vq->mutex);
  416. } /* vq */
  417. } /* pchan */
  418. if (not_started == 0) {
  419. vh_dev->started = 1; /* ready when all vq is ready */
  420. mutex_unlock(&vh_dev->dev.mutex);
  421. /* Once vhost device starts successfully, trigger a kick */
  422. for (i = 0; i < vh_dev->dev.nvqs; ++i) {
  423. vq = vh_dev->dev.vqs[i];
  424. vhost_poll_queue(&vq->poll);
  425. }
  426. pr_info("%s exit start %d r %d ret %d not_started %d\n",
  427. __func__, start, r, ret, not_started);
  428. return 0;
  429. }
  430. exit:
  431. mutex_unlock(&vh_dev->dev.mutex);
  432. pr_info("%s exit start %d failure r %d ret %d not_started %d\n",
  433. __func__, start, r, ret, not_started);
  434. return r;
  435. }
  436. static long vhost_hab_reset_owner(struct vhost_hab_dev *vh_dev)
  437. {
  438. long err;
  439. struct vhost_umem *umem;
  440. mutex_lock(&vh_dev->dev.mutex);
  441. err = vhost_dev_check_owner(&vh_dev->dev);
  442. if (err)
  443. goto done;
  444. umem = vhost_dev_reset_owner_prepare();
  445. if (!umem) {
  446. err = -ENOMEM;
  447. goto done;
  448. }
  449. vhost_hab_stop(vh_dev);
  450. vhost_hab_flush(vh_dev);
  451. vhost_dev_stop(&vh_dev->dev);
  452. vhost_dev_reset_owner(&vh_dev->dev, umem);
  453. done:
  454. mutex_unlock(&vh_dev->dev.mutex);
  455. return err;
  456. }
  457. static int vhost_hab_set_features(struct vhost_hab_dev *vh_dev, u64 features)
  458. {
  459. struct vhost_virtqueue *vq;
  460. int i;
  461. mutex_lock(&vh_dev->dev.mutex);
  462. if ((features & (1 << VHOST_F_LOG_ALL)) &&
  463. !vhost_log_access_ok(&vh_dev->dev)) {
  464. mutex_unlock(&vh_dev->dev.mutex);
  465. return -EFAULT;
  466. }
  467. for (i = 0; i < vh_dev->dev.nvqs; ++i) {
  468. vq = vh_dev->dev.vqs[i];
  469. mutex_lock(&vq->mutex);
  470. vq->acked_features = features;
  471. mutex_unlock(&vq->mutex);
  472. }
  473. mutex_unlock(&vh_dev->dev.mutex);
  474. return 0;
  475. }
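/*
 * Bind each vhost_hab_pchannel owned by this device to the hab physical
 * channel matching (its mmid, vmid), linking them through pchan->hyp_data.
 * Already-bound channels are rolled back if any lookup fails.
 */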
  476. static int vhost_hab_set_pchannels(struct vhost_hab_dev *vh_dev, int vmid)
  477. {
  478. struct vhost_hab_pchannel *vh_pchan;
  479. struct physical_channel *pchan;
  480. int ret = 0;
  481. mutex_lock(&g_vh.pchan_mutex);
  482. list_for_each_entry(vh_pchan, &vh_dev->vh_pchan_list, node) {
  483. pchan = hab_pchan_find_domid(vh_pchan->habdev, vmid);
  484. if (!pchan || pchan->hyp_data) {
  485. pr_err("failed to find pchan for mmid %d, vmid %d\n",
  486. vh_pchan->habdev->id, vmid);
  487. goto err;
  488. }
  489. vh_pchan->pchan = pchan;
  490. pchan->hyp_data = vh_pchan;
  491. }
  492. mutex_unlock(&g_vh.pchan_mutex);
  493. return ret;
  494. err:
  495. list_for_each_entry_continue_reverse(vh_pchan, &vh_dev->vh_pchan_list,
  496. node) {
  497. vh_pchan->pchan->hyp_data = NULL;
  498. hab_pchan_put(vh_pchan->pchan);
  499. vh_pchan->pchan = NULL;
  500. }
  501. mutex_unlock(&g_vh.pchan_mutex);
  502. return -ENODEV;
  503. }
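/*
 * VHOST_SET_CONFIG handler: copy the guest description from userspace and
 * parse the numeric id that follows "vm" in vm_name (terminated at '-');
 * for example, an illustrative name such as "vm2-la" yields vmid 2. The
 * physical channels for that vmid are then bound to this device.
 */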
  504. static int vhost_hab_set_config(struct vhost_hab_dev *vh_dev,
  505. struct vhost_config *cfg)
  506. {
  507. struct vhost_hab_config hab_cfg;
  508. size_t vm_name_size = sizeof(hab_cfg.vm_name);
  509. size_t cfg_size = min_t(size_t, (size_t)cfg->size, sizeof(hab_cfg));
  510. char *s, *t;
  511. int vmid;
  512. int ret;
  513. if (copy_from_user(&hab_cfg, cfg->data + cfg->offset, cfg_size))
  514. return -EFAULT;
  515. hab_cfg.vm_name[vm_name_size - 1] = '\0';
  516. pr_info("%s: vm_name %s\n", __func__, hab_cfg.vm_name);
  517. s = strnstr(hab_cfg.vm_name, "vm", vm_name_size);
  518. if (!s) {
  519. pr_err("vmid is not found in vm_name\n");
  520. return -EINVAL;
  521. }
  522. s += 2; /* skip 'vm' */
  523. if (s >= (hab_cfg.vm_name + vm_name_size)) {
  524. pr_err("id is not found after 'vm' in vm_name\n");
  525. return -EINVAL;
  526. }
  527. /* terminate string at '-' after 'vm' */
  528. t = strchrnul(s, '-');
  529. *t = '\0';
  530. ret = kstrtoint(s, 10, &vmid);
  531. if (ret < 0) {
  532. pr_err("failed to parse vmid from %s, %d\n", s, ret);
  533. return ret;
  534. }
  535. pr_debug("vmid=%d\n", vmid);
  536. return vhost_hab_set_pchannels(vh_dev, vmid);
  537. }
  538. static long vhost_hab_ioctl(struct file *f, unsigned int ioctl,
  539. unsigned long arg)
  540. {
  541. struct vhost_hab_dev *vh_dev = f->private_data;
  542. void __user *argp = (void __user *)arg;
  543. u64 features;
  544. struct vhost_config config;
  545. int r = 0;
  546. switch (ioctl) {
  547. case VHOST_RESET_OWNER:
  548. r = vhost_hab_reset_owner(vh_dev);
  549. break;
  550. case VHOST_SET_FEATURES:
  551. if (copy_from_user(&features, argp, sizeof(features))) {
  552. r = -EFAULT;
  553. break;
  554. }
  555. if (features & ~VHOST_FEATURES) {
  556. r = -EOPNOTSUPP;
  557. break;
  558. }
  559. r = vhost_hab_set_features(vh_dev, features);
  560. break;
  561. case VHOST_SET_CONFIG:
  562. if (copy_from_user(&config, argp, sizeof(config))) {
  563. r = -EFAULT;
  564. break;
  565. }
  566. r = vhost_hab_set_config(vh_dev, &config);
  567. break;
  568. case VHOST_GET_FEATURES:
  569. features = VHOST_FEATURES;
  570. if (copy_to_user(argp, &features, sizeof(features))) {
  571. r = -EFAULT;
  572. break;
  573. }
  574. r = 0;
  575. break;
  576. default:
  577. {
  578. mutex_lock(&vh_dev->dev.mutex);
  579. r = vhost_dev_ioctl(&vh_dev->dev, ioctl, argp);
  580. if (r == -ENOIOCTLCMD)
  581. r = vhost_vring_ioctl(&vh_dev->dev, ioctl, argp);
  582. vhost_hab_flush(vh_dev);
  583. mutex_unlock(&vh_dev->dev.mutex);
  584. if (vhost_hab_ready_check(vh_dev) == 0)
  585. vhost_hab_run(vh_dev, 1);
  586. break;
  587. }
  588. }
  589. return r;
  590. }
  591. #ifdef CONFIG_COMPAT
  592. static long vhost_hab_compat_ioctl(struct file *f, unsigned int ioctl,
  593. unsigned long arg)
  594. {
  595. return vhost_hab_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
  596. }
  597. #endif
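/*
 * Module-level backend registration: allocate the "vhost-msm" char device
 * region, create the device class and the single-threaded stat workqueue,
 * and initialize the global pchannel list and its mutex.
 */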
  598. int hab_hypervisor_register(void)
  599. {
  600. uint32_t max_devices = HABCFG_MMID_AREA_MAX + 1;
  601. dev_t dev_no;
  602. int ret;
  603. ret = alloc_chrdev_region(&dev_no, 0, max_devices, "vhost-msm");
  604. if (ret < 0) {
  605. pr_err("alloc_chrdev_region failed: %d\n", ret);
  606. return ret;
  607. }
  608. g_vh.major = MAJOR(dev_no);
  609. pr_info("g_vh.major %d\n", g_vh.major);
  610. g_vh.class = class_create(THIS_MODULE, "vhost-msm");
  611. if (IS_ERR_OR_NULL(g_vh.class)) {
  612. pr_err("class_create failed\n");
  613. unregister_chrdev_region(MKDEV(g_vh.major, 0), max_devices);
  614. return g_vh.class ? PTR_ERR(g_vh.class) : -ENOMEM;
  615. }
  616. g_vh.wq = create_singlethread_workqueue("hab_vhost_wq");
  617. if (!g_vh.wq) {
  618. pr_err("create workqueue failed\n");
  619. class_destroy(g_vh.class);
  620. unregister_chrdev_region(MKDEV(g_vh.major, 0), max_devices);
  621. return -EINVAL;
  622. }
  623. mutex_init(&g_vh.pchan_mutex);
  624. INIT_LIST_HEAD(&g_vh.vh_pchan_list);
  625. return 0;
  626. }
  627. void hab_hypervisor_unregister(void)
  628. {
  629. uint32_t max_devices = HABCFG_MMID_AREA_MAX + 1;
  630. struct vhost_hab_pchannel *n, *vh_pchan;
  631. list_for_each_entry_safe(vh_pchan, n, &g_vh.vh_pchan_list, node) {
  632. /*
  633. * workaround: force hyp_data to NULL to prevent vh_pchan from
  634. * being freed again during hab_pchan_free.
  635. * ideally hab_pchan_free should not free hyp_data because it
  636. * is not allocated by hab_pchan_alloc.
  637. */
  638. if (vh_pchan->pchan)
  639. vh_pchan->pchan->hyp_data = NULL;
  640. list_del(&vh_pchan->node);
  641. mutex_destroy(&vh_pchan->send_list_mutex);
  642. kfree(vh_pchan);
  643. }
  644. hab_hypervisor_unregister_common();
  645. mutex_destroy(&g_vh.pchan_mutex);
  646. flush_workqueue(g_vh.wq);
  647. destroy_workqueue(g_vh.wq);
  648. class_destroy(g_vh.class);
  649. unregister_chrdev_region(MKDEV(g_vh.major, 0), max_devices);
  650. }
  651. int hab_hypervisor_register_post(void) { return 0; }
  652. void hab_pipe_read_dump(struct physical_channel *pchan) { }
  653. void dump_hab_wq(struct physical_channel *pchan) { }
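/*
 * get_rx_buf_locked(): fetch one descriptor chain from the RX vq. When the
 * queue is empty, re-enable guest notification and retry if new buffers
 * appeared; otherwise return -EAGAIN. On success an iov_iter over the
 * guest's in-buffers is set up for the caller to fill.
 */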
  654. /* caller must hold vq->mutex */
  655. static int get_rx_buf_locked(struct vhost_dev *dev,
  656. struct vhost_hab_pchannel *vh_pchan,
  657. struct iov_iter *in_iter, size_t *in_len,
  658. size_t *out_len, int *head)
  659. {
  660. struct vhost_virtqueue *vq = &vh_pchan->vqs[VHOST_HAB_PCHAN_RX_VQ];
  661. unsigned int out_num = 0, in_num = 0;
  662. int ret = 0;
  663. while (1) {
  664. *head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
  665. &out_num, &in_num, NULL, NULL);
  666. if (*head < 0) {
  667. pr_err("failed to get correct head %d\n", *head);
  668. ret = -EIO;
  669. break;
  670. }
  671. if (*head == vq->num) {
  672. pr_debug("rx buf %s underrun, vq avail %d\n",
  673. vh_pchan->pchan->name,
  674. vhost_vq_avail_empty(dev, vq));
  675. if (unlikely(vhost_enable_notify(dev, vq))) {
  676. pr_debug("%s enable notify return true, %d\n",
  677. vh_pchan->pchan->name,
  678. vhost_vq_avail_empty(dev, vq));
  679. vhost_disable_notify(dev, vq);
  680. continue; /* retry */
  681. }
  682. ret = -EAGAIN;
  683. break; /* no more buff wait for next round */
  684. }
  685. if (unlikely(out_num)) {
  686. *out_len = iov_length(vq->iov, out_num);
  687. pr_warn("unexpected outbuf in rxvq, num %d, len %lu\n",
  688. out_num, *out_len);
  689. }
  690. if (in_num) {
  691. *in_len = iov_length(&vq->iov[out_num], in_num);
  692. if (*in_len > 0)
  693. iov_iter_init(in_iter, READ, &vq->iov[out_num],
  694. in_num, *in_len);
  695. else {
  696. pr_err("out of in-len in iov %d in-num %d out-num %d\n",
  697. *in_len, in_num, out_num);
  698. ret = -EIO;
  699. }
  700. } else {
  701. pr_err("no in buf in this slot\n");
  702. ret = -EBUSY;
  703. }
  704. break;
  705. }
  706. return ret;
  707. }
  708. static ssize_t fill_rx_buf(void **pbuf, size_t *remain_size,
  709. struct iov_iter *in_iter, size_t in_len,
  710. size_t *size_filled)
  711. {
  712. ssize_t copy_size, copy_size_ret;
  713. if (unlikely(in_len < *remain_size))
  714. copy_size = in_len;
  715. else
  716. copy_size = *remain_size;
  717. copy_size_ret = copy_to_iter(*pbuf, copy_size, in_iter);
  718. if (unlikely(copy_size_ret != copy_size)) {
  719. pr_err("fault on copy_to_iter, copy_size %lu, ret %lu\n",
  720. copy_size, copy_size_ret);
  721. return -EFAULT;
  722. }
  723. *remain_size -= copy_size;
  724. *size_filled += copy_size;
  725. *pbuf += copy_size;
  726. return 0;
  727. }
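/*
 * rx_send_one_node_locked(): copy one queued message (header + payload) into
 * as many RX buffers as needed. PROFILE payloads are timestamped, and the
 * header is stamped with an updated sequence_tx and the HAB signature before
 * being copied. Each filled buffer is added to the used ring; the caller is
 * responsible for signalling the guest.
 */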
  728. /* caller must hold vq->mutex */
  729. static int rx_send_one_node_locked(struct vhost_dev *dev,
  730. struct vhost_hab_pchannel *vh_pchan,
  731. struct vhost_virtqueue *vq,
  732. struct vhost_hab_send_node *send_node,
  733. int *added)
  734. {
  735. int head;
  736. struct hab_header *header = &send_node->header;
  737. void *data = header;
  738. size_t remain_size = sizeof(*header) + HAB_HEADER_GET_SIZE(*header);
  739. size_t out_len, in_len, total_len = 0;
  740. size_t size_filled;
  741. struct iov_iter in_iter;
  742. int ret = 0;
  743. while (remain_size > 0) {
  744. out_len = 0;
  745. in_len = 0;
  746. ret = get_rx_buf_locked(dev, vh_pchan, &in_iter, &in_len,
  747. &out_len, &head);
  748. if (ret) {
  749. if (ret != -EAGAIN)
  750. pr_info("%s failed to get one rx-buf ret %d\n",
  751. vh_pchan->pchan->name, ret);
  752. break;
  753. }
  754. total_len += in_len + out_len;
  755. size_filled = 0;
  756. if (in_len) {
  757. if (HAB_HEADER_GET_TYPE(send_node->header) ==
  758. HAB_PAYLOAD_TYPE_PROFILE) {
  759. struct habmm_xing_vm_stat *pstat =
  760. (struct habmm_xing_vm_stat *)
  761. (send_node->payload);
  762. struct timespec64 ts = {0};
  763. ktime_get_ts64(&ts);
  764. pstat->tx_sec = ts.tv_sec;
  765. pstat->tx_usec = ts.tv_nsec/NSEC_PER_USEC;
  766. }
  767. header->sequence = ++vh_pchan->pchan->sequence_tx;
  768. header->signature = HAB_HEAD_SIGNATURE;
  769. ret = fill_rx_buf((void **)(&data),
  770. &remain_size,
  771. &in_iter, in_len, &size_filled);
  772. if (ret)
  773. break;
  774. ret = vhost_add_used(vq, head, size_filled);
  775. if (ret) {
  776. pr_err("%s failed to add used ret %d head %d size %d\n",
  777. vh_pchan->pchan->name, ret, head, size_filled);
  778. break;
  779. }
  780. *added += 1; /* continue for the remaining */
  781. } else {
  782. pr_err("%s rx-buf empty ret %d inlen %d outlen %d head %d\n",
  783. vh_pchan->pchan->name, ret, in_len, out_len, head);
  784. ret = -EPIPE;
  785. break;
  786. }
  787. if (unlikely(vhost_exceeds_weight(vq, 0, total_len))) {
  788. pr_err("total_len %lu > hab vq weight %d\n",
  789. total_len, VHOST_HAB_WEIGHT);
  790. ret = -EINVAL;
  791. break;
  792. }
  793. }
  794. return ret;
  795. }
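/*
 * Drain vh_pchan->send_list into the RX virtqueue: pop queued send nodes,
 * copy each into guest RX buffers and free it, stopping when the list or
 * the RX buffers run out. The guest is signalled once if anything was added.
 */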
  796. static int rx_worker(struct vhost_hab_pchannel *vh_pchan)
  797. {
  798. struct vhost_hab_send_node *send_node;
  799. struct vhost_virtqueue *vq = &vh_pchan->vqs[VHOST_HAB_PCHAN_RX_VQ];
  800. struct vhost_dev *dev = vq->dev;
  801. int ret = 0, has_send = 1, added = 0;
  802. mutex_lock(&vq->mutex);
  803. vh_pchan = vq->private_data;
  804. if (!vh_pchan) {
  805. pr_err("rx vq is not ready yet\n");
  806. goto err_unlock;
  807. }
  808. vhost_disable_notify(dev, vq); /* no notify by default */
  809. while (has_send) {
  810. mutex_lock(&vh_pchan->send_list_mutex);
  811. send_node = list_first_entry_or_null(&vh_pchan->send_list,
  812. struct vhost_hab_send_node, node);
  813. mutex_unlock(&vh_pchan->send_list_mutex);
  814. if (!send_node) {
  815. has_send = 0; /* completed send list wait for more */
  816. } else {
  817. ret = rx_send_one_node_locked(dev, vh_pchan, vq,
  818. send_node, &added);
  819. if (ret)
  820. break; /* no more rx buf wait for next round */
  821. mutex_lock(&vh_pchan->send_list_mutex);
  822. list_del(&send_node->node);
  823. mutex_unlock(&vh_pchan->send_list_mutex);
  824. kfree(send_node); /* send OK process more */
  825. }
  826. }
  827. if (added)
  828. vhost_signal(dev, vq);
  829. err_unlock:
  830. mutex_unlock(&vq->mutex);
  831. return 0;
  832. }
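/*
 * hab core transmit hook: copy header + payload into a freshly allocated
 * send node, append it to the pchannel's send_list and queue rx_send_work
 * on the vhost worker, which pushes it into the guest RX vq.
 */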
  833. int physical_channel_send(struct physical_channel *pchan,
  834. struct hab_header *header,
  835. void *payload,
  836. unsigned int flags)
  837. {
  838. struct vhost_hab_pchannel *vh_pchan = pchan->hyp_data;
  839. struct vhost_virtqueue *vq;
  840. struct vhost_hab_dev *vh_dev;
  841. struct vhost_hab_send_node *send_node;
  842. size_t sizebytes = HAB_HEADER_GET_SIZE(*header);
  843. /* Only used in virtio arch */
  844. (void)flags;
  845. if (!vh_pchan) {
  846. pr_err("pchan is not ready yet\n");
  847. return -ENODEV;
  848. }
  849. vq = &vh_pchan->vqs[VHOST_HAB_PCHAN_RX_VQ];
  850. vh_dev = container_of(vq->dev, struct vhost_hab_dev, dev);
  851. send_node = kmalloc(sizebytes + sizeof(struct vhost_hab_send_node),
  852. GFP_KERNEL);
  853. if (!send_node)
  854. return -ENOMEM;
  855. send_node->header = *header;
  856. memcpy(send_node->payload, payload, sizebytes);
  857. mutex_lock(&vh_pchan->send_list_mutex);
  858. list_add_tail(&send_node->node, &vh_pchan->send_list);
  859. mutex_unlock(&vh_pchan->send_list_mutex);
  860. vhost_work_queue(&vh_dev->dev, &vh_pchan->rx_send_work);
  861. return 0;
  862. }
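/*
 * hab core receive hook: called from hab_msg_recv() to pull the message
 * payload out of the iov_iter that tx_worker() prepared over the guest's
 * out-buffers.
 */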
  863. int physical_channel_read(struct physical_channel *pchan,
  864. void *payload,
  865. size_t read_size)
  866. {
  867. struct vhost_hab_pchannel *vh_pchan = pchan->hyp_data;
  868. ssize_t copy_size;
  869. if (!vh_pchan) {
  870. pr_err("pchan is not ready yet\n");
  871. return -ENODEV;
  872. }
  873. copy_size = copy_from_iter(payload, read_size, &vh_pchan->out_iter);
  874. if (unlikely(copy_size != read_size))
  875. pr_err("fault on copy_from_iter, read_size %lu, ret %lu\n",
  876. read_size, copy_size);
  877. return copy_size;
  878. }
  879. void physical_channel_rx_dispatch(unsigned long physical_channel)
  880. {
  881. struct physical_channel *pchan =
  882. (struct physical_channel *)physical_channel;
  883. struct vhost_hab_pchannel *vh_pchan = pchan->hyp_data;
  884. struct vhost_virtqueue *vq = vh_pchan->vqs + VHOST_HAB_PCHAN_TX_VQ;
  885. struct vhost_hab_dev *vh_dev = container_of(vq->dev,
  886. struct vhost_hab_dev, dev);
  887. if (!vh_pchan) {
  888. pr_err("pchan is not ready yet\n");
  889. return;
  890. }
  891. vq = vh_pchan->vqs + VHOST_HAB_PCHAN_TX_VQ;
  892. vh_dev = container_of(vq->dev, struct vhost_hab_dev, dev);
  893. vhost_work_queue(&vh_dev->dev, &vh_pchan->tx_recv_work);
  894. }
  895. static const struct file_operations vhost_hab_fops = {
  896. .owner = THIS_MODULE,
  897. .open = vhost_hab_open,
  898. .release = vhost_hab_release,
  899. .unlocked_ioctl = vhost_hab_ioctl,
  900. #ifdef CONFIG_COMPAT
  901. .compat_ioctl = vhost_hab_compat_ioctl,
  902. #endif
  903. };
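/*
 * Look up or lazily create the per-domain char device: allocate the cdev
 * with minor number equal to the domain id and create the corresponding
 * "vhost-<area>" device node under the vhost-msm class.
 */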
  904. static struct vhost_hab_cdev *get_cdev(uint32_t domain_id)
  905. {
  906. struct vhost_hab *vh = &g_vh;
  907. struct vhost_hab_cdev *vh_cdev = vh->vh_cdevs[domain_id];
  908. int ret;
  909. if (vh_cdev != NULL)
  910. return vh_cdev;
  911. vh_cdev = kzalloc(sizeof(*vh_cdev), GFP_KERNEL);
  912. if (vh_cdev == NULL)
  913. return NULL;
  914. cdev_init(&vh_cdev->cdev, &vhost_hab_fops);
  915. vh_cdev->cdev.owner = THIS_MODULE;
  916. vh_cdev->dev_no = MKDEV(vh->major, domain_id);
  917. ret = cdev_add(&vh_cdev->cdev, vh_cdev->dev_no, 1);
  918. if (ret) {
  919. pr_err("cdev_add failed for dev_no %d, domain_id %d\n",
  920. vh_cdev->dev_no, domain_id);
  921. goto err_free_cdev;
  922. }
  923. vh_cdev->dev = device_create(vh->class, NULL, vh_cdev->dev_no, NULL,
  924. "vhost-%s", hab_area_names[domain_id]);
  925. if (IS_ERR_OR_NULL(vh_cdev->dev)) {
  926. pr_err("device_create failed for, domain_id %d\n", domain_id);
  927. goto err_cdev_del;
  928. }
  929. vh_cdev->domain_id = domain_id;
  930. vh->vh_cdevs[domain_id] = vh_cdev;
  931. return vh_cdev;
  932. err_cdev_del:
  933. cdev_del(&vh_cdev->cdev);
  934. err_free_cdev:
  935. kfree(vh_cdev);
  936. return NULL;
  937. }
  938. static void del_hab_device_from_cdev(uint32_t mmid, struct hab_device *habdev)
  939. {
  940. struct vhost_hab *vh = &g_vh;
  941. struct vhost_hab_cdev *vh_cdev;
  942. uint32_t domain_id = mmid / 100;
  943. bool destroy = true;
  944. int i;
  945. vh_cdev = vh->vh_cdevs[domain_id];
  946. if (vh_cdev == NULL) {
  947. pr_err("cdev not created for domain %d\n", domain_id);
  948. return;
  949. }
  950. for (i = 0; i < HABCFG_MMID_NUM; i++) {
  951. if (vh_cdev->habdevs[i] == habdev)
  952. vh_cdev->habdevs[i] = NULL;
  953. else if (vh_cdev->habdevs[i] != NULL)
  954. destroy = false;
  955. }
  956. /* if no habdev is on this cdev, destroy it */
  957. if (!destroy)
  958. return;
  959. pr_info("delete cdev, mmid %d\n", mmid);
  960. device_destroy(vh->class, vh_cdev->dev_no);
  961. cdev_del(&vh_cdev->cdev);
  962. kfree(vh_cdev);
  963. vh->vh_cdevs[domain_id] = NULL;
  964. }
  965. static void vhost_hab_cdev_del_hab_device(struct hab_device *habdev)
  966. {
  967. del_hab_device_from_cdev(habdev->id, habdev);
  968. del_hab_device_from_cdev(HAB_MMID_ALL_AREA, habdev);
  969. }
  970. static int add_hab_device_to_cdev(uint32_t mmid, struct hab_device *habdev)
  971. {
  972. struct vhost_hab_cdev *vh_cdev;
  973. uint32_t domain_id = mmid / 100;
  974. int i;
  975. vh_cdev = get_cdev(domain_id);
  976. if (vh_cdev == NULL)
  977. return -ENODEV;
  978. /* add hab device to the new slot */
  979. for (i = 0; i < HABCFG_MMID_NUM; i++)
  980. if (vh_cdev->habdevs[i] == NULL) {
  981. vh_cdev->habdevs[i] = habdev;
  982. break;
  983. }
  984. if (i >= HABCFG_MMID_NUM) {
  985. pr_err("too many hab devices created\n");
  986. return -EINVAL;
  987. }
  988. return 0;
  989. }
  990. static int vhost_hab_cdev_add_hab_device(struct hab_device *habdev)
  991. {
  992. int ret;
  993. ret = add_hab_device_to_cdev(HAB_MMID_ALL_AREA, habdev);
  994. if (ret)
  995. return ret;
  996. ret = add_hab_device_to_cdev(habdev->id, habdev);
  997. if (ret)
  998. del_hab_device_from_cdev(HAB_MMID_ALL_AREA, habdev);
  999. return ret;
  1000. }
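/*
 * Per physical channel backend setup, called while the hab driver probes:
 * allocate the hab pchan (always backend here) and the vhost_hab_pchannel
 * with its kick handlers and send list, create the per-area char device when
 * the first pchannel of a hab device appears, and park the vhost_hab_pchannel
 * on the global free list until a vhost device claims it in open().
 */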
  1001. int habhyp_commdev_alloc(void **commdev, int is_be, char *name,
  1002. int vmid_remote, struct hab_device *habdev)
  1003. {
  1004. struct physical_channel *pchan;
  1005. struct vhost_hab_pchannel *vh_pchan;
  1006. int ret;
  1007. pchan = hab_pchan_alloc(habdev, vmid_remote);
  1008. if (!pchan) {
  1009. pr_err("failed to create %s pchan in mmid device %s, pchan cnt %d\n",
  1010. name, habdev->name, habdev->pchan_cnt);
  1011. *commdev = NULL;
  1012. return -ENOMEM;
  1013. }
  1014. pchan->closed = 0;
  1015. pchan->is_be = 1; /* vhost is always backend */
  1016. strscpy(pchan->name, name, sizeof(pchan->name));
  1017. pr_info("pchan on %s, loopback %d, total pchan %d, vmid %d\n",
  1018. name, hab_driver.b_loopback, habdev->pchan_cnt, vmid_remote);
  1019. vh_pchan = kzalloc(sizeof(*vh_pchan), GFP_KERNEL);
  1020. if (!vh_pchan) {
  1021. /* err_free_pchan below drops the pchan reference */
  1022. ret = -ENOMEM;
  1023. *commdev = NULL;
  1024. goto err_free_pchan;
  1025. }
  1026. vh_pchan->vqs[VHOST_HAB_PCHAN_TX_VQ].handle_kick = handle_tx_vq_kick;
  1027. vh_pchan->vqs[VHOST_HAB_PCHAN_RX_VQ].handle_kick = handle_rx_vq_kick;
  1028. mutex_init(&vh_pchan->send_list_mutex);
  1029. INIT_LIST_HEAD(&vh_pchan->send_list);
  1030. /* only add hab device when the first pchannel is added to it */
  1031. if (habdev->pchan_cnt == 1) {
  1032. ret = vhost_hab_cdev_add_hab_device(habdev);
  1033. if (ret) {
  1034. pr_err("vhost_hab_cdev_add_hab_device failed, vmid %d, mmid %d\n",
  1035. vmid_remote, habdev->id);
  1036. goto err_free_vh_pchan;
  1037. }
  1038. }
  1039. vh_pchan->habdev = habdev;
  1040. list_add_tail(&vh_pchan->node, &g_vh.vh_pchan_list);
  1041. *commdev = pchan;
  1042. pr_debug("pchan %s vchans %d refcnt %d\n",
  1043. pchan->name, pchan->vcnt, get_refcnt(pchan->refcount));
  1044. return 0;
  1045. err_free_vh_pchan:
  1046. kfree(vh_pchan);
  1047. err_free_pchan:
  1048. hab_pchan_put(pchan);
  1049. return ret;
  1050. }
  1051. int habhyp_commdev_dealloc(void *commdev)
  1052. {
  1053. struct physical_channel *pchan = commdev;
  1054. struct hab_device *habdev = pchan->habdev;
  1055. /* only remove hab device when removing the last pchannel */
  1056. if (habdev->pchan_cnt == 1)
  1057. vhost_hab_cdev_del_hab_device(habdev);
  1058. hab_pchan_put(pchan);
  1059. return 0;
  1060. }
  1061. int hab_stat_log(struct physical_channel **pchans, int pchan_cnt, char *dest,
  1062. int dest_size)
  1063. {
  1064. struct vhost_hab_stat_work stat_work;
  1065. struct vhost_hab_pchannel *vh_pchan;
  1066. int i, ret = 0;
  1067. stat_work.pchans = pchans;
  1068. stat_work.pchan_count = pchan_cnt;
  1069. INIT_WORK_ONSTACK(&stat_work.work, stat_worker);
  1070. queue_work(g_vh.wq, &stat_work.work);
  1071. flush_workqueue(g_vh.wq);
  1072. destroy_work_on_stack(&stat_work.work);
  1073. mutex_lock(&g_vh.pchan_mutex);
  1074. for (i = 0; i < pchan_cnt; i++) {
  1075. vh_pchan = pchans[i]->hyp_data;
  1076. if (!vh_pchan) {
  1077. pr_err("%s: pchan %s is not ready\n", __func__,
  1078. pchans[i]->name);
  1079. continue;
  1080. }
  1081. ret = hab_stat_buffer_print(dest, dest_size,
  1082. "mmid %d: vq empty tx %d rx %d\n",
  1083. vh_pchan->habdev->id, vh_pchan->tx_empty,
  1084. vh_pchan->rx_empty);
  1085. if (ret)
  1086. break;
  1087. }
  1088. mutex_unlock(&g_vh.pchan_mutex);
  1089. return ret;
  1090. }
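/*
 * Workqueue side of hab_stat_log(): adopt the vhost owner's mm (and USER_DS)
 * so vhost_vq_avail_empty() can read the guest-visible rings, then cache the
 * tx/rx "avail empty" state for out-of-context reporting.
 */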
  1091. static void stat_worker(struct work_struct *work)
  1092. {
  1093. struct vhost_hab_stat_work *stat_work = container_of(work,
  1094. struct vhost_hab_stat_work, work);
  1095. struct vhost_hab_pchannel *vh_pchan;
  1096. struct vhost_virtqueue *vq_tx;
  1097. struct vhost_virtqueue *vq_rx;
  1098. int i;
  1099. mm_segment_t oldfs;
  1100. mutex_lock(&g_vh.pchan_mutex);
  1101. for (i = 0; i < stat_work->pchan_count; i++) {
  1102. vh_pchan = stat_work->pchans[i]->hyp_data;
  1103. if (!vh_pchan) {
  1104. pr_err("%s: pchan %s is not ready\n", __func__,
  1105. stat_work->pchans[i]->name);
  1106. continue;
  1107. }
  1108. vq_tx = vh_pchan->vqs + VHOST_HAB_PCHAN_TX_VQ;
  1109. vq_rx = vh_pchan->vqs + VHOST_HAB_PCHAN_RX_VQ;
  1110. oldfs = get_fs();
  1111. set_fs(USER_DS);
  1112. use_mm(vq_tx->dev->mm);
  1113. vh_pchan->tx_empty = vhost_vq_avail_empty(vq_tx->dev, vq_tx);
  1114. vh_pchan->rx_empty = vhost_vq_avail_empty(vq_rx->dev, vq_rx);
  1115. unuse_mm(vq_tx->dev->mm);
  1116. set_fs(oldfs);
  1117. pr_info("%s mmid %d vq tx num %d empty %d vq rx num %d empty %d\n",
  1118. vh_pchan->pchan->name, vh_pchan->pchan->habdev->id, vq_tx->num,
  1119. vh_pchan->tx_empty, vq_rx->num, vh_pchan->rx_empty);
  1120. }
  1121. mutex_unlock(&g_vh.pchan_mutex);
  1122. }