ioreq.c

// SPDX-License-Identifier: GPL-2.0
/*
 * ACRN_HSM: Handle I/O requests
 *
 * Copyright (C) 2020 Intel Corporation. All rights reserved.
 *
 * Authors:
 *	Jason Chen CJ <[email protected]>
 *	Fengwei Yin <[email protected]>
 */

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/acrn.h>

#include "acrn_drv.h"

static void ioreq_pause(void);
static void ioreq_resume(void);

static void ioreq_dispatcher(struct work_struct *work);
static struct workqueue_struct *ioreq_wq;
static DECLARE_WORK(ioreq_work, ioreq_dispatcher);

static inline bool has_pending_request(struct acrn_ioreq_client *client)
{
	return !bitmap_empty(client->ioreqs_map, ACRN_IO_REQUEST_MAX);
}

static inline bool is_destroying(struct acrn_ioreq_client *client)
{
	return test_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
}

static int ioreq_complete_request(struct acrn_vm *vm, u16 vcpu,
				  struct acrn_io_request *acrn_req)
{
	bool polling_mode;
	int ret = 0;

	polling_mode = acrn_req->completion_polling;
	/* Add barrier() to make sure the writes are done before completion */
	smp_store_release(&acrn_req->processed, ACRN_IOREQ_STATE_COMPLETE);

	/*
	 * To fulfill the requirement of real-time in several industry
	 * scenarios, like automotive, ACRN can run under the partition mode,
	 * in which User VMs and Service VM are bound to dedicated CPU cores.
	 * Polling mode of handling the I/O request is introduced to achieve a
	 * faster I/O request handling. In polling mode, the hypervisor polls
	 * I/O request's completion. Once an I/O request is marked as
	 * ACRN_IOREQ_STATE_COMPLETE, hypervisor resumes from the polling point
	 * to continue the I/O request flow. Thus, the completion notification
	 * from HSM of I/O request is not needed. Please note,
	 * completion_polling needs to be read before the I/O request being
	 * marked as ACRN_IOREQ_STATE_COMPLETE to avoid racing with the
	 * hypervisor.
	 */
	if (!polling_mode) {
		ret = hcall_notify_req_finish(vm->vmid, vcpu);
		if (ret < 0)
			dev_err(acrn_dev.this_device,
				"Notify I/O request finished failed!\n");
	}

	return ret;
}
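
/*
 * Complete the I/O request of a vCPU on behalf of an I/O client: clear the
 * vCPU's pending bit and mark the request as completed. When no request is
 * passed in, the vCPU's slot in the shared I/O request buffer is used.
 */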
static int acrn_ioreq_complete_request(struct acrn_ioreq_client *client,
				       u16 vcpu,
				       struct acrn_io_request *acrn_req)
{
	int ret;

	if (vcpu >= client->vm->vcpu_num)
		return -EINVAL;

	clear_bit(vcpu, client->ioreqs_map);
	if (!acrn_req) {
		acrn_req = (struct acrn_io_request *)client->vm->ioreq_buf;
		acrn_req += vcpu;
	}

	ret = ioreq_complete_request(client->vm, vcpu, acrn_req);

	return ret;
}
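
/**
 * acrn_ioreq_request_default_complete() - Complete a vCPU's I/O request via
 *                                         the default client
 * @vm: The VM whose I/O request is to be completed
 * @vcpu: The vCPU which issued the I/O request
 *
 * Return: 0 on success, <0 on error
 */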
int acrn_ioreq_request_default_complete(struct acrn_vm *vm, u16 vcpu)
{
	int ret = 0;

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (vm->default_client)
		ret = acrn_ioreq_complete_request(vm->default_client,
						  vcpu, NULL);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	return ret;
}

/**
 * acrn_ioreq_range_add() - Add an iorange monitored by an ioreq client
 * @client:	The ioreq client
 * @type:	Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start:	Start address of iorange
 * @end:	End address of iorange
 *
 * Return: 0 on success, <0 on error
 */
int acrn_ioreq_range_add(struct acrn_ioreq_client *client,
			 u32 type, u64 start, u64 end)
{
	struct acrn_ioreq_range *range;

	if (end < start) {
		dev_err(acrn_dev.this_device,
			"Invalid IO range [0x%llx,0x%llx]\n", start, end);
		return -EINVAL;
	}

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return -ENOMEM;

	range->type = type;
	range->start = start;
	range->end = end;

	write_lock_bh(&client->range_lock);
	list_add(&range->list, &client->range_list);
	write_unlock_bh(&client->range_lock);

	return 0;
}

/**
 * acrn_ioreq_range_del() - Del an iorange monitored by an ioreq client
 * @client:	The ioreq client
 * @type:	Type (ACRN_IOREQ_TYPE_MMIO or ACRN_IOREQ_TYPE_PORTIO)
 * @start:	Start address of iorange
 * @end:	End address of iorange
 */
void acrn_ioreq_range_del(struct acrn_ioreq_client *client,
			  u32 type, u64 start, u64 end)
{
	struct acrn_ioreq_range *range;

	write_lock_bh(&client->range_lock);
	list_for_each_entry(range, &client->range_list, list) {
		if (type == range->type &&
		    start == range->start &&
		    end == range->end) {
			list_del(&range->list);
			kfree(range);
			break;
		}
	}
	write_unlock_bh(&client->range_lock);
}

/*
 * ioreq_task() is the execution entity of the handler thread of an I/O
 * client. The handler callback of the I/O client is called within the
 * handler thread.
 */
static int ioreq_task(void *data)
{
	struct acrn_ioreq_client *client = data;
	struct acrn_io_request *req;
	unsigned long *ioreqs_map;
	int vcpu, ret;

	/*
	 * Lockless access to ioreqs_map is safe, because
	 * 1) set_bit() and clear_bit() are atomic operations.
	 * 2) I/O requests arrive serialized. The access flow of ioreqs_map is:
	 *	set_bit() - in ioreq_work handler
	 *	Handler callback handles the corresponding I/O request
	 *	clear_bit() - in the handler thread (including ACRN userspace)
	 *	Mark the corresponding I/O request completed
	 *	Loop again if a new I/O request occurs
	 */
	ioreqs_map = client->ioreqs_map;
	while (!kthread_should_stop()) {
		acrn_ioreq_client_wait(client);
		while (has_pending_request(client)) {
			vcpu = find_first_bit(ioreqs_map, client->vm->vcpu_num);
			req = client->vm->ioreq_buf->req_slot + vcpu;
			ret = client->handler(client, req);
			if (ret < 0) {
				dev_err(acrn_dev.this_device,
					"IO handle failure: %d\n", ret);
				break;
			}
			acrn_ioreq_complete_request(client, vcpu, req);
		}
	}

	return 0;
}

/*
 * For the non-default I/O clients, give them a chance to complete the
 * current I/O requests if there are any. For the default I/O client, it is
 * safe to clear all pending I/O requests because the clearing request comes
 * from ACRN userspace.
 */
void acrn_ioreq_request_clear(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	bool has_pending = false;
	unsigned long vcpu;
	int retry = 10;

	/*
	 * IO requests of this VM will be completed directly in
	 * acrn_ioreq_dispatch if ACRN_VM_FLAG_CLEARING_IOREQ flag is set.
	 */
	set_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);

	/*
	 * acrn_ioreq_request_clear is only called in the VM reset case. Simply
	 * wait 100ms in total for the IO requests' completion.
	 */
	do {
		spin_lock_bh(&vm->ioreq_clients_lock);
		list_for_each_entry(client, &vm->ioreq_clients, list) {
			has_pending = has_pending_request(client);
			if (has_pending)
				break;
		}
		spin_unlock_bh(&vm->ioreq_clients_lock);

		if (has_pending)
			schedule_timeout_interruptible(HZ / 100);
	} while (has_pending && --retry > 0);
	if (retry == 0)
		dev_warn(acrn_dev.this_device,
			 "%s cannot flush pending request!\n", client->name);

	/* Clear all ioreqs belonging to the default client */
	spin_lock_bh(&vm->ioreq_clients_lock);
	client = vm->default_client;
	if (client) {
		for_each_set_bit(vcpu, client->ioreqs_map, ACRN_IO_REQUEST_MAX)
			acrn_ioreq_complete_request(client, vcpu, NULL);
	}
	spin_unlock_bh(&vm->ioreq_clients_lock);

	/* Clear ACRN_VM_FLAG_CLEARING_IOREQ flag after the clearing */
	clear_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags);
}
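
/*
 * Wait until the client has pending I/O requests or is being torn down. The
 * default client is waited on by an ACRN userspace thread; other clients are
 * waited on by their kernel handler threads.
 */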
int acrn_ioreq_client_wait(struct acrn_ioreq_client *client)
{
	if (client->is_default) {
		/*
		 * In the default client, a user space thread waits on the
		 * waitqueue. The is_destroying() check is used to notify user
		 * space the client is going to be destroyed.
		 */
		wait_event_interruptible(client->wq,
					 has_pending_request(client) ||
					 is_destroying(client));
		if (is_destroying(client))
			return -ENODEV;
	} else {
		wait_event_interruptible(client->wq,
					 has_pending_request(client) ||
					 kthread_should_stop());
	}

	return 0;
}
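
/*
 * PCI configuration mechanism #1: port 0xCF8 holds the configuration
 * address, ports 0xCFC-0xCFF form the configuration data window.
 */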
static bool is_cfg_addr(struct acrn_io_request *req)
{
	return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
		(req->reqs.pio_request.address == 0xcf8));
}

static bool is_cfg_data(struct acrn_io_request *req)
{
	return ((req->type == ACRN_IOREQ_TYPE_PORTIO) &&
		((req->reqs.pio_request.address >= 0xcfc) &&
		 (req->reqs.pio_request.address < (0xcfc + 4))));
}

/* The low 8-bit of supported pci_reg addr.*/
#define PCI_LOWREG_MASK  0xFC
/* The high 4-bit of supported pci_reg addr */
#define PCI_HIGHREG_MASK 0xF00
/* Max number of supported functions */
#define PCI_FUNCMAX	7
/* Max number of supported slots */
#define PCI_SLOTMAX	31
/* Max number of supported buses */
#define PCI_BUSMAX	255
#define CONF1_ENABLE	0x80000000UL
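
/*
 * Layout of the mechanism #1 configuration address written to 0xCF8, as it
 * is decoded below: bit 31 is the enable bit (CONF1_ENABLE), bits 23:16 the
 * bus, bits 15:11 the device, bits 10:8 the function and bits 7:2 the
 * register number; bits 27:24 carry the extended register bits that
 * PCI_HIGHREG_MASK folds into the final register offset.
 */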

/*
 * A PCI configuration space access via PIO 0xCF8 and 0xCFC normally takes
 * the following two steps:
 *   1) write the address into port 0xCF8
 *   2) access the data in/from port 0xCFC
 * This function combines such paired PCI configuration space I/O requests
 * into one ACRN_IOREQ_TYPE_PCICFG type I/O request and continues the
 * processing.
 */
static bool handle_cf8cfc(struct acrn_vm *vm,
			  struct acrn_io_request *req, u16 vcpu)
{
	int offset, pci_cfg_addr, pci_reg;
	bool is_handled = false;

	if (is_cfg_addr(req)) {
		WARN_ON(req->reqs.pio_request.size != 4);
		if (req->reqs.pio_request.direction == ACRN_IOREQ_DIR_WRITE)
			vm->pci_conf_addr = req->reqs.pio_request.value;
		else
			req->reqs.pio_request.value = vm->pci_conf_addr;
		is_handled = true;
	} else if (is_cfg_data(req)) {
		if (!(vm->pci_conf_addr & CONF1_ENABLE)) {
			if (req->reqs.pio_request.direction ==
					ACRN_IOREQ_DIR_READ)
				req->reqs.pio_request.value = 0xffffffff;
			is_handled = true;
		} else {
			offset = req->reqs.pio_request.address - 0xcfc;

			req->type = ACRN_IOREQ_TYPE_PCICFG;
			pci_cfg_addr = vm->pci_conf_addr;
			req->reqs.pci_request.bus =
					(pci_cfg_addr >> 16) & PCI_BUSMAX;
			req->reqs.pci_request.dev =
					(pci_cfg_addr >> 11) & PCI_SLOTMAX;
			req->reqs.pci_request.func =
					(pci_cfg_addr >> 8) & PCI_FUNCMAX;
			pci_reg = (pci_cfg_addr & PCI_LOWREG_MASK) +
				  ((pci_cfg_addr >> 16) & PCI_HIGHREG_MASK);
			req->reqs.pci_request.reg = pci_reg + offset;
		}
	}

	if (is_handled)
		ioreq_complete_request(vm, vcpu, req);

	return is_handled;
}

static bool in_range(struct acrn_ioreq_range *range,
		     struct acrn_io_request *req)
{
	bool ret = false;

	if (range->type == req->type) {
		switch (req->type) {
		case ACRN_IOREQ_TYPE_MMIO:
			if (req->reqs.mmio_request.address >= range->start &&
			    (req->reqs.mmio_request.address +
			     req->reqs.mmio_request.size - 1) <= range->end)
				ret = true;
			break;
		case ACRN_IOREQ_TYPE_PORTIO:
			if (req->reqs.pio_request.address >= range->start &&
			    (req->reqs.pio_request.address +
			     req->reqs.pio_request.size - 1) <= range->end)
				ret = true;
			break;
		default:
			break;
		}
	}

	return ret;
}
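
/*
 * Find the I/O client whose registered ranges cover the given request; fall
 * back to the default client when no registered range matches.
 */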
static struct acrn_ioreq_client *find_ioreq_client(struct acrn_vm *vm,
						   struct acrn_io_request *req)
{
	struct acrn_ioreq_client *client, *found = NULL;
	struct acrn_ioreq_range *range;

	lockdep_assert_held(&vm->ioreq_clients_lock);

	list_for_each_entry(client, &vm->ioreq_clients, list) {
		read_lock_bh(&client->range_lock);
		list_for_each_entry(range, &client->range_list, list) {
			if (in_range(range, req)) {
				found = client;
				break;
			}
		}
		read_unlock_bh(&client->range_lock);
		if (found)
			break;
	}
	return found ? found : vm->default_client;
}

/**
 * acrn_ioreq_client_create() - Create an ioreq client
 * @vm:		The VM that this client belongs to
 * @handler:	The ioreq_handler of the ioreq client. acrn_hsm will create a
 *		kernel thread and call the handler to handle I/O requests.
 * @priv:	Private data for the handler
 * @is_default:	If it is the default client
 * @name:	The name of the ioreq client
 *
 * Return: acrn_ioreq_client pointer on success, NULL on error
 */
struct acrn_ioreq_client *acrn_ioreq_client_create(struct acrn_vm *vm,
						   ioreq_handler_t handler,
						   void *priv, bool is_default,
						   const char *name)
{
	struct acrn_ioreq_client *client;

	if (!handler && !is_default) {
		dev_dbg(acrn_dev.this_device,
			"Cannot create non-default client w/o handler!\n");
		return NULL;
	}
	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return NULL;

	client->handler = handler;
	client->vm = vm;
	client->priv = priv;
	client->is_default = is_default;
	if (name)
		strncpy(client->name, name, sizeof(client->name) - 1);
	rwlock_init(&client->range_lock);
	INIT_LIST_HEAD(&client->range_list);
	init_waitqueue_head(&client->wq);

	if (client->handler) {
		client->thread = kthread_run(ioreq_task, client, "VM%u-%s",
					     client->vm->vmid, client->name);
		if (IS_ERR(client->thread)) {
			kfree(client);
			return NULL;
		}
	}

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (is_default)
		vm->default_client = client;
	else
		list_add(&client->list, &vm->ioreq_clients);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	dev_dbg(acrn_dev.this_device, "Created ioreq client %s.\n", name);

	return client;
}
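
/*
 * Sketch of typical usage by an in-kernel device emulator (the handler and
 * range values below are illustrative only, not taken from this file):
 *
 *	client = acrn_ioreq_client_create(vm, my_handler, my_data, false,
 *					  "my-emulator");
 *	if (client)
 *		acrn_ioreq_range_add(client, ACRN_IOREQ_TYPE_MMIO,
 *				     mmio_base, mmio_base + mmio_size - 1);
 */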

/**
 * acrn_ioreq_client_destroy() - Destroy an ioreq client
 * @client:	The ioreq client
 */
void acrn_ioreq_client_destroy(struct acrn_ioreq_client *client)
{
	struct acrn_ioreq_range *range, *next;
	struct acrn_vm *vm = client->vm;

	dev_dbg(acrn_dev.this_device,
		"Destroy ioreq client %s.\n", client->name);
	ioreq_pause();
	set_bit(ACRN_IOREQ_CLIENT_DESTROYING, &client->flags);
	if (client->is_default)
		wake_up_interruptible(&client->wq);
	else
		kthread_stop(client->thread);

	spin_lock_bh(&vm->ioreq_clients_lock);
	if (client->is_default)
		vm->default_client = NULL;
	else
		list_del(&client->list);
	spin_unlock_bh(&vm->ioreq_clients_lock);

	write_lock_bh(&client->range_lock);
	list_for_each_entry_safe(range, next, &client->range_list, list) {
		list_del(&range->list);
		kfree(range);
	}
	write_unlock_bh(&client->range_lock);
	kfree(client);

	ioreq_resume();
}
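
/*
 * Walk every vCPU's slot in the VM's shared I/O request buffer and hand each
 * pending request to the matching client (or complete it directly while the
 * VM is clearing its I/O requests).
 */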
static int acrn_ioreq_dispatch(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client;
	struct acrn_io_request *req;
	int i;

	for (i = 0; i < vm->vcpu_num; i++) {
		req = vm->ioreq_buf->req_slot + i;

		/* barrier the read of processed of acrn_io_request */
		if (smp_load_acquire(&req->processed) ==
				     ACRN_IOREQ_STATE_PENDING) {
			/* Complete the IO request directly in clearing stage */
			if (test_bit(ACRN_VM_FLAG_CLEARING_IOREQ, &vm->flags)) {
				ioreq_complete_request(vm, i, req);
				continue;
			}
			if (handle_cf8cfc(vm, req, i))
				continue;

			spin_lock_bh(&vm->ioreq_clients_lock);
			client = find_ioreq_client(vm, req);
			if (!client) {
				dev_err(acrn_dev.this_device,
					"Failed to find ioreq client!\n");
				spin_unlock_bh(&vm->ioreq_clients_lock);
				return -EINVAL;
			}
			if (!client->is_default)
				req->kernel_handled = 1;
			else
				req->kernel_handled = 0;
			/*
			 * Add barrier() to make sure the writes are done
			 * before setting ACRN_IOREQ_STATE_PROCESSING
			 */
			smp_store_release(&req->processed,
					  ACRN_IOREQ_STATE_PROCESSING);
			set_bit(i, client->ioreqs_map);
			wake_up_interruptible(&client->wq);
			spin_unlock_bh(&vm->ioreq_clients_lock);
		}
	}

	return 0;
}
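
/*
 * Work handler scheduled by the upcall interrupt: dispatch pending I/O
 * requests of the VMs on acrn_vm_list.
 */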
static void ioreq_dispatcher(struct work_struct *work)
{
	struct acrn_vm *vm;

	read_lock(&acrn_vm_list_lock);
	list_for_each_entry(vm, &acrn_vm_list, list) {
		if (!vm->ioreq_buf)
			break;
		acrn_ioreq_dispatch(vm);
	}
	read_unlock(&acrn_vm_list_lock);
}

static void ioreq_intr_handler(void)
{
	queue_work(ioreq_wq, &ioreq_work);
}

static void ioreq_pause(void)
{
	/* Flush and unarm the handler to ensure no I/O requests pending */
	acrn_remove_intr_handler();
	drain_workqueue(ioreq_wq);
}

static void ioreq_resume(void)
{
	/* Re-arm the handler, then queue the work once in case an interrupt was missed while paused */
	acrn_setup_intr_handler(ioreq_intr_handler);
	queue_work(ioreq_wq, &ioreq_work);
}
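
/*
 * Register the upcall interrupt handler and create the high-priority
 * workqueue that runs ioreq_dispatcher().
 */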
int acrn_ioreq_intr_setup(void)
{
	acrn_setup_intr_handler(ioreq_intr_handler);
	ioreq_wq = alloc_workqueue("ioreq_wq",
				   WQ_HIGHPRI | WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ioreq_wq) {
		dev_err(acrn_dev.this_device, "Failed to alloc workqueue!\n");
		acrn_remove_intr_handler();
		return -ENOMEM;
	}
	return 0;
}

void acrn_ioreq_intr_remove(void)
{
	if (ioreq_wq)
		destroy_workqueue(ioreq_wq);
	acrn_remove_intr_handler();
}
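
/*
 * Set up the VM's I/O request buffer: pin the userspace page at buf_vma and
 * pass its physical address to the hypervisor via hcall_set_ioreq_buffer().
 */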
int acrn_ioreq_init(struct acrn_vm *vm, u64 buf_vma)
{
	struct acrn_ioreq_buffer *set_buffer;
	struct page *page;
	int ret;

	if (vm->ioreq_buf)
		return -EEXIST;

	set_buffer = kzalloc(sizeof(*set_buffer), GFP_KERNEL);
	if (!set_buffer)
		return -ENOMEM;

	ret = pin_user_pages_fast(buf_vma, 1,
				  FOLL_WRITE | FOLL_LONGTERM, &page);
	if (unlikely(ret != 1) || !page) {
		dev_err(acrn_dev.this_device, "Failed to pin ioreq page!\n");
		ret = -EFAULT;
		goto free_buf;
	}

	vm->ioreq_buf = page_address(page);
	vm->ioreq_page = page;
	set_buffer->ioreq_buf = page_to_phys(page);
	ret = hcall_set_ioreq_buffer(vm->vmid, virt_to_phys(set_buffer));
	if (ret < 0) {
		dev_err(acrn_dev.this_device, "Failed to init ioreq buffer!\n");
		unpin_user_page(page);
		vm->ioreq_buf = NULL;
		goto free_buf;
	}

	dev_dbg(acrn_dev.this_device,
		"Init ioreq buffer %pK!\n", vm->ioreq_buf);
	ret = 0;
free_buf:
	kfree(set_buffer);
	return ret;
}
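
/*
 * Tear down the VM's I/O request handling: destroy all of its clients and
 * unpin the I/O request buffer page.
 */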
void acrn_ioreq_deinit(struct acrn_vm *vm)
{
	struct acrn_ioreq_client *client, *next;

	dev_dbg(acrn_dev.this_device,
		"Deinit ioreq buffer %pK!\n", vm->ioreq_buf);
	/* Destroy all clients belonging to this VM */
	list_for_each_entry_safe(client, next, &vm->ioreq_clients, list)
		acrn_ioreq_client_destroy(client);
	if (vm->default_client)
		acrn_ioreq_client_destroy(vm->default_client);

	if (vm->ioreq_buf && vm->ioreq_page) {
		unpin_user_page(vm->ioreq_page);
		vm->ioreq_buf = NULL;
	}
}