vdpa_sim.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDPA device simulator core.
 *
 * Copyright (c) 2020, Red Hat Inc. All rights reserved.
 *     Author: Jason Wang <[email protected]>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/dma-map-ops.h>
#include <linux/vringh.h>
#include <linux/vdpa.h>
#include <linux/vhost_iotlb.h>
#include <linux/iova.h>
#include <uapi/linux/vdpa.h>

#include "vdpa_sim.h"

#define DRV_VERSION  "0.1"
#define DRV_AUTHOR   "Jason Wang <[email protected]>"
#define DRV_DESC     "vDPA Device Simulator core"
#define DRV_LICENSE  "GPL v2"

static int batch_mapping = 1;
module_param(batch_mapping, int, 0444);
MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 - Enable; 0 - Disable");

static int max_iotlb_entries = 2048;
module_param(max_iotlb_entries, int, 0444);
MODULE_PARM_DESC(max_iotlb_entries,
                 "Maximum number of iotlb entries for each address space. 0 means unlimited. (default: 2048)");
#define VDPASIM_QUEUE_ALIGN PAGE_SIZE
#define VDPASIM_QUEUE_MAX 256
#define VDPASIM_VENDOR_ID 0

static struct vdpasim *vdpa_to_sim(struct vdpa_device *vdpa)
{
        return container_of(vdpa, struct vdpasim, vdpa);
}

static struct vdpasim *dev_to_sim(struct device *dev)
{
        struct vdpa_device *vdpa = dev_to_vdpa(dev);

        return vdpa_to_sim(vdpa);
}

static void vdpasim_vq_notify(struct vringh *vring)
{
        struct vdpasim_virtqueue *vq =
                container_of(vring, struct vdpasim_virtqueue, vring);

        if (!vq->cb)
                return;

        vq->cb(vq->private);
}
static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx)
{
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        uint16_t last_avail_idx = vq->vring.last_avail_idx;

        vringh_init_iotlb(&vq->vring, vdpasim->features, vq->num, false,
                          (struct vring_desc *)(uintptr_t)vq->desc_addr,
                          (struct vring_avail *)
                          (uintptr_t)vq->driver_addr,
                          (struct vring_used *)
                          (uintptr_t)vq->device_addr);

        vq->vring.last_avail_idx = last_avail_idx;

        /*
         * Since vdpa_sim does not support receiving inflight descriptors as
         * the destination of a migration, let's set both avail_idx and
         * used_idx the same at vq start. This is how vhost-user works in a
         * VHOST_SET_VRING_BASE call.
         *
         * Although the simple fix is to set last_used_idx at
         * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready.
         */
        vq->vring.last_used_idx = last_avail_idx;
        vq->vring.notify = vdpasim_vq_notify;
}
static void vdpasim_vq_reset(struct vdpasim *vdpasim,
                             struct vdpasim_virtqueue *vq)
{
        vq->ready = false;
        vq->desc_addr = 0;
        vq->driver_addr = 0;
        vq->device_addr = 0;
        vq->cb = NULL;
        vq->private = NULL;
        vringh_init_iotlb(&vq->vring, vdpasim->dev_attr.supported_features,
                          VDPASIM_QUEUE_MAX, false, NULL, NULL, NULL);

        vq->vring.notify = NULL;
}
static void vdpasim_do_reset(struct vdpasim *vdpasim)
{
        int i;

        spin_lock(&vdpasim->iommu_lock);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vdpasim_vq_reset(vdpasim, &vdpasim->vqs[i]);
                vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                 &vdpasim->iommu_lock);
        }

        for (i = 0; i < vdpasim->dev_attr.nas; i++)
                vhost_iotlb_reset(&vdpasim->iommu[i]);

        vdpasim->running = true;
        spin_unlock(&vdpasim->iommu_lock);

        vdpasim->features = 0;
        vdpasim->status = 0;
        ++vdpasim->generation;
}
static int dir_to_perm(enum dma_data_direction dir)
{
        int perm = -EFAULT;

        switch (dir) {
        case DMA_FROM_DEVICE:
                perm = VHOST_MAP_WO;
                break;
        case DMA_TO_DEVICE:
                perm = VHOST_MAP_RO;
                break;
        case DMA_BIDIRECTIONAL:
                perm = VHOST_MAP_RW;
                break;
        default:
                break;
        }

        return perm;
}
static dma_addr_t vdpasim_map_range(struct vdpasim *vdpasim, phys_addr_t paddr,
                                    size_t size, unsigned int perm)
{
        struct iova *iova;
        dma_addr_t dma_addr;
        int ret;

        /* We set the limit_pfn to the maximum (ULONG_MAX - 1) */
        iova = alloc_iova(&vdpasim->iova, size >> iova_shift(&vdpasim->iova),
                          ULONG_MAX - 1, true);
        if (!iova)
                return DMA_MAPPING_ERROR;

        dma_addr = iova_dma_addr(&vdpasim->iova, iova);

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range(&vdpasim->iommu[0], (u64)dma_addr,
                                    (u64)dma_addr + size - 1, (u64)paddr, perm);
        spin_unlock(&vdpasim->iommu_lock);

        if (ret) {
                __free_iova(&vdpasim->iova, iova);
                return DMA_MAPPING_ERROR;
        }

        return dma_addr;
}

static void vdpasim_unmap_range(struct vdpasim *vdpasim, dma_addr_t dma_addr,
                                size_t size)
{
        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(&vdpasim->iommu[0], (u64)dma_addr,
                              (u64)dma_addr + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        free_iova(&vdpasim->iova, iova_pfn(&vdpasim->iova, dma_addr));
}
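
/*
 * Note on granularity: vdpasim_create() initializes the IOVA domain with
 * init_iova_domain(&vdpasim->iova, 1, 0), so iova_shift() is 0 and the
 * "size >> iova_shift()" above allocates in byte units. A hypothetical
 * 4 KiB bounce buffer therefore consumes exactly 4096 IOVA units:
 *
 *      dma_addr_t da = vdpasim_map_range(vdpasim, paddr, 4096, VHOST_MAP_RW);
 *
 *      if (da != DMA_MAPPING_ERROR)
 *              vdpasim_unmap_range(vdpasim, da, 4096);
 */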
static dma_addr_t vdpasim_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        phys_addr_t paddr = page_to_phys(page) + offset;
        int perm = dir_to_perm(dir);

        if (perm < 0)
                return DMA_MAPPING_ERROR;

        return vdpasim_map_range(vdpasim, paddr, size, perm);
}

static void vdpasim_unmap_page(struct device *dev, dma_addr_t dma_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);

        vdpasim_unmap_range(vdpasim, dma_addr, size);
}

static void *vdpasim_alloc_coherent(struct device *dev, size_t size,
                                    dma_addr_t *dma_addr, gfp_t flag,
                                    unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);
        phys_addr_t paddr;
        void *addr;

        addr = kmalloc(size, flag);
        if (!addr) {
                *dma_addr = DMA_MAPPING_ERROR;
                return NULL;
        }

        paddr = virt_to_phys(addr);

        *dma_addr = vdpasim_map_range(vdpasim, paddr, size, VHOST_MAP_RW);
        if (*dma_addr == DMA_MAPPING_ERROR) {
                kfree(addr);
                return NULL;
        }

        return addr;
}

static void vdpasim_free_coherent(struct device *dev, size_t size,
                                  void *vaddr, dma_addr_t dma_addr,
                                  unsigned long attrs)
{
        struct vdpasim *vdpasim = dev_to_sim(dev);

        vdpasim_unmap_range(vdpasim, dma_addr, size);
        kfree(vaddr);
}
static const struct dma_map_ops vdpasim_dma_ops = {
        .map_page = vdpasim_map_page,
        .unmap_page = vdpasim_unmap_page,
        .alloc = vdpasim_alloc_coherent,
        .free = vdpasim_free_coherent,
};
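
/*
 * vdpasim_create() installs these ops on the simulated device via
 * set_dma_ops(), so generic DMA API calls made against the device returned
 * by vdpa_get_dma_dev() (for example, coherent vring allocations done by a
 * bus driver on top) end up here and simply populate iommu[0]. A minimal,
 * hypothetical sketch of that path, where dma_dev is the simulator's
 * DMA device:
 *
 *      dma_addr_t da;
 *      void *buf = dma_alloc_coherent(dma_dev, PAGE_SIZE, &da, GFP_KERNEL);
 *
 *      if (buf)
 *              dma_free_coherent(dma_dev, PAGE_SIZE, buf, da);
 */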
static const struct vdpa_config_ops vdpasim_config_ops;
static const struct vdpa_config_ops vdpasim_batch_config_ops;

struct vdpasim *vdpasim_create(struct vdpasim_dev_attr *dev_attr,
                               const struct vdpa_dev_set_config *config)
{
        const struct vdpa_config_ops *ops;
        struct vdpasim *vdpasim;
        struct device *dev;
        int i, ret = -ENOMEM;

        if (config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) {
                if (config->device_features &
                    ~dev_attr->supported_features)
                        return ERR_PTR(-EINVAL);
                dev_attr->supported_features =
                        config->device_features;
        }

        if (batch_mapping)
                ops = &vdpasim_batch_config_ops;
        else
                ops = &vdpasim_config_ops;

        vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops,
                                    dev_attr->ngroups, dev_attr->nas,
                                    dev_attr->name, false);
        if (IS_ERR(vdpasim)) {
                ret = PTR_ERR(vdpasim);
                goto err_alloc;
        }

        vdpasim->dev_attr = *dev_attr;
        INIT_WORK(&vdpasim->work, dev_attr->work_fn);
        spin_lock_init(&vdpasim->lock);
        spin_lock_init(&vdpasim->iommu_lock);

        dev = &vdpasim->vdpa.dev;
        dev->dma_mask = &dev->coherent_dma_mask;
        if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                goto err_iommu;
        set_dma_ops(dev, &vdpasim_dma_ops);
        vdpasim->vdpa.mdev = dev_attr->mgmt_dev;

        vdpasim->config = kzalloc(dev_attr->config_size, GFP_KERNEL);
        if (!vdpasim->config)
                goto err_iommu;

        vdpasim->vqs = kcalloc(dev_attr->nvqs, sizeof(struct vdpasim_virtqueue),
                               GFP_KERNEL);
        if (!vdpasim->vqs)
                goto err_iommu;

        vdpasim->iommu = kmalloc_array(vdpasim->dev_attr.nas,
                                       sizeof(*vdpasim->iommu), GFP_KERNEL);
        if (!vdpasim->iommu)
                goto err_iommu;

        for (i = 0; i < vdpasim->dev_attr.nas; i++)
                vhost_iotlb_init(&vdpasim->iommu[i], max_iotlb_entries, 0);

        vdpasim->buffer = kvmalloc(dev_attr->buffer_size, GFP_KERNEL);
        if (!vdpasim->buffer)
                goto err_iommu;

        for (i = 0; i < dev_attr->nvqs; i++)
                vringh_set_iotlb(&vdpasim->vqs[i].vring, &vdpasim->iommu[0],
                                 &vdpasim->iommu_lock);

        ret = iova_cache_get();
        if (ret)
                goto err_iommu;

        /* For simplicity we use an IOVA allocator with byte granularity */
        init_iova_domain(&vdpasim->iova, 1, 0);

        vdpasim->vdpa.dma_dev = dev;

        return vdpasim;

err_iommu:
        put_device(dev);
err_alloc:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(vdpasim_create);
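
/*
 * Usage sketch: a device-specific simulator fills a struct vdpasim_dev_attr
 * and calls vdpasim_create() from its management device's dev_add callback.
 * This is a minimal, hypothetical example loosely modeled on the net/blk
 * simulators; every value below is illustrative, and my_mgmt_dev,
 * my_sim_features, my_sim_get_config() and my_sim_work() are assumed
 * helpers defined by the caller:
 *
 *      struct vdpasim_dev_attr dev_attr = {
 *              .mgmt_dev               = &my_mgmt_dev,
 *              .name                   = name,
 *              .id                     = VIRTIO_ID_NET,
 *              .supported_features     = my_sim_features,
 *              .ngroups                = 2,
 *              .nas                    = 2,
 *              .nvqs                   = 3,
 *              .config_size            = sizeof(struct virtio_net_config),
 *              .get_config             = my_sim_get_config,
 *              .buffer_size            = PAGE_SIZE,
 *              .work_fn                = my_sim_work,
 *      };
 *      struct vdpasim *simdev = vdpasim_create(&dev_attr, config);
 *
 *      if (IS_ERR(simdev))
 *              return PTR_ERR(simdev);
 */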
static int vdpasim_set_vq_address(struct vdpa_device *vdpa, u16 idx,
                                  u64 desc_area, u64 driver_area,
                                  u64 device_area)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->desc_addr = desc_area;
        vq->driver_addr = driver_area;
        vq->device_addr = device_area;

        return 0;
}

static void vdpasim_set_vq_num(struct vdpa_device *vdpa, u16 idx, u32 num)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->num = num;
}

static void vdpasim_kick_vq(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        if (vq->ready)
                schedule_work(&vdpasim->work);
}

static void vdpasim_set_vq_cb(struct vdpa_device *vdpa, u16 idx,
                              struct vdpa_callback *cb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        vq->cb = cb->callback;
        vq->private = cb->private;
}

static void vdpasim_set_vq_ready(struct vdpa_device *vdpa, u16 idx, bool ready)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        bool old_ready;

        spin_lock(&vdpasim->lock);
        old_ready = vq->ready;
        vq->ready = ready;
        if (vq->ready && !old_ready) {
                vdpasim_queue_ready(vdpasim, idx);
        }
        spin_unlock(&vdpasim->lock);
}

static bool vdpasim_get_vq_ready(struct vdpa_device *vdpa, u16 idx)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];

        return vq->ready;
}

static int vdpasim_set_vq_state(struct vdpa_device *vdpa, u16 idx,
                                const struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        spin_lock(&vdpasim->lock);
        vrh->last_avail_idx = state->split.avail_index;
        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_get_vq_state(struct vdpa_device *vdpa, u16 idx,
                                struct vdpa_vq_state *state)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vdpasim_virtqueue *vq = &vdpasim->vqs[idx];
        struct vringh *vrh = &vq->vring;

        state->split.avail_index = vrh->last_avail_idx;
        return 0;
}
static u32 vdpasim_get_vq_align(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_ALIGN;
}

static u32 vdpasim_get_vq_group(struct vdpa_device *vdpa, u16 idx)
{
        /* RX and TX belong to group 0, CVQ belongs to group 1 */
        if (idx == 2)
                return 1;
        else
                return 0;
}
static u64 vdpasim_get_device_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.supported_features;
}

static int vdpasim_set_driver_features(struct vdpa_device *vdpa, u64 features)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        /* DMA mapping must be done by driver */
        if (!(features & (1ULL << VIRTIO_F_ACCESS_PLATFORM)))
                return -EINVAL;

        vdpasim->features = features & vdpasim->dev_attr.supported_features;

        return 0;
}

static u64 vdpasim_get_driver_features(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->features;
}

static void vdpasim_set_config_cb(struct vdpa_device *vdpa,
                                  struct vdpa_callback *cb)
{
        /* We don't support config interrupt */
}

static u16 vdpasim_get_vq_num_max(struct vdpa_device *vdpa)
{
        return VDPASIM_QUEUE_MAX;
}

static u32 vdpasim_get_device_id(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.id;
}

static u32 vdpasim_get_vendor_id(struct vdpa_device *vdpa)
{
        return VDPASIM_VENDOR_ID;
}

static u8 vdpasim_get_status(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        u8 status;

        spin_lock(&vdpasim->lock);
        status = vdpasim->status;
        spin_unlock(&vdpasim->lock);

        return status;
}

static void vdpasim_set_status(struct vdpa_device *vdpa, u8 status)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->status = status;
        spin_unlock(&vdpasim->lock);
}

static int vdpasim_reset(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->status = 0;
        vdpasim_do_reset(vdpasim);
        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_suspend(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        spin_lock(&vdpasim->lock);
        vdpasim->running = false;
        spin_unlock(&vdpasim->lock);

        return 0;
}

static size_t vdpasim_get_config_size(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->dev_attr.config_size;
}

static void vdpasim_get_config(struct vdpa_device *vdpa, unsigned int offset,
                               void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        if (vdpasim->dev_attr.get_config)
                vdpasim->dev_attr.get_config(vdpasim, vdpasim->config);

        memcpy(buf, vdpasim->config + offset, len);
}

static void vdpasim_set_config(struct vdpa_device *vdpa, unsigned int offset,
                               const void *buf, unsigned int len)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (offset + len > vdpasim->dev_attr.config_size)
                return;

        memcpy(vdpasim->config + offset, buf, len);

        if (vdpasim->dev_attr.set_config)
                vdpasim->dev_attr.set_config(vdpasim, vdpasim->config);
}

static u32 vdpasim_get_generation(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        return vdpasim->generation;
}

static struct vdpa_iova_range vdpasim_get_iova_range(struct vdpa_device *vdpa)
{
        struct vdpa_iova_range range = {
                .first = 0ULL,
                .last = ULLONG_MAX,
        };

        return range;
}
static int vdpasim_set_group_asid(struct vdpa_device *vdpa, unsigned int group,
                                  unsigned int asid)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb *iommu;
        int i;

        if (group > vdpasim->dev_attr.ngroups)
                return -EINVAL;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        iommu = &vdpasim->iommu[asid];

        spin_lock(&vdpasim->lock);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++)
                if (vdpasim_get_vq_group(vdpa, i) == group)
                        vringh_set_iotlb(&vdpasim->vqs[i].vring, iommu,
                                         &vdpasim->iommu_lock);

        spin_unlock(&vdpasim->lock);

        return 0;
}

static int vdpasim_set_map(struct vdpa_device *vdpa, unsigned int asid,
                           struct vhost_iotlb *iotlb)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        struct vhost_iotlb_map *map;
        struct vhost_iotlb *iommu;
        u64 start = 0ULL, last = 0ULL - 1;
        int ret;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);

        iommu = &vdpasim->iommu[asid];
        vhost_iotlb_reset(iommu);

        for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
             map = vhost_iotlb_itree_next(map, start, last)) {
                ret = vhost_iotlb_add_range(iommu, map->start,
                                            map->last, map->addr, map->perm);
                if (ret)
                        goto err;
        }
        spin_unlock(&vdpasim->iommu_lock);

        return 0;

err:
        vhost_iotlb_reset(iommu);
        spin_unlock(&vdpasim->iommu_lock);
        return ret;
}
static int vdpasim_dma_map(struct vdpa_device *vdpa, unsigned int asid,
                           u64 iova, u64 size,
                           u64 pa, u32 perm, void *opaque)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int ret;

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);
        ret = vhost_iotlb_add_range_ctx(&vdpasim->iommu[asid], iova,
                                        iova + size - 1, pa, perm, opaque);
        spin_unlock(&vdpasim->iommu_lock);

        return ret;
}

static int vdpasim_dma_unmap(struct vdpa_device *vdpa, unsigned int asid,
                             u64 iova, u64 size)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);

        if (asid >= vdpasim->dev_attr.nas)
                return -EINVAL;

        spin_lock(&vdpasim->iommu_lock);
        vhost_iotlb_del_range(&vdpasim->iommu[asid], iova, iova + size - 1);
        spin_unlock(&vdpasim->iommu_lock);

        return 0;
}

static void vdpasim_free(struct vdpa_device *vdpa)
{
        struct vdpasim *vdpasim = vdpa_to_sim(vdpa);
        int i;

        cancel_work_sync(&vdpasim->work);

        for (i = 0; i < vdpasim->dev_attr.nvqs; i++) {
                vringh_kiov_cleanup(&vdpasim->vqs[i].out_iov);
                vringh_kiov_cleanup(&vdpasim->vqs[i].in_iov);
        }

        if (vdpa_get_dma_dev(vdpa)) {
                put_iova_domain(&vdpasim->iova);
                iova_cache_put();
        }

        kvfree(vdpasim->buffer);
        for (i = 0; i < vdpasim->dev_attr.nas; i++)
                vhost_iotlb_reset(&vdpasim->iommu[i]);
        kfree(vdpasim->iommu);
        kfree(vdpasim->vqs);
        kfree(vdpasim->config);
}
static const struct vdpa_config_ops vdpasim_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_vq_group           = vdpasim_get_vq_group,
        .get_device_features    = vdpasim_get_device_features,
        .set_driver_features    = vdpasim_set_driver_features,
        .get_driver_features    = vdpasim_get_driver_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .reset                  = vdpasim_reset,
        .suspend                = vdpasim_suspend,
        .get_config_size        = vdpasim_get_config_size,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
        .set_group_asid         = vdpasim_set_group_asid,
        .dma_map                = vdpasim_dma_map,
        .dma_unmap              = vdpasim_dma_unmap,
        .free                   = vdpasim_free,
};

static const struct vdpa_config_ops vdpasim_batch_config_ops = {
        .set_vq_address         = vdpasim_set_vq_address,
        .set_vq_num             = vdpasim_set_vq_num,
        .kick_vq                = vdpasim_kick_vq,
        .set_vq_cb              = vdpasim_set_vq_cb,
        .set_vq_ready           = vdpasim_set_vq_ready,
        .get_vq_ready           = vdpasim_get_vq_ready,
        .set_vq_state           = vdpasim_set_vq_state,
        .get_vq_state           = vdpasim_get_vq_state,
        .get_vq_align           = vdpasim_get_vq_align,
        .get_vq_group           = vdpasim_get_vq_group,
        .get_device_features    = vdpasim_get_device_features,
        .set_driver_features    = vdpasim_set_driver_features,
        .get_driver_features    = vdpasim_get_driver_features,
        .set_config_cb          = vdpasim_set_config_cb,
        .get_vq_num_max         = vdpasim_get_vq_num_max,
        .get_device_id          = vdpasim_get_device_id,
        .get_vendor_id          = vdpasim_get_vendor_id,
        .get_status             = vdpasim_get_status,
        .set_status             = vdpasim_set_status,
        .reset                  = vdpasim_reset,
        .suspend                = vdpasim_suspend,
        .get_config_size        = vdpasim_get_config_size,
        .get_config             = vdpasim_get_config,
        .set_config             = vdpasim_set_config,
        .get_generation         = vdpasim_get_generation,
        .get_iova_range         = vdpasim_get_iova_range,
        .set_group_asid         = vdpasim_set_group_asid,
        .set_map                = vdpasim_set_map,
        .free                   = vdpasim_free,
};
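
/*
 * The two ops tables differ only in how mappings reach the simulator:
 * vdpasim_config_ops exposes incremental .dma_map/.dma_unmap updates, while
 * vdpasim_batch_config_ops exposes .set_map, which rebuilds an address
 * space's iotlb from the caller's tree in one call. vdpasim_create() picks
 * one of the two based on the batch_mapping module parameter.
 */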
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE(DRV_LICENSE);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESC);