virtio_pci_modern_dev.c

// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/virtio_pci_modern.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>

/*
 * vp_modern_map_capability - map a part of virtio pci capability
 * @mdev: the modern virtio-pci device
 * @off: offset of the capability
 * @minlen: minimal length of the capability
 * @align: align requirement
 * @start: start from the capability
 * @size: map size
 * @len: the length that is actually mapped
 * @pa: physical address of the capability
 *
 * Returns the io address for the part of the capability
 */
static void __iomem *
vp_modern_map_capability(struct virtio_pci_modern_device *mdev, int off,
			 size_t minlen, u32 align, u32 start, u32 size,
			 size_t *len, resource_size_t *pa)
{
	struct pci_dev *dev = mdev->pci_dev;
	u8 bar;
	u32 offset, length;
	void __iomem *p;

	pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, bar),
			     &bar);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset),
			      &offset);
	pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length),
			      &length);

	/* Check if the BAR may have changed since we requested the region. */
	if (bar >= PCI_STD_NUM_BARS || !(mdev->modern_bars & (1 << bar))) {
		dev_err(&dev->dev,
			"virtio_pci: bar unexpectedly changed to %u\n", bar);
		return NULL;
	}

	if (length <= start) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>%u expected)\n",
			length, start);
		return NULL;
	}

	if (length - start < minlen) {
		dev_err(&dev->dev,
			"virtio_pci: bad capability len %u (>=%zu expected)\n",
			length, minlen);
		return NULL;
	}

	length -= start;

	if (start + offset < offset) {
		dev_err(&dev->dev,
			"virtio_pci: map wrap-around %u+%u\n",
			start, offset);
		return NULL;
	}

	offset += start;

	if (offset & (align - 1)) {
		dev_err(&dev->dev,
			"virtio_pci: offset %u not aligned to %u\n",
			offset, align);
		return NULL;
	}

	if (length > size)
		length = size;

	if (len)
		*len = length;

	if (minlen + offset < minlen ||
	    minlen + offset > pci_resource_len(dev, bar)) {
		dev_err(&dev->dev,
			"virtio_pci: map virtio %zu@%u out of range on bar %i length %lu\n",
			minlen, offset,
			bar, (unsigned long)pci_resource_len(dev, bar));
		return NULL;
	}

	p = pci_iomap_range(dev, bar, offset, length);
	if (!p)
		dev_err(&dev->dev,
			"virtio_pci: unable to map virtio %u@%u on bar %i\n",
			length, offset, bar);
	else if (pa)
		*pa = pci_resource_start(dev, bar) + offset;

	return p;
}

/**
 * virtio_pci_find_capability - walk capabilities to find device info.
 * @dev: the pci device
 * @cfg_type: the VIRTIO_PCI_CAP_* value we seek
 * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO.
 * @bars: the bitmask of BARs
 *
 * Returns offset of the capability, or 0.
 */
static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type,
					     u32 ioresource_types, int *bars)
{
	int pos;

	for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
	     pos > 0;
	     pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) {
		u8 type, bar;

		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 cfg_type),
				     &type);
		pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap,
							 bar),
				     &bar);

		/* Ignore structures with reserved BAR values */
		if (bar >= PCI_STD_NUM_BARS)
			continue;

		if (type == cfg_type) {
			if (pci_resource_len(dev, bar) &&
			    pci_resource_flags(dev, bar) & ioresource_types) {
				*bars |= (1 << bar);
				return pos;
			}
		}
	}
	return 0;
}

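/*
 * Illustrative sketch (not part of the original file): locating the
 * optional device-specific configuration capability, mirroring what
 * vp_modern_probe() does below. example_find_device_cfg is a
 * hypothetical name.
 */
static int example_find_device_cfg(struct pci_dev *pci_dev, int *bars)
{
	int device = virtio_pci_find_capability(pci_dev,
						VIRTIO_PCI_CAP_DEVICE_CFG,
						IORESOURCE_IO | IORESOURCE_MEM,
						bars);

	/* Returns the config-space offset of the capability, or 0. */
	return device;
}
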
/* This is part of the ABI. Don't screw with it. */
static inline void check_offsets(void)
{
	/* Note: disk space was harmed in compilation of this function. */
	BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR !=
		     offsetof(struct virtio_pci_cap, cap_vndr));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT !=
		     offsetof(struct virtio_pci_cap, cap_next));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN !=
		     offsetof(struct virtio_pci_cap, cap_len));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE !=
		     offsetof(struct virtio_pci_cap, cfg_type));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR !=
		     offsetof(struct virtio_pci_cap, bar));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET !=
		     offsetof(struct virtio_pci_cap, offset));
	BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH !=
		     offsetof(struct virtio_pci_cap, length));
	BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT !=
		     offsetof(struct virtio_pci_notify_cap,
			      notify_off_multiplier));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      device_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF !=
		     offsetof(struct virtio_pci_common_cfg, device_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT !=
		     offsetof(struct virtio_pci_common_cfg,
			      guest_feature_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF !=
		     offsetof(struct virtio_pci_common_cfg, guest_feature));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, msix_config));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ !=
		     offsetof(struct virtio_pci_common_cfg, num_queues));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS !=
		     offsetof(struct virtio_pci_common_cfg, device_status));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION !=
		     offsetof(struct virtio_pci_common_cfg, config_generation));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT !=
		     offsetof(struct virtio_pci_common_cfg, queue_select));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE !=
		     offsetof(struct virtio_pci_common_cfg, queue_size));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX !=
		     offsetof(struct virtio_pci_common_cfg, queue_msix_vector));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE !=
		     offsetof(struct virtio_pci_common_cfg, queue_enable));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF !=
		     offsetof(struct virtio_pci_common_cfg, queue_notify_off));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_desc_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_avail_hi));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_lo));
	BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI !=
		     offsetof(struct virtio_pci_common_cfg, queue_used_hi));
}

/*
 * vp_modern_probe: probe the modern virtio pci device, note that the
 * caller is required to enable the PCI device before calling this function.
 * @mdev: the modern virtio-pci device
 *
 * Return 0 on success, -errno on failure
 */
int vp_modern_probe(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;
	int err, common, isr, notify, device;
	u32 notify_length;
	u32 notify_offset;

	check_offsets();

	/* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */
	if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f)
		return -ENODEV;

	if (pci_dev->device < 0x1040) {
		/* Transitional devices: use the PCI subsystem device id as
		 * virtio device id, same as legacy driver always did.
		 */
		mdev->id.device = pci_dev->subsystem_device;
	} else {
		/* Modern devices: simply use PCI device id, but start from 0x1040. */
		mdev->id.device = pci_dev->device - 0x1040;
	}
	mdev->id.vendor = pci_dev->subsystem_vendor;

	/* check for a common config: if not, use legacy mode (bar 0). */
	common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!common) {
		dev_info(&pci_dev->dev,
			 "virtio_pci: leaving for legacy driver\n");
		return -ENODEV;
	}

	/* If common is there, these should be too... */
	isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG,
					 IORESOURCE_IO | IORESOURCE_MEM,
					 &mdev->modern_bars);
	notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);
	if (!isr || !notify) {
		dev_err(&pci_dev->dev,
			"virtio_pci: missing capabilities %i/%i/%i\n",
			common, isr, notify);
		return -EINVAL;
	}

	err = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(64));
	if (err)
		err = dma_set_mask_and_coherent(&pci_dev->dev,
						DMA_BIT_MASK(32));
	if (err)
		dev_warn(&pci_dev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	/* Device capability is only mandatory for devices that have
	 * device-specific configuration.
	 */
	device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG,
					    IORESOURCE_IO | IORESOURCE_MEM,
					    &mdev->modern_bars);

	err = pci_request_selected_regions(pci_dev, mdev->modern_bars,
					   "virtio-pci-modern");
	if (err)
		return err;

	err = -EINVAL;
	mdev->common = vp_modern_map_capability(mdev, common,
				sizeof(struct virtio_pci_common_cfg), 4,
				0, sizeof(struct virtio_pci_modern_common_cfg),
				NULL, NULL);
	if (!mdev->common)
		goto err_map_common;
	mdev->isr = vp_modern_map_capability(mdev, isr, sizeof(u8), 1,
					     0, 1,
					     NULL, NULL);
	if (!mdev->isr)
		goto err_map_isr;

	/* Read notify_off_multiplier from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						notify_off_multiplier),
			      &mdev->notify_offset_multiplier);
	/* Read notify length and offset from config space. */
	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.length),
			      &notify_length);

	pci_read_config_dword(pci_dev,
			      notify + offsetof(struct virtio_pci_notify_cap,
						cap.offset),
			      &notify_offset);

	/* We don't know how many VQs we'll map, ahead of time.
	 * If notify length is small, map it all now.
	 * Otherwise, map each VQ individually later.
	 */
	if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) {
		mdev->notify_base = vp_modern_map_capability(mdev, notify,
							     2, 2,
							     0, notify_length,
							     &mdev->notify_len,
							     &mdev->notify_pa);
		if (!mdev->notify_base)
			goto err_map_notify;
	} else {
		mdev->notify_map_cap = notify;
	}

	/* Again, we don't know how much we should map, but PAGE_SIZE
	 * is more than enough for all existing devices.
	 */
	if (device) {
		mdev->device = vp_modern_map_capability(mdev, device, 0, 4,
							0, PAGE_SIZE,
							&mdev->device_len,
							NULL);
		if (!mdev->device)
			goto err_map_device;
	}

	return 0;

err_map_device:
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
err_map_notify:
	pci_iounmap(pci_dev, mdev->isr);
err_map_isr:
	pci_iounmap(pci_dev, mdev->common);
err_map_common:
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
	return err;
}
EXPORT_SYMBOL_GPL(vp_modern_probe);

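/*
 * Illustrative sketch (not part of the original file): how a driver's
 * PCI probe callback would typically call vp_modern_probe(). The
 * my_driver_data/my_probe names are hypothetical; mdev normally lives
 * in the driver's private per-device state (kzalloc() assumes
 * <linux/slab.h>), and the PCI device must be enabled first.
 */
struct my_driver_data {
	struct virtio_pci_modern_device mdev;
};

static int my_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	struct my_driver_data *data;
	int err;

	err = pci_enable_device(pci_dev);
	if (err)
		return err;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		pci_disable_device(pci_dev);
		return -ENOMEM;
	}

	data->mdev.pci_dev = pci_dev;
	err = vp_modern_probe(&data->mdev); /* maps common/isr/notify */
	if (err) {
		kfree(data);
		pci_disable_device(pci_dev);
		return err;
	}

	pci_set_drvdata(pci_dev, data);
	/* ... device-specific initialization using &data->mdev ... */
	return 0;
}
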
/*
 * vp_modern_remove: remove and cleanup the modern virtio pci device
 * @mdev: the modern virtio-pci device
 */
void vp_modern_remove(struct virtio_pci_modern_device *mdev)
{
	struct pci_dev *pci_dev = mdev->pci_dev;

	if (mdev->device)
		pci_iounmap(pci_dev, mdev->device);
	if (mdev->notify_base)
		pci_iounmap(pci_dev, mdev->notify_base);
	pci_iounmap(pci_dev, mdev->isr);
	pci_iounmap(pci_dev, mdev->common);
	pci_release_selected_regions(pci_dev, mdev->modern_bars);
}
EXPORT_SYMBOL_GPL(vp_modern_remove);

/*
 * vp_modern_get_features - get features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the features read from the device
 */
u64 vp_modern_get_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features = vp_ioread32(&cfg->device_feature);
	vp_iowrite32(1, &cfg->device_feature_select);
	features |= ((u64)vp_ioread32(&cfg->device_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_features);

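/*
 * Illustrative sketch (not part of the original file): feature words
 * are 32 bits wide on the wire, so vp_modern_get_features() stitches
 * two reads into one u64. Testing an individual bit, e.g.
 * VIRTIO_F_VERSION_1 (bit 32, assuming <linux/virtio_config.h> and
 * BIT_ULL() from <linux/bits.h> are available), looks like this:
 */
static bool example_has_version_1(struct virtio_pci_modern_device *mdev)
{
	u64 features = vp_modern_get_features(mdev);

	return features & BIT_ULL(VIRTIO_F_VERSION_1);
}
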
/*
 * vp_modern_get_driver_features - get driver features from device
 * @mdev: the modern virtio-pci device
 *
 * Returns the driver features read from the device
 */
u64 vp_modern_get_driver_features(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;
	u64 features;

	vp_iowrite32(0, &cfg->guest_feature_select);
	features = vp_ioread32(&cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	features |= ((u64)vp_ioread32(&cfg->guest_feature) << 32);

	return features;
}
EXPORT_SYMBOL_GPL(vp_modern_get_driver_features);

/*
 * vp_modern_set_features - set features to device
 * @mdev: the modern virtio-pci device
 * @features: the features set to device
 */
void vp_modern_set_features(struct virtio_pci_modern_device *mdev,
			    u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);
	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}
EXPORT_SYMBOL_GPL(vp_modern_set_features);

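/*
 * Illustrative sketch (not part of the original file): a minimal
 * feature negotiation step. The driver masks the device's offer down
 * to what it supports and writes the result back; supported_features
 * is a hypothetical parameter.
 */
static void example_negotiate(struct virtio_pci_modern_device *mdev,
			      u64 supported_features)
{
	u64 features = vp_modern_get_features(mdev);

	features &= supported_features;
	vp_modern_set_features(mdev, features);
	/* The core then sets FEATURES_OK and re-reads status to confirm. */
}
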
/*
 * vp_modern_generation - get the device generation
 * @mdev: the modern virtio-pci device
 *
 * Returns the generation read from device
 */
u32 vp_modern_generation(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->config_generation);
}
EXPORT_SYMBOL_GPL(vp_modern_generation);

/*
 * vp_modern_get_status - get the device status
 * @mdev: the modern virtio-pci device
 *
 * Returns the status read from device
 */
u8 vp_modern_get_status(struct virtio_pci_modern_device *mdev)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	return vp_ioread8(&cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_get_status);

/*
 * vp_modern_set_status - set status to device
 * @mdev: the modern virtio-pci device
 * @status: the status set to device
 */
void vp_modern_set_status(struct virtio_pci_modern_device *mdev,
			  u8 status)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	vp_iowrite8(status, &cfg->device_status);
}
EXPORT_SYMBOL_GPL(vp_modern_set_status);

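/*
 * Illustrative sketch (not part of the original file): the virtio
 * initialization handshake drives device_status through the
 * VIRTIO_CONFIG_S_* stages (assuming <linux/virtio_config.h> is
 * available). Status bits are cumulative, so each write includes the
 * previously set bits. A condensed version:
 */
static void example_status_handshake(struct virtio_pci_modern_device *mdev)
{
	vp_modern_set_status(mdev, 0); /* reset */
	while (vp_modern_get_status(mdev))
		msleep(1); /* wait for the reset to complete */

	vp_modern_set_status(mdev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
	vp_modern_set_status(mdev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
				   VIRTIO_CONFIG_S_DRIVER);
	/* ... feature negotiation, FEATURES_OK, queue setup ... */
	vp_modern_set_status(mdev, vp_modern_get_status(mdev) |
				   VIRTIO_CONFIG_S_DRIVER_OK);
}
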
/*
 * vp_modern_get_queue_reset - get the queue reset status
 * @mdev: the modern virtio-pci device
 * @index: queue index
 */
int vp_modern_get_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	return vp_ioread16(&cfg->queue_reset);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_reset);

/*
 * vp_modern_set_queue_reset - reset the queue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 */
void vp_modern_set_queue_reset(struct virtio_pci_modern_device *mdev, u16 index)
{
	struct virtio_pci_modern_common_cfg __iomem *cfg;

	cfg = (struct virtio_pci_modern_common_cfg __iomem *)mdev->common;

	vp_iowrite16(index, &cfg->cfg.queue_select);
	vp_iowrite16(1, &cfg->queue_reset);

	while (vp_ioread16(&cfg->queue_reset))
		msleep(1);

	while (vp_ioread16(&cfg->cfg.queue_enable))
		msleep(1);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_reset);

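/*
 * Illustrative sketch (not part of the original file): per-queue reset
 * is only usable when the device offered VIRTIO_F_RING_RESET (assuming
 * <linux/virtio_config.h> and BIT_ULL() are available), so a caller
 * would gate on that feature first.
 */
static void example_reset_one_queue(struct virtio_pci_modern_device *mdev,
				    u16 index)
{
	if (!(vp_modern_get_features(mdev) & BIT_ULL(VIRTIO_F_RING_RESET)))
		return; /* device does not support per-queue reset */

	vp_modern_set_queue_reset(mdev, index);
	/* ... re-create the vring, then re-enable the queue ... */
	vp_modern_set_queue_enable(mdev, index, true);
}
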
/*
 * vp_modern_queue_vector - set the MSIX vector for a specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: queue index
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_queue_vector(struct virtio_pci_modern_device *mdev,
			   u16 index, u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);
	/* Flush the write out to device */
	return vp_ioread16(&cfg->queue_msix_vector);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_vector);

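/*
 * Illustrative sketch (not part of the original file): the read-back is
 * how the driver learns whether the device accepted the vector;
 * VIRTIO_MSI_NO_VECTOR (from <uapi/linux/virtio_pci.h>) signals that it
 * did not. example_assign_vq_vector is a hypothetical name.
 */
static int example_assign_vq_vector(struct virtio_pci_modern_device *mdev,
				    u16 index, u16 vector)
{
	if (vp_modern_queue_vector(mdev, index, vector) ==
	    VIRTIO_MSI_NO_VECTOR)
		return -EBUSY; /* out of MSI-X resources */
	return 0;
}
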
/*
 * vp_modern_config_vector - set the vector for config interrupt
 * @mdev: the modern virtio-pci device
 * @vector: the config vector
 *
 * Returns the config vector read from the device
 */
u16 vp_modern_config_vector(struct virtio_pci_modern_device *mdev,
			    u16 vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	/* Setup the vector used for configuration events */
	vp_iowrite16(vector, &cfg->msix_config);
	/* Verify we had enough resources to assign the vector */
	/* Will also flush the write out to device */
	return vp_ioread16(&cfg->msix_config);
}
EXPORT_SYMBOL_GPL(vp_modern_config_vector);

/*
 * vp_modern_queue_address - set the virtqueue address
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @desc_addr: address of the descriptor area
 * @driver_addr: address of the driver area
 * @device_addr: address of the device area
 */
void vp_modern_queue_address(struct virtio_pci_modern_device *mdev,
			     u16 index, u64 desc_addr, u64 driver_addr,
			     u64 device_addr)
{
	struct virtio_pci_common_cfg __iomem *cfg = mdev->common;

	vp_iowrite16(index, &cfg->queue_select);

	vp_iowrite64_twopart(desc_addr, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_addr, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_addr, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);
}
EXPORT_SYMBOL_GPL(vp_modern_queue_address);

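/*
 * Illustrative sketch (not part of the original file): the typical
 * order for bringing up one virtqueue once the ring memory has been
 * allocated. The desc/driver/device DMA addresses are hypothetical
 * placeholders for the vring's three areas.
 */
static void example_setup_vq(struct virtio_pci_modern_device *mdev, u16 index,
			     u16 num, u64 desc, u64 driver, u64 device)
{
	vp_modern_set_queue_size(mdev, index, num);
	vp_modern_queue_address(mdev, index, desc, driver, device);
	vp_modern_set_queue_enable(mdev, index, true);
}
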
/*
 * vp_modern_set_queue_enable - enable a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @enable: whether the virtqueue should be enabled or not
 */
void vp_modern_set_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index, bool enable)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(enable, &mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_enable);

/*
 * vp_modern_get_queue_enable - check whether a virtqueue is enabled
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns whether a virtqueue is enabled or not
 */
bool vp_modern_get_queue_enable(struct virtio_pci_modern_device *mdev,
				u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_enable);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_enable);

/*
 * vp_modern_set_queue_size - set size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @size: the size of the virtqueue
 */
void vp_modern_set_queue_size(struct virtio_pci_modern_device *mdev,
			      u16 index, u16 size)
{
	vp_iowrite16(index, &mdev->common->queue_select);
	vp_iowrite16(size, &mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_set_queue_size);

/*
 * vp_modern_get_queue_size - get size for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the size of the virtqueue
 */
u16 vp_modern_get_queue_size(struct virtio_pci_modern_device *mdev,
			     u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_size);
}
EXPORT_SYMBOL_GPL(vp_modern_get_queue_size);

/*
 * vp_modern_get_num_queues - get the number of virtqueues
 * @mdev: the modern virtio-pci device
 *
 * Returns the number of virtqueues
 */
u16 vp_modern_get_num_queues(struct virtio_pci_modern_device *mdev)
{
	return vp_ioread16(&mdev->common->num_queues);
}
EXPORT_SYMBOL_GPL(vp_modern_get_num_queues);

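/*
 * Illustrative sketch (not part of the original file): walking every
 * virtqueue the device exposes and reading back each ring's maximum
 * size. example_dump_queue_sizes is a hypothetical name.
 */
static void example_dump_queue_sizes(struct virtio_pci_modern_device *mdev)
{
	u16 i, num = vp_modern_get_num_queues(mdev);

	for (i = 0; i < num; i++)
		dev_info(&mdev->pci_dev->dev, "vq %u: max size %u\n",
			 i, vp_modern_get_queue_size(mdev, i));
}
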
/*
 * vp_modern_get_queue_notify_off - get notification offset for a virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 *
 * Returns the notification offset for a virtqueue
 */
static u16 vp_modern_get_queue_notify_off(struct virtio_pci_modern_device *mdev,
					  u16 index)
{
	vp_iowrite16(index, &mdev->common->queue_select);

	return vp_ioread16(&mdev->common->queue_notify_off);
}

/*
 * vp_modern_map_vq_notify - map notification area for a
 * specific virtqueue
 * @mdev: the modern virtio-pci device
 * @index: the queue index
 * @pa: the pointer to the physical address of the notify area
 *
 * Returns the address of the notification area
 */
void __iomem *vp_modern_map_vq_notify(struct virtio_pci_modern_device *mdev,
				      u16 index, resource_size_t *pa)
{
	u16 off = vp_modern_get_queue_notify_off(mdev, index);

	if (mdev->notify_base) {
		/* offset should not wrap */
		if ((u64)off * mdev->notify_offset_multiplier + 2
			> mdev->notify_len) {
			dev_warn(&mdev->pci_dev->dev,
				 "bad notification offset %u (x %u) for queue %u > %zd",
				 off, mdev->notify_offset_multiplier,
				 index, mdev->notify_len);
			return NULL;
		}
		if (pa)
			*pa = mdev->notify_pa +
			      off * mdev->notify_offset_multiplier;
		return mdev->notify_base + off * mdev->notify_offset_multiplier;
	} else {
		return vp_modern_map_capability(mdev,
				       mdev->notify_map_cap, 2, 2,
				       off * mdev->notify_offset_multiplier, 2,
				       NULL, pa);
	}
}
EXPORT_SYMBOL_GPL(vp_modern_map_vq_notify);

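/*
 * Illustrative sketch (not part of the original file): kicking a
 * virtqueue through the mapped notification area. A real driver maps
 * the area once at queue setup and caches the doorbell pointer (db
 * below is assumed to come from an earlier vp_modern_map_vq_notify()
 * call); the queue index is written as the notification data.
 */
static void example_kick_vq(struct virtio_pci_modern_device *mdev, u16 index,
			    void __iomem *db)
{
	if (db)
		vp_iowrite16(index, db);
}
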
MODULE_VERSION("0.1");
MODULE_DESCRIPTION("Modern Virtio PCI Device");
MODULE_AUTHOR("Jason Wang <[email protected]>");
MODULE_LICENSE("GPL");