virtio_mmio.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, eg.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		.num_resources = 2,
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.end = 0x1001e0ff,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.start = 42 + 32,
 *				.end = 42 + 32,
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, eg.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *		[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *		<size>     := size (can use standard suffixes like K, M or G)
 *		<baseaddr> := physical base address
 *		<irq>      := interrupt number (as passed to request_irq())
 *		<id>       := (optional) platform device id
 *    eg.:
 *		virtio_mmio.device=0x100@0x100b0000:48 \
 *		virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 */

#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>
#include <linux/delay.h>

#ifdef CONFIG_GH_VIRTIO_DEBUG
#define CREATE_TRACE_POINTS
#include <trace/events/gh_virtio_frontend.h>
#undef CREATE_TRACE_POINTS
#endif

#ifdef CONFIG_VIRTIO_MMIO_SWIOTLB
#include <linux/swiotlb.h>
#include <linux/dma-direct.h>
#endif

/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE

#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

#ifdef CONFIG_VIRTIO_MMIO_SWIOTLB
struct virtio_mem_pool {
	void *virt_base;
	dma_addr_t dma_base;
	size_t size;
	unsigned long *bitmap;
	spinlock_t lock;
};
#endif
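
/*
 * Note on the bounce-pool layout above (used by the SWIOTLB DMA ops further
 * down): virt_base/dma_base point at the start of the shared carveout,
 * size is the pool length in pages (not bytes), and bitmap tracks per-page
 * allocations handed out by virtio_alloc_coherent() and returned by
 * virtio_free_coherent().
 */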

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
#ifdef CONFIG_VIRTIO_MMIO_SWIOTLB
	struct virtio_mem_pool *mem_pool;
#endif
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};


/* Configuration interface */

static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}
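
/*
 * Example of the banked access above: DEVICE_FEATURES_SEL selects which
 * 32-bit word of the 64-bit device feature set DEVICE_FEATURES returns,
 * so bank 1 holds feature bits 63:32 and bank 0 holds bits 31:0. E.g.
 * VIRTIO_F_VERSION_1 (feature bit 32) shows up as bit 0 of bank 1.
 */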

static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

static void vm_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

static void vm_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);

#ifdef CONFIG_VIRTIO_MMIO_POLL_RESET
	/* After writing 0 to device_status, the driver MUST wait for a read of
	 * device_status to return 0 before reinitializing the device.
	 */
	while (readl(vm_dev->base + VIRTIO_MMIO_STATUS))
		usleep_range(1000, 1100);
#endif
}

/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

#ifdef CONFIG_GH_VIRTIO_DEBUG
	trace_virtio_mmio_vm_notify(vq->vdev->index, vq->index);
#endif
	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
#ifdef CONFIG_GH_VIRTIO_DEBUG
	trace_virtio_mmio_vm_interrupt(vm_dev->vdev.index, status);
#endif
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}

static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}

static void vm_synchronize_cbs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
}

static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				    true, true, ctx, vm_notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->num_max = num;

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char * const names[],
		       const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	if (irq < 0)
		return irq;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
		enable_irq_wake(irq);

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

static bool vm_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 len, addr;

	/* Select the region we're interested in */
	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);

	/* Read the region size */
	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;

	region->len = len;

	/* Check if region length is -1. If that's the case, the shared memory
	 * region does not exist and there is no need to proceed further.
	 */
	if (len == ~(u64)0)
		return false;

	/* Read the region base address */
	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;

	region->addr = addr;

	return true;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get		= vm_get,
	.set		= vm_set,
	.generation	= vm_generation,
	.get_status	= vm_get_status,
	.set_status	= vm_set_status,
	.reset		= vm_reset,
	.find_vqs	= vm_find_vqs,
	.del_vqs	= vm_del_vqs,
	.get_features	= vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name	= vm_bus_name,
	.get_shm_region = vm_get_shm_region,
	.synchronize_cbs = vm_synchronize_cbs,
};

#ifdef CONFIG_PM_SLEEP
static int virtio_mmio_freeze(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	return virtio_device_freeze(&vm_dev->vdev);
}

static int virtio_mmio_restore(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	return virtio_device_restore(&vm_dev->vdev);
}

static const struct dev_pm_ops virtio_mmio_pm_ops = {
	.freeze = virtio_mmio_freeze,
	.restore = virtio_mmio_restore,
};
#endif

static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	kfree(vm_dev);
}

#ifdef CONFIG_VIRTIO_MMIO_SWIOTLB
static phys_addr_t virtio_swiotlb_base;
static phys_addr_t virtio_swiotlb_dma_base;
static size_t virtio_swiotlb_size;

static int virtio_get_shm(struct device_node *np, phys_addr_t *base,
			  phys_addr_t *dma_base, size_t *size)
{
	const __be64 *val;
	int len;
	struct device_node *shm_np;
	struct resource res_mem;
	int ret;

	shm_np = of_parse_phandle(np, "memory-region", 0);
	if (!shm_np) {
		pr_err("%s: Invalid memory-region\n", __func__);
		return -EINVAL;
	}

	ret = of_address_to_resource(shm_np, 0, &res_mem);
	if (ret) {
		pr_err("%s: of_address_to_resource failed ret %d\n", __func__, ret);
		of_node_put(shm_np);
		return -EINVAL;
	}

	*base = res_mem.start;
	*size = resource_size(&res_mem);
	of_node_put(shm_np);

	if (!*base || !*size) {
		pr_err("%s: Invalid memory-region base %pa size %zu\n", __func__, base, *size);
		return -EINVAL;
	}

	val = of_get_property(np, "dma_base", &len);
	if (!val || len != 8) {
		pr_err("%s: Invalid dma_base prop val %p size %d\n", __func__, val, len);
		return -EINVAL;
	}

	*dma_base = __be64_to_cpup(val);
	pr_debug("%s: shm base %pa size %zx dma_base %pa\n", __func__, base, *size, dma_base);

	return 0;
}
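
/*
 * Illustrative device-tree layout that the lookups above and in
 * virtio_swiotlb_init()/get_ring_base() expect. Node names and addresses
 * below are made up for the example, not taken from a real board file:
 * a /swiotlb node whose "memory-region" phandle points at a reserved-memory
 * carveout, plus an 8-byte "dma_base" property giving the address the
 * backend uses for the same region:
 *
 *	reserved-memory {
 *		virtio_shm: virtio_shm@80000000 {
 *			reg = <0x0 0x80000000 0x0 0x400000>;
 *			no-map;
 *		};
 *	};
 *
 *	swiotlb {
 *		memory-region = <&virtio_shm>;
 *		dma_base = <0x0 0x80000000>;
 *	};
 *
 * Per-device ring pools handled by get_ring_base()/setup_virtio_dma_ops()
 * below use the same two properties on the "virtio,mmio" node itself.
 */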

static int __init virtio_swiotlb_init(void)
{
	void __iomem *vbase;
	int ret;
	unsigned long nslabs;
	phys_addr_t base;
	phys_addr_t dma_base;
	size_t size;
	struct device_node *np;

	np = of_find_node_by_path("/swiotlb");
	if (!np)
		return 0;

	ret = virtio_get_shm(np, &base, &dma_base, &size);
	of_node_put(np);
	if (ret)
		return ret;

	nslabs = (size >> IO_TLB_SHIFT);
	nslabs = ALIGN_DOWN(nslabs, IO_TLB_SEGSIZE);
	if (!nslabs)
		return -EINVAL;

	vbase = ioremap_cache(base, size);
	if (!vbase)
		return -EINVAL;

	ret = swiotlb_late_init_with_tblpaddr(vbase, dma_base, nslabs);
	if (ret) {
		iounmap(vbase);
		return ret;
	}

	virtio_swiotlb_base = base;
	virtio_swiotlb_dma_base = dma_base;
	virtio_swiotlb_size = size;

	return 0;
}
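
/*
 * Worked example for the slab arithmetic above, assuming the usual
 * IO_TLB_SHIFT of 11 (2 KiB slabs) and IO_TLB_SEGSIZE of 128: a 4 MiB
 * carveout gives 4 MiB >> 11 = 2048 slabs, already a multiple of 128, so
 * ALIGN_DOWN() keeps all of them. A 255 KiB region would yield 127 slabs,
 * round down to 0 and make the init fail with -EINVAL.
 */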

static void *virtio_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
	struct platform_device *pdev =
			container_of(dev, struct platform_device, dev);
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
	int pageno;
	unsigned long irq_flags;
	int order = get_order(size);
	void *ret = NULL;
	struct virtio_mem_pool *mem = vm_dev->mem_pool;

	if (!mem || (size > (mem->size << PAGE_SHIFT)))
		return NULL;

	spin_lock_irqsave(&mem->lock, irq_flags);
	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
	if (pageno >= 0) {
		*dma_handle = mem->dma_base + (pageno << PAGE_SHIFT);
		ret = mem->virt_base + (pageno << PAGE_SHIFT);
	}
	spin_unlock_irqrestore(&mem->lock, irq_flags);

	return ret;
}
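
/*
 * Allocation granularity note for the helper above:
 * bitmap_find_free_region() hands out naturally aligned power-of-two runs
 * of pages, so e.g. a 3-page (12 KiB with 4 KiB pages) request has
 * get_order() == 2 and consumes 4 pages of the pool; the matching
 * virtio_free_coherent() below releases the same order-sized region.
 */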

static void virtio_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	struct platform_device *pdev =
			container_of(dev, struct platform_device, dev);
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
	int pageno;
	unsigned long flags;
	struct virtio_mem_pool *mem = vm_dev->mem_pool;

	if (!mem)
		return;

	spin_lock_irqsave(&mem->lock, flags);
	if (vaddr >= mem->virt_base &&
	    vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		pageno = (vaddr - mem->virt_base) >> PAGE_SHIFT;
		bitmap_release_region(mem->bitmap, pageno, get_order(size));
	}
	spin_unlock_irqrestore(&mem->lock, flags);
}

static dma_addr_t virtio_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	return swiotlb_map(dev, phys, size, dir, attrs);
}

static void virtio_unmap_page(struct device *dev, dma_addr_t dev_addr,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	BUG_ON(!is_swiotlb_buffer(dev, dev_addr));
	swiotlb_tbl_unmap_single(dev, dev_addr, size, dir, attrs);
}

static size_t virtio_max_mapping_size(struct device *dev)
{
	return SZ_4K;
}

static const struct dma_map_ops virtio_dma_ops = {
	.alloc			= virtio_alloc_coherent,
	.free			= virtio_free_coherent,
	.map_page		= virtio_map_page,
	.unmap_page		= virtio_unmap_page,
	.max_mapping_size	= virtio_max_mapping_size,
};
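
/*
 * Design note on the ops table above: capping max_mapping_size at SZ_4K
 * means virtio drivers that query virtio_max_dma_size() /
 * dma_max_mapping_size() split their buffers into page-sized segments, so
 * every streaming mapping fits in a single bounce through the SWIOTLB
 * buffer used by virtio_map_page().
 */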

static inline int
get_ring_base(struct platform_device *pdev, phys_addr_t *ring_base,
	      phys_addr_t *ring_dma_base, size_t *ring_size)
{
	int ret;

	if (!virtio_swiotlb_base)
		return 0;

	ret = virtio_get_shm(pdev->dev.of_node, ring_base, ring_dma_base, ring_size);

	return ret ? 0 : 1;
}

static int setup_virtio_dma_ops(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);
	phys_addr_t ring_base, ring_dma_base;
	size_t ring_size, pages;
	struct virtio_mem_pool *vmem_pool;
	unsigned long bitmap_size;

	if (!vm_dev || !get_ring_base(pdev, &ring_base,
				      &ring_dma_base, &ring_size))
		return 0;

	vmem_pool = devm_kzalloc(&pdev->dev, sizeof(struct virtio_mem_pool),
				 GFP_KERNEL);
	if (!vmem_pool)
		return -ENOMEM;

	pages = ring_size >> PAGE_SHIFT;
	if (!pages) {
		pr_err("%s: Ring size too small\n", __func__);
		return -EINVAL;
	}

	if (ULONG_MAX / sizeof(unsigned long) < BITS_TO_LONGS(pages)) {
		pr_err("%s: Ring size too large %zu\n", __func__, ring_size);
		return -EINVAL;
	}

	bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
	vmem_pool->bitmap = devm_kzalloc(&pdev->dev, bitmap_size, GFP_KERNEL);
	if (!vmem_pool->bitmap)
		return -ENOMEM;

	/* Note: Mapped as 'normal/cacheable' memory */
	vmem_pool->virt_base = ioremap_cache(ring_base, ring_size);
	if (!vmem_pool->virt_base) {
		pr_err("Unable to ioremap %pK size %zx\n",
		       (void *)ring_base, ring_size);
		return -ENOMEM;
	}
	memset(vmem_pool->virt_base, 0, ring_size);

	vmem_pool->dma_base = ring_dma_base;
	vmem_pool->size = pages;
	spin_lock_init(&vmem_pool->lock);
	vm_dev->mem_pool = vmem_pool;
	set_dma_ops(&pdev->dev, &virtio_dma_ops);

	dev_dbg(&pdev->dev, "virtio_mem_pool: virt_base %pK pages %zx\n",
		vmem_pool->virt_base, pages);

	return 0;
}

#else /* CONFIG_VIRTIO_MMIO_SWIOTLB */

static inline int setup_virtio_dma_ops(struct platform_device *pdev)
{
	return 0;
}

static inline int virtio_swiotlb_init(void)
{
	return 0;
}
#endif /* CONFIG_VIRTIO_MMIO_SWIOTLB */

/* Platform device */

static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	unsigned long magic;
	int rc;

	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vm_dev->base)) {
		rc = PTR_ERR(vm_dev->base);
		goto free_vm_dev;
	}

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		rc = -ENODEV;
		goto free_vm_dev;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		rc = -ENXIO;
		goto free_vm_dev;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		rc = -ENODEV;
		goto free_vm_dev;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressable as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = setup_virtio_dma_ops(pdev);
	if (rc) {
		put_device(&vm_dev->vdev.dev);
		return rc;
	}

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;

free_vm_dev:
	kfree(vm_dev);
	return rc;
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}


/* Devices list parameter */

#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed] || irq == 0)
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			put_device(&vm_cmdline_parent);
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		vm_cmdline_id,
		(unsigned long long)resources[0].start,
		(unsigned long long)resources[0].end,
		(int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}
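
/*
 * Worked example for the parser above, using one of the strings from the
 * header comment: "1K@0x1001e000:74" is split by memparse() into size 1024,
 * then sscanf() consumes "@0x1001e000:74", giving base 0x1001e000 and
 * irq 74; no ":<id>" chunk is present, so the device gets the current
 * (auto-incremented) vm_cmdline_id. The registered devices can be read back
 * (by root) from /sys/module/virtio_mmio/parameters/device in the same
 * syntax, courtesy of vm_cmdline_get() below.
 */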

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};
device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif

/* Platform driver */

static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif

static struct platform_driver virtio_mmio_driver = {
	.probe		= virtio_mmio_probe,
	.remove		= virtio_mmio_remove,
	.driver		= {
		.name	= "virtio-mmio",
		.of_match_table	= virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
#if IS_ENABLED(CONFIG_PM_SLEEP) && !IS_ENABLED(CONFIG_VIRTIO_MMIO_SWIOTLB)
		.pm	= &virtio_mmio_pm_ops,
#endif
	},
};

static int __init virtio_mmio_init(void)
{
	int ret;

	ret = virtio_swiotlb_init();
	if (ret)
		return ret;

	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <[email protected]>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");