// SPDX-License-Identifier: GPL-2.0
/*
 * Virtio driver for the paravirtualized IOMMU
 *
 * Copyright (C) 2019 Arm Limited
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/delay.h>
#include <linux/dma-map-ops.h>
#include <linux/freezer.h>
#include <linux/interval_tree.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ids.h>
#include <linux/wait.h>
#include <uapi/linux/virtio_iommu.h>

#include "dma-iommu.h"
#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000

#define VIOMMU_REQUEST_VQ		0
#define VIOMMU_EVENT_VQ			1
#define VIOMMU_NR_VQS			2
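
/* State shared by all endpoints behind one virtio-iommu device */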
struct viommu_dev {
	struct iommu_device		iommu;
	struct device			*dev;
	struct virtio_device		*vdev;

	struct ida			domain_ids;

	struct virtqueue		*vqs[VIOMMU_NR_VQS];
	spinlock_t			request_lock;
	struct list_head		requests;
	void				*evts;

	/* Device configuration */
	struct iommu_domain_geometry	geometry;
	u64				pgsize_bitmap;
	u32				first_domain;
	u32				last_domain;
	/* Supported MAP flags */
	u32				map_flags;
	u32				probe_size;
};

struct viommu_mapping {
	phys_addr_t			paddr;
	struct interval_tree_node	iova;
	u32				flags;
};

struct viommu_domain {
	struct iommu_domain		domain;
	struct viommu_dev		*viommu;
	struct mutex			mutex; /* protects viommu pointer */
	unsigned int			id;
	u32				map_flags;

	spinlock_t			mappings_lock;
	struct rb_root_cached		mappings;

	unsigned long			nr_endpoints;
	bool				bypass;
};

struct viommu_endpoint {
	struct device			*dev;
	struct viommu_dev		*viommu;
	struct viommu_domain		*vdomain;
	struct list_head		resv_regions;
};
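
/*
 * A request buffer consists of a device-readable head (the request itself)
 * followed by a device-writable tail (the status, preceded by the probe
 * output for PROBE requests). write_offset marks the boundary between the two
 * parts, and writeback points into the caller's buffer so the device's answer
 * can be copied back on completion.
 */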
struct viommu_request {
	struct list_head		list;
	void				*writeback;
	unsigned int			write_offset;
	unsigned int			len;
	char				buf[];
};

#define VIOMMU_FAULT_RESV_MASK		0xffffff00
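
/*
 * The head word of an event overlays the fault's 8-bit reason field and the
 * three reserved bytes that follow it (virtio fields are little-endian), so
 * an event with any VIOMMU_FAULT_RESV_MASK bit set is not a valid fault
 * record and is ignored by viommu_event_handler().
 */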
struct viommu_event {
	union {
		u32			head;
		struct virtio_iommu_fault fault;
	};
};

#define to_viommu_domain(domain)	\
	container_of(domain, struct viommu_domain, domain)

static int viommu_get_req_errno(void *buf, size_t len)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	switch (tail->status) {
	case VIRTIO_IOMMU_S_OK:
		return 0;
	case VIRTIO_IOMMU_S_UNSUPP:
		return -ENOSYS;
	case VIRTIO_IOMMU_S_INVAL:
		return -EINVAL;
	case VIRTIO_IOMMU_S_RANGE:
		return -ERANGE;
	case VIRTIO_IOMMU_S_NOENT:
		return -ENOENT;
	case VIRTIO_IOMMU_S_FAULT:
		return -EFAULT;
	case VIRTIO_IOMMU_S_NOMEM:
		return -ENOMEM;
	case VIRTIO_IOMMU_S_IOERR:
	case VIRTIO_IOMMU_S_DEVERR:
	default:
		return -EIO;
	}
}

static void viommu_set_req_status(void *buf, size_t len, int status)
{
	struct virtio_iommu_req_tail *tail = buf + len - sizeof(*tail);

	tail->status = status;
}
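
/*
 * viommu_get_write_desc_offset - Offset of the device-writable part within a
 * request buffer. Everything before the offset is device-readable; for PROBE
 * requests the writable part also contains the probe output, ahead of the
 * status tail.
 */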
static off_t viommu_get_write_desc_offset(struct viommu_dev *viommu,
					  struct virtio_iommu_req_head *req,
					  size_t len)
{
	size_t tail_size = sizeof(struct virtio_iommu_req_tail);

	if (req->type == VIRTIO_IOMMU_T_PROBE)
		return len - viommu->probe_size - tail_size;

	return len - tail_size;
}

/*
 * __viommu_sync_req - Complete all in-flight requests
 *
 * Wait for all added requests to complete. When this function returns, all
 * requests that were in-flight at the time of the call have completed.
 */
static int __viommu_sync_req(struct viommu_dev *viommu)
{
	unsigned int len;
	size_t write_len;
	struct viommu_request *req;
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	virtqueue_kick(vq);

	while (!list_empty(&viommu->requests)) {
		len = 0;
		req = virtqueue_get_buf(vq, &len);
		if (!req)
			continue;

		if (!len)
			viommu_set_req_status(req->buf, req->len,
					      VIRTIO_IOMMU_S_IOERR);

		write_len = req->len - req->write_offset;
		if (req->writeback && len == write_len)
			memcpy(req->writeback, req->buf + req->write_offset,
			       write_len);

		list_del(&req->list);
		kfree(req);
	}

	return 0;
}

static int viommu_sync_req(struct viommu_dev *viommu)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_sync_req(viommu);
	if (ret)
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * __viommu_add_request - Add one request to the queue
 * @buf: pointer to the request buffer
 * @len: length of the request buffer
 * @writeback: copy data back to the buffer when the request completes.
 *
 * Add a request to the queue. Only synchronize the queue if it's already full.
 * Otherwise don't kick the queue nor wait for requests to complete.
 *
 * When @writeback is true, data written by the device, including the request
 * status, is copied into @buf after the request completes. This is unsafe if
 * the caller allocates @buf on stack and drops the lock between add_req() and
 * sync_req().
 *
 * Return 0 if the request was successfully added to the queue.
 */
static int __viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len,
			    bool writeback)
{
	int ret;
	off_t write_offset;
	struct viommu_request *req;
	struct scatterlist top_sg, bottom_sg;
	struct scatterlist *sg[2] = { &top_sg, &bottom_sg };
	struct virtqueue *vq = viommu->vqs[VIOMMU_REQUEST_VQ];

	assert_spin_locked(&viommu->request_lock);

	write_offset = viommu_get_write_desc_offset(viommu, buf, len);
	if (write_offset <= 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req) + len, GFP_ATOMIC);
	if (!req)
		return -ENOMEM;

	req->len = len;
	if (writeback) {
		req->writeback = buf + write_offset;
		req->write_offset = write_offset;
	}
	memcpy(&req->buf, buf, write_offset);
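
	/*
	 * Split the buffer into a device-readable descriptor (the request
	 * head) and a device-writable one (the probe output, if any, and the
	 * status tail).
	 */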
	sg_init_one(&top_sg, req->buf, write_offset);
	sg_init_one(&bottom_sg, req->buf + write_offset, len - write_offset);

	ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		/* If the queue is full, sync and retry */
		if (!__viommu_sync_req(viommu))
			ret = virtqueue_add_sgs(vq, sg, 1, 1, req, GFP_ATOMIC);
	}
	if (ret)
		goto err_free;

	list_add_tail(&req->list, &viommu->requests);
	return 0;

err_free:
	kfree(req);
	return ret;
}

static int viommu_add_req(struct viommu_dev *viommu, void *buf, size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);
	ret = __viommu_add_req(viommu, buf, len, false);
	if (ret)
		dev_dbg(viommu->dev, "could not add request: %d\n", ret);
	spin_unlock_irqrestore(&viommu->request_lock, flags);

	return ret;
}

/*
 * Send a request and wait for it to complete. Return the request status (as an
 * errno)
 */
static int viommu_send_req_sync(struct viommu_dev *viommu, void *buf,
				size_t len)
{
	int ret;
	unsigned long flags;

	spin_lock_irqsave(&viommu->request_lock, flags);

	ret = __viommu_add_req(viommu, buf, len, true);
	if (ret) {
		dev_dbg(viommu->dev, "could not add request (%d)\n", ret);
		goto out_unlock;
	}

	ret = __viommu_sync_req(viommu);
	if (ret) {
		dev_dbg(viommu->dev, "could not sync requests (%d)\n", ret);
		/* Fall-through (get the actual request status) */
	}

	ret = viommu_get_req_errno(buf, len);
out_unlock:
	spin_unlock_irqrestore(&viommu->request_lock, flags);
	return ret;
}
/*
 * viommu_add_mapping - add a mapping to the internal tree
 *
 * On success, return 0. Otherwise return -ENOMEM.
 */
static int viommu_add_mapping(struct viommu_domain *vdomain, u64 iova, u64 end,
			      phys_addr_t paddr, u32 flags)
{
	unsigned long irqflags;
	struct viommu_mapping *mapping;

	mapping = kzalloc(sizeof(*mapping), GFP_ATOMIC);
	if (!mapping)
		return -ENOMEM;

	mapping->paddr		= paddr;
	mapping->iova.start	= iova;
	mapping->iova.last	= end;
	mapping->flags		= flags;

	spin_lock_irqsave(&vdomain->mappings_lock, irqflags);
	interval_tree_insert(&mapping->iova, &vdomain->mappings);
	spin_unlock_irqrestore(&vdomain->mappings_lock, irqflags);

	return 0;
}
/*
 * viommu_del_mappings - remove mappings from the internal tree
 *
 * @vdomain: the domain
 * @iova: start of the range
 * @end: end of the range
 *
 * On success, returns the number of unmapped bytes
 */
static size_t viommu_del_mappings(struct viommu_domain *vdomain,
				  u64 iova, u64 end)
{
	size_t unmapped = 0;
	unsigned long flags;
	struct viommu_mapping *mapping = NULL;
	struct interval_tree_node *node, *next;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	next = interval_tree_iter_first(&vdomain->mappings, iova, end);
	while (next) {
		node = next;
		mapping = container_of(node, struct viommu_mapping, iova);
		next = interval_tree_iter_next(node, iova, end);

		/* Trying to split a mapping? */
		if (mapping->iova.start < iova)
			break;

		/*
		 * Virtio-iommu doesn't allow UNMAP to split a mapping created
		 * with a single MAP request, so remove the full mapping.
		 */
		unmapped += mapping->iova.last - mapping->iova.start + 1;

		interval_tree_remove(node, &vdomain->mappings);
		kfree(mapping);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return unmapped;
}

/*
 * Fill the domain with identity mappings, skipping the device's reserved
 * regions.
 */
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
				      struct viommu_domain *vdomain)
{
	int ret;
	struct iommu_resv_region *resv;
	u64 iova = vdomain->domain.geometry.aperture_start;
	u64 limit = vdomain->domain.geometry.aperture_end;
	u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);

	iova = ALIGN(iova, granule);
	limit = ALIGN_DOWN(limit + 1, granule) - 1;
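
	/*
	 * For example, with a 4kB granule an aperture of [0x1234; 0xffffffff]
	 * shrinks to the granule-aligned range [0x2000; 0xffffffff].
	 */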
	list_for_each_entry(resv, &vdev->resv_regions, list) {
		u64 resv_start = ALIGN_DOWN(resv->start, granule);
		u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;

		if (resv_end < iova || resv_start > limit)
			/* No overlap */
			continue;

		if (resv_start > iova) {
			ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
						 (phys_addr_t)iova, flags);
			if (ret)
				goto err_unmap;
		}

		if (resv_end >= limit)
			return 0;

		iova = resv_end + 1;
	}

	ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
				 flags);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	viommu_del_mappings(vdomain, 0, iova);
	return ret;
}
/*
 * viommu_replay_mappings - re-send MAP requests
 *
 * When reattaching a domain that was previously detached from all endpoints,
 * mappings were deleted from the device. Re-create the mappings available in
 * the internal tree.
 */
static int viommu_replay_mappings(struct viommu_domain *vdomain)
{
	int ret = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct virtio_iommu_req_map map;

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, 0, -1UL);
	while (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(mapping->iova.start),
			.virt_end	= cpu_to_le64(mapping->iova.last),
			.phys_start	= cpu_to_le64(mapping->paddr),
			.flags		= cpu_to_le32(mapping->flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret)
			break;

		node = interval_tree_iter_next(node, 0, -1UL);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return ret;
}
static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
			       struct virtio_iommu_probe_resv_mem *mem,
			       size_t len)
{
	size_t size;
	u64 start64, end64;
	phys_addr_t start, end;
	struct iommu_resv_region *region = NULL, *next;
	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	start = start64 = le64_to_cpu(mem->start);
	end = end64 = le64_to_cpu(mem->end);
	size = end64 - start64 + 1;

	/* Catch any overflow, including the unlikely end64 - start64 + 1 = 0 */
	if (start != start64 || end != end64 || size < end64 - start64)
		return -EOVERFLOW;

	if (len < sizeof(*mem))
		return -EINVAL;

	switch (mem->subtype) {
	default:
		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
			 mem->subtype);
		fallthrough;
	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
		region = iommu_alloc_resv_region(start, size, 0,
						 IOMMU_RESV_RESERVED,
						 GFP_KERNEL);
		break;
	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
		region = iommu_alloc_resv_region(start, size, prot,
						 IOMMU_RESV_MSI,
						 GFP_KERNEL);
		break;
	}
	if (!region)
		return -ENOMEM;

	/* Keep the list sorted by start address */
	list_for_each_entry(next, &vdev->resv_regions, list) {
		if (next->start > region->start)
			break;
	}
	list_add_tail(&region->list, &next->list);
	return 0;
}
static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
{
	int ret;
	u16 type, len;
	size_t cur = 0;
	size_t probe_len;
	struct virtio_iommu_req_probe *probe;
	struct virtio_iommu_probe_property *prop;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	if (!fwspec->num_ids)
		return -EINVAL;

	probe_len = sizeof(*probe) + viommu->probe_size +
		    sizeof(struct virtio_iommu_req_tail);
	probe = kzalloc(probe_len, GFP_KERNEL);
	if (!probe)
		return -ENOMEM;

	probe->head.type = VIRTIO_IOMMU_T_PROBE;
	/*
	 * For now, assume that properties of an endpoint that outputs multiple
	 * IDs are consistent. Only probe the first one.
	 */
	probe->endpoint = cpu_to_le32(fwspec->ids[0]);

	ret = viommu_send_req_sync(viommu, probe, probe_len);
	if (ret)
		goto out_free;

	prop = (void *)probe->properties;
	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;

	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
	       cur < viommu->probe_size) {
		len = le16_to_cpu(prop->length) + sizeof(*prop);

		switch (type) {
		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
			break;
		default:
			dev_err(dev, "unknown viommu prop 0x%x\n", type);
		}

		if (ret)
			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);

		cur += len;
		if (cur >= viommu->probe_size)
			break;

		prop = (void *)probe->properties + cur;
		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
	}

out_free:
	kfree(probe);
	return ret;
}
static int viommu_fault_handler(struct viommu_dev *viommu,
				struct virtio_iommu_fault *fault)
{
	char *reason_str;

	u8 reason	= fault->reason;
	u32 flags	= le32_to_cpu(fault->flags);
	u32 endpoint	= le32_to_cpu(fault->endpoint);
	u64 address	= le64_to_cpu(fault->address);

	switch (reason) {
	case VIRTIO_IOMMU_FAULT_R_DOMAIN:
		reason_str = "domain";
		break;
	case VIRTIO_IOMMU_FAULT_R_MAPPING:
		reason_str = "page";
		break;
	case VIRTIO_IOMMU_FAULT_R_UNKNOWN:
	default:
		reason_str = "unknown";
		break;
	}

	/* TODO: find EP by ID and report_iommu_fault */
	if (flags & VIRTIO_IOMMU_FAULT_F_ADDRESS)
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u at %#llx [%s%s%s]\n",
				    reason_str, endpoint, address,
				    flags & VIRTIO_IOMMU_FAULT_F_READ ? "R" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_WRITE ? "W" : "",
				    flags & VIRTIO_IOMMU_FAULT_F_EXEC ? "X" : "");
	else
		dev_err_ratelimited(viommu->dev, "%s fault from EP %u\n",
				    reason_str, endpoint);

	return 0;
}

static void viommu_event_handler(struct virtqueue *vq)
{
	int ret;
	unsigned int len;
	struct scatterlist sg[1];
	struct viommu_event *evt;
	struct viommu_dev *viommu = vq->vdev->priv;

	while ((evt = virtqueue_get_buf(vq, &len)) != NULL) {
		if (len > sizeof(*evt)) {
			dev_err(viommu->dev,
				"invalid event buffer (len %u != %zu)\n",
				len, sizeof(*evt));
		} else if (!(evt->head & VIOMMU_FAULT_RESV_MASK)) {
			viommu_fault_handler(viommu, &evt->fault);
		}

		sg_init_one(sg, evt, sizeof(*evt));
		ret = virtqueue_add_inbuf(vq, sg, 1, evt, GFP_ATOMIC);
		if (ret)
			dev_err(viommu->dev, "could not add event buffer\n");
	}

	virtqueue_kick(vq);
}
/* IOMMU API */

static struct iommu_domain *viommu_domain_alloc(unsigned type)
{
	struct viommu_domain *vdomain;

	if (type != IOMMU_DOMAIN_UNMANAGED &&
	    type != IOMMU_DOMAIN_DMA &&
	    type != IOMMU_DOMAIN_IDENTITY)
		return NULL;

	vdomain = kzalloc(sizeof(*vdomain), GFP_KERNEL);
	if (!vdomain)
		return NULL;

	mutex_init(&vdomain->mutex);
	spin_lock_init(&vdomain->mappings_lock);
	vdomain->mappings = RB_ROOT_CACHED;

	return &vdomain->domain;
}

static int viommu_domain_finalise(struct viommu_endpoint *vdev,
				  struct iommu_domain *domain)
{
	int ret;
	unsigned long viommu_page_size;
	struct viommu_dev *viommu = vdev->viommu;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
	if (viommu_page_size > PAGE_SIZE) {
		dev_err(vdev->dev,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, PAGE_SIZE);
		return -EINVAL;
	}

	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
			      viommu->last_domain, GFP_KERNEL);
	if (ret < 0)
		return ret;

	vdomain->id		= (unsigned int)ret;

	domain->pgsize_bitmap	= viommu->pgsize_bitmap;
	domain->geometry	= viommu->geometry;

	vdomain->map_flags	= viommu->map_flags;
	vdomain->viommu		= viommu;

	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
		if (virtio_has_feature(viommu->vdev,
				       VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
			vdomain->bypass = true;
			return 0;
		}

		ret = viommu_domain_map_identity(vdev, vdomain);
		if (ret) {
			ida_free(&viommu->domain_ids, vdomain->id);
			vdomain->viommu = NULL;
			return -EOPNOTSUPP;
		}
	}

	return 0;
}

static void viommu_domain_free(struct iommu_domain *domain)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	/* Free all remaining mappings */
	viommu_del_mappings(vdomain, 0, ULLONG_MAX);

	if (vdomain->viommu)
		ida_free(&vdomain->viommu->domain_ids, vdomain->id);

	kfree(vdomain);
}
static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	int i;
	int ret = 0;
	struct virtio_iommu_req_attach req;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	mutex_lock(&vdomain->mutex);
	if (!vdomain->viommu) {
		/*
		 * Properly initialize the domain now that we know which viommu
		 * owns it.
		 */
		ret = viommu_domain_finalise(vdev, domain);
	} else if (vdomain->viommu != vdev->viommu) {
		dev_err(dev, "cannot attach to foreign vIOMMU\n");
		ret = -EXDEV;
	}
	mutex_unlock(&vdomain->mutex);

	if (ret)
		return ret;

	/*
	 * In the virtio-iommu device, when attaching the endpoint to a new
	 * domain, it is detached from the old one and, if as a result the
	 * old domain isn't attached to any endpoint, all mappings are removed
	 * from the old domain and it is freed.
	 *
	 * In the driver the old domain still exists, and its mappings will be
	 * recreated if it gets reattached to an endpoint. Otherwise it will be
	 * freed explicitly.
	 *
	 * vdev->vdomain is protected by group->mutex
	 */
	if (vdev->vdomain)
		vdev->vdomain->nr_endpoints--;

	req = (struct virtio_iommu_req_attach) {
		.head.type	= VIRTIO_IOMMU_T_ATTACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	if (vdomain->bypass)
		req.flags |= cpu_to_le32(VIRTIO_IOMMU_ATTACH_F_BYPASS);

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);

		ret = viommu_send_req_sync(vdomain->viommu, &req, sizeof(req));
		if (ret)
			return ret;
	}

	if (!vdomain->nr_endpoints) {
		/*
		 * This endpoint is the first to be attached to the domain.
		 * Replay existing mappings (e.g. SW MSI).
		 */
		ret = viommu_replay_mappings(vdomain);
		if (ret)
			return ret;
	}

	vdomain->nr_endpoints++;
	vdev->vdomain = vdomain;

	return 0;
}

static void viommu_detach_dev(struct viommu_endpoint *vdev)
{
	int i;
	struct virtio_iommu_req_detach req;
	struct viommu_domain *vdomain = vdev->vdomain;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(vdev->dev);

	if (!vdomain)
		return;

	req = (struct virtio_iommu_req_detach) {
		.head.type	= VIRTIO_IOMMU_T_DETACH,
		.domain		= cpu_to_le32(vdomain->id),
	};

	for (i = 0; i < fwspec->num_ids; i++) {
		req.endpoint = cpu_to_le32(fwspec->ids[i]);
		WARN_ON(viommu_send_req_sync(vdev->viommu, &req, sizeof(req)));
	}
	vdomain->nr_endpoints--;
	vdev->vdomain = NULL;
}
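
/*
 * Record the new mapping in the internal tree in all cases, so that it can be
 * replayed if the domain is reattached later, but only send a MAP request to
 * the device while the domain is attached to at least one endpoint.
 */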
static int viommu_map_pages(struct iommu_domain *domain, unsigned long iova,
			    phys_addr_t paddr, size_t pgsize, size_t pgcount,
			    int prot, gfp_t gfp, size_t *mapped)
{
	int ret;
	u32 flags;
	size_t size = pgsize * pgcount;
	u64 end = iova + size - 1;
	struct virtio_iommu_req_map map;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	flags = (prot & IOMMU_READ ? VIRTIO_IOMMU_MAP_F_READ : 0) |
		(prot & IOMMU_WRITE ? VIRTIO_IOMMU_MAP_F_WRITE : 0) |
		(prot & IOMMU_MMIO ? VIRTIO_IOMMU_MAP_F_MMIO : 0);

	if (flags & ~vdomain->map_flags)
		return -EINVAL;

	ret = viommu_add_mapping(vdomain, iova, end, paddr, flags);
	if (ret)
		return ret;

	if (vdomain->nr_endpoints) {
		map = (struct virtio_iommu_req_map) {
			.head.type	= VIRTIO_IOMMU_T_MAP,
			.domain		= cpu_to_le32(vdomain->id),
			.virt_start	= cpu_to_le64(iova),
			.phys_start	= cpu_to_le64(paddr),
			.virt_end	= cpu_to_le64(end),
			.flags		= cpu_to_le32(flags),
		};

		ret = viommu_send_req_sync(vdomain->viommu, &map, sizeof(map));
		if (ret) {
			viommu_del_mappings(vdomain, iova, end);
			return ret;
		}
	}
	if (mapped)
		*mapped = size;

	return 0;
}
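
/*
 * An UNMAP request is only added to the request queue here, without kicking
 * the device; the queue is kicked and drained when the core calls
 * viommu_iotlb_sync(), which lets several unmaps share a single sync.
 */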
static size_t viommu_unmap_pages(struct iommu_domain *domain, unsigned long iova,
				 size_t pgsize, size_t pgcount,
				 struct iommu_iotlb_gather *gather)
{
	int ret = 0;
	size_t unmapped;
	struct virtio_iommu_req_unmap unmap;
	struct viommu_domain *vdomain = to_viommu_domain(domain);
	size_t size = pgsize * pgcount;

	unmapped = viommu_del_mappings(vdomain, iova, iova + size - 1);
	if (unmapped < size)
		return 0;

	/* Device already removed all mappings after detach. */
	if (!vdomain->nr_endpoints)
		return unmapped;

	unmap = (struct virtio_iommu_req_unmap) {
		.head.type	= VIRTIO_IOMMU_T_UNMAP,
		.domain		= cpu_to_le32(vdomain->id),
		.virt_start	= cpu_to_le64(iova),
		.virt_end	= cpu_to_le64(iova + unmapped - 1),
	};

	ret = viommu_add_req(vdomain->viommu, &unmap, sizeof(unmap));
	return ret ? 0 : unmapped;
}

static phys_addr_t viommu_iova_to_phys(struct iommu_domain *domain,
				       dma_addr_t iova)
{
	u64 paddr = 0;
	unsigned long flags;
	struct viommu_mapping *mapping;
	struct interval_tree_node *node;
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	spin_lock_irqsave(&vdomain->mappings_lock, flags);
	node = interval_tree_iter_first(&vdomain->mappings, iova, iova);
	if (node) {
		mapping = container_of(node, struct viommu_mapping, iova);
		paddr = mapping->paddr + (iova - mapping->iova.start);
	}
	spin_unlock_irqrestore(&vdomain->mappings_lock, flags);

	return paddr;
}
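
/*
 * Flush the request queue, completing any UNMAP request queued by
 * viommu_unmap_pages().
 */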
static void viommu_iotlb_sync(struct iommu_domain *domain,
			      struct iommu_iotlb_gather *gather)
{
	struct viommu_domain *vdomain = to_viommu_domain(domain);

	viommu_sync_req(vdomain->viommu);
}

static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
{
	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	list_for_each_entry(entry, &vdev->resv_regions, list) {
		if (entry->type == IOMMU_RESV_MSI)
			msi = entry;

		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
		if (!new_entry)
			return;
		list_add_tail(&new_entry->list, head);
	}

	/*
	 * If the device didn't register any bypass MSI window, add a
	 * software-mapped region.
	 */
	if (!msi) {
		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
					      prot, IOMMU_RESV_SW_MSI,
					      GFP_KERNEL);
		if (!msi)
			return;

		list_add_tail(&msi->list, head);
	}

	iommu_dma_get_resv_regions(dev, head);
}

static struct iommu_ops viommu_ops;
static struct virtio_driver virtio_iommu_drv;

static int viommu_match_node(struct device *dev, const void *data)
{
	return device_match_fwnode(dev->parent, data);
}

static struct viommu_dev *viommu_get_by_fwnode(struct fwnode_handle *fwnode)
{
	struct device *dev = driver_find_device(&virtio_iommu_drv.driver, NULL,
						fwnode, viommu_match_node);

	put_device(dev);

	return dev ? dev_to_virtio(dev)->priv : NULL;
}

static struct iommu_device *viommu_probe_device(struct device *dev)
{
	int ret;
	struct viommu_endpoint *vdev;
	struct viommu_dev *viommu = NULL;
	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);

	if (!fwspec || fwspec->ops != &viommu_ops)
		return ERR_PTR(-ENODEV);

	viommu = viommu_get_by_fwnode(fwspec->iommu_fwnode);
	if (!viommu)
		return ERR_PTR(-ENODEV);

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev)
		return ERR_PTR(-ENOMEM);

	vdev->dev = dev;
	vdev->viommu = viommu;
	INIT_LIST_HEAD(&vdev->resv_regions);
	dev_iommu_priv_set(dev, vdev);

	if (viommu->probe_size) {
		/* Get additional information for this endpoint */
		ret = viommu_probe_endpoint(viommu, dev);
		if (ret)
			goto err_free_dev;
	}

	return &viommu->iommu;

err_free_dev:
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);

	return ERR_PTR(ret);
}

static void viommu_probe_finalize(struct device *dev)
{
#ifndef CONFIG_ARCH_HAS_SETUP_DMA_OPS
	/* First clear the DMA ops in case we're switching from a DMA domain */
	set_dma_ops(dev, NULL);
	iommu_setup_dma_ops(dev, 0, U64_MAX);
#endif
}

static void viommu_release_device(struct device *dev)
{
	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);

	viommu_detach_dev(vdev);
	iommu_put_resv_regions(dev, &vdev->resv_regions);
	kfree(vdev);
}

static struct iommu_group *viommu_device_group(struct device *dev)
{
	if (dev_is_pci(dev))
		return pci_device_group(dev);
	else
		return generic_device_group(dev);
}

static int viommu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
	return iommu_fwspec_add_ids(dev, args->args, 1);
}

static bool viommu_capable(struct device *dev, enum iommu_cap cap)
{
	switch (cap) {
	case IOMMU_CAP_CACHE_COHERENCY:
		return true;
	default:
		return false;
	}
}

static struct iommu_ops viommu_ops = {
	.capable		= viommu_capable,
	.domain_alloc		= viommu_domain_alloc,
	.probe_device		= viommu_probe_device,
	.probe_finalize		= viommu_probe_finalize,
	.release_device		= viommu_release_device,
	.device_group		= viommu_device_group,
	.get_resv_regions	= viommu_get_resv_regions,
	.of_xlate		= viommu_of_xlate,
	.owner			= THIS_MODULE,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev		= viommu_attach_dev,
		.map_pages		= viommu_map_pages,
		.unmap_pages		= viommu_unmap_pages,
		.iova_to_phys		= viommu_iova_to_phys,
		.iotlb_sync		= viommu_iotlb_sync,
		.free			= viommu_domain_free,
	}
};
static int viommu_init_vqs(struct viommu_dev *viommu)
{
	struct virtio_device *vdev = dev_to_virtio(viommu->dev);
	const char *names[] = { "request", "event" };
	vq_callback_t *callbacks[] = {
		NULL, /* No async requests */
		viommu_event_handler,
	};

	return virtio_find_vqs(vdev, VIOMMU_NR_VQS, viommu->vqs, callbacks,
			       names, NULL);
}

static int viommu_fill_evtq(struct viommu_dev *viommu)
{
	int i, ret;
	struct scatterlist sg[1];
	struct viommu_event *evts;
	struct virtqueue *vq = viommu->vqs[VIOMMU_EVENT_VQ];
	size_t nr_evts = vq->num_free;

	viommu->evts = evts = devm_kmalloc_array(viommu->dev, nr_evts,
						 sizeof(*evts), GFP_KERNEL);
	if (!evts)
		return -ENOMEM;

	for (i = 0; i < nr_evts; i++) {
		sg_init_one(sg, &evts[i], sizeof(*evts));
		ret = virtqueue_add_inbuf(vq, sg, 1, &evts[i], GFP_KERNEL);
		if (ret)
			return ret;
	}

	return 0;
}

static int viommu_probe(struct virtio_device *vdev)
{
	struct device *parent_dev = vdev->dev.parent;
	struct viommu_dev *viommu = NULL;
	struct device *dev = &vdev->dev;
	u64 input_start = 0;
	u64 input_end = -1UL;
	int ret;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1) ||
	    !virtio_has_feature(vdev, VIRTIO_IOMMU_F_MAP_UNMAP))
		return -ENODEV;

	viommu = devm_kzalloc(dev, sizeof(*viommu), GFP_KERNEL);
	if (!viommu)
		return -ENOMEM;

	spin_lock_init(&viommu->request_lock);
	ida_init(&viommu->domain_ids);
	viommu->dev = dev;
	viommu->vdev = vdev;
	INIT_LIST_HEAD(&viommu->requests);

	ret = viommu_init_vqs(viommu);
	if (ret)
		return ret;

	virtio_cread_le(vdev, struct virtio_iommu_config, page_size_mask,
			&viommu->pgsize_bitmap);

	if (!viommu->pgsize_bitmap) {
		ret = -EINVAL;
		goto err_free_vqs;
	}

	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
	viommu->last_domain = ~0U;

	/* Optional features */
	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.start,
				&input_start);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_INPUT_RANGE,
				struct virtio_iommu_config, input_range.end,
				&input_end);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.start,
				&viommu->first_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_DOMAIN_RANGE,
				struct virtio_iommu_config, domain_range.end,
				&viommu->last_domain);

	virtio_cread_le_feature(vdev, VIRTIO_IOMMU_F_PROBE,
				struct virtio_iommu_config, probe_size,
				&viommu->probe_size);

	viommu->geometry = (struct iommu_domain_geometry) {
		.aperture_start	= input_start,
		.aperture_end	= input_end,
		.force_aperture	= true,
	};

	if (virtio_has_feature(vdev, VIRTIO_IOMMU_F_MMIO))
		viommu->map_flags |= VIRTIO_IOMMU_MAP_F_MMIO;

	viommu_ops.pgsize_bitmap = viommu->pgsize_bitmap;

	virtio_device_ready(vdev);

	/* Populate the event queue with buffers */
	ret = viommu_fill_evtq(viommu);
	if (ret)
		goto err_free_vqs;

	ret = iommu_device_sysfs_add(&viommu->iommu, dev, NULL, "%s",
				     virtio_bus_name(vdev));
	if (ret)
		goto err_free_vqs;

	iommu_device_register(&viommu->iommu, &viommu_ops, parent_dev);

	vdev->priv = viommu;

	dev_info(dev, "input address: %u bits\n",
		 order_base_2(viommu->geometry.aperture_end));
	dev_info(dev, "page mask: %#llx\n", viommu->pgsize_bitmap);

	return 0;

err_free_vqs:
	vdev->config->del_vqs(vdev);

	return ret;
}

static void viommu_remove(struct virtio_device *vdev)
{
	struct viommu_dev *viommu = vdev->priv;

	iommu_device_sysfs_remove(&viommu->iommu);
	iommu_device_unregister(&viommu->iommu);

	/* Stop all virtqueues */
	virtio_reset_device(vdev);
	vdev->config->del_vqs(vdev);

	dev_info(&vdev->dev, "device removed\n");
}

static void viommu_config_changed(struct virtio_device *vdev)
{
	dev_warn(&vdev->dev, "config changed\n");
}

static unsigned int features[] = {
	VIRTIO_IOMMU_F_MAP_UNMAP,
	VIRTIO_IOMMU_F_INPUT_RANGE,
	VIRTIO_IOMMU_F_DOMAIN_RANGE,
	VIRTIO_IOMMU_F_PROBE,
	VIRTIO_IOMMU_F_MMIO,
	VIRTIO_IOMMU_F_BYPASS_CONFIG,
};

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_IOMMU, VIRTIO_DEV_ANY_ID },
	{ 0 },
};
MODULE_DEVICE_TABLE(virtio, id_table);

static struct virtio_driver virtio_iommu_drv = {
	.driver.name		= KBUILD_MODNAME,
	.driver.owner		= THIS_MODULE,
	.id_table		= id_table,
	.feature_table		= features,
	.feature_table_size	= ARRAY_SIZE(features),
	.probe			= viommu_probe,
	.remove			= viommu_remove,
	.config_changed		= viommu_config_changed,
};

module_virtio_driver(virtio_iommu_drv);

MODULE_DESCRIPTION("Virtio IOMMU driver");
MODULE_AUTHOR("Jean-Philippe Brucker <[email protected]>");
MODULE_LICENSE("GPL v2");