BACKPORT: iommu/dma: Fix race condition during iova_domain initialization

When many devices share the same iova domain, iommu_dma_init_domain()
may be called concurrently. The check of iovad->start_pfn can then
evaluate to false in every caller, so each of them enters
init_iova_domain() and performs the iovad initialization.

Fix this by protecting init_iova_domain() with iommu_dma_cookie->mutex.

Exception backtrace:
rb_insert_color(param1=0xFFFFFF80CD2BDB40, param3=1) + 64
init_iova_domain() + 180
iommu_setup_dma_ops() + 260
arch_setup_dma_ops() + 132
of_dma_configure_id() + 468
platform_dma_configure() + 32
really_probe() + 1168
driver_probe_device() + 268
__device_attach_driver() + 524
__device_attach() + 524
bus_probe_device() + 64
deferred_probe_work_func() + 260
process_one_work() + 580
worker_thread() + 1076
kthread() + 332
ret_from_fork() + 16

Signed-off-by: Ning Li <ning.li@mediatek.com>
Signed-off-by: Yunfei Wang <yf.wang@mediatek.com>
Acked-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Miles Chen <miles.chen@mediatek.com>
Link: https://lore.kernel.org/r/20220530120748.31733-1-yf.wang@mediatek.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>

Bug: 236922015
(cherry picked from commit ac9a5d522bb80be50ea84965699e1c8257d745ce
 https://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git core)
[Yunfei: Embed iommu_dma_cookie into iommu_dma_cookie_ext to avoid
 changing struct iommu_dma_cookie]
Signed-off-by: Yunfei Wang <yf.wang@mediatek.com>
Change-Id: I9b7931bea912837f17d2322713ba68a37122499d
(cherry picked from commit 8a410d778a3c3b1f535acecff7f53c542ffb348c)
This commit is contained in:
Yunfei Wang
2022-06-23 15:43:53 +08:00
committed by Suren Baghdasaryan
parent 321bf845e1
commit 8ee37d0bcd

View File

@@ -50,6 +50,11 @@ struct iommu_dma_cookie {
struct iommu_domain *fq_domain;
};
/*
 * Wrapper that embeds struct iommu_dma_cookie so a mutex can be added
 * without changing the layout of struct iommu_dma_cookie itself (see the
 * backport note in the commit message).
 */
struct iommu_dma_cookie_ext {
struct iommu_dma_cookie cookie;
/* Serializes the iovad initialization done in iommu_dma_init_domain() */
struct mutex mutex;
};
static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
@@ -59,14 +64,15 @@ static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
struct iommu_dma_cookie *cookie;
struct iommu_dma_cookie_ext *cookie;
cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
if (cookie) {
INIT_LIST_HEAD(&cookie->msi_page_list);
cookie->type = type;
INIT_LIST_HEAD(&cookie->cookie.msi_page_list);
cookie->cookie.type = type;
mutex_init(&cookie->mutex);
}
return cookie;
return &cookie->cookie;
}
/**
@@ -305,9 +311,11 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
u64 size, struct device *dev)
{
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_cookie_ext *cookie_ext;
unsigned long order, base_pfn;
struct iova_domain *iovad;
int attr;
int ret;
if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
return -EINVAL;
@@ -331,14 +339,18 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
}
/* start_pfn is always nonzero for an already-initialised domain */
cookie_ext = container_of(cookie, struct iommu_dma_cookie_ext, cookie);
mutex_lock(&cookie_ext->mutex);
if (iovad->start_pfn) {
if (1UL << order != iovad->granule ||
base_pfn != iovad->start_pfn) {
pr_warn("Incompatible range for DMA domain\n");
return -EFAULT;
ret = -EFAULT;
goto done_unlock;
}
return 0;
ret = 0;
goto done_unlock;
}
init_iova_domain(iovad, 1UL << order, base_pfn);
@@ -352,10 +364,16 @@ static int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
cookie->fq_domain = domain;
}
if (!dev)
return 0;
if (!dev) {
ret = 0;
goto done_unlock;
}
return iova_reserve_iommu_regions(dev, domain);
ret = iova_reserve_iommu_regions(dev, domain);
done_unlock:
mutex_unlock(&cookie_ext->mutex);
return ret;
}
static int iommu_dma_deferred_attach(struct device *dev,