powerpc/vfio/iommu/kvm: Do not pin device memory

This new memory does not have page structs as it is not plugged into
the host, so gup() will fail on it anyway.

This adds two helpers (both sketched below):
- mm_iommu_newdev() to preregister the "memory device" memory so that
the rest of the API can still be used;
- mm_iommu_is_devmem() to tell whether a physical address belongs to
one of these new regions, which we must not try to unpin.
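
For reference, the two helpers plausibly look and are used roughly like
this (a sketch only: the exact prototypes are inferred from this
description and the tce_page_is_contained() hunk below, not quoted from
the patch):

/* Sketch (inferred, not verbatim): preregister device memory at
 * physical address @dev_hpa for the userspace range starting at @ua,
 * so the existing preregistration API keeps working without gup(). */
long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
		unsigned long entries, unsigned long dev_hpa,
		struct mm_iommu_table_group_mem_t **pmem);

/* Sketch (inferred): return true, and the region size via @size, if
 * @hpa falls into a preregistered device-memory region; such memory
 * has no page structs and therefore must never be pinned or unpinned. */
bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
		unsigned int pageshift, unsigned long *size);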

This adds @mm to tce_page_is_contained() and iommu_tce_xchg() so that
they can test whether the memory is device memory and avoid calling
pfn_to_page() on it.
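
On the iommu_tce_xchg() side, the idea is roughly the following (a
sketch under the assumption that only the mm_iommu_is_devmem() check
and the @mm argument are new; the rest mirrors the usual exchange
path):

long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;
	unsigned long size = 0;

	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);

	/* Only dirty real memory: device memory has no page structs,
	 * so pfn_to_page() on it must be avoided. */
	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL)) &&
			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
					&size))
		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));

	return ret;
}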

This adds a check for device memory in mm_iommu_ua_mark_dirty_rm(),
which does delayed page dirtying.
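
The check presumably just bails out early when the region is device
memory, since there are no page structs to mark dirty; a sketch (the
lookup helper and the dev_hpa marker names are assumptions):

void mm_iommu_ua_mark_dirty_rm(struct mm_struct *mm, unsigned long ua)
{
	struct mm_iommu_table_group_mem_t *mem;

	mem = mm_iommu_lookup_rm(mm, ua, PAGE_SIZE);	/* assumed name */
	if (!mem)
		return;

	/* Device memory: nothing to dirty, no page structs exist. */
	if (mem->dev_hpa != MM_IOMMU_TABLE_INVALID_HPA)	/* assumed name */
		return;

	/* ... otherwise mark the backing page dirty as before ... */
}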

Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
Reviewed-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Author: Alexey Kardashevskiy
Date: 2018-12-19 19:52:15 +11:00
Committed by: Michael Ellerman
Parent: e0bf78b0f9
Commit: c10c21efa4
6 changed files with 135 additions and 32 deletions

@@ -222,8 +222,16 @@ put_exit:
 	return ret;
 }
 
-static bool tce_page_is_contained(struct page *page, unsigned page_shift)
+static bool tce_page_is_contained(struct mm_struct *mm, unsigned long hpa,
+		unsigned int page_shift)
 {
+	struct page *page;
+	unsigned long size = 0;
+
+	if (mm_iommu_is_devmem(mm, hpa, page_shift, &size))
+		return size == (1UL << page_shift);
+
+	page = pfn_to_page(hpa >> PAGE_SHIFT);
 	/*
	 * Check that the TCE table granularity is not bigger than the size of
	 * a page we just found. Otherwise the hardware can get access to
@@ -499,7 +507,8 @@ static int tce_iommu_clear(struct tce_container *container,
 
 		direction = DMA_NONE;
 		oldhpa = 0;
-		ret = iommu_tce_xchg(tbl, entry, &oldhpa, &direction);
+		ret = iommu_tce_xchg(container->mm, tbl, entry, &oldhpa,
+				&direction);
 		if (ret)
 			continue;
 
@@ -537,7 +546,6 @@ static long tce_iommu_build(struct tce_container *container,
 		enum dma_data_direction direction)
 {
 	long i, ret = 0;
-	struct page *page;
 	unsigned long hpa;
 	enum dma_data_direction dirtmp;
 
@@ -548,15 +556,16 @@ static long tce_iommu_build(struct tce_container *container,
 		if (ret)
 			break;
 
-		page = pfn_to_page(hpa >> PAGE_SHIFT);
-		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
+		if (!tce_page_is_contained(container->mm, hpa,
+				tbl->it_page_shift)) {
 			ret = -EPERM;
 			break;
 		}
 
 		hpa |= offset;
 		dirtmp = direction;
-		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
+		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
+				&dirtmp);
 		if (ret) {
 			tce_iommu_unuse_page(container, hpa);
 			pr_err("iommu_tce: %s failed ioba=%lx, tce=%lx, ret=%ld\n",
@@ -583,7 +592,6 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		enum dma_data_direction direction)
 {
 	long i, ret = 0;
-	struct page *page;
 	unsigned long hpa;
 	enum dma_data_direction dirtmp;
 
@@ -596,8 +604,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		if (ret)
 			break;
 
-		page = pfn_to_page(hpa >> PAGE_SHIFT);
-		if (!tce_page_is_contained(page, tbl->it_page_shift)) {
+		if (!tce_page_is_contained(container->mm, hpa,
+				tbl->it_page_shift)) {
 			ret = -EPERM;
 			break;
 		}
@@ -610,7 +618,8 @@ static long tce_iommu_build_v2(struct tce_container *container,
 		if (mm_iommu_mapped_inc(mem))
 			break;
 
-		ret = iommu_tce_xchg(tbl, entry + i, &hpa, &dirtmp);
+		ret = iommu_tce_xchg(container->mm, tbl, entry + i, &hpa,
+				&dirtmp);
 		if (ret) {
 			/* dirtmp cannot be DMA_NONE here */
 			tce_iommu_unuse_page_v2(container, tbl, entry + i);