Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Catalin Marinas:

 - kdump support, including two necessary memblock additions:
   memblock_clear_nomap() and memblock_cap_memory_range()

 - ARMv8.3 HWCAP bits for JavaScript conversion instructions, complex
   numbers and weaker release consistency

 - arm64 ACPI platform MSI support

 - arm perf updates: ACPI PMU support, L3 cache PMU in some Qualcomm
   SoCs, Cortex-A53 L2 cache events and DTLB refills, MAINTAINERS update
   for DT perf bindings

 - architected timer errata framework (the arch/arm64 changes only)

 - support for DMA_ATTR_FORCE_CONTIGUOUS in the arm64 iommu DMA API

 - arm64 KVM refactoring to use common system register definitions

 - remove support for ASID-tagged VIVT I-cache (no ARMv8 implementation
   using it and deprecated in the architecture) together with some
   I-cache handling clean-up

 - PE/COFF EFI header clean-up/hardening

 - define BUG() instruction without CONFIG_BUG

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (92 commits)
  arm64: Fix the DMA mmap and get_sgtable API with DMA_ATTR_FORCE_CONTIGUOUS
  arm64: Print DT machine model in setup_machine_fdt()
  arm64: pmu: Wire-up Cortex A53 L2 cache events and DTLB refills
  arm64: module: split core and init PLT sections
  arm64: pmuv3: handle pmuv3+
  arm64: Add CNTFRQ_EL0 trap handler
  arm64: Silence spurious kbuild warning on menuconfig
  arm64: pmuv3: use arm_pmu ACPI framework
  arm64: pmuv3: handle !PMUv3 when probing
  drivers/perf: arm_pmu: add ACPI framework
  arm64: add function to get a cpu's MADT GICC table
  drivers/perf: arm_pmu: split out platform device probe logic
  drivers/perf: arm_pmu: move irq request/free into probe
  drivers/perf: arm_pmu: split cpu-local irq request/free
  drivers/perf: arm_pmu: rename irq request/free functions
  drivers/perf: arm_pmu: handle no platform_device
  drivers/perf: arm_pmu: simplify cpu_pmu_request_irqs()
  drivers/perf: arm_pmu: factor out pmu registration
  drivers/perf: arm_pmu: fold init into alloc
  drivers/perf: arm_pmu: define armpmu_init_fn
  ...
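The diff below, from arch/arm64/mm/dma-mapping.c, is the DMA_ATTR_FORCE_CONTIGUOUS
work called out in the list above. From a driver's point of view, usage looks
roughly like the following sketch; dma_alloc_attrs()/dma_free_attrs() are the
standard DMA API entry points, while "my_dev" and "buf_size" are placeholder
names, not identifiers from the patch:

    #include <linux/dma-mapping.h>

    /* Sketch: allocate a buffer that stays physically contiguous even
     * when the device sits behind an IOMMU. */
    static void *alloc_contig_buf(struct device *my_dev, size_t buf_size,
    				  dma_addr_t *dma)
    {
    	return dma_alloc_attrs(my_dev, buf_size, dma, GFP_KERNEL,
    			       DMA_ATTR_FORCE_CONTIGUOUS);
    }

    static void free_contig_buf(struct device *my_dev, size_t buf_size,
    				void *cpu, dma_addr_t dma)
    {
    	/* Free with the same attrs so the matching release path runs. */
    	dma_free_attrs(my_dev, buf_size, cpu, dma,
    		       DMA_ATTR_FORCE_CONTIGUOUS);
    }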
arch/arm64/mm/dma-mapping.c
@@ -308,24 +308,15 @@ static void __swiotlb_sync_sg_for_device(struct device *dev,
 			       sg->length, dir);
 }
 
-static int __swiotlb_mmap(struct device *dev,
-			  struct vm_area_struct *vma,
-			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
-			  unsigned long attrs)
+static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
+			      unsigned long pfn, size_t size)
 {
 	int ret = -ENXIO;
 	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
 					PAGE_SHIFT;
 	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
-	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
 
-	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
-					     is_device_dma_coherent(dev));
-
-	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
-		return ret;
-
 	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
 		ret = remap_pfn_range(vma, vma->vm_start,
 				      pfn + off,
@@ -336,19 +327,43 @@ static int __swiotlb_mmap(struct device *dev,
 	return ret;
 }
 
-static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
-				 void *cpu_addr, dma_addr_t handle, size_t size,
-				 unsigned long attrs)
+static int __swiotlb_mmap(struct device *dev,
+			  struct vm_area_struct *vma,
+			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
+			  unsigned long attrs)
+{
+	int ret;
+	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
+
+	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
+					     is_device_dma_coherent(dev));
+
+	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	return __swiotlb_mmap_pfn(vma, pfn, size);
+}
+
+static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
+				      struct page *page, size_t size)
 {
 	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 
 	if (!ret)
-		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
-			    PAGE_ALIGN(size), 0);
+		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
 
 	return ret;
 }
 
+static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
+				 void *cpu_addr, dma_addr_t handle, size_t size,
+				 unsigned long attrs)
+{
+	struct page *page = phys_to_page(dma_to_phys(dev, handle));
+
+	return __swiotlb_get_sgtable_page(sgt, page, size);
+}
+
 static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 {
 	if (swiotlb)
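The two hunks above split __swiotlb_mmap() and __swiotlb_get_sgtable() into
pfn/page-based helpers (__swiotlb_mmap_pfn(), __swiotlb_get_sgtable_page()) so
the IOMMU path further down can reuse them. A driver never calls these
directly; it goes through dma_mmap_attrs(), roughly as in this sketch, where
"struct my_state" and my_mmap() are hypothetical driver code:

    #include <linux/dma-mapping.h>
    #include <linux/fs.h>
    #include <linux/mm.h>

    struct my_state {		/* hypothetical driver state */
    	struct device *dev;
    	void *cpu_addr;
    	dma_addr_t dma_addr;
    	size_t size;
    };

    /* Passing the same attrs as at allocation time lets the arch code
     * (here: __swiotlb_mmap() or __iommu_mmap_attrs()) pick the right
     * remap strategy for the userspace mapping. */
    static int my_mmap(struct file *file, struct vm_area_struct *vma)
    {
    	struct my_state *st = file->private_data;

    	return dma_mmap_attrs(st->dev, vma, st->cpu_addr, st->dma_addr,
    			      st->size, DMA_ATTR_FORCE_CONTIGUOUS);
    }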
@@ -584,20 +599,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 	 */
 	gfp |= __GFP_ZERO;
 
-	if (gfpflags_allow_blocking(gfp)) {
-		struct page **pages;
-		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
-
-		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
-					handle, flush_page);
-		if (!pages)
-			return NULL;
-
-		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
-					      __builtin_return_address(0));
-		if (!addr)
-			iommu_dma_free(dev, pages, iosize, handle);
-	} else {
+	if (!gfpflags_allow_blocking(gfp)) {
 		struct page *page;
 		/*
 		 * In atomic context we can't remap anything, so we'll only
@@ -621,6 +623,45 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
 			__free_from_pool(addr, size);
 			addr = NULL;
 		}
-	}
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+		struct page *page;
+
+		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
+						 get_order(size), gfp);
+		if (!page)
+			return NULL;
+
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
+		if (iommu_dma_mapping_error(dev, *handle)) {
+			dma_release_from_contiguous(dev, page,
+						    size >> PAGE_SHIFT);
+			return NULL;
+		}
+		if (!coherent)
+			__dma_flush_area(page_to_virt(page), iosize);
+
+		addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
+						   prot,
+						   __builtin_return_address(0));
+		if (!addr) {
+			iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
+			dma_release_from_contiguous(dev, page,
+						    size >> PAGE_SHIFT);
+		}
+	} else {
+		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
+		struct page **pages;
+
+		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
+					handle, flush_page);
+		if (!pages)
+			return NULL;
+
+		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
+					      __builtin_return_address(0));
+		if (!addr)
+			iommu_dma_free(dev, pages, iosize, handle);
+	}
 	return addr;
 }
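Note the branch ordering in the hunk above: the !gfpflags_allow_blocking()
test comes first, so an atomic allocation is always served from the atomic
pool and DMA_ATTR_FORCE_CONTIGUOUS is effectively not honoured there; the CMA
path (dma_alloc_from_contiguous() may sleep) is only reachable with a blocking
gfp mask. A sketch of which branch serves each caller, with "dev" and "size"
as placeholders:

    #include <linux/dma-mapping.h>

    static void sketch_alloc_paths(struct device *dev, size_t size)
    {
    	dma_addr_t dma;
    	void *cpu;

    	/* Non-blocking gfp: atomic pool; the attr has no effect because
    	 * the !gfpflags_allow_blocking() branch is tested first. */
    	cpu = dma_alloc_attrs(dev, size, &dma, GFP_ATOMIC,
    			      DMA_ATTR_FORCE_CONTIGUOUS);

    	/* Blocking gfp + attr: dma_alloc_from_contiguous() + remap. */
    	cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL,
    			      DMA_ATTR_FORCE_CONTIGUOUS);

    	/* Blocking gfp, no attr: iommu_dma_alloc() page array + remap. */
    	cpu = dma_alloc_attrs(dev, size, &dma, GFP_KERNEL, 0);
    }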
@@ -632,7 +673,8 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 
 	size = PAGE_ALIGN(size);
 	/*
-	 * @cpu_addr will be one of 3 things depending on how it was allocated:
+	 * @cpu_addr will be one of 4 things depending on how it was allocated:
+	 * - A remapped array of pages for contiguous allocations.
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
 	 *   non-atomic allocations.
 	 * - A non-cacheable alias from the atomic pool, for atomic
@@ -644,6 +686,12 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	if (__in_atomic_pool(cpu_addr, size)) {
 		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
 		__free_from_pool(cpu_addr, size);
+	} else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		struct page *page = vmalloc_to_page(cpu_addr);
+
+		iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
+		dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
@@ -670,6 +718,15 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;
 
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		/*
+		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+		 * hence in the vmalloc space.
+		 */
+		unsigned long pfn = vmalloc_to_pfn(cpu_addr);
+		return __swiotlb_mmap_pfn(vma, pfn, size);
+	}
+
 	area = find_vm_area(cpu_addr);
 	if (WARN_ON(!area || !area->pages))
 		return -ENXIO;
@@ -684,6 +741,15 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct vm_struct *area = find_vm_area(cpu_addr);
 
+	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
+		/*
+		 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
+		 * hence in the vmalloc space.
+		 */
+		struct page *page = vmalloc_to_page(cpu_addr);
+		return __swiotlb_get_sgtable_page(sgt, page, size);
+	}
+
 	if (WARN_ON(!area || !area->pages))
 		return -ENXIO;
 
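The last two hunks take an early exit: because a DMA_ATTR_FORCE_CONTIGUOUS
buffer is always remapped into vmalloc space, vmalloc_to_pfn()/vmalloc_to_page()
on cpu_addr recovers the underlying physical region directly, with no
struct vm_struct page array to walk. From a driver, exporting such a buffer
(e.g. towards dma-buf) goes through dma_get_sgtable_attrs(); a sketch with
placeholder names:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>

    /* For a DMA_ATTR_FORCE_CONTIGUOUS allocation this lands in
     * __swiotlb_get_sgtable_page() above: sg_alloc_table(sgt, 1, ...)
     * yields a single-entry table covering the whole contiguous region. */
    static int my_export_sgt(struct device *dev, struct sg_table *sgt,
    			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
    {
    	return dma_get_sgtable_attrs(dev, sgt, cpu_addr, dma_addr, size,
    				     DMA_ATTR_FORCE_CONTIGUOUS);
    }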