Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 updates from Will Deacon:

 - Errata workarounds for Qualcomm's Falkor CPU
 - Qualcomm L2 Cache PMU driver
 - Qualcomm SMCCC firmware quirk
 - Support for DEBUG_VIRTUAL
 - CPU feature detection for userspace via MRS emulation
 - Preliminary work for the Statistical Profiling Extension
 - Misc cleanups and non-critical fixes

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (74 commits)
  arm64/kprobes: consistently handle MRS/MSR with XZR
  arm64: cpufeature: correctly handle MRS to XZR
  arm64: traps: correctly handle MRS/MSR with XZR
  arm64: ptrace: add XZR-safe regs accessors
  arm64: include asm/assembler.h in entry-ftrace.S
  arm64: fix warning about swapper_pg_dir overflow
  arm64: Work around Falkor erratum 1003
  arm64: head.S: Enable EL1 (host) access to SPE when entered at EL2
  arm64: arch_timer: document Hisilicon erratum 161010101
  arm64: use is_vmalloc_addr
  arm64: use linux/sizes.h for constants
  arm64: uaccess: consistently check object sizes
  perf: add qcom l2 cache perf events driver
  arm64: remove wrong CONFIG_PROC_SYSCTL ifdef
  ARM: smccc: Update HVC comment to describe new quirk parameter
  arm64: do not trace atomic operations
  ACPI/IORT: Fix the error return code in iort_add_smmu_platform_device()
  ACPI/IORT: Fix iort_node_get_id() mapping entries indexing
  arm64: mm: enable CONFIG_HOLES_IN_ZONE for NUMA
  perf: xgene: Include module.h
  ...
@@ -211,7 +211,8 @@ static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
         dma_addr_t dev_addr;
 
         dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
-        if (!is_device_dma_coherent(dev))
+        if (!is_device_dma_coherent(dev) &&
+            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                 __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
 
         return dev_addr;
@@ -222,7 +223,8 @@ static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                  size_t size, enum dma_data_direction dir,
                                  unsigned long attrs)
 {
-        if (!is_device_dma_coherent(dev))
+        if (!is_device_dma_coherent(dev) &&
+            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                 __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
         swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
 }
@@ -235,7 +237,8 @@ static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
         int i, ret;
 
         ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
-        if (!is_device_dma_coherent(dev))
+        if (!is_device_dma_coherent(dev) &&
+            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                 for_each_sg(sgl, sg, ret, i)
                         __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                        sg->length, dir);
@@ -251,7 +254,8 @@ static void __swiotlb_unmap_sg_attrs(struct device *dev,
         struct scatterlist *sg;
         int i;
 
-        if (!is_device_dma_coherent(dev))
+        if (!is_device_dma_coherent(dev) &&
+            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                 for_each_sg(sgl, sg, nelems, i)
                         __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                        sg->length, dir);
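Context, not part of the diff: with the four hunks above, a non-coherent device only gets explicit cache maintenance when the caller has not passed DMA_ATTR_SKIP_CPU_SYNC. A minimal caller-side sketch follows; the driver function is hypothetical, while dma_map_page_attrs() and the attribute are the real kernel API of this era:

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment (illustrative only): the buffer was
 * already synced by hand, so ask the DMA layer to skip the extra
 * __dma_map_area() cache maintenance that __swiotlb_map_page() would
 * otherwise perform for a non-coherent arm64 device.
 */
static dma_addr_t example_map_presynced(struct device *dev,
                                        struct page *page, size_t len)
{
        return dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE,
                                  DMA_ATTR_SKIP_CPU_SYNC);
}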
@@ -352,6 +356,13 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
         return 1;
 }
 
+static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
+{
+        if (swiotlb)
+                return swiotlb_dma_mapping_error(hwdev, addr);
+        return 0;
+}
+
 static struct dma_map_ops swiotlb_dma_ops = {
         .alloc = __dma_alloc,
         .free = __dma_free,
@@ -366,7 +377,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
         .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
         .sync_sg_for_device = __swiotlb_sync_sg_for_device,
         .dma_supported = __swiotlb_dma_supported,
-        .mapping_error = swiotlb_dma_mapping_error,
+        .mapping_error = __swiotlb_dma_mapping_error,
 };
 
 static int __init atomic_pool_init(void)
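Context, not part of the diff: swiotlb_dma_mapping_error() compares a handle against the SWIOTLB overflow buffer, which is only meaningful when the bounce buffer was actually initialised; the new __swiotlb_dma_mapping_error() wrapper reports "no error" when SWIOTLB is bypassed. A hedged sketch of the caller-side pattern that ends up in .mapping_error — the driver function is hypothetical, the DMA calls are real API:

#include <linux/dma-mapping.h>

/*
 * Hypothetical caller: dma_mapping_error() dispatches through the
 * device's dma_map_ops->mapping_error, i.e. the new
 * __swiotlb_dma_mapping_error() on this path.
 */
static int example_map_buf(struct device *dev, void *buf, size_t len,
                           dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -ENOMEM; /* mapping failed, nothing to unmap */
        return 0;
}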
@@ -830,14 +841,21 @@ static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
          * then the IOMMU core will have already configured a group for this
          * device, and allocated the default domain for that group.
          */
-        if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
-                pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
-                        dev_name(dev));
-                return false;
-        }
+        if (!domain)
+                goto out_err;
+
+        if (domain->type == IOMMU_DOMAIN_DMA) {
+                if (iommu_dma_init_domain(domain, dma_base, size, dev))
+                        goto out_err;
+
+                dev->archdata.dma_ops = &iommu_dma_ops;
+        }
 
-        dev->archdata.dma_ops = &iommu_dma_ops;
         return true;
+out_err:
+        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
+                dev_name(dev));
+        return false;
 }
 
 static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
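Context, not part of the diff: the rewrite has two effects. A domain whose type is not IOMMU_DOMAIN_DMA (e.g. an identity/passthrough domain) now succeeds without swizzling dev->archdata.dma_ops, and both failure paths share a single out_err label, the usual kernel idiom for centralising error reporting. A self-contained sketch of that idiom, with entirely hypothetical names that only mirror the control flow above:

#include <stdbool.h>
#include <stdio.h>

/* All names hypothetical; this mirrors the new do_iommu_attach() flow. */
struct thing { bool has_domain; bool is_dma_domain; const void *ops; };

static bool setup_thing(struct thing *t, const void *dma_ops)
{
        if (!t->has_domain)
                goto out_err;

        if (t->is_dma_domain) {
                /* extra initialisation only for the managed case */
                t->ops = dma_ops;
        }

        return true; /* non-DMA domains succeed with default ops */
out_err:
        fprintf(stderr, "setup failed; retaining platform defaults\n");
        return false;
}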