swiotlb: remove dma_mark_clean

Instead of providing a special dma_mark_clean hook just for ia64, switch
ia64 to use the normal arch_sync_dma_for_cpu hooks.
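
For context, the job of that hook on ia64 is the same one dma_mark_clean
used to do: DMA on ia64 is i-cache coherent, so pages written by DMA can be
tagged with PG_arch_1 ("i-cache clean") and the lazy icache sync can skip
them when they are later mapped executable. A rough sketch of such a hook,
using the generic phys_addr_t-based prototype and paraphrased rather than
quoting the actual hunk:

void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
                size_t size, enum dma_data_direction dir)
{
        unsigned long pfn = PHYS_PFN(paddr);

        /*
         * DMA is i-cache coherent here, so every page touched by the
         * transfer can be marked "clean" (PG_arch_1 set) and the
         * executable-mapping path can skip the icache flush for it.
         */
        do {
                set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
        } while (++pfn <= PHYS_PFN(paddr + size - 1));
}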

This means that we now also set the PG_arch_1 bit for pages in the
swiotlb buffer, which isn't strictly needed as we will never execute
code out of the swiotlb buffer, but is otherwise harmless.
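
For reference, the only consumer of PG_arch_1 on ia64 is the lazy icache
sync that runs when a page is first mapped with an executable pte, which is
why a spuriously set bit on a bounce-buffer page that is never executed from
does no harm. A simplified sketch of that consumer (condensed from
arch/ia64/mm/init.c; the real code also handles compound pages):

void __ia64_sync_icache_dcache(pte_t pte)
{
        struct page *page = pte_page(pte);
        unsigned long addr = (unsigned long)page_address(page);

        /* PG_arch_1 set means the i-cache is already coherent */
        if (test_bit(PG_arch_1, &page->flags))
                return;

        flush_icache_range(addr, addr + PAGE_SIZE);
        set_bit(PG_arch_1, &page->flags);
}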

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Author: Christoph Hellwig
Date:   2018-12-06 07:06:04 -08:00
parent b907e20508
commit 68c608345c
6 changed files with 30 additions and 56 deletions


@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/dma-mapping.h>
#include <linux/dma-direct.h>
#include <linux/swiotlb.h>
#include <linux/export.h>
@@ -16,6 +16,24 @@ const struct dma_map_ops *dma_get_ops(struct device *dev)
EXPORT_SYMBOL(dma_get_ops);

#ifdef CONFIG_SWIOTLB
void *arch_dma_alloc(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        return dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_addr, unsigned long attrs)
{
        dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
}

long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
                dma_addr_t dma_addr)
{
        return page_to_pfn(virt_to_page(cpu_addr));
}

void __init swiotlb_dma_init(void)
{
        dma_ops = &swiotlb_dma_ops;