swiotlb: remove dma_mark_clean

Instead of providing a special dma_mark_clean hook just for ia64, switch
ia64 to use the normal arch_sync_dma_for_cpu hooks.
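
For reference, arch_sync_dma_for_cpu() is the hook declared in
<linux/dma-noncoherent.h> that the generic DMA code invokes on the
sync/unmap-for-CPU path for non-coherent devices.  A minimal sketch of
that calling pattern is below; example_sync_for_cpu() is a hypothetical
wrapper for illustration, not a hunk from this commit, and the real call
sites live in kernel/dma/ and vary between kernel versions.

/*
 * Minimal sketch of the generic calling pattern; example_sync_for_cpu()
 * is a made-up wrapper, not code from this commit.
 */
#include <linux/dma-noncoherent.h>

static void example_sync_for_cpu(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir)
{
	/* Only non-coherent devices need the arch-specific cache work. */
	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_cpu(dev, paddr, size, dir);
}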

This means that we now also set the PG_arch_1 bit for pages in the
swiotlb buffer, which isn't strictly needed as we will never execute code
out of the swiotlb buffer, but is otherwise harmless.
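
On ia64, PG_arch_1 means "this page's i-cache is already coherent with
its d-cache", so marking DMA'd pages clean lets the lazy i-cache sync
skip the flush when the page is later mapped executable.  A simplified
sketch of the consumer side follows; it is loosely based on
__ia64_sync_icache_dcache() in arch/ia64/mm/init.c and the exact code
differs between kernel versions.

/*
 * Simplified sketch of how PG_arch_1 is consumed on ia64 (loosely based
 * on __ia64_sync_icache_dcache(); details vary by kernel version).
 */
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <asm/cacheflush.h>

void __ia64_sync_icache_dcache(pte_t pte)
{
	struct page *page = pte_page(pte);
	unsigned long addr = (unsigned long)page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;		/* i-cache already coherent with d-cache */

	flush_icache_range(addr, addr + PAGE_SIZE);
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}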

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
Author: Christoph Hellwig
Date: 2018-12-06 07:06:04 -08:00
commit 68c608345c (parent b907e20508)
6 changed files with 30 additions and 56 deletions

@@ -8,6 +8,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 
+#include <linux/dma-noncoherent.h>
 #include <linux/efi.h>
 #include <linux/elf.h>
 #include <linux/memblock.h>
@@ -71,18 +72,14 @@ __ia64_sync_icache_dcache (pte_t pte)
  * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
  * flush them when they get mapped into an executable vm-area.
  */
-void
-dma_mark_clean(void *addr, size_t size)
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
 {
-	unsigned long pg_addr, end;
+	unsigned long pfn = PHYS_PFN(paddr);
 
-	pg_addr = PAGE_ALIGN((unsigned long) addr);
-	end = (unsigned long) addr + size;
-	while (pg_addr + PAGE_SIZE <= end) {
-		struct page *page = virt_to_page(pg_addr);
-		set_bit(PG_arch_1, &page->flags);
-		pg_addr += PAGE_SIZE;
-	}
+	do {
+		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
+	} while (++pfn <= PHYS_PFN(paddr + size - 1));
 }
 
 inline void