Merge branch 'topic/dma' into next
Merge hch's big DMA rework series. This is in a topic branch in case he wants to merge it to minimise conflicts.
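The series switches powerpc's noncoherent DMA support from its own exported entry points to the hooks consumed by the generic direct-mapping code: __dma_nommu_alloc_coherent() becomes arch_dma_alloc(), __dma_nommu_free_coherent() becomes arch_dma_free(), __dma_sync()/__dma_sync_page() become internal helpers behind arch_sync_dma_for_device()/arch_sync_dma_for_cpu(), and __dma_get_coherent_pfn() becomes arch_dma_coherent_to_pfn(). Collected from the diff below, these are the hook prototypes the architecture now implements (presumably declared by the newly included <linux/dma-noncoherent.h>):

	void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
			gfp_t gfp, unsigned long attrs);
	void arch_dma_free(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle, unsigned long attrs);
	void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
			size_t size, enum dma_data_direction dir);
	void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
			size_t size, enum dma_data_direction dir);
	long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
			dma_addr_t dma_addr);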
arch/powerpc/mm/dma-noncoherent.c:

@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/highmem.h>
 #include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
 #include <linux/export.h>
 
 #include <asm/tlbflush.h>
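The added <linux/dma-noncoherent.h> is what declares the arch_* hooks implemented below; <linux/dma-direct.h> was already present for the physical/bus address helpers.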
@@ -151,8 +152,8 @@ static struct ppc_vm_region *ppc_vm_region_find(struct ppc_vm_region *head, unsi
  * Allocate DMA-coherent memory space and return both the kernel remapped
  * virtual and bus address for that space.
  */
-void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
-		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
+void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		gfp_t gfp, unsigned long attrs)
 {
 	struct page *page;
 	struct ppc_vm_region *c;
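Only the entry point's name and argument order change; the allocator body is untouched, and the driver-facing API is unchanged. A minimal sketch of hypothetical driver code (not from this commit) showing the call path that now ends up here via the generic dma-direct allocator:

	#include <linux/dma-mapping.h>
	#include <linux/sizes.h>

	/* Allocate and free a coherent buffer. On a noncoherent powerpc
	 * platform this now lands in arch_dma_alloc()/arch_dma_free()
	 * through the generic dma-direct code. */
	static int example_setup_ring(struct device *dev)
	{
		dma_addr_t dma_handle;
		void *ring = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);

		if (!ring)
			return -ENOMEM;
		/* ... program dma_handle into the device ... */
		dma_free_coherent(dev, SZ_4K, ring, dma_handle);
		return 0;
	}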
@@ -253,7 +254,7 @@ void *__dma_nommu_alloc_coherent(struct device *dev, size_t size,
 /*
  * free a page as defined by the above mapping.
  */
-void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
+void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_addr_t dma_handle, unsigned long attrs)
 {
 	struct ppc_vm_region *c;
@@ -313,7 +314,7 @@ void __dma_nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
 /*
  * make an area consistent.
  */
-void __dma_sync(void *vaddr, size_t size, int direction)
+static void __dma_sync(void *vaddr, size_t size, int direction)
 {
 	unsigned long start = (unsigned long)vaddr;
 	unsigned long end = start + size;
@@ -339,7 +340,6 @@ void __dma_sync(void *vaddr, size_t size, int direction)
 		break;
 	}
 }
-EXPORT_SYMBOL(__dma_sync);
 
 #ifdef CONFIG_HIGHMEM
 /*
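With the generic code now calling in through arch_sync_dma_for_device()/arch_sync_dma_for_cpu() (added in the next hunk), __dma_sync() has no users outside this file, so it becomes static and its export is dropped; any external caller has to go through the streaming DMA mapping API instead.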
@@ -386,28 +386,42 @@ static inline void __dma_sync_page_highmem(struct page *page,
  * __dma_sync_page makes memory consistent. identical to __dma_sync, but
  * takes a struct page instead of a virtual address
  */
-void __dma_sync_page(struct page *page, unsigned long offset,
-	size_t size, int direction)
+static void __dma_sync_page(phys_addr_t paddr, size_t size, int dir)
 {
+	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
+	unsigned offset = paddr & ~PAGE_MASK;
+
 #ifdef CONFIG_HIGHMEM
-	__dma_sync_page_highmem(page, offset, size, direction);
+	__dma_sync_page_highmem(page, offset, size, dir);
 #else
 	unsigned long start = (unsigned long)page_address(page) + offset;
-	__dma_sync((void *)start, size, direction);
+	__dma_sync((void *)start, size, dir);
 #endif
 }
-EXPORT_SYMBOL(__dma_sync_page);
 
+void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_sync_page(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
+		size_t size, enum dma_data_direction dir)
+{
+	__dma_sync_page(paddr, size, dir);
+}
+
 /*
- * Return the PFN for a given cpu virtual address returned by
- * __dma_nommu_alloc_coherent. This is used by dma_mmap_coherent()
+ * Return the PFN for a given cpu virtual address returned by arch_dma_alloc.
  */
-unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+long arch_dma_coherent_to_pfn(struct device *dev, void *vaddr,
+		dma_addr_t dma_addr)
 {
 	/* This should always be populated, so we don't test every
 	 * level. If that fails, we'll have a nice crash which
 	 * will be as good as a BUG_ON()
 	 */
+	unsigned long cpu_addr = (unsigned long)vaddr;
 	pgd_t *pgd = pgd_offset_k(cpu_addr);
 	pud_t *pud = pud_offset(pgd, cpu_addr);
 	pmd_t *pmd = pmd_offset(pud, cpu_addr);
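The two new arch_sync_dma_* hooks are what the generic streaming-DMA path calls on map and unmap. Roughly how that dispatch looks, as a simplified sketch in the spirit of kernel/dma/direct.c of this era (not a verbatim copy):

	/* Simplified sketch of the generic map path: for a cache-incoherent
	 * device, mapping for DMA flushes/invalidates the CPU caches via the
	 * arch hook before the address is handed to the device. */
	dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir, unsigned long attrs)
	{
		phys_addr_t phys = page_to_phys(page) + offset;

		if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			arch_sync_dma_for_device(dev, phys, size, dir);
		return phys_to_dma(dev, phys);
	}

arch_sync_dma_for_cpu() is the mirror image on the unmap/sync-for-cpu side. Note that powerpc implements both hooks with the same cache-flush helper; architectures with more selective cache maintenance can distinguish the two directions.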
arch/powerpc/mm/mem.c:

@@ -69,15 +69,12 @@ pte_t *kmap_pte;
 EXPORT_SYMBOL(kmap_pte);
 pgprot_t kmap_prot;
 EXPORT_SYMBOL(kmap_prot);
-#define TOP_ZONE ZONE_HIGHMEM
 
 static inline pte_t *virt_to_kpte(unsigned long vaddr)
 {
 	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
 			vaddr), vaddr), vaddr);
 }
-#else
-#define TOP_ZONE ZONE_NORMAL
 #endif
 
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
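The TOP_ZONE definitions existed only to feed dma_pfn_limit_to_zone(), which the next hunk removes; after this, the #ifdef CONFIG_HIGHMEM block keeps just the kmap plumbing.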
@@ -228,25 +225,6 @@ static int __init mark_nonram_nosave(void)
  */
 static unsigned long max_zone_pfns[MAX_NR_ZONES];
 
-/*
- * Find the least restrictive zone that is entirely below the
- * specified pfn limit. Returns < 0 if no suitable zone is found.
- *
- * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
- * systems -- the DMA limit can be higher than any possible real pfn.
- */
-int dma_pfn_limit_to_zone(u64 pfn_limit)
-{
-	int i;
-
-	for (i = TOP_ZONE; i >= 0; i--) {
-		if (max_zone_pfns[i] <= pfn_limit)
-			return i;
-	}
-
-	return -EPERM;
-}
-
 /*
  * paging_init() sets up the page tables - in fact we've already done this.
  */
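dma_pfn_limit_to_zone() let powerpc translate a device's addressing limit into a zone index. Under the generic scheme, the dma-direct allocator derives a GFP zone flag from the device's DMA mask itself, so the architecture no longer needs to expose this. A hedged sketch of that selection logic (a hypothetical helper mirroring kernel/dma/direct.c; the real code also accounts for bus offsets and the architecture's actual ZONE_DMA width):

	/* Hypothetical helper mirroring the generic zone selection: pick the
	 * most permissive GFP zone whose memory the device can still address. */
	static gfp_t gfp_zone_for_mask(u64 phys_limit)
	{
		if (phys_limit <= DMA_BIT_MASK(30))	/* example ZONE_DMA bound */
			return GFP_DMA;
		if (phys_limit <= DMA_BIT_MASK(32))
			return GFP_DMA32;
		return 0;				/* any zone will do */
	}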