Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Here we go:

   - Fix various long standing issues in the sparc 32-bit IOMMU support
     code, from Christoph Hellwig.

   - Various other code cleanups and simplifications all over. From
     Gustavo A. R. Silva, Jagadeesh Pagadala, Masahiro Yamada, Mauro
     Carvalho Chehab, Mike Rapoport"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: simplify reduce_memory() function
  sparc: use struct_size() in kzalloc()
  docs: sparc: convert to ReST
  sparc/iommu: merge iommu_get_one and __sbus_iommu_map_page
  sparc/iommu: use __sbus_iommu_map_page to implement the map_sg path
  sparc/iommu: fix __sbus_iommu_map_page for highmem pages
  sparc/iommu: move per-page flushing into __sbus_iommu_map_page
  sparc/iommu: pass a physical address to iommu_get_one
  sparc/iommu: create a common helper for map_sg
  sparc/iommu: merge iommu_release_one and sbus_iommu_unmap_page
  sparc/iommu: use sbus_iommu_unmap_page in sbus_iommu_unmap_sg
  sparc/iommu: use !PageHighMem to check if a page has a kernel mapping
  sparc: vdso: add FORCE to the build rule of %.so
  arch:sparc:kernel/uprobes.c : Remove duplicate header
@@ -194,8 +194,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void)
 
        n = enumerate_cpuinfo_nodes(tmp_level);
 
-       new_tree = kzalloc(sizeof(struct cpuinfo_tree) +
-                          (sizeof(struct cpuinfo_node) * n), GFP_ATOMIC);
+       new_tree = kzalloc(struct_size(new_tree, nodes, n), GFP_ATOMIC);
        if (!new_tree)
                return NULL;
 
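For reference, struct_size() from <linux/overflow.h> folds the open-coded "header plus n array elements" sizing above into a single overflow-aware expression. Below is a minimal userspace sketch of that pattern; the struct layouts and the struct_size_demo() helper are invented for illustration and only mimic the macro's arithmetic with an explicit saturation check:

    /*
     * Userspace sketch of the sizing pattern struct_size() replaces.
     * struct_size_demo() is NOT the kernel macro; it just performs the
     * same base + count * elem arithmetic with a saturation check.
     */
    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    struct cpuinfo_node { int id; };

    struct cpuinfo_tree {
            int total_nodes;
            struct cpuinfo_node nodes[];    /* flexible array member */
    };

    static size_t struct_size_demo(size_t base, size_t elem, size_t count)
    {
            if (count && elem > (SIZE_MAX - base) / count)
                    return SIZE_MAX;        /* saturate instead of wrapping */
            return base + elem * count;
    }

    int main(void)
    {
            size_t n = 4;
            size_t sz = struct_size_demo(sizeof(struct cpuinfo_tree),
                                         sizeof(struct cpuinfo_node), n);
            struct cpuinfo_tree *t = calloc(1, sz);

            if (!t)
                    return 1;
            printf("allocated %zu bytes for %zu nodes\n", sz, n);
            free(t);
            return 0;
    }

The point of the conversion is that a very large n can no longer wrap the multiplication and yield an undersized allocation.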
@@ -29,7 +29,6 @@
 #include <linux/kdebug.h>
 
 #include <asm/cacheflush.h>
-#include <linux/uaccess.h>
 
 /* Compute the address of the breakpoint instruction and return it.
  *
@@ -2269,19 +2269,6 @@ static unsigned long last_valid_pfn;
 static void sun4u_pgprot_init(void);
 static void sun4v_pgprot_init(void);
 
-static phys_addr_t __init available_memory(void)
-{
-       phys_addr_t available = 0ULL;
-       phys_addr_t pa_start, pa_end;
-       u64 i;
-
-       for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
-                               &pa_end, NULL)
-               available = available + (pa_end - pa_start);
-
-       return available;
-}
-
 #define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
 #define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
 #define __DIRTY_BITS_4U (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
@@ -2295,33 +2282,8 @@ static phys_addr_t __init available_memory(void)
  */
 static void __init reduce_memory(phys_addr_t limit_ram)
 {
-       phys_addr_t avail_ram = available_memory();
-       phys_addr_t pa_start, pa_end;
-       u64 i;
-
-       if (limit_ram >= avail_ram)
-               return;
-
-       for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &pa_start,
-                               &pa_end, NULL) {
-               phys_addr_t region_size = pa_end - pa_start;
-               phys_addr_t clip_start = pa_start;
-
-               avail_ram = avail_ram - region_size;
-               /* Are we consuming too much? */
-               if (avail_ram < limit_ram) {
-                       phys_addr_t give_back = limit_ram - avail_ram;
-
-                       region_size = region_size - give_back;
-                       clip_start = clip_start + give_back;
-               }
-
-               memblock_remove(clip_start, region_size);
-
-               if (avail_ram <= limit_ram)
-                       break;
-               i = 0UL;
-       }
+       limit_ram += memblock_reserved_size();
+       memblock_enforce_memory_limit(limit_ram);
 }
 
 void __init paging_init(void)
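The rewritten reduce_memory() leans on memblock to do the clipping: raise the requested cap by the amount memblock has already reserved, then drop everything above it. The userspace sketch below only models what that one-liner asks for; the region list, enforce_limit() and the reserved amount are stand-ins, not the memblock implementation or memblock_reserved_size():

    /* Conceptual model of "limit_ram += reserved; enforce(limit_ram)". */
    #include <stdio.h>

    typedef unsigned long long phys_addr_t;

    struct region { phys_addr_t base, size; };

    static void enforce_limit(struct region *r, int n, phys_addr_t limit)
    {
            phys_addr_t used = 0;
            int i;

            for (i = 0; i < n; i++) {
                    if (used >= limit)
                            r[i].size = 0;                  /* entirely above the cap */
                    else if (used + r[i].size > limit)
                            r[i].size = limit - used;       /* clip the tail */
                    used += r[i].size;
            }
    }

    int main(void)
    {
            struct region mem[] = { { 0x0, 0x4000000 }, { 0x8000000, 0x4000000 } };
            phys_addr_t limit_ram = 0x5000000;      /* "mem=" style cap */
            phys_addr_t reserved = 0x200000;        /* made-up reserved size */

            limit_ram += reserved;
            enforce_limit(mem, 2, limit_ram);
            printf("region1 %#llx bytes, region2 %#llx bytes\n",
                   mem[0].size, mem[1].size);
            return 0;
    }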
@@ -175,16 +175,37 @@ static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
        }
 }
 
-static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
+static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
+               unsigned long offset, size_t len, bool per_page_flush)
 {
        struct iommu_struct *iommu = dev->archdata.iommu;
-       int ioptex;
-       iopte_t *iopte, *iopte0;
+       phys_addr_t paddr = page_to_phys(page) + offset;
+       unsigned long off = paddr & ~PAGE_MASK;
+       unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       unsigned long pfn = __phys_to_pfn(paddr);
        unsigned int busa, busa0;
-       int i;
+       iopte_t *iopte, *iopte0;
+       int ioptex, i;
+
+       /* XXX So what is maxphys for us and how do drivers know it? */
+       if (!len || len > 256 * 1024)
+               return DMA_MAPPING_ERROR;
+
+       /*
+        * We expect unmapped highmem pages to be not in the cache.
+        * XXX Is this a good assumption?
+        * XXX What if someone else unmaps it here and races us?
+        */
+       if (per_page_flush && !PageHighMem(page)) {
+               unsigned long vaddr, p;
+
+               vaddr = (unsigned long)page_address(page) + offset;
+               for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
+                       flush_page_for_dma(p);
+       }
 
        /* page color = pfn of page */
-       ioptex = bit_map_string_get(&iommu->usemap, npages, page_to_pfn(page));
+       ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
        if (ioptex < 0)
                panic("iommu out");
        busa0 = iommu->start + (ioptex << PAGE_SHIFT);
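The merged __sbus_iommu_map_page() derives everything it needs from the physical address: the sub-page offset, how many IOPTEs to claim, and the starting pfn; the DMA address handed back is the bus base plus that offset. A standalone sketch of the arithmetic, where the example paddr, len and the bus base are made-up values:

    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PAGE_MASK       (~(PAGE_SIZE - 1))

    int main(void)
    {
            unsigned long paddr = 0x1234567;        /* page_to_phys(page) + offset */
            unsigned long len = 10000;
            unsigned long off = paddr & ~PAGE_MASK;
            unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
            unsigned long pfn = paddr >> PAGE_SHIFT;        /* __phys_to_pfn() */
            /* made-up stand-in for iommu->start + (ioptex << PAGE_SHIFT) */
            unsigned long busa0 = 0xfc000000UL + (3UL << PAGE_SHIFT);

            printf("off=%#lx npages=%lu first pfn=%#lx dma=%#lx\n",
                   off, npages, pfn, busa0 + off);
            return 0;
    }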
@@ -193,29 +214,15 @@ static u32 iommu_get_one(struct device *dev, struct page *page, int npages)
        busa = busa0;
        iopte = iopte0;
        for (i = 0; i < npages; i++) {
-               iopte_val(*iopte) = MKIOPTE(page_to_pfn(page), IOPERM);
+               iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
                iommu_invalidate_page(iommu->regs, busa);
                busa += PAGE_SIZE;
                iopte++;
-               page++;
+               pfn++;
        }
 
        iommu_flush_iotlb(iopte0, npages);
 
-       return busa0;
-}
-
-static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
-               unsigned long offset, size_t len)
-{
-       void *vaddr = page_address(page) + offset;
-       unsigned long off = (unsigned long)vaddr & ~PAGE_MASK;
-       unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
-
-       /* XXX So what is maxphys for us and how do drivers know it? */
-       if (!len || len > 256 * 1024)
-               return DMA_MAPPING_ERROR;
-       return iommu_get_one(dev, virt_to_page(vaddr), npages) + off;
+       return busa0 + off;
 }
 
 static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
@@ -223,81 +230,58 @@ static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
                enum dma_data_direction dir, unsigned long attrs)
 {
        flush_page_for_dma(0);
-       return __sbus_iommu_map_page(dev, page, offset, len);
+       return __sbus_iommu_map_page(dev, page, offset, len, false);
 }
 
 static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
                struct page *page, unsigned long offset, size_t len,
                enum dma_data_direction dir, unsigned long attrs)
 {
-       void *vaddr = page_address(page) + offset;
-       unsigned long p = ((unsigned long)vaddr) & PAGE_MASK;
+       return __sbus_iommu_map_page(dev, page, offset, len, true);
+}
 
-       while (p < (unsigned long)vaddr + len) {
-               flush_page_for_dma(p);
-               p += PAGE_SIZE;
-       }
+static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
+               int nents, enum dma_data_direction dir, unsigned long attrs,
+               bool per_page_flush)
+{
+       struct scatterlist *sg;
+       int j;
 
-       return __sbus_iommu_map_page(dev, page, offset, len);
+       for_each_sg(sgl, sg, nents, j) {
+               sg->dma_address =__sbus_iommu_map_page(dev, sg_page(sg),
+                       sg->offset, sg->length, per_page_flush);
+               if (sg->dma_address == DMA_MAPPING_ERROR)
+                       return 0;
+               sg->dma_length = sg->length;
+       }
+
+       return nents;
 }
 
 static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-       struct scatterlist *sg;
-       int i, n;
-
        flush_page_for_dma(0);
-
-       for_each_sg(sgl, sg, nents, i) {
-               n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-               sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-               sg->dma_length = sg->length;
-       }
-
-       return nents;
+       return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
 }
 
 static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
 {
-       unsigned long page, oldpage = 0;
-       struct scatterlist *sg;
-       int i, j, n;
-
-       for_each_sg(sgl, sg, nents, j) {
-               n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-
-               /*
-                * We expect unmapped highmem pages to be not in the cache.
-                * XXX Is this a good assumption?
-                * XXX What if someone else unmaps it here and races us?
-                */
-               if ((page = (unsigned long) page_address(sg_page(sg))) != 0) {
-                       for (i = 0; i < n; i++) {
-                               if (page != oldpage) {  /* Already flushed? */
-                                       flush_page_for_dma(page);
-                                       oldpage = page;
-                               }
-                               page += PAGE_SIZE;
-                       }
-               }
-
-               sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-               sg->dma_length = sg->length;
-       }
-
-       return nents;
+       return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
 }
 
-static void iommu_release_one(struct device *dev, u32 busa, int npages)
+static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
+               size_t len, enum dma_data_direction dir, unsigned long attrs)
 {
        struct iommu_struct *iommu = dev->archdata.iommu;
-       int ioptex;
-       int i;
+       unsigned int busa = dma_addr & PAGE_MASK;
+       unsigned long off = dma_addr & ~PAGE_MASK;
+       unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
+       unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
+       unsigned int i;
 
        BUG_ON(busa < iommu->start);
-       ioptex = (busa - iommu->start) >> PAGE_SHIFT;
        for (i = 0; i < npages; i++) {
                iopte_val(iommu->page_table[ioptex + i]) = 0;
                iommu_invalidate_page(iommu->regs, busa);
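The new __sbus_iommu_map_sg() funnels every scatterlist entry through the single-page path and signals failure by returning 0 mapped entries, instead of open-coding the flushing and IOPTE setup in each map_sg variant. A self-contained sketch of that control flow, using mock scatterlist and mapper types rather than the kernel's:

    #include <stdio.h>

    #define DMA_MAPPING_ERROR       (~0UL)

    struct mock_sg { unsigned long offset, length, dma_address, dma_length; };

    /* stand-in for __sbus_iommu_map_page(); rejects oversized requests */
    static unsigned long mock_map_page(unsigned long offset, unsigned long length,
                                       int per_page_flush)
    {
            (void)per_page_flush;
            if (!length || length > 256 * 1024)
                    return DMA_MAPPING_ERROR;
            return 0xfc000000UL + offset;   /* made-up bus address */
    }

    static int mock_map_sg(struct mock_sg *sgl, int nents, int per_page_flush)
    {
            int j;

            for (j = 0; j < nents; j++) {
                    sgl[j].dma_address = mock_map_page(sgl[j].offset,
                                                       sgl[j].length,
                                                       per_page_flush);
                    if (sgl[j].dma_address == DMA_MAPPING_ERROR)
                            return 0;       /* caller treats the list as unmapped */
                    sgl[j].dma_length = sgl[j].length;
            }
            return nents;
    }

    int main(void)
    {
            struct mock_sg sg[2] = { { 0x100, 4096 }, { 0, 512 * 1024 } };

            printf("mapped %d of 2 entries\n", mock_map_sg(sg, 2, 0));
            return 0;
    }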
@@ -306,25 +290,15 @@ static void iommu_release_one(struct device *dev, u32 busa, int npages)
        bit_map_clear(&iommu->usemap, ioptex, npages);
 }
 
-static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
-               size_t len, enum dma_data_direction dir, unsigned long attrs)
-{
-       unsigned long off = dma_addr & ~PAGE_MASK;
-       int npages;
-
-       npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
-       iommu_release_one(dev, dma_addr & PAGE_MASK, npages);
-}
-
 static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
                int nents, enum dma_data_direction dir, unsigned long attrs)
 {
        struct scatterlist *sg;
-       int i, n;
+       int i;
 
        for_each_sg(sgl, sg, nents, i) {
-               n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-               iommu_release_one(dev, sg->dma_address & PAGE_MASK, n);
+               sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
+                               attrs);
                sg->dma_address = 0x21212121;
        }
 }
@@ -68,7 +68,7 @@ CFLAGS_REMOVE_vdso-note.o = -pg
 CFLAGS_REMOVE_vclock_gettime.o = -pg
 
 $(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
        $(call if_changed,objcopy)
 
 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)