Merge branches 'atags', 'cache-l2x0', 'clkdev', 'fixes', 'integrator', 'misc', 'opcodes' and 'syscall' into for-linus

@@ -699,7 +699,6 @@ do_alignment_t32_to_handler(unsigned long *pinstr, struct pt_regs *regs,
         unsigned long instr = *pinstr;
         u16 tinst1 = (instr >> 16) & 0xffff;
         u16 tinst2 = instr & 0xffff;
-        poffset->un = 0;

         switch (tinst1 & 0xffe0) {
         /* A6.3.5 Load/Store multiple */
@@ -854,9 +853,10 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
                 break;

         case 0x08000000:        /* ldm or stm, or thumb-2 32bit instruction */
-                if (thumb2_32b)
+                if (thumb2_32b) {
+                        offset.un = 0;
                         handler = do_alignment_t32_to_handler(&instr, regs, &offset);
-                else
+                } else
                         handler = do_alignment_ldmstm;
                 break;

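The two alignment hunks above hoist the zeroing of the offset union out of do_alignment_t32_to_handler() and into the one call site that needs it, so the union is cleared exactly once on the Thumb-2 path before the decoder fills it in. A minimal userspace sketch of the same caller-initialises-the-out-parameter pattern (all names here are illustrative, not the kernel's):

#include <stdio.h>

union offset_un {
        unsigned long un;
        signed long sn;
};

/* The decoder only sets the offset for encodings that carry one;
 * it relies on the caller having zeroed it beforehand. */
static void decode_t32(unsigned long instr, union offset_un *poffset)
{
        if (instr & 0x100)              /* pretend this bit encodes an immediate */
                poffset->un = instr & 0xff;
}

int main(void)
{
        union offset_un offset;

        offset.un = 0;                  /* caller clears, as in the patched code */
        decode_t32(0x0, &offset);
        printf("offset = %lu\n", offset.un);
        return 0;
}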
@@ -368,14 +368,18 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
                 /* l2x0 controller is disabled */
                 writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

-                l2x0_saved_regs.aux_ctrl = aux;
-
                 l2x0_inv_all();

                 /* enable L2X0 */
                 writel_relaxed(1, l2x0_base + L2X0_CTRL);
         }

+        /* Re-read it in case some bits are reserved. */
+        aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
+
+        /* Save the value for resuming. */
+        l2x0_saved_regs.aux_ctrl = aux;
+
         outer_cache.inv_range = l2x0_inv_range;
         outer_cache.clean_range = l2x0_clean_range;
         outer_cache.flush_range = l2x0_flush_range;
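The l2x0 change saves the auxiliary control value as the hardware actually reports it, not the value software tried to write: reserved or read-only bits may not take the write, and the saved copy is what the resume path later restores. A small runnable model of why the read-back matters (the register layout here is invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define RESERVED_MASK 0xF0000000u       /* pretend bits 31:28 are read-only */

static uint32_t hw_aux = 0x30000000u;   /* reset value of the fake register */

static void reg_write(uint32_t v)
{
        /* the fake hardware ignores writes to reserved bits */
        hw_aux = (hw_aux & RESERVED_MASK) | (v & ~RESERVED_MASK);
}

int main(void)
{
        uint32_t aux = 0xDEADBEEFu;

        reg_write(aux);
        printf("wrote    %08x\n", aux);
        printf("readback %08x\n", hw_aux);  /* differs: save this for resume */
        return 0;
}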
@@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range)
  * isn't mapped, fail with -EFAULT.
  */
 9001:
+#ifdef CONFIG_ARM_ERRATA_775420
+        dsb
+#endif
         mov     r0, #-EFAULT
         mov     pc, lr
  UNWIND(.fnend          )
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
         pid = task_pid_nr(thread->task) << ASID_BITS;
         asm volatile(
         "       mrc     p15, 0, %0, c13, c0, 1\n"
-        "       bfi     %1, %0, #0, %2\n"
-        "       mcr     p15, 0, %1, c13, c0, 1\n"
+        "       and     %0, %0, %2\n"
+        "       orr     %0, %0, %1\n"
+        "       mcr     p15, 0, %0, c13, c0, 1\n"
         : "=r" (contextidr), "+r" (pid)
-        : "I" (ASID_BITS));
+        : "I" (~ASID_MASK));
         isb();

         return NOTIFY_OK;
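The rewritten CONTEXTIDR sequence is a plain read-modify-write: read the current register, keep only the ASID field, OR in the new PID (already shifted above the ASID), and write the result back. Assuming the contemporaneous arch/arm definitions ASID_BITS == 8 and ASID_MASK == (~0UL << ASID_BITS) — an assumption, since the header is not part of this diff — the C equivalent is:

#include <stdio.h>

#define ASID_BITS 8                     /* assumed, per arch/arm of that era */
#define ASID_MASK (~0UL << ASID_BITS)

int main(void)
{
        unsigned long contextidr = 0x00001234UL;    /* PROCID=0x12, ASID=0x34 */
        unsigned long pid = 77UL << ASID_BITS;      /* new PID, pre-shifted */

        contextidr &= ~ASID_MASK;                   /* keep the ASID field */
        contextidr |= pid;                          /* install the new PROCID */

        printf("CONTEXTIDR = %#lx\n", contextidr);  /* 0x4d34: PID=77, ASID kept */
        return 0;
}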
@@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
         vunmap(cpu_addr);
 }

+#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
+
 struct dma_pool {
         size_t size;
         spinlock_t lock;
         unsigned long *bitmap;
         unsigned long nr_pages;
         void *vaddr;
-        struct page *page;
+        struct page **pages;
 };

 static struct dma_pool atomic_pool = {
-        .size = SZ_256K,
+        .size = DEFAULT_DMA_COHERENT_POOL_SIZE,
 };

 static int __init early_coherent_pool(char *p)
@@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
 }
 early_param("coherent_pool", early_coherent_pool);

+void __init init_dma_coherent_pool_size(unsigned long size)
+{
+        /*
+         * Catch any attempt to set the pool size too late.
+         */
+        BUG_ON(atomic_pool.vaddr);
+
+        /*
+         * Set architecture specific coherent pool size only if
+         * it has not been changed by kernel command line parameter.
+         */
+        if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
+                atomic_pool.size = size;
+}
+
 /*
  * Initialise the coherent pool for atomic allocations.
  */
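init_dma_coherent_pool_size() gives platform code a window to raise the pool size before atomic_pool_init() allocates it, while still letting a coherent_pool= command-line value win: the platform default only applies if the size still equals DEFAULT_DMA_COHERENT_POOL_SIZE. A runnable model of that precedence rule (names and sizes illustrative):

#include <assert.h>
#include <stdio.h>

#define DEFAULT_POOL_SIZE (256 * 1024)

static struct { unsigned long size; void *vaddr; } pool = { DEFAULT_POOL_SIZE, 0 };

/* command-line handler: an explicit user request always wins */
static void early_coherent_pool(unsigned long size) { pool.size = size; }

/* platform hook: only applies if the user did not override the default */
static void init_pool_size(unsigned long size)
{
        assert(!pool.vaddr);            /* too late once the pool exists */
        if (pool.size == DEFAULT_POOL_SIZE)
                pool.size = size;
}

int main(void)
{
        early_coherent_pool(512 * 1024);        /* user asked for 512K ... */
        init_pool_size(1024 * 1024);            /* ... so the platform's 1M loses */
        printf("pool size = %lu KiB\n", pool.size / 1024);
        return 0;
}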
@@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
         unsigned long nr_pages = pool->size >> PAGE_SHIFT;
         unsigned long *bitmap;
         struct page *page;
+        struct page **pages;
         void *ptr;
         int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

@@ -304,21 +322,33 @@ static int __init atomic_pool_init(void)
         if (!bitmap)
                 goto no_bitmap;

+        pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
+        if (!pages)
+                goto no_pages;
+
         if (IS_ENABLED(CONFIG_CMA))
                 ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
         else
                 ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
                                            &page, NULL);
         if (ptr) {
+                int i;
+
+                for (i = 0; i < nr_pages; i++)
+                        pages[i] = page + i;
+
                 spin_lock_init(&pool->lock);
                 pool->vaddr = ptr;
-                pool->page = page;
+                pool->pages = pages;
                 pool->bitmap = bitmap;
                 pool->nr_pages = nr_pages;
                 pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
                         (unsigned)pool->size / 1024);
                 return 0;
         }

+        kfree(pages);
+no_pages:
         kfree(bitmap);
 no_bitmap:
         pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -358,7 +388,7 @@ void __init dma_contiguous_remap(void)
                 if (end > arm_lowmem_limit)
                         end = arm_lowmem_limit;
                 if (start >= end)
-                        return;
+                        continue;

                 map.pfn = __phys_to_pfn(start);
                 map.virtual = __phys_to_virt(start);
@@ -423,7 +453,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
         unsigned int pageno;
         unsigned long flags;
         void *ptr = NULL;
-        size_t align;
+        unsigned long align_mask;

         if (!pool->vaddr) {
                 WARN(1, "coherent pool not initialised!\n");
@@ -435,35 +465,53 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
          * small, so align them to their order in pages, minimum is a page
          * size. This helps reduce fragmentation of the DMA space.
          */
-        align = PAGE_SIZE << get_order(size);
+        align_mask = (1 << get_order(size)) - 1;

         spin_lock_irqsave(&pool->lock, flags);
         pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
-                                            0, count, (1 << align) - 1);
+                                            0, count, align_mask);
         if (pageno < pool->nr_pages) {
                 bitmap_set(pool->bitmap, pageno, count);
                 ptr = pool->vaddr + PAGE_SIZE * pageno;
-                *ret_page = pool->page + pageno;
+                *ret_page = pool->pages[pageno];
         } else {
                 pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
                             "Please increase it with coherent_pool= kernel parameter!\n",
                             (unsigned)pool->size / 1024);
         }
         spin_unlock_irqrestore(&pool->lock, flags);

         return ptr;
 }

+static bool __in_atomic_pool(void *start, size_t size)
+{
+        struct dma_pool *pool = &atomic_pool;
+        void *end = start + size;
+        void *pool_start = pool->vaddr;
+        void *pool_end = pool->vaddr + pool->size;
+
+        if (start < pool_start || start >= pool_end)
+                return false;
+
+        if (end <= pool_end)
+                return true;
+
+        WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+             start, end - 1, pool_start, pool_end - 1);
+
+        return false;
+}
+
 static int __free_from_pool(void *start, size_t size)
 {
         struct dma_pool *pool = &atomic_pool;
         unsigned long pageno, count;
         unsigned long flags;

-        if (start < pool->vaddr || start > pool->vaddr + pool->size)
+        if (!__in_atomic_pool(start, size))
                 return 0;

-        if (start + size > pool->vaddr + pool->size) {
-                WARN(1, "freeing wrong coherent size from pool\n");
-                return 0;
-        }
-
         pageno = (start - pool->vaddr) >> PAGE_SHIFT;
         count = size >> PAGE_SHIFT;

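Two fixes are visible in __alloc_from_pool(), and the open-coded range checks in __free_from_pool() are also consolidated into __in_atomic_pool(), which later hunks reuse. First, bitmap_find_next_zero_area() takes an alignment mask measured in bitmap slots (pages, here), but the old code built the mask from align, a byte count: (1 << align) - 1 with align = PAGE_SIZE << get_order(size) shifts by at least 4096 bits, which is undefined. The new align_mask = (1 << get_order(size)) - 1 stays in page units. Second, *ret_page is looked up through the new pages[] array instead of assuming one physically contiguous page run. A runnable check of the mask arithmetic (get_order() reimplemented locally for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* order = log2 of the page count needed for size, rounded up */
static int get_order(unsigned long size)
{
        int order = 0;
        unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned long sizes[] = { 4096, 8192, 65536 };

        for (int i = 0; i < 3; i++) {
                unsigned long align_mask = (1UL << get_order(sizes[i])) - 1;
                /* e.g. 8 KiB -> order 1 -> mask 0x1: start index must be even */
                printf("size %6lu -> order %d -> align_mask %#lx\n",
                       sizes[i], get_order(sizes[i]), align_mask);
        }
        return 0;
}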
@@ -648,12 +696,12 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,

         if (arch_is_coherent() || nommu()) {
                 __dma_free_buffer(page, size);
+        } else if (__free_from_pool(cpu_addr, size)) {
+                return;
         } else if (!IS_ENABLED(CONFIG_CMA)) {
                 __dma_free_remap(cpu_addr, size);
                 __dma_free_buffer(page, size);
         } else {
-                if (__free_from_pool(cpu_addr, size))
-                        return;
                 /*
                  * Non-atomic allocations cannot be freed with IRQs disabled
                  */
@@ -1090,10 +1138,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
         return 0;
 }

+static struct page **__atomic_get_pages(void *addr)
+{
+        struct dma_pool *pool = &atomic_pool;
+        struct page **pages = pool->pages;
+        int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+        return pages + offs;
+}
+
 static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
 {
         struct vm_struct *area;

+        if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+                return __atomic_get_pages(cpu_addr);
+
         if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
                 return cpu_addr;

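__atomic_get_pages() recovers the struct page slice for a pool address by pure pointer arithmetic: the byte offset of addr from the pool's base vaddr, shifted down by PAGE_SHIFT, indexes pages[]. This only works because atomic_pool_init() filled pages[] with one entry per pool page. A quick model with stand-in types:

#include <stdio.h>

#define PAGE_SHIFT 12
#define NR_PAGES 4

int main(void)
{
        int pages[NR_PAGES] = { 10, 11, 12, 13 };   /* stand-ins for struct page * */
        char pool[NR_PAGES << PAGE_SHIFT];          /* stand-in for pool->vaddr */
        char *addr = pool + 2 * (1 << PAGE_SHIFT) + 100;  /* somewhere in page 2 */

        int offs = (int)((addr - pool) >> PAGE_SHIFT);
        printf("offs = %d, page = %d\n", offs, pages[offs]);  /* offs = 2 */
        return 0;
}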
@@ -1103,6 +1163,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
         return NULL;
 }

+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+                                  dma_addr_t *handle)
+{
+        struct page *page;
+        void *addr;
+
+        addr = __alloc_from_pool(size, &page);
+        if (!addr)
+                return NULL;
+
+        *handle = __iommu_create_mapping(dev, &page, size);
+        if (*handle == DMA_ERROR_CODE)
+                goto err_mapping;
+
+        return addr;
+
+err_mapping:
+        __free_from_pool(addr, size);
+        return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+                                dma_addr_t handle, size_t size)
+{
+        __iommu_remove_mapping(dev, handle, size);
+        __free_from_pool(page_address(pages[0]), size);
+}
+
 static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
                 dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
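__iommu_alloc_atomic() is a two-step acquisition, pool allocation then IOMMU mapping, with the usual kernel unwind idiom: if the second step fails, a goto releases the first in reverse order. A self-contained sketch of that shape (the resource types and helpers here are invented):

#include <stdio.h>
#include <stdlib.h>

static void *pool_alloc(void)     { return malloc(64); }
static void  pool_free(void *p)   { free(p); }
static int   map_create(void *p)  { (void)p; return -1; /* force a failure */ }

static void *alloc_atomic(void)
{
        void *addr = pool_alloc();
        if (!addr)
                return NULL;

        if (map_create(addr) < 0)
                goto err_mapping;       /* unwind in reverse order */

        return addr;

err_mapping:
        pool_free(addr);
        return NULL;
}

int main(void)
{
        printf("alloc_atomic() = %p\n", alloc_atomic());  /* NULL: mapping failed */
        return 0;
}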
@@ -1113,6 +1201,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
         *handle = DMA_ERROR_CODE;
         size = PAGE_ALIGN(size);

+        if (gfp & GFP_ATOMIC)
+                return __iommu_alloc_atomic(dev, size, handle);
+
         pages = __iommu_alloc_buffer(dev, size, gfp);
         if (!pages)
                 return NULL;
@@ -1179,6 +1270,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                 return;
         }

+        if (__in_atomic_pool(cpu_addr, size)) {
+                __iommu_free_atomic(dev, pages, handle, size);
+                return;
+        }
+
         if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
                 unmap_kernel_range((unsigned long)cpu_addr, size);
                 vunmap(cpu_addr);
@@ -231,8 +231,6 @@ void __sync_icache_dcache(pte_t pteval)
         struct page *page;
         struct address_space *mapping;

-        if (!pte_present_user(pteval))
-                return;
         if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
                 /* only flush non-aliasing VIPT caches for exec mappings */
                 return;
@@ -324,7 +324,7 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)

         BUG_ON(!arm_memblock_steal_permitted);

-        phys = memblock_alloc(size, align);
+        phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
         memblock_free(phys, size);
         memblock_remove(phys, size);

@@ -247,6 +247,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
         if (!area)
                 return NULL;
         addr = (unsigned long)area->addr;
+        area->phys_addr = __pfn_to_phys(pfn);

 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
         if (DOMAIN_IO == 0 &&
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING   0x40000000

+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING    0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)        ((mt) << 20)
 #define VM_ARM_MTYPE_MASK       (0x1f << 20)
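VM_ARM_EMPTY_MAPPING splits the PMD-gap placeholders off from real static mappings; fill_pmd_gaps() (two hunks below) then matches either flag with a single widened mask test, while code that wants only genuine iotable_init() mappings keeps testing VM_ARM_STATIC_MAPPING alone. The flag test in miniature:

#include <stdio.h>

#define VM_ARM_STATIC_MAPPING 0x40000000
#define VM_ARM_EMPTY_MAPPING  0x20000000

static const char *classify(unsigned long flags)
{
        if (!(flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
                return "dynamic (skipped by fill_pmd_gaps)";
        return flags & VM_ARM_EMPTY_MAPPING ? "gap placeholder" : "static mapping";
}

int main(void)
{
        printf("%s\n", classify(VM_ARM_STATIC_MAPPING));
        printf("%s\n", classify(VM_ARM_EMPTY_MAPPING));
        printf("%s\n", classify(0));
        return 0;
}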
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
         vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
         vm->addr = (void *)addr;
         vm->size = SECTION_SIZE;
-        vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+        vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
         vm->caller = pmd_empty_section_gap;
         vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)

         /* we're still single threaded hence no lock needed here */
         for (vm = vmlist; vm; vm = vm->next) {
-                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+                if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
                         continue;
                 addr = (unsigned long)vm->addr;
                 if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
                  * Check whether this memory bank would partially overlap
                  * the vmalloc area.
                  */
-                if (__va(bank->start + bank->size) > vmalloc_min ||
-                    __va(bank->start + bank->size) < __va(bank->start)) {
+                if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+                    __va(bank->start + bank->size - 1) <= __va(bank->start)) {
                         unsigned long newsize = vmalloc_min - __va(bank->start);
                         printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
                                "to -%.8llx (vmalloc region overlap).\n",
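The meminfo check is an off-by-one fix: bank->start + bank->size is the address one past the bank, and for a bank ending exactly at the top of the 32-bit address space it wraps to 0, so both comparisons give the wrong answer. Testing the bank's last byte (size - 1) with >=/<= avoids the wrap. The demonstration below models the raw 32-bit arithmetic only; the kernel compares __va() translations, but the wrap behaviour is the same:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t start = 0xF0000000u;
        uint32_t size  = 0x10000000u;   /* bank ends at the 4 GiB boundary */

        uint32_t end_past = start + size;       /* wraps to 0 */
        uint32_t end_last = start + size - 1;   /* 0xFFFFFFFF, no wrap */

        printf("start+size   = %#010x (wrapped: %s)\n",
               end_past, end_past < start ? "yes" : "no");
        printf("start+size-1 = %#010x (wrapped: %s)\n",
               end_last, end_last < start ? "yes" : "no");
        return 0;
}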
@@ -38,10 +38,10 @@ ENTRY(v7wbi_flush_user_tlb_range)
         dsb
         mov     r0, r0, lsr #PAGE_SHIFT         @ align address
         mov     r1, r1, lsr #PAGE_SHIFT
-#ifdef CONFIG_ARM_ERRATA_720789
-        mov     r3, #0
-#else
         asid    r3, r3                          @ mask ASID
+#ifdef CONFIG_ARM_ERRATA_720789
+        ALT_SMP(W(mov)  r3, #0  )
+        ALT_UP(W(nop)   )
 #endif
         orr     r0, r3, r0, lsl #PAGE_SHIFT     @ Create initial MVA
         mov     r1, r1, lsl #PAGE_SHIFT