Merge branch 'devel-stable' into for-linus

Conflicts:
	arch/arm/kernel/setup.c
	arch/arm/mach-shmobile/board-kota2.c

Author: Russell King
Date:   2012-01-05 13:24:33 +00:00
433 changed files with 3326 additions and 4081 deletions


@@ -629,6 +629,23 @@ config IO_36
comment "Processor Features"
config ARM_LPAE
bool "Support for the Large Physical Address Extension"
depends on MMU && CPU_V7
help
Say Y if you have an ARMv7 processor supporting the LPAE page
table format and you would like to access memory beyond the
4GB limit. The resulting kernel image will not run on
processors without the LPA extension.
If unsure, say N.
config ARCH_PHYS_ADDR_T_64BIT
def_bool ARM_LPAE
config ARCH_DMA_ADDR_T_64BIT
bool
config ARM_THUMB
bool "Support Thumb user binaries"
depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON
@@ -816,14 +833,23 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
Say Y here to use the Feroceon L2 cache in writethrough mode.
Unless you specifically require this, say N for writeback mode.
config MIGHT_HAVE_CACHE_L2X0
bool
help
This option should be selected by machines which have an L2x0
or PL310 cache controller, but where its use is optional.
The only effect of this option is to make CACHE_L2X0 and
related options available to the user for configuration.
Boards or SoCs which always require the cache controller
support to be present should select CACHE_L2X0 directly
instead of this option, thus preventing the user from
inadvertently configuring a broken kernel.
config CACHE_L2X0
bool "Enable the L2x0 outer cache controller"
depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
REALVIEW_EB_A9MP || ARCH_IMX_V6_V7 || MACH_REALVIEW_PBX || \
ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \
ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX || ARCH_HIGHBANK
default y
bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0
default MIGHT_HAVE_CACHE_L2X0
select OUTER_CACHE
select OUTER_CACHE_SYNC
help


@@ -968,7 +968,7 @@ static int __init alignment_init(void)
ai_usermode = safe_usermode(ai_usermode, false);
}
hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN,
hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
"alignment exception");
/*


@@ -22,6 +22,21 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
#ifdef CONFIG_ARM_LPAE
#define cpu_set_asid(asid) { \
unsigned long ttbl, ttbh; \
asm volatile( \
" mrrc p15, 0, %0, %1, c2 @ read TTBR0\n" \
" mov %1, %2, lsl #(48 - 32) @ set ASID\n" \
" mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" \
: "=&r" (ttbl), "=&r" (ttbh) \
: "r" (asid & ~ASID_MASK)); \
}
#else
#define cpu_set_asid(asid) \
asm(" mcr p15, 0, %0, c13, c0, 1\n" : : "r" (asid))
#endif
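
For illustration only (not part of this commit): a minimal user-space C sketch of the bit layout the LPAE cpu_set_asid variant above depends on, where the 8-bit ASID is written into TTBR0 bits [55:48] instead of CONTEXTIDR. The helper name and the 8-bit ASID width are assumptions made for the sketch.

#include <stdint.h>
#include <stdio.h>

/* Sketch: place an 8-bit ASID into bits [55:48] of a 64-bit TTBR0 value,
 * mirroring the "lsl #(48 - 32)" applied to the TTBR0 high word above. */
static uint64_t ttbr0_with_asid(uint64_t ttbr0, unsigned int asid)
{
	ttbr0 &= ~(0xffULL << 48);              /* clear the old ASID field */
	ttbr0 |= (uint64_t)(asid & 0xff) << 48; /* insert the new ASID      */
	return ttbr0;
}

int main(void)
{
	/* Table base 0x80004000 with ASID 5 -> bits [55:48] become 0x05. */
	printf("%#llx\n", (unsigned long long)ttbr0_with_asid(0x80004000ULL, 5));
	return 0;
}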
/*
* We fork()ed a process, and we need a new context for the child
* to run in. We reserve version 0 for initial tasks so we will
@@ -37,7 +52,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
static void flush_context(void)
{
/* set the reserved ASID before flushing the TLB */
asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
cpu_set_asid(0);
isb();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
@@ -99,7 +114,7 @@ static void reset_context(void *info)
set_mm_context(mm, asid);
/* set the new ASID */
asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
cpu_set_asid(mm->context.id);
isb();
}


@@ -27,19 +27,6 @@
#include "fault.h"
/*
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)
static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}
#ifdef CONFIG_MMU
#ifdef CONFIG_KPROBES
@@ -123,8 +110,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
pte = pte_offset_map(pmd, addr);
printk(", *pte=%08llx", (long long)pte_val(*pte));
#ifndef CONFIG_ARM_LPAE
printk(", *ppte=%08llx",
(long long)pte_val(pte[PTE_HWTABLE_PTRS]));
#endif
pte_unmap(pte);
} while(0);
@@ -461,6 +450,12 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
pmd = pmd_offset(pud, addr);
pmd_k = pmd_offset(pud_k, addr);
#ifdef CONFIG_ARM_LPAE
/*
* Only one hardware entry per PMD with LPAE.
*/
index = 0;
#else
/*
* On ARM one Linux PGD entry contains two hardware entries (see page
* tables layout in pgtable.h). We normally guarantee that we always
@@ -470,6 +465,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
* for the first of pair.
*/
index = (addr >> SECTION_SHIFT) & 1;
#endif
if (pmd_none(pmd_k[index]))
goto bad_area;
@@ -509,55 +505,20 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
return 1;
}
static struct fsr_info {
struct fsr_info {
int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
int sig;
int code;
const char *name;
} fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, 0, "vector exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" }
};
/* FSR definition */
#ifdef CONFIG_ARM_LPAE
#include "fsr-3level.c"
#else
#include "fsr-2level.c"
#endif
void __init
hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
@@ -593,42 +554,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, fsr, 0);
}
static struct fsr_info ifsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
{ do_bad, SIGBUS, 0, "unknown 4" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "unknown 10" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "unknown 20" },
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, 0, "unknown 22" },
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "unknown 24" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
void __init
hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *),
int sig, int code, const char *name)
@@ -661,6 +586,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
arm_notify_die("", regs, &info, ifsr, 0);
}
#ifndef CONFIG_ARM_LPAE
static int __init exceptions_init(void)
{
if (cpu_architecture() >= CPU_ARCH_ARMv6) {
@@ -683,3 +609,4 @@ static int __init exceptions_init(void)
}
arch_initcall(exceptions_init);
#endif


@@ -1,3 +1,28 @@
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
#ifndef __ARCH_ARM_FAULT_H
#define __ARCH_ARM_FAULT_H
/*
* Fault status register encodings. We steal bit 31 for our own purposes.
*/
#define FSR_LNX_PF (1 << 31)
#define FSR_WRITE (1 << 11)
#define FSR_FS4 (1 << 10)
#define FSR_FS3_0 (15)
#define FSR_FS5_0 (0x3f)
#ifdef CONFIG_ARM_LPAE
static inline int fsr_fs(unsigned int fsr)
{
return fsr & FSR_FS5_0;
}
#else
static inline int fsr_fs(unsigned int fsr)
{
return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
}
#endif
void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
unsigned long search_exception_table(unsigned long addr);
#endif /* __ARCH_ARM_FAULT_H */
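
For illustration only (not from this commit): a self-contained restatement of the two fsr_fs() decoders above. With the classic short-descriptor format the fault status is split across FSR bits [3:0] and bit 10, so the two pieces are recombined into a 5-bit index; with LPAE it is a contiguous 6-bit field. The FSR values below are made-up examples.

#include <stdio.h>

#define FSR_FS4		(1 << 10)
#define FSR_FS3_0	(15)
#define FSR_FS5_0	(0x3f)

/* Classic (short-descriptor) FSR: FS[4] sits in bit 10, FS[3:0] in bits 3:0. */
static int fsr_fs_short(unsigned int fsr)
{
	return (fsr & FSR_FS3_0) | ((fsr & FSR_FS4) >> 6);
}

/* LPAE (long-descriptor) FSR: the status is a contiguous 6-bit field. */
static int fsr_fs_long(unsigned int fsr)
{
	return fsr & FSR_FS5_0;
}

int main(void)
{
	/* 0x405 has bit 10 set and FS[3:0] = 5, giving index 21 into fsr_info[]. */
	printf("short: %d, long: %d\n", fsr_fs_short(0x405), fsr_fs_long(0x05));
	return 0;
}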

arch/arm/mm/fsr-2level.c (new file, 78 lines)

@@ -0,0 +1,78 @@
static struct fsr_info fsr_info[] = {
/*
* The following are the standard ARMv3 and ARMv4 aborts. ARMv5
* defines these to be "precise" aborts.
*/
{ do_bad, SIGSEGV, 0, "vector exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGKILL, 0, "terminal exception" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment exception" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGBUS, 0, "external abort on linefetch" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
/*
* The following are "imprecise" aborts, which are signalled by bit
* 10 of the FSR, and may not be recoverable. These are only
* supported if the CPU abort handler supports bit 10.
*/
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "lock abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};
static struct fsr_info ifsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" },
{ do_bad, SIGBUS, 0, "unknown 4" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" },
{ do_bad, SIGBUS, 0, "external abort on non-linefetch" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" },
{ do_bad, SIGBUS, 0, "unknown 10" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" },
{ do_bad, SIGBUS, 0, "external abort on translation" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" },
{ do_bad, SIGBUS, 0, "unknown 16" },
{ do_bad, SIGBUS, 0, "unknown 17" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "unknown 20" },
{ do_bad, SIGBUS, 0, "unknown 21" },
{ do_bad, SIGBUS, 0, "unknown 22" },
{ do_bad, SIGBUS, 0, "unknown 23" },
{ do_bad, SIGBUS, 0, "unknown 24" },
{ do_bad, SIGBUS, 0, "unknown 25" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "unknown 28" },
{ do_bad, SIGBUS, 0, "unknown 29" },
{ do_bad, SIGBUS, 0, "unknown 30" },
{ do_bad, SIGBUS, 0, "unknown 31" },
};

arch/arm/mm/fsr-3level.c (new file, 68 lines)

@@ -0,0 +1,68 @@
static struct fsr_info fsr_info[] = {
{ do_bad, SIGBUS, 0, "unknown 0" },
{ do_bad, SIGBUS, 0, "unknown 1" },
{ do_bad, SIGBUS, 0, "unknown 2" },
{ do_bad, SIGBUS, 0, "unknown 3" },
{ do_bad, SIGBUS, 0, "reserved translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" },
{ do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" },
{ do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" },
{ do_bad, SIGBUS, 0, "reserved access flag fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" },
{ do_bad, SIGBUS, 0, "reserved permission fault" },
{ do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" },
{ do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" },
{ do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" },
{ do_bad, SIGBUS, 0, "synchronous external abort" },
{ do_bad, SIGBUS, 0, "asynchronous external abort" },
{ do_bad, SIGBUS, 0, "unknown 18" },
{ do_bad, SIGBUS, 0, "unknown 19" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error" },
{ do_bad, SIGBUS, 0, "asynchronous parity error" },
{ do_bad, SIGBUS, 0, "unknown 26" },
{ do_bad, SIGBUS, 0, "unknown 27" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk" },
{ do_bad, SIGBUS, 0, "unknown 32" },
{ do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
{ do_bad, SIGBUS, 0, "debug event" },
{ do_bad, SIGBUS, 0, "unknown 35" },
{ do_bad, SIGBUS, 0, "unknown 36" },
{ do_bad, SIGBUS, 0, "unknown 37" },
{ do_bad, SIGBUS, 0, "unknown 38" },
{ do_bad, SIGBUS, 0, "unknown 39" },
{ do_bad, SIGBUS, 0, "unknown 40" },
{ do_bad, SIGBUS, 0, "unknown 41" },
{ do_bad, SIGBUS, 0, "unknown 42" },
{ do_bad, SIGBUS, 0, "unknown 43" },
{ do_bad, SIGBUS, 0, "unknown 44" },
{ do_bad, SIGBUS, 0, "unknown 45" },
{ do_bad, SIGBUS, 0, "unknown 46" },
{ do_bad, SIGBUS, 0, "unknown 47" },
{ do_bad, SIGBUS, 0, "unknown 48" },
{ do_bad, SIGBUS, 0, "unknown 49" },
{ do_bad, SIGBUS, 0, "unknown 50" },
{ do_bad, SIGBUS, 0, "unknown 51" },
{ do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" },
{ do_bad, SIGBUS, 0, "unknown 53" },
{ do_bad, SIGBUS, 0, "unknown 54" },
{ do_bad, SIGBUS, 0, "unknown 55" },
{ do_bad, SIGBUS, 0, "unknown 56" },
{ do_bad, SIGBUS, 0, "unknown 57" },
{ do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" },
{ do_bad, SIGBUS, 0, "unknown 59" },
{ do_bad, SIGBUS, 0, "unknown 60" },
{ do_bad, SIGBUS, 0, "unknown 61" },
{ do_bad, SIGBUS, 0, "unknown 62" },
{ do_bad, SIGBUS, 0, "unknown 63" },
};
#define ifsr_info fsr_info


@@ -1,9 +1,38 @@
#include <linux/kernel.h>
#include <asm/cputype.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
pgd_t *idmap_pgd;
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
{
pmd_t *pmd;
unsigned long next;
if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) {
pmd = pmd_alloc_one(&init_mm, addr);
if (!pmd) {
pr_warning("Failed to allocate identity pmd.\n");
return;
}
pud_populate(&init_mm, pud, pmd);
pmd += pmd_index(addr);
} else
pmd = pmd_offset(pud, addr);
do {
next = pmd_addr_end(addr, end);
*pmd = __pmd((addr & PMD_MASK) | prot);
flush_pmd_entry(pmd);
} while (pmd++, addr = next, addr != end);
}
#else /* !CONFIG_ARM_LPAE */
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
unsigned long prot)
{
@@ -15,6 +44,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
pmd[1] = __pmd(addr);
flush_pmd_entry(pmd);
}
#endif /* CONFIG_ARM_LPAE */
static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
unsigned long prot)
@@ -28,11 +58,11 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long prot, next;
prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF;
if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
prot |= PMD_BIT4;
@@ -43,48 +73,41 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end)
} while (pgd++, addr = next, addr != end);
}
#ifdef CONFIG_SMP
static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end)
extern char __idmap_text_start[], __idmap_text_end[];
static int __init init_static_idmap(void)
{
pmd_t *pmd = pmd_offset(pud, addr);
pmd_clear(pmd);
phys_addr_t idmap_start, idmap_end;
idmap_pgd = pgd_alloc(&init_mm);
if (!idmap_pgd)
return -ENOMEM;
/* Add an identity mapping for the physical address of the section. */
idmap_start = virt_to_phys((void *)__idmap_text_start);
idmap_end = virt_to_phys((void *)__idmap_text_end);
pr_info("Setting up static identity map for 0x%llx - 0x%llx\n",
(long long)idmap_start, (long long)idmap_end);
identity_mapping_add(idmap_pgd, idmap_start, idmap_end);
return 0;
}
static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end)
{
pud_t *pud = pud_offset(pgd, addr);
unsigned long next;
do {
next = pud_addr_end(addr, end);
idmap_del_pmd(pud, addr, next);
} while (pud++, addr = next, addr != end);
}
void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end)
{
unsigned long next;
pgd += pgd_index(addr);
do {
next = pgd_addr_end(addr, end);
idmap_del_pud(pgd, addr, next);
} while (pgd++, addr = next, addr != end);
}
#endif
early_initcall(init_static_idmap);
/*
* In order to soft-boot, we need to insert a 1:1 mapping in place of
* the user-mode pages. This will then ensure that we have predictable
* results when turning the mmu off
* In order to soft-boot, we need to switch to a 1:1 mapping for the
* cpu_reset functions. This will then ensure that we have predictable
* results when turning off the mmu.
*/
void setup_mm_for_reboot(char mode)
void setup_mm_for_reboot(void)
{
/*
* We need to access to user-mode page tables here. For kernel threads
* we don't have any user-mode mappings so we use the context that we
* "borrowed".
*/
identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
/* Clean and invalidate L1. */
flush_cache_all();
/* Switch to the identity mapping. */
cpu_switch_mm(idmap_pgd, &init_mm);
/* Flush the TLB. */
local_flush_tlb_all();
}


@@ -20,7 +20,6 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <asm/mach-types.h>
#include <asm/prom.h>
@@ -134,30 +133,18 @@ void show_mem(unsigned int filter)
}
static void __init find_limits(unsigned long *min, unsigned long *max_low,
unsigned long *max_high)
unsigned long *max_high)
{
struct meminfo *mi = &meminfo;
int i;
*min = -1UL;
*max_low = *max_high = 0;
for_each_bank (i, mi) {
struct membank *bank = &mi->bank[i];
unsigned long start, end;
start = bank_pfn_start(bank);
end = bank_pfn_end(bank);
if (*min > start)
*min = start;
if (*max_high < end)
*max_high = end;
if (bank->highmem)
continue;
if (*max_low < end)
*max_low = end;
}
/* This assumes the meminfo array is properly sorted */
*min = bank_pfn_start(&mi->bank[0]);
for_each_bank (i, mi)
if (mi->bank[i].highmem)
break;
*max_low = bank_pfn_end(&mi->bank[i - 1]);
*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
static void __init arm_bootmem_init(unsigned long start_pfn,
@@ -319,19 +306,10 @@ static void arm_memory_present(void)
}
#endif
static int __init meminfo_cmp(const void *_a, const void *_b)
{
const struct membank *a = _a, *b = _b;
long cmp = bank_pfn_start(a) - bank_pfn_start(b);
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
memblock_init();
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -403,8 +381,6 @@ void __init bootmem_init(void)
*/
arm_bootmem_free(min, max_low, max_high);
high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;
/*
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we


@@ -36,12 +36,6 @@
#include <asm/mach/map.h>
#include "mm.h"
/*
* Used by ioremap() and iounmap() code to mark (super)section-mapped
* I/O regions in vm_struct->flags field.
*/
#define VM_ARM_SECTION_MAPPING 0x80000000
int ioremap_page(unsigned long virt, unsigned long phys,
const struct mem_type *mtype)
{
@@ -64,7 +58,7 @@ void __check_kvm_seq(struct mm_struct *mm)
} while (seq != init_mm.context.kvm_seq);
}
#ifndef CONFIG_SMP
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* Section support is unsafe on SMP - If you iounmap and ioremap a region,
* the other CPUs will not see this change until their next context switch.
@@ -79,13 +73,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
{
unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
pgd_t *pgd;
pud_t *pud;
pmd_t *pmdp;
flush_cache_vunmap(addr, end);
pgd = pgd_offset_k(addr);
pud = pud_offset(pgd, addr);
pmdp = pmd_offset(pud, addr);
do {
pmd_t pmd, *pmdp = pmd_offset(pgd, addr);
pmd_t pmd = *pmdp;
pmd = *pmdp;
if (!pmd_none(pmd)) {
/*
* Clear the PMD from the page table, and
@@ -104,8 +101,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size)
pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
}
addr += PGDIR_SIZE;
pgd++;
addr += PMD_SIZE;
pmdp += 2;
} while (addr < end);
/*
@@ -124,6 +121,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
{
unsigned long addr = virt, end = virt + size;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
/*
* Remove and free any PTE-based mapping, and
@@ -132,17 +131,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn,
unmap_area_sections(virt, size);
pgd = pgd_offset_k(addr);
pud = pud_offset(pgd, addr);
pmd = pmd_offset(pud, addr);
do {
pmd_t *pmd = pmd_offset(pgd, addr);
pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
pfn += SZ_1M >> PAGE_SHIFT;
flush_pmd_entry(pmd);
addr += PGDIR_SIZE;
pgd++;
addr += PMD_SIZE;
pmd += 2;
} while (addr < end);
return 0;
@@ -154,6 +153,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
{
unsigned long addr = virt, end = virt + size;
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
/*
* Remove and free any PTE-based mapping, and
@@ -162,6 +163,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
unmap_area_sections(virt, size);
pgd = pgd_offset_k(virt);
pud = pud_offset(pgd, addr);
pmd = pmd_offset(pud, addr);
do {
unsigned long super_pmd_val, i;
@@ -170,14 +173,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn,
super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;
for (i = 0; i < 8; i++) {
pmd_t *pmd = pmd_offset(pgd, addr);
pmd[0] = __pmd(super_pmd_val);
pmd[1] = __pmd(super_pmd_val);
flush_pmd_entry(pmd);
addr += PGDIR_SIZE;
pgd++;
addr += PMD_SIZE;
pmd += 2;
}
pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
@@ -195,17 +196,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
unsigned long addr;
struct vm_struct * area;
#ifndef CONFIG_ARM_LPAE
/*
* High mappings must be supersection aligned
*/
if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
return NULL;
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
if (WARN_ON(pfn_valid(pfn)))
return NULL;
#endif
type = get_mem_type(mtype);
if (!type)
@@ -216,12 +213,40 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
*/
size = PAGE_ALIGN(offset + size);
/*
* Try to reuse one of the static mappings whenever possible.
*/
read_lock(&vmlist_lock);
for (area = vmlist; area; area = area->next) {
if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
break;
if (!(area->flags & VM_ARM_STATIC_MAPPING))
continue;
if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
continue;
if (__phys_to_pfn(area->phys_addr) > pfn ||
__pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
continue;
/* we can drop the lock here as we know *area is static */
read_unlock(&vmlist_lock);
addr = (unsigned long)area->addr;
addr += __pfn_to_phys(pfn) - area->phys_addr;
return (void __iomem *) (offset + addr);
}
read_unlock(&vmlist_lock);
/*
* Don't allow RAM to be mapped - this causes problems with ARMv6+
*/
if (WARN_ON(pfn_valid(pfn)))
return NULL;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (!area)
return NULL;
addr = (unsigned long)area->addr;
#ifndef CONFIG_SMP
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
if (DOMAIN_IO == 0 &&
(((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
cpu_is_xsc3()) && pfn >= 0x100000 &&
@@ -313,28 +338,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
struct vm_struct **p, *tmp;
struct vm_struct *vm;
/*
* If this is a section based mapping we need to handle it
* specially as the VM subsystem does not know how to handle
* such a beast. We need the lock here b/c we need to clear
* all the mappings before the area can be reclaimed
* by someone else.
*/
write_lock(&vmlist_lock);
for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
if (tmp->flags & VM_ARM_SECTION_MAPPING) {
unmap_area_sections((unsigned long)tmp->addr,
tmp->size);
}
read_lock(&vmlist_lock);
for (vm = vmlist; vm; vm = vm->next) {
if (vm->addr > addr)
break;
if (!(vm->flags & VM_IOREMAP))
continue;
/* If this is a static mapping we must leave it alone */
if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
(vm->addr <= addr) && (vm->addr + vm->size > addr)) {
read_unlock(&vmlist_lock);
return;
}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
* If this is a section based mapping we need to handle it
* specially as the VM subsystem does not know how to handle
* such a beast.
*/
if ((vm->addr == addr) &&
(vm->flags & VM_ARM_SECTION_MAPPING)) {
unmap_area_sections((unsigned long)vm->addr, vm->size);
break;
}
}
write_unlock(&vmlist_lock);
#endif
}
read_unlock(&vmlist_lock);
vunmap(addr);
}


@@ -21,6 +21,20 @@ const struct mem_type *get_mem_type(unsigned int type);
extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
/*
* ARM specific vm_struct->flags bits.
*/
/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
#define VM_ARM_SECTION_MAPPING 0x80000000
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
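
For illustration only (not part of this commit): a small sketch of the flag encoding defined above, where the memory type index is packed into bits [24:20] of vm_struct->flags next to the ARM static-mapping bit. The memory type value used below is a made-up example.

#include <stdio.h>

#define VM_ARM_STATIC_MAPPING	0x40000000
#define VM_ARM_MTYPE(mt)	((mt) << 20)
#define VM_ARM_MTYPE_MASK	(0x1f << 20)

int main(void)
{
	unsigned int mt = 3;	/* hypothetical memory type index */
	unsigned long flags = VM_ARM_STATIC_MAPPING | VM_ARM_MTYPE(mt);

	/* ioremap-style check: recover the type stored with the static mapping. */
	printf("stored mtype = %lu\n", (flags & VM_ARM_MTYPE_MASK) >> 20);
	return 0;
}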
#endif
#ifdef CONFIG_ZONE_DMA


@@ -15,6 +15,7 @@
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
#include <asm/cputype.h>
#include <asm/sections.h>
@@ -150,6 +151,7 @@ static int __init early_nowrite(char *__unused)
}
early_param("nowb", early_nowrite);
#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
if (memcmp(p, "on", 2) == 0)
@@ -159,6 +161,7 @@ static int __init early_ecc(char *p)
return 0;
}
early_param("ecc", early_ecc);
#endif
static int __init noalign_setup(char *__unused)
{
@@ -228,10 +231,12 @@ static struct mem_type mem_types[] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
.domain = DOMAIN_KERNEL,
},
#ifndef CONFIG_ARM_LPAE
[MT_MINICLEAN] = {
.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
.domain = DOMAIN_KERNEL,
},
#endif
[MT_LOW_VECTORS] = {
.prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
L_PTE_RDONLY,
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
* ARMv6 and above have extended page tables.
*/
if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
/*
* Mark cache clean areas and XIP ROM read only
* from SVC mode and no access from userspace.
@@ -436,6 +442,7 @@ static void __init build_mem_type_table(void)
mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif
if (is_smp()) {
/*
@@ -474,6 +481,18 @@ static void __init build_mem_type_table(void)
mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
}
#ifdef CONFIG_ARM_LPAE
/*
* Do not generate access flag faults for the kernel mappings.
*/
for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
mem_types[i].prot_pte |= PTE_EXT_AF;
mem_types[i].prot_sect |= PMD_SECT_AF;
}
kern_pgprot |= PTE_EXT_AF;
vecs_pgprot |= PTE_EXT_AF;
#endif
for (i = 0; i < 16; i++) {
unsigned long v = pgprot_val(protection_map[i]);
protection_map[i] = __pgprot(v | user_pgprot);
@@ -529,13 +548,18 @@ EXPORT_SYMBOL(phys_mem_access_prot);
#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
static void __init *early_alloc(unsigned long sz)
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
void *ptr = __va(memblock_alloc(sz, sz));
void *ptr = __va(memblock_alloc(sz, align));
memset(ptr, 0, sz);
return ptr;
}
static void __init *early_alloc(unsigned long sz)
{
return early_alloc_aligned(sz, sz);
}
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
if (pmd_none(*pmd)) {
@@ -572,8 +596,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
if (((addr | end | phys) & ~SECTION_MASK) == 0) {
pmd_t *p = pmd;
#ifndef CONFIG_ARM_LPAE
if (addr & SECTION_SIZE)
pmd++;
#endif
do {
*pmd = __pmd(phys | type->prot_sect);
@@ -603,6 +629,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
} while (pud++, addr = next, addr != end);
}
#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
const struct mem_type *type)
{
@@ -662,6 +689,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
} while (addr != end);
}
#endif /* !CONFIG_ARM_LPAE */
/*
* Create the page directory entries and any necessary
@@ -685,14 +713,16 @@ static void __init create_mapping(struct map_desc *md)
}
if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
md->virtual >= PAGE_OFFSET &&
(md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
printk(KERN_WARNING "BUG: mapping for 0x%08llx"
" at 0x%08lx overlaps vmalloc space\n",
" at 0x%08lx out of vmalloc space\n",
(long long)__pfn_to_phys((u64)md->pfn), md->virtual);
}
type = &mem_types[md->type];
#ifndef CONFIG_ARM_LPAE
/*
* Catch 36-bit addresses
*/
@@ -700,6 +730,7 @@ static void __init create_mapping(struct map_desc *md)
create_36bit_mapping(md, type);
return;
}
#endif
addr = md->virtual & PAGE_MASK;
phys = __pfn_to_phys(md->pfn);
@@ -729,18 +760,33 @@ static void __init create_mapping(struct map_desc *md)
*/
void __init iotable_init(struct map_desc *io_desc, int nr)
{
int i;
struct map_desc *md;
struct vm_struct *vm;
for (i = 0; i < nr; i++)
create_mapping(io_desc + i);
if (!nr)
return;
vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
for (md = io_desc; nr; md++, nr--) {
create_mapping(md);
vm->addr = (void *)(md->virtual & PAGE_MASK);
vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
vm->phys_addr = __pfn_to_phys(md->pfn);
vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
vm->flags |= VM_ARM_MTYPE(md->type);
vm->caller = iotable_init;
vm_area_add_early(vm++);
}
}
static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
static void * __initdata vmalloc_min =
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the vmalloc
* area - the default is 128m.
* area - the default is 240m.
*/
static int __init early_vmalloc(char *arg)
{
@@ -775,6 +821,9 @@ void __init sanity_check_meminfo(void)
struct membank *bank = &meminfo.bank[j];
*bank = meminfo.bank[i];
if (bank->start > ULONG_MAX)
highmem = 1;
#ifdef CONFIG_HIGHMEM
if (__va(bank->start) >= vmalloc_min ||
__va(bank->start) < (void *)PAGE_OFFSET)
@@ -786,7 +835,7 @@ void __init sanity_check_meminfo(void)
* Split those memory banks which are partially overlapping
* the vmalloc area greatly simplifying things later.
*/
if (__va(bank->start) < vmalloc_min &&
if (!highmem && __va(bank->start) < vmalloc_min &&
bank->size > vmalloc_min - __va(bank->start)) {
if (meminfo.nr_banks >= NR_BANKS) {
printk(KERN_CRIT "NR_BANKS too low, "
@@ -806,6 +855,17 @@ void __init sanity_check_meminfo(void)
#else
bank->highmem = highmem;
/*
* Highmem banks not allowed with !CONFIG_HIGHMEM.
*/
if (highmem) {
printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
"(!CONFIG_HIGHMEM).\n",
(unsigned long long)bank->start,
(unsigned long long)bank->start + bank->size - 1);
continue;
}
/*
* Check whether this memory bank would entirely overlap
* the vmalloc area.
@@ -860,6 +920,7 @@ void __init sanity_check_meminfo(void)
}
#endif
meminfo.nr_banks = j;
high_memory = __va(lowmem_limit - 1) + 1;
memblock_set_current_limit(lowmem_limit);
}
@@ -890,14 +951,20 @@ static inline void prepare_page_table(void)
/*
* Clear out all the kernel space mappings, except for the first
* memory bank, up to the end of the vmalloc region.
* memory bank, up to the vmalloc region.
*/
for (addr = __phys_to_virt(end);
addr < VMALLOC_END; addr += PMD_SIZE)
addr < VMALLOC_START; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
}
#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \
PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
#endif
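
For illustration only (not from this commit): the arithmetic behind the two SWAPPER_PG_DIR_SIZE values above, assuming the usual ARM table geometry (a 4-entry PGD and 512-entry PMDs under LPAE, 2048 eight-byte entries in the classic 2-level layout) and a 4 KiB page size.

#include <stdio.h>

#define PAGE_SIZE		4096UL
#define LPAE_PTRS_PER_PGD	4UL
#define LPAE_PTRS_PER_PMD	512UL
#define CLASSIC_PTRS_PER_PGD	2048UL	/* each pgd_t covers a pair of PMD entries */

int main(void)
{
	unsigned long lpae    = PAGE_SIZE + LPAE_PTRS_PER_PGD * LPAE_PTRS_PER_PMD * 8UL;
	unsigned long classic = CLASSIC_PTRS_PER_PGD * 8UL;

	printf("LPAE reserve:    %lu bytes (20 KiB: pgd page + 16 KiB of PMDs)\n", lpae);
	printf("classic reserve: %lu bytes (16 KiB)\n", classic);
	return 0;
}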
/*
* Reserve the special regions of memory
@@ -920,8 +987,8 @@ void __init arm_mm_memblock_reserve(void)
}
/*
* Set up device the mappings. Since we clear out the page tables for all
* mappings above VMALLOC_END, we will remove any debug device mappings.
* Set up the device mappings. Since we clear out the page tables for all
* mappings above VMALLOC_START, we will remove any debug device mappings.
* This means you have to be careful how you debug this function, or any
* called function. This means you can't use any function or debugging
* method which may touch any device, otherwise the kernel _will_ crash.
@@ -936,7 +1003,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
vectors_page = early_alloc(PAGE_SIZE);
for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*


@@ -29,6 +29,8 @@ void __init arm_mm_memblock_reserve(void)
void __init sanity_check_meminfo(void)
{
phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]);
high_memory = __va(end - 1) + 1;
}
/*
@@ -43,7 +45,7 @@ void __init paging_init(struct machine_desc *mdesc)
/*
* We don't need to do anything here for nommu machines.
*/
void setup_mm_for_reboot(char mode)
void setup_mm_for_reboot(void)
{
}


@@ -10,6 +10,7 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
@@ -17,6 +18,14 @@
#include "mm.h"
#ifdef CONFIG_ARM_LPAE
#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL)
#define __pgd_free(pgd) kfree(pgd)
#else
#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2)
#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2)
#endif
/*
* need to get a 16k page for level 1
*/
@@ -27,7 +36,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
pmd_t *new_pmd, *init_pmd;
pte_t *new_pte, *init_pte;
new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
new_pgd = __pgd_alloc();
if (!new_pgd)
goto no_pgd;
@@ -42,10 +51,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
#ifdef CONFIG_ARM_LPAE
/*
* Allocate PMD table for modules and pkmap mappings.
*/
new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
MODULES_VADDR);
if (!new_pud)
goto no_pud;
new_pmd = pmd_alloc(mm, new_pud, 0);
if (!new_pmd)
goto no_pmd;
#endif
if (!vectors_high()) {
/*
* On ARM, first page must always be allocated since it
* contains the machine vectors.
* contains the machine vectors. The vectors are always high
* with LPAE.
*/
new_pud = pud_alloc(mm, new_pgd, 0);
if (!new_pud)
@@ -74,7 +98,7 @@ no_pte:
no_pmd:
pud_free(mm, new_pud);
no_pud:
free_pages((unsigned long)new_pgd, 2);
__pgd_free(new_pgd);
no_pgd:
return NULL;
}
@@ -111,5 +135,24 @@ no_pud:
pgd_clear(pgd);
pud_free(mm, pud);
no_pgd:
free_pages((unsigned long) pgd_base, 2);
#ifdef CONFIG_ARM_LPAE
/*
* Free modules/pkmap or identity pmd tables.
*/
for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) {
if (pgd_none_or_clear_bad(pgd))
continue;
if (pgd_val(*pgd) & L_PGD_SWAPPER)
continue;
pud = pud_offset(pgd, 0);
if (pud_none_or_clear_bad(pud))
continue;
pmd = pmd_offset(pud, 0);
pud_clear(pud);
pmd_free(mm, pmd);
pgd_clear(pgd);
pud_free(mm, pud);
}
#endif
__pgd_free(pgd_base);
}


@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1020_reset)
.popsection
/*
* cpu_arm1020_do_idle()


@@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1020e_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1020e_reset)
.popsection
/*
* cpu_arm1020e_do_idle()


@@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1022_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1022_reset)
.popsection
/*
* cpu_arm1022_do_idle()


@@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm1026_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm1026_reset)
.popsection
/*
* cpu_arm1026_do_idle()


@@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm6_reset)
ENTRY(cpu_arm7_reset)
mov r1, #0
@@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset)
mov r1, #0x30
mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc
mov pc, r0
ENDPROC(cpu_arm6_reset)
ENDPROC(cpu_arm7_reset)
.popsection
__CPUINIT


@@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm720_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate cache
@@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset)
bic ip, ip, #0x2100 @ ..v....s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm720_reset)
.popsection
__CPUINIT


@@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm740_reset)
mov ip, #0
mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
@@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset)
bic ip, ip, #0x0000000c @ ............wc..
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm740_reset)
.popsection
__CPUINIT


@@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin)
* Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm7tdmi_reset)
mov pc, r0
ENDPROC(cpu_arm7tdmi_reset)
.popsection
__CPUINIT


@@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm920_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm920_reset)
.popsection
/*
* cpu_arm920_do_idle()


@@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm922_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm922_reset)
.popsection
/*
* cpu_arm922_do_idle()


@@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm925_reset)
/* Send software reset to MPU and DSP */
mov ip, #0xff000000
@@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset)
orr ip, ip, #0x0000ce00
mov r4, #1
strh r4, [ip, #0x10]
ENDPROC(cpu_arm925_reset)
.popsection
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches


@@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm926_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm926_reset)
.popsection
/*
* cpu_arm926_do_idle()


@@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm940_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm940_reset)
.popsection
/*
* cpu_arm940_do_idle()


@@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin)
* Params : r0 = address to jump to
* Notes : This sets up everything for a reset
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm946_reset)
mov ip, #0
mcr p15, 0, ip, c7, c5, 0 @ flush I cache
@@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset)
bic ip, ip, #0x00001000 @ i-cache
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_arm946_reset)
.popsection
/*
* cpu_arm946_do_idle()


@@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin)
* Params : loc(r0) address to jump to
* Purpose : Sets up everything for a reset and jump to the location for soft reset.
*/
.pushsection .idmap.text, "ax"
ENTRY(cpu_arm9tdmi_reset)
mov pc, r0
ENDPROC(cpu_arm9tdmi_reset)
.popsection
__CPUINIT


@@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin)
* loc: location to jump to for soft reset
*/
.align 4
.pushsection .idmap.text, "ax"
ENTRY(cpu_fa526_reset)
/* TODO: Use CP8 if possible... */
mov ip, #0
@@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset)
nop
nop
mov pc, r0
ENDPROC(cpu_fa526_reset)
.popsection
/*
* cpu_fa526_do_idle()


@@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_feroceon_reset)
.popsection
/*
* cpu_feroceon_do_idle()


@@ -91,8 +91,9 @@
#if L_PTE_SHARED != PTE_EXT_SHARED
#error PTE shared bit mismatch
#endif
#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#if !defined (CONFIG_ARM_LPAE) && \
(L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\
L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
#error Invalid Linux PTE bit settings
#endif
#endif /* CONFIG_MMU */


@@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin)
* (same as arm926)
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_mohawk_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_mohawk_reset)
.popsection
/*
* cpu_mohawk_do_idle()


@@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_sa110_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_sa110_reset)
.popsection
/*
* cpu_sa110_do_idle(type)


@@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_sa1100_reset)
mov ip, #0
mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches
@@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset)
bic ip, ip, #0x1100 @ ...i...s........
mcr p15, 0, ip, c1, c0, 0 @ ctrl register
mov pc, r0
ENDPROC(cpu_sa1100_reset)
.popsection
/*
* cpu_sa1100_do_idle(type)


@@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin)
* - loc - location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v6_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
@@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset)
mov r1, #0
mcr p15, 0, r1, c7, c5, 4 @ ISB
mov pc, r0
ENDPROC(cpu_v6_reset)
.popsection
/*
* cpu_v6_do_idle()


@@ -0,0 +1,171 @@
/*
* arch/arm/mm/proc-v7-2level.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define TTB_S (1 << 1)
#define TTB_RGN_NC (0 << 3)
#define TTB_RGN_OC_WBWA (1 << 3)
#define TTB_RGN_OC_WT (2 << 3)
#define TTB_RGN_OC_WB (3 << 3)
#define TTB_NOS (1 << 5)
#define TTB_IRGN_NC ((0 << 0) | (0 << 6))
#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
#define PMD_FLAGS_UP PMD_SECT_WB
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
/*
* cpu_v7_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys
*
* - pgd_phys - physical address of new TTB
*
* It is assumed that:
* - we are not using split page tables
*/
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
isb
1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
isb
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
*
* Set a level 2 translation table entry.
*
* - ptep - pointer to level 2 translation table entry
* (hardware version is stored at +2048 bytes)
* - pte - PTE value to store
* - ext - value for extended PTE bits
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
str r1, [r0] @ linux version
bic r3, r1, #0x000003f0
bic r3, r3, #PTE_TYPE_MASK
orr r3, r3, r2
orr r3, r3, #PTE_EXT_AP0 | 2
tst r1, #1 << 4
orrne r3, r3, #PTE_EXT_TEX(1)
eor r1, r1, #L_PTE_DIRTY
tst r1, #L_PTE_RDONLY | L_PTE_DIRTY
orrne r3, r3, #PTE_EXT_APX
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
#ifdef CONFIG_CPU_USE_DOMAINS
@ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
#endif
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_PRESENT
moveq r3, #0
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
/*
* Memory region attributes with SCTLR.TRE=1
*
* n = TEX[0],C,B
* TR = PRRR[2n+1:2n] - memory type
* IR = NMRR[2n+1:2n] - inner cacheable property
* OR = NMRR[2n+17:2n+16] - outer cacheable property
*
* n TR IR OR
* UNCACHED 000 00
* BUFFERABLE 001 10 00 00
* WRITETHROUGH 010 10 10 10
* WRITEBACK 011 10 11 11
* reserved 110
* WRITEALLOC 111 10 01 01
* DEV_SHARED 100 01
* DEV_NONSHARED 100 01
* DEV_WC 001 10
* DEV_CACHED 011 10
*
* Other attributes:
*
* DS0 = PRRR[16] = 0 - device shareable property
* DS1 = PRRR[17] = 1 - device shareable property
* NS0 = PRRR[18] = 0 - normal shareable property
* NS1 = PRRR[19] = 1 - normal shareable property
* NOS = PRRR[24+n] = 1 - not outer shareable
*/
.equ PRRR, 0xff0a81a8
.equ NMRR, 0x40e040e0
/*
* Macro for setting up the TTBRx and TTBCR registers.
* - \ttb0 and \ttb1 updated with the corresponding flags.
*/
.macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
mcr p15, 0, \zero, c2, c0, 2 @ TTB control register
ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP)
ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP)
ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP)
ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP)
mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1
.endm
__CPUINIT
/* AT
* TFR EV X F I D LR S
* .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
* rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 1 0 110 0011 1100 .111 1101 < we want
*/
.align 2
.type v7_crval, #object
v7_crval:
crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
.previous


@@ -0,0 +1,150 @@
/*
* arch/arm/mm/proc-v7-3level.S
*
* Copyright (C) 2001 Deep Blue Solutions Ltd.
* Copyright (C) 2011 ARM Ltd.
* Author: Catalin Marinas <catalin.marinas@arm.com>
* based on arch/arm/mm/proc-v7-2level.S
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define TTB_IRGN_NC (0 << 8)
#define TTB_IRGN_WBWA (1 << 8)
#define TTB_IRGN_WT (2 << 8)
#define TTB_IRGN_WB (3 << 8)
#define TTB_RGN_NC (0 << 10)
#define TTB_RGN_OC_WBWA (1 << 10)
#define TTB_RGN_OC_WT (2 << 10)
#define TTB_RGN_OC_WB (3 << 10)
#define TTB_S (3 << 12)
#define TTB_EAE (1 << 31)
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB)
#define PMD_FLAGS_UP (PMD_SECT_WB)
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA)
#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S)
/*
* cpu_v7_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys (physical address of
* the new TTB).
*/
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
and r3, r1, #0xff
mov r3, r3, lsl #(48 - 32) @ ASID
mcrr p15, 0, r0, r3, c2 @ set TTB 0
isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
*
* Set a level 3 translation table entry.
* - ptep - pointer to level 3 translation table entry
* - pte - PTE value to store (64-bit in r2 and r3)
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
tst r2, #L_PTE_PRESENT
beq 1f
tst r3, #1 << (55 - 32) @ L_PTE_DIRTY
orreq r2, #L_PTE_RDONLY
1: strd r2, r3, [r0]
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
/*
* Memory region attributes for LPAE (defined in pgtable-3level.h):
*
* n = AttrIndx[2:0]
*
* n MAIR
* UNCACHED 000 00000000
* BUFFERABLE 001 01000100
* DEV_WC 001 01000100
* WRITETHROUGH 010 10101010
* WRITEBACK 011 11101110
* DEV_CACHED 011 11101110
* DEV_SHARED 100 00000100
* DEV_NONSHARED 100 00000100
* unused 101
* unused 110
* WRITEALLOC 111 11111111
*/
.equ PRRR, 0xeeaa4400 @ MAIR0
.equ NMRR, 0xff000004 @ MAIR1
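
For illustration only (not from this commit): decoding the MAIR0/MAIR1 values above. Each AttrIndx n selects byte n of the 64-bit concatenation, which reproduces the per-type attribute bytes listed in the table (0x00 uncached, 0x44 bufferable, 0xaa write-through, 0xee write-back, 0x04 device, 0xff write-alloc).

#include <stdint.h>
#include <stdio.h>

/* Extract the 8-bit memory attribute for a given AttrIndx from MAIR0/MAIR1. */
static unsigned int mair_attr(uint32_t mair0, uint32_t mair1, unsigned int n)
{
	uint64_t mair = ((uint64_t)mair1 << 32) | mair0;
	return (mair >> (n * 8)) & 0xff;
}

int main(void)
{
	uint32_t mair0 = 0xeeaa4400, mair1 = 0xff000004;

	for (unsigned int n = 0; n < 8; n++)
		printf("AttrIndx %u -> 0x%02x\n", n, mair_attr(mair0, mair1, n));
	return 0;
}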
/*
* Macro for setting up the TTBRx and TTBCR registers.
* - \ttbr1 updated.
*/
.macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp
ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address
cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below)
mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register
orr \tmp, \tmp, #TTB_EAE
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP)
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP)
ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16)
ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16)
/*
* TTBR0/TTBR1 split (PAGE_OFFSET):
* 0x40000000: T0SZ = 2, T1SZ = 0 (not used)
* 0x80000000: T0SZ = 0, T1SZ = 1
* 0xc0000000: T0SZ = 0, T1SZ = 2
*
* Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise
* booting secondary CPUs would end up using TTBR1 for the identity
* mapping set up in TTBR0.
*/
bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET?
orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ
#if defined CONFIG_VMSPLIT_2G
/* PAGE_OFFSET == 0x80000000, T1SZ == 1 */
add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries
#elif defined CONFIG_VMSPLIT_3G
/* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */
add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd
#endif
/* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */
9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register
mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1
.endm
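T1SZ and the TTBR1 adjustment follow directly from PAGE_OFFSET: T1SZ = (PAGE_OFFSET >> 30) - 1, and TTBR1 is advanced so it points at the part of swapper_pg_dir that maps the kernel region. A sketch recomputing what the macro does for the three VMSPLIT cases (illustrative only, constants taken from the comments above):

#include <stdio.h>
#include <stdint.h>

struct ttb1_setup { unsigned t1sz; unsigned ttbr1_offset; };

/* Mirror of the VMSPLIT handling in v7_ttb_setup, for illustration. */
static struct ttb1_setup ttb1_for(uint32_t page_offset)
{
	struct ttb1_setup s = { 0, 0 };

	s.t1sz = (page_offset >> 30) - 1;      /* goes into TTBCR[18:16] */
	if (s.t1sz == 1)                       /* VMSPLIT_2G: skip two 8-byte L1 entries */
		s.ttbr1_offset = 1 << 4;
	else if (s.t1sz == 2)                  /* VMSPLIT_3G: only L2 used, skip pgd + 3 pmds */
		s.ttbr1_offset = 4096 * (1 + 3);
	return s;                              /* VMSPLIT_1G: T1SZ = 0, no adjustment */
}

int main(void)
{
	uint32_t splits[] = { 0x40000000, 0x80000000, 0xc0000000 };

	for (int i = 0; i < 3; i++) {
		struct ttb1_setup s = ttb1_for(splits[i]);
		printf("PAGE_OFFSET %#x: T1SZ=%u, TTBR1 += %u\n",
		       splits[i], s.t1sz, s.ttbr1_offset);
	}
	return 0;
}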
__CPUINIT
/*
* AT
* TFR EV X F IHD LR S
* .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM
* rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 11 0 110 1 0011 1100 .111 1101 < we want
*/
.align 2
.type v7_crval, #object
v7_crval:
crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c
.previous

View File

@@ -19,24 +19,11 @@
#include "proc-macros.S"
#define TTB_S (1 << 1)
#define TTB_RGN_NC (0 << 3)
#define TTB_RGN_OC_WBWA (1 << 3)
#define TTB_RGN_OC_WT (2 << 3)
#define TTB_RGN_OC_WB (3 << 3)
#define TTB_NOS (1 << 5)
#define TTB_IRGN_NC ((0 << 0) | (0 << 6))
#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6))
#define TTB_IRGN_WT ((1 << 0) | (0 << 6))
#define TTB_IRGN_WB ((1 << 0) | (1 << 6))
/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB
#define PMD_FLAGS_UP PMD_SECT_WB
/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S
#ifdef CONFIG_ARM_LPAE
#include "proc-v7-3level.S"
#else
#include "proc-v7-2level.S"
#endif
ENTRY(cpu_v7_proc_init)
mov pc, lr
@@ -63,6 +50,7 @@ ENDPROC(cpu_v7_proc_fin)
* caches disabled.
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_v7_reset)
mrc p15, 0, r1, c1, c0, 0 @ ctrl register
bic r1, r1, #0x1 @ ...............m
@@ -71,6 +59,7 @@ ENTRY(cpu_v7_reset)
isb
mov pc, r0
ENDPROC(cpu_v7_reset)
.popsection
/*
* cpu_v7_do_idle()
@@ -97,127 +86,12 @@ ENTRY(cpu_v7_dcache_clean_area)
mov pc, lr
ENDPROC(cpu_v7_dcache_clean_area)
/*
* cpu_v7_switch_mm(pgd_phys, tsk)
*
* Set the translation table base pointer to be pgd_phys
*
* - pgd_phys - physical address of new TTB
*
* It is assumed that:
* - we are not using split page tables
*/
ENTRY(cpu_v7_switch_mm)
#ifdef CONFIG_MMU
mov r2, #0
ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id
ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP)
ALT_UP(orr r0, r0, #TTB_FLAGS_UP)
#ifdef CONFIG_ARM_ERRATA_430973
mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
#endif
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID
isb
1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0
isb
#ifdef CONFIG_ARM_ERRATA_754322
dsb
#endif
mcr p15, 0, r1, c13, c0, 1 @ set context ID
isb
#endif
mov pc, lr
ENDPROC(cpu_v7_switch_mm)
/*
* cpu_v7_set_pte_ext(ptep, pte)
*
* Set a level 2 translation table entry.
*
* - ptep - pointer to level 2 translation table entry
* (hardware version is stored at +2048 bytes)
* - pte - PTE value to store
* - ext - value for extended PTE bits
*/
ENTRY(cpu_v7_set_pte_ext)
#ifdef CONFIG_MMU
str r1, [r0] @ linux version
bic r3, r1, #0x000003f0
bic r3, r3, #PTE_TYPE_MASK
orr r3, r3, r2
orr r3, r3, #PTE_EXT_AP0 | 2
tst r1, #1 << 4
orrne r3, r3, #PTE_EXT_TEX(1)
eor r1, r1, #L_PTE_DIRTY
tst r1, #L_PTE_RDONLY | L_PTE_DIRTY
orrne r3, r3, #PTE_EXT_APX
tst r1, #L_PTE_USER
orrne r3, r3, #PTE_EXT_AP1
#ifdef CONFIG_CPU_USE_DOMAINS
@ allow kernel read/write access to read-only user pages
tstne r3, #PTE_EXT_APX
bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0
#endif
tst r1, #L_PTE_XN
orrne r3, r3, #PTE_EXT_XN
tst r1, #L_PTE_YOUNG
tstne r1, #L_PTE_PRESENT
moveq r3, #0
ARM( str r3, [r0, #2048]! )
THUMB( add r0, r0, #2048 )
THUMB( str r3, [r0] )
mcr p15, 0, r0, c7, c10, 1 @ flush_pte
#endif
mov pc, lr
ENDPROC(cpu_v7_set_pte_ext)
string cpu_v7_name, "ARMv7 Processor"
.align
/*
* Memory region attributes with SCTLR.TRE=1
*
* n = TEX[0],C,B
* TR = PRRR[2n+1:2n] - memory type
* IR = NMRR[2n+1:2n] - inner cacheable property
* OR = NMRR[2n+17:2n+16] - outer cacheable property
*
* n TR IR OR
* UNCACHED 000 00
* BUFFERABLE 001 10 00 00
* WRITETHROUGH 010 10 10 10
* WRITEBACK 011 10 11 11
* reserved 110
* WRITEALLOC 111 10 01 01
* DEV_SHARED 100 01
* DEV_NONSHARED 100 01
* DEV_WC 001 10
* DEV_CACHED 011 10
*
* Other attributes:
*
* DS0 = PRRR[16] = 0 - device shareable property
* DS1 = PRRR[17] = 1 - device shareable property
* NS0 = PRRR[18] = 0 - normal shareable property
* NS1 = PRRR[19] = 1 - normal shareable property
* NOS = PRRR[24+n] = 1 - not outer shareable
*/
.equ PRRR, 0xff0a81a8
.equ NMRR, 0x40e040e0
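PRRR and NMRR pack 2-bit fields per memory-type index n; decoding the two constants reproduces the table above. A small C check, for illustration:

#include <assert.h>
#include <stdint.h>

#define PRRR 0xff0a81a8u
#define NMRR 0x40e040e0u

/* Extract the 2-bit fields described in the comment above. */
static unsigned tr(unsigned n)  { return (PRRR >> (2 * n)) & 3; }       /* memory type */
static unsigned ir(unsigned n)  { return (NMRR >> (2 * n)) & 3; }       /* inner cacheability */
static unsigned or_(unsigned n) { return (NMRR >> (2 * n + 16)) & 3; }  /* outer cacheability */

int main(void)
{
	assert(tr(0) == 0);                              /* UNCACHED */
	assert(tr(3) == 2 && ir(3) == 3 && or_(3) == 3); /* WRITEBACK */
	assert(tr(7) == 2 && ir(7) == 1 && or_(7) == 1); /* WRITEALLOC */
	assert(((PRRR >> 24) & 0xff) == 0xff);           /* NOS[n] = 1 for all n */
	return 0;
}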
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 7
.equ cpu_v7_suspend_size, 4 * 8
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r10, lr}
@@ -226,10 +100,11 @@ ENTRY(cpu_v7_do_suspend)
stmia r0!, {r4 - r5}
mrc p15, 0, r6, c3, c0, 0 @ Domain ID
mrc p15, 0, r7, c2, c0, 1 @ TTB 1
mrc p15, 0, r11, c2, c0, 2 @ TTB control register
mrc p15, 0, r8, c1, c0, 0 @ Control register
mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register
mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control
stmia r0, {r6 - r10}
stmia r0, {r6 - r11}
ldmfd sp!, {r4 - r10, pc}
ENDPROC(cpu_v7_do_suspend)
@@ -241,13 +116,15 @@ ENTRY(cpu_v7_do_resume)
ldmia r0!, {r4 - r5}
mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID
mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID
ldmia r0, {r6 - r10}
ldmia r0, {r6 - r11}
mcr p15, 0, r6, c3, c0, 0 @ Domain ID
#ifndef CONFIG_ARM_LPAE
ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP)
ALT_UP(orr r1, r1, #TTB_FLAGS_UP)
#endif
mcr p15, 0, r1, c2, c0, 0 @ TTB 0
mcr p15, 0, r7, c2, c0, 1 @ TTB 1
mcr p15, 0, ip, c2, c0, 2 @ TTB control register
mcr p15, 0, r11, c2, c0, 2 @ TTB control register
mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register
teq r4, r9 @ Is it already set?
mcrne p15, 0, r9, c1, c0, 1 @ No, so write it
@@ -380,12 +257,7 @@ __v7_setup:
dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP)
ALT_UP(orr r4, r4, #TTB_FLAGS_UP)
ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP)
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
ldr r5, =PRRR @ PRRR
ldr r6, =NMRR @ NMRR
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
@@ -407,16 +279,7 @@ __v7_setup:
mov pc, lr @ return to head.S:__ret
ENDPROC(__v7_setup)
/* AT
* TFR EV X F I D LR S
* .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
* rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced
* 1 0 110 0011 1100 .111 1101 < we want
*/
.type v7_crval, #object
v7_crval:
crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
.align 2
__v7_setup_stack:
.space 4 * 11 @ 11 registers
@@ -438,11 +301,11 @@ __v7_setup_stack:
*/
.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_FLAGS_SMP | \mm_mmuflags)
PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_FLAGS_UP | \mm_mmuflags)
.long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | \io_mmuflags
PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags)
.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags
W(b) \initfunc
.long cpu_arch_name
.long cpu_elf_name
@@ -455,6 +318,7 @@ __v7_setup_stack:
.long v7_cache_fns
.endm
#ifndef CONFIG_ARM_LPAE
/*
* ARM Ltd. Cortex A5 processor.
*/
@@ -484,6 +348,7 @@ __v7_ca9mp_proc_info:
.long 0xff0ffff0
__v7_proc __v7_ca9mp_setup
.size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
#endif /* CONFIG_ARM_LPAE */
/*
* ARM Ltd. Cortex A15 processor.

View File

@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
* loc: location to jump to for soft reset
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_xsc3_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I and D TLBs
mov pc, r0
ENDPROC(cpu_xsc3_reset)
.popsection
/*
* cpu_xsc3_do_idle()

View File

@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
* Beware PXA270 erratum E7.
*/
.align 5
.pushsection .idmap.text, "ax"
ENTRY(cpu_xscale_reset)
mov r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
msr cpsr_c, r1 @ reset CPSR
@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
@ already containing those two last instructions to survive.
mcr p15, 0, ip, c8, c7, 0 @ invalidate I & D TLBs
mov pc, r0
ENDPROC(cpu_xscale_reset)
.popsection
/*
* cpu_xscale_do_idle()