Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (99 commits)
  drivers/virt: add missing linux/interrupt.h to fsl_hypervisor.c
  powerpc/85xx: fix mpic configuration in CAMP mode
  powerpc: Copy back TIF flags on return from softirq stack
  powerpc/64: Make server perfmon only built on ppc64 server devices
  powerpc/pseries: Fix hvc_vio.c build due to recent changes
  powerpc: Exporting boot_cpuid_phys
  powerpc: Add CFAR to oops output
  hvc_console: Add kdb support
  powerpc/pseries: Fix hvterm_raw_get_chars to accept < 16 chars, fixing xmon
  powerpc/irq: Quieten irq mapping printks
  powerpc: Enable lockup and hung task detectors in pseries and ppc64 defeconfigs
  powerpc: Add mpt2sas driver to pseries and ppc64 defconfig
  powerpc: Disable IRQs off tracer in ppc64 defconfig
  powerpc: Sync pseries and ppc64 defconfigs
  powerpc/pseries/hvconsole: Fix dropped console output
  hvc_console: Improve tty/console put_chars handling
  powerpc/kdump: Fix timeout in crash_kexec_wait_realmode
  powerpc/mm: Fix output of total_ram.
  powerpc/cpufreq: Add cpufreq driver for Momentum Maple boards
  powerpc: Correct annotations of pmu registration functions
  ...

Fix up trivial Kconfig/Makefile conflicts in arch/powerpc, drivers, and drivers/cpufreq
@@ -186,10 +186,11 @@ void __init MMU_init_hw(void)
 unsigned long __init mmu_mapin_ram(unsigned long top)
 {
 	unsigned long addr;
+	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
 
 	/* Pin in enough TLBs to cover any lowmem not covered by the
 	 * initial 256M mapping established in head_44x.S */
-	for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
 	     addr += PPC_PIN_SIZE) {
 		if (mmu_has_feature(MMU_FTR_TYPE_47x))
 			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
@@ -218,19 +219,25 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
 {
+	u64 size;
+
+#ifndef CONFIG_RELOCATABLE
 	/* We don't currently support the first MEMBLOCK not mapping 0
 	 * physical on those processors
 	 */
 	BUG_ON(first_memblock_base != 0);
+#endif
 
 	/* 44x has a 256M TLB entry pinned at boot */
-	memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+	size = (min_t(u64, first_memblock_size, PPC_PIN_SIZE));
+	memblock_set_current_limit(first_memblock_base + size);
 }
 
 #ifdef CONFIG_SMP
 void __cpuinit mmu_init_secondary(int cpu)
 {
 	unsigned long addr;
+	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
 
 	/* Pin in enough TLBs to cover any lowmem not covered by the
 	 * initial 256M mapping established in head_44x.S
@@ -241,7 +248,7 @@ void __cpuinit mmu_init_secondary(int cpu)
 	 * stack. current (r2) isn't initialized, smp_processor_id()
 	 * will not work, current thread info isn't accessible, ...
 	 */
-	for (addr = PPC_PIN_SIZE; addr < lowmem_end_addr;
+	for (addr = memstart + PPC_PIN_SIZE; addr < lowmem_end_addr;
 	     addr += PPC_PIN_SIZE) {
 		if (mmu_has_feature(MMU_FTR_TYPE_47x))
 			ppc47x_pin_tlb(addr + PAGE_OFFSET, addr);
@@ -191,38 +191,6 @@ void __init *early_get_page(void)
 	return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 }
 
-/* Free up now-unused memory */
-static void free_sec(unsigned long start, unsigned long end, const char *name)
-{
-	unsigned long cnt = 0;
-
-	while (start < end) {
-		ClearPageReserved(virt_to_page(start));
-		init_page_count(virt_to_page(start));
-		free_page(start);
-		cnt++;
-		start += PAGE_SIZE;
-	}
-	if (cnt) {
-		printk(" %ldk %s", cnt << (PAGE_SHIFT - 10), name);
-		totalram_pages += cnt;
-	}
-}
-
-void free_initmem(void)
-{
-#define FREESEC(TYPE) \
-	free_sec((unsigned long)(&__ ## TYPE ## _begin), \
-		 (unsigned long)(&__ ## TYPE ## _end), \
-		 #TYPE);
-
-	printk ("Freeing unused kernel memory:");
-	FREESEC(init);
-	printk("\n");
-	ppc_md.progress = NULL;
-#undef FREESEC
-}
-
 #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 				phys_addr_t first_memblock_size)
@@ -83,22 +83,6 @@ EXPORT_SYMBOL_GPL(memstart_addr);
 phys_addr_t kernstart_addr;
 EXPORT_SYMBOL_GPL(kernstart_addr);
 
-void free_initmem(void)
-{
-	unsigned long addr;
-
-	addr = (unsigned long)__init_begin;
-	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
-		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
-		ClearPageReserved(virt_to_page(addr));
-		init_page_count(virt_to_page(addr));
-		free_page(addr);
-		totalram_pages++;
-	}
-	printk ("Freeing unused kernel memory: %luk freed\n",
-		((unsigned long)__init_end - (unsigned long)__init_begin) >> 10);
-}
-
 static void pgd_ctor(void *addr)
 {
 	memset(addr, 0, PGD_TABLE_SIZE);
@@ -249,7 +249,7 @@ static int __init mark_nonram_nosave(void)
  */
 void __init paging_init(void)
 {
-	unsigned long total_ram = memblock_phys_mem_size();
+	unsigned long long total_ram = memblock_phys_mem_size();
 	phys_addr_t top_of_ram = memblock_end_of_DRAM();
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
@@ -269,7 +269,7 @@ void __init paging_init(void)
 	kmap_prot = PAGE_KERNEL;
 #endif /* CONFIG_HIGHMEM */
 
-	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%lx\n",
+	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
 	       (unsigned long long)top_of_ram, total_ram);
 	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
 	       (long int)((top_of_ram - total_ram) >> 20));
@@ -337,8 +337,9 @@ void __init mem_init(void)
 
 		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
 		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
+			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
 			struct page *page = pfn_to_page(pfn);
-			if (memblock_is_reserved(pfn << PAGE_SHIFT))
+			if (memblock_is_reserved(paddr))
 				continue;
 			ClearPageReserved(page);
 			init_page_count(page);
@@ -352,6 +353,15 @@ void __init mem_init(void)
 	}
 #endif /* CONFIG_HIGHMEM */
 
+#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
+	/*
+	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
+	 * functions.... do it here for the non-smp case.
+	 */
+	per_cpu(next_tlbcam_idx, smp_processor_id()) =
+		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
+#endif
+
 	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
 	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
 	       nr_free_pages() << (PAGE_SHIFT-10),
@@ -382,6 +392,25 @@ void __init mem_init(void)
 	mem_init_done = 1;
 }
 
+void free_initmem(void)
+{
+	unsigned long addr;
+
+	ppc_md.progress = ppc_printk_progress;
+
+	addr = (unsigned long)__init_begin;
+	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
+		ClearPageReserved(virt_to_page(addr));
+		init_page_count(virt_to_page(addr));
+		free_page(addr);
+		totalram_pages++;
+	}
+	pr_info("Freeing unused kernel memory: %luk freed\n",
+		((unsigned long)__init_end -
+		 (unsigned long)__init_begin) >> 10);
+}
+
 #ifdef CONFIG_BLK_DEV_INITRD
 void __init free_initrd_mem(unsigned long start, unsigned long end)
 {
@@ -177,3 +177,7 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 	flush_range(vma->vm_mm, start, end);
 }
 EXPORT_SYMBOL(flush_tlb_range);
+
+void __init early_init_mmu(void)
+{
+}
@@ -30,6 +30,212 @@
 #define VPTE_PGD_SHIFT	(VPTE_PUD_SHIFT + PUD_INDEX_SIZE)
 #define VPTE_INDEX_SIZE (VPTE_PGD_SHIFT + PGD_INDEX_SIZE)
 
+/**********************************************************************
+ *                                                                    *
+ * TLB miss handling for Book3E with a bolted linear mapping          *
+ * No virtual page table, no nested TLB misses                        *
+ *                                                                    *
+ **********************************************************************/
+
+.macro tlb_prolog_bolted addr
+	mtspr	SPRN_SPRG_TLB_SCRATCH,r13
+	mfspr	r13,SPRN_SPRG_PACA
+	std	r10,PACA_EXTLB+EX_TLB_R10(r13)
+	mfcr	r10
+	std	r11,PACA_EXTLB+EX_TLB_R11(r13)
+	std	r16,PACA_EXTLB+EX_TLB_R16(r13)
+	mfspr	r16,\addr		/* get faulting address */
+	std	r14,PACA_EXTLB+EX_TLB_R14(r13)
+	ld	r14,PACAPGD(r13)
+	std	r15,PACA_EXTLB+EX_TLB_R15(r13)
+	std	r10,PACA_EXTLB+EX_TLB_CR(r13)
+	TLB_MISS_PROLOG_STATS_BOLTED
+.endm
+
+.macro tlb_epilog_bolted
+	ld	r14,PACA_EXTLB+EX_TLB_CR(r13)
+	ld	r10,PACA_EXTLB+EX_TLB_R10(r13)
+	ld	r11,PACA_EXTLB+EX_TLB_R11(r13)
+	mtcr	r14
+	ld	r14,PACA_EXTLB+EX_TLB_R14(r13)
+	ld	r15,PACA_EXTLB+EX_TLB_R15(r13)
+	TLB_MISS_RESTORE_STATS_BOLTED
+	ld	r16,PACA_EXTLB+EX_TLB_R16(r13)
+	mfspr	r13,SPRN_SPRG_TLB_SCRATCH
+.endm
+
+/* Data TLB miss */
+	START_EXCEPTION(data_tlb_miss_bolted)
+	tlb_prolog_bolted SPRN_DEAR
+
+	/* We need _PAGE_PRESENT and _PAGE_ACCESSED set */
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+	/* We pre-test some combination of permissions to avoid double
+	 * faults:
+	 *
+	 * We move the ESR:ST bit into the position of _PAGE_BAP_SW in the PTE
+	 * ESR_ST is 0x00800000
+	 * _PAGE_BAP_SW is 0x00000010
+	 * So the shift is >> 19. This tests for supervisor writeability.
+	 * If the page happens to be supervisor writeable and not user
+	 * writeable, we will take a new fault later, but that should be
+	 * a rare enough case.
+	 *
+	 * We also move ESR_ST in _PAGE_DIRTY position
+	 * _PAGE_DIRTY is 0x00001000 so the shift is >> 11
+	 *
+	 * MAS1 is preset for all we need except for TID that needs to
+	 * be cleared for kernel translations
+	 */
+
+	mfspr	r11,SPRN_ESR
+
+	srdi	r15,r16,60		/* get region */
+	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+	bne-	dtlb_miss_fault_bolted
+
+	rlwinm	r10,r11,32-19,27,27
+	rlwimi	r10,r11,32-16,19,19
+	cmpwi	r15,0
+	ori	r10,r10,_PAGE_PRESENT
+	oris	r11,r10,_PAGE_ACCESSED@h
+
+	TLB_MISS_STATS_SAVE_INFO_BOLTED
+	bne	tlb_miss_kernel_bolted
+
+tlb_miss_common_bolted:
+/*
+ * This is the guts of the TLB miss handler for bolted-linear.
+ * We are entered with:
+ *
+ * r16 = faulting address
+ * r15 = crap (free to use)
+ * r14 = page table base
+ * r13 = PACA
+ * r11 = PTE permission mask
+ * r10 = crap (free to use)
+ */
+	rldicl	r15,r16,64-PGDIR_SHIFT+3,64-PGD_INDEX_SIZE-3
+	cmpldi	cr0,r14,0
+	clrrdi	r15,r15,3
+	beq	tlb_miss_fault_bolted
+
+BEGIN_MMU_FTR_SECTION
+	/* Set the TLB reservation and search for existing entry. Then load
+	 * the entry.
+	 */
+	PPC_TLBSRX_DOT(0,r16)
+	ldx	r14,r14,r15
+	beq	normal_tlb_miss_done
+MMU_FTR_SECTION_ELSE
+	ldx	r14,r14,r15
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_USE_TLBRSRV)
+
+#ifndef CONFIG_PPC_64K_PAGES
+	rldicl	r15,r16,64-PUD_SHIFT+3,64-PUD_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+
+	cmpldi	cr0,r14,0
+	beq	tlb_miss_fault_bolted
+
+	ldx	r14,r14,r15
+#endif /* CONFIG_PPC_64K_PAGES */
+
+	rldicl	r15,r16,64-PMD_SHIFT+3,64-PMD_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+
+	cmpldi	cr0,r14,0
+	beq	tlb_miss_fault_bolted
+
+	ldx	r14,r14,r15
+
+	rldicl	r15,r16,64-PAGE_SHIFT+3,64-PTE_INDEX_SIZE-3
+	clrrdi	r15,r15,3
+
+	cmpldi	cr0,r14,0
+	beq	tlb_miss_fault_bolted
+
+	ldx	r14,r14,r15
+
+	/* Check if required permissions are met */
+	andc.	r15,r11,r14
+	rldicr	r15,r14,64-(PTE_RPN_SHIFT-PAGE_SHIFT),63-PAGE_SHIFT
+	bne-	tlb_miss_fault_bolted
+
+	/* Now we build the MAS:
+	 *
+	 * MAS 0 :	Fully setup with defaults in MAS4 and TLBnCFG
+	 * MAS 1 :	Almost fully setup
+	 *		 - PID already updated by caller if necessary
+	 *		 - TSIZE need change if !base page size, not
+	 *		   yet implemented for now
+	 * MAS 2 :	Defaults not useful, need to be redone
+	 * MAS 3+7 :	Needs to be done
+	 */
+	clrrdi	r11,r16,12		/* Clear low crap in EA */
+	clrldi	r15,r15,12		/* Clear crap at the top */
+	rlwimi	r11,r14,32-19,27,31	/* Insert WIMGE */
+	rlwimi	r15,r14,32-8,22,25	/* Move in U bits */
+	mtspr	SPRN_MAS2,r11
+	andi.	r11,r14,_PAGE_DIRTY
+	rlwimi	r15,r14,32-2,26,31	/* Move in BAP bits */
+
+	/* Mask out SW and UW if !DIRTY (XXX optimize this !) */
+	bne	1f
+	li	r11,MAS3_SW|MAS3_UW
+	andc	r15,r15,r11
+1:
+	mtspr	SPRN_MAS7_MAS3,r15
+	tlbwe
+
+	TLB_MISS_STATS_X(MMSTAT_TLB_MISS_NORM_OK)
+	tlb_epilog_bolted
+	rfi
+
+itlb_miss_kernel_bolted:
+	li	r11,_PAGE_PRESENT|_PAGE_BAP_SX	/* Base perm */
+	oris	r11,r11,_PAGE_ACCESSED@h
+tlb_miss_kernel_bolted:
+	mfspr	r10,SPRN_MAS1
+	ld	r14,PACA_KERNELPGD(r13)
+	cmpldi	cr0,r15,8		/* Check for vmalloc region */
+	rlwinm	r10,r10,0,16,1		/* Clear TID */
+	mtspr	SPRN_MAS1,r10
+	beq+	tlb_miss_common_bolted
+
+tlb_miss_fault_bolted:
+	/* We need to check if it was an instruction miss */
+	andi.	r10,r11,_PAGE_EXEC|_PAGE_BAP_SX
+	bne	itlb_miss_fault_bolted
+dtlb_miss_fault_bolted:
+	TLB_MISS_STATS_D(MMSTAT_TLB_MISS_NORM_FAULT)
+	tlb_epilog_bolted
+	b	exc_data_storage_book3e
+itlb_miss_fault_bolted:
+	TLB_MISS_STATS_I(MMSTAT_TLB_MISS_NORM_FAULT)
+	tlb_epilog_bolted
+	b	exc_instruction_storage_book3e
+
+/* Instruction TLB miss */
+	START_EXCEPTION(instruction_tlb_miss_bolted)
+	tlb_prolog_bolted SPRN_SRR0
+
+	rldicl.	r10,r16,64-PGTABLE_EADDR_SIZE,PGTABLE_EADDR_SIZE+4
+	srdi	r15,r16,60		/* get region */
+	TLB_MISS_STATS_SAVE_INFO_BOLTED
+	bne-	itlb_miss_fault_bolted
+
+	li	r11,_PAGE_PRESENT|_PAGE_EXEC	/* Base perm */
+
+	/* We do the user/kernel test for the PID here along with the RW test
+	 */
+
+	cmpldi	cr0,r15,0		/* Check for user region */
+	oris	r11,r11,_PAGE_ACCESSED@h
+	beq	tlb_miss_common_bolted
+	b	itlb_miss_kernel_bolted
+
 /**********************************************************************
  *                                                                    *
@@ -35,6 +35,7 @@
 #include <linux/preempt.h>
 #include <linux/spinlock.h>
 #include <linux/memblock.h>
+#include <linux/of_fdt.h>
 
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
@@ -102,6 +103,12 @@ unsigned long linear_map_top;	/* Top of linear mapping */
 
 #endif /* CONFIG_PPC64 */
 
+#ifdef CONFIG_PPC_FSL_BOOK3E
+/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
+DEFINE_PER_CPU(int, next_tlbcam_idx);
+EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
+#endif
+
 /*
  * Base TLB flushing operations:
  *
@@ -266,6 +273,17 @@ EXPORT_SYMBOL(flush_tlb_page);
 
 #endif /* CONFIG_SMP */
 
+#ifdef CONFIG_PPC_47x
+void __init early_init_mmu_47x(void)
+{
+#ifdef CONFIG_SMP
+	unsigned long root = of_get_flat_dt_root();
+	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
+		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
+#endif /* CONFIG_SMP */
+}
+#endif /* CONFIG_PPC_47x */
+
 /*
  * Flush kernel TLB entries in the given range
  */
@@ -443,14 +461,27 @@ static void setup_page_sizes(void)
 	}
 }
 
-static void setup_mmu_htw(void)
+static void __patch_exception(int exc, unsigned long addr)
 {
 	extern unsigned int interrupt_base_book3e;
-	extern unsigned int exc_data_tlb_miss_htw_book3e;
-	extern unsigned int exc_instruction_tlb_miss_htw_book3e;
+	unsigned int *ibase = &interrupt_base_book3e;
+
+	/* Our exceptions vectors start with a NOP and -then- a branch
+	 * to deal with single stepping from userspace which stops on
+	 * the second instruction. Thus we need to patch the second
+	 * instruction of the exception, not the first one
+	 */
 
-	unsigned int *ibase = &interrupt_base_book3e;
+	patch_branch(ibase + (exc / 4) + 1, addr, 0);
+}
 
+#define patch_exception(exc, name) do { \
+	extern unsigned int name; \
+	__patch_exception((exc), (unsigned long)&name); \
+	} while (0)
+
+static void setup_mmu_htw(void)
+{
 	/* Check if HW tablewalk is present, and if yes, enable it by:
 	 *
 	 * - patching the TLB miss handlers to branch to the
@@ -462,19 +493,12 @@ static void setup_mmu_htw(void)
 
 	if ((tlb0cfg & TLBnCFG_IND) &&
 	    (tlb0cfg & TLBnCFG_PT)) {
-		/* Our exceptions vectors start with a NOP and -then- a branch
-		 * to deal with single stepping from userspace which stops on
-		 * the second instruction. Thus we need to patch the second
-		 * instruction of the exception, not the first one
-		 */
-		patch_branch(ibase + (0x1c0 / 4) + 1,
-			     (unsigned long)&exc_data_tlb_miss_htw_book3e, 0);
-		patch_branch(ibase + (0x1e0 / 4) + 1,
-			     (unsigned long)&exc_instruction_tlb_miss_htw_book3e, 0);
+		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
+		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
 		book3e_htw_enabled = 1;
 	}
-	pr_info("MMU: Book3E Page Tables %s\n",
-		book3e_htw_enabled ? "Enabled" : "Disabled");
+	pr_info("MMU: Book3E HW tablewalk %s\n",
+		book3e_htw_enabled ? "enabled" : "not supported");
 }
 
 /*
@@ -549,6 +573,9 @@ static void __early_init_mmu(int boot_cpu)
 	/* limit memory so we dont have linear faults */
 	memblock_enforce_memory_limit(linear_map_top);
 	memblock_analyze();
+
+	patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
+	patch_exception(0x1e0, exc_instruction_tlb_miss_bolted_book3e);
 }
 #endif
 
@@ -584,4 +611,11 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 	/* Finally limit subsequent allocations */
 	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
 }
+#else /* ! CONFIG_PPC64 */
+void __init early_init_mmu(void)
+{
+#ifdef CONFIG_PPC_47x
+	early_init_mmu_47x();
+#endif
+}
 #endif /* CONFIG_PPC64 */