Merge commit 'v2.6.35-rc3' into perf/core
Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.
@@ -57,8 +57,12 @@ obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_E500) += idle_e500.o
obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o
obj-$(CONFIG_TAU) += tau_6xx.o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o \
swsusp_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o
ifeq ($(CONFIG_FSL_BOOKE),y)
obj-$(CONFIG_HIBERNATION) += swsusp_booke.o
else
obj-$(CONFIG_HIBERNATION) += swsusp_$(CONFIG_WORD_SIZE).o
endif
obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o
obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_44x) += cpu_setup_44x.o
@@ -1840,7 +1840,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc/e500mc",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e500mc,
.machine_check = machine_check_e500,
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
},
{ /* default match */
@@ -163,6 +163,7 @@ static void crash_kexec_prepare_cpus(int cpu)
}

/* wait for all the CPUs to hit real mode but timeout if they don't come in */
#ifdef CONFIG_PPC_STD_MMU_64
static void crash_kexec_wait_realmode(int cpu)
{
unsigned int msecs;
@@ -187,6 +188,7 @@ static void crash_kexec_wait_realmode(int cpu)
}
mb();
}
#endif

/*
* This function will be called by secondary cpus or by kexec cpu
@@ -445,7 +447,9 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
crash_kexec_prepare_cpus(crashing_cpu);
cpu_set(crashing_cpu, cpus_in_crash);
crash_kexec_stop_spus();
#ifdef CONFIG_PPC_STD_MMU_64
crash_kexec_wait_realmode(crashing_cpu);
#endif
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
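Only part of crash_kexec_wait_realmode() is visible in this hunk. As orientation, a wait-with-timeout loop of this kind typically has the shape below; this is a sketch only, and the function name, the cpus_in_realmode mask and the 10-second budget are illustrative rather than taken from the kernel source:

#include <linux/cpumask.h>
#include <linux/delay.h>
#include <asm/system.h>

/* Hypothetical mask that each secondary sets once it has entered real mode. */
static cpumask_t cpus_in_realmode;

static void wait_realmode_sketch(int ncpus)
{
	unsigned int msecs = 10000;	/* give the CPUs up to ~10 seconds */

	while (cpus_weight(cpus_in_realmode) < ncpus && --msecs)
		mdelay(1);

	mb();	/* order this poll against the secondaries' stores */
}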
@@ -0,0 +1,237 @@

/* 1. Find the index of the entry we're executing in */
bl invstr /* Find our address */
invstr: mflr r6 /* Make it accessible */
mfmsr r7
rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
mfspr r7, SPRN_PID0
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
mfspr r7,SPRN_MAS1
andis. r7,r7,MAS1_VALID@h
bne match_TLB

mfspr r7,SPRN_MMUCFG
rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
cmpwi r7,3
bne match_TLB /* skip if NPIDS != 3 */

mfspr r7,SPRN_PID1
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
mfspr r7,SPRN_MAS1
andis. r7,r7,MAS1_VALID@h
bne match_TLB
mfspr r7, SPRN_PID2
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* Fall through, we had to match */

match_TLB:
mfspr r7,SPRN_MAS0
rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */
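/* r3 now holds the index (ESEL) of the TLB1 entry that maps the code currently being executed */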

mfspr r7,SPRN_MAS1 /* Insure IPROT set */
oris r7,r7,MAS1_IPROT@h
mtspr SPRN_MAS1,r7
tlbwe

/* 2. Invalidate all entries except the entry we're executing in */
mfspr r9,SPRN_TLB1CFG
andi. r9,r9,0xfff
li r6,0 /* Set Entry counter to 0 */
1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
mtspr SPRN_MAS0,r7
tlbre
mfspr r7,SPRN_MAS1
rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
cmpw r3,r6
beq skpinv /* Don't update the current execution TLB */
mtspr SPRN_MAS1,r7
tlbwe
isync
skpinv: addi r6,r6,1 /* Increment */
cmpw r6,r9 /* Are we done? */
bne 1b /* If not, repeat */
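/* At this point every TLB1 entry except the (IPROT-protected) one we are running from has been invalidated */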

/* Invalidate TLB0 */
li r6,0x04
tlbivax 0,r6
TLBSYNC
/* Invalidate TLB1 */
li r6,0x0c
tlbivax 0,r6
TLBSYNC

/* 3. Setup a temp mapping and jump to it */
andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
addi r5, r5, 0x1
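/* r5 = (r3 & 1) + 1: entry 1 if the current index is even, entry 2 if it is odd, so the temporary slot never collides with the entry we are running from */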
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
mtspr SPRN_MAS0,r7
tlbre

/* grab and fixup the RPN */
mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */
rlwinm r6,r6,25,27,31
li r8,-1
addi r6,r6,10
slw r6,r8,r6 /* convert to mask */

bl 1f /* Find our address */
1: mflr r7

mfspr r8,SPRN_MAS3
#ifdef CONFIG_PHYS_64BIT
mfspr r23,SPRN_MAS7
#endif
and r8,r6,r8
subfic r9,r6,-4096
and r9,r9,r7

or r25,r8,r9
ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)
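/* The shifts above treat the entry as 2^(TSIZE + 10) bytes and build the matching address mask;
 * r25 now holds the page-aligned physical address of the code being executed, and r8 adds the
 * supervisor RWX permission bits for the temporary entry's MAS3 value */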

/* Just modify the entry ID and EPN for the temp mapping */
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
xori r6,r4,1 /* Setup TMP mapping in the other Address space */
slwi r6,r6,12
oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l
mtspr SPRN_MAS1,r6
mfspr r6,SPRN_MAS2
li r7,0 /* temp EPN = 0 */
rlwimi r7,r6,0,20,31
mtspr SPRN_MAS2,r7
mtspr SPRN_MAS3,r8
tlbwe
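/* The temporary entry now maps EPN 0 in the other address space onto the physical page we are running from */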

xori r6,r4,1
slwi r6,r6,5 /* setup new context with other address space */
bl 1f /* Find our address */
1: mflr r9
rlwimi r7,r9,0,20,31
addi r7,r7,(2f - 1b)
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r6
rfi
2:
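/* The rfi above switched MSR[IS] to the other address space, so execution continues here through the temporary entry */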
/* 4. Clear out PIDs & Search info */
li r6,0
mtspr SPRN_MAS6,r6
mtspr SPRN_PID0,r6

mfspr r7,SPRN_MMUCFG
rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
cmpwi r7,3
bne 2f /* skip if NPIDS != 3 */

mtspr SPRN_PID1,r6
mtspr SPRN_PID2,r6

/* 5. Invalidate mapping we started in */
2:
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
mtspr SPRN_MAS0,r7
tlbre
mfspr r6,SPRN_MAS1
rlwinm r6,r6,0,2,0 /* clear IPROT */
mtspr SPRN_MAS1,r6
tlbwe
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
TLBSYNC

/* The mapping only needs to be cache-coherent on SMP */
#ifdef CONFIG_SMP
#define M_IF_SMP MAS2_M
#else
#define M_IF_SMP 0
#endif

#if defined(ENTRY_MAPPING_BOOT_SETUP)

/* 6. Setup KERNELBASE mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
mtspr SPRN_MAS1,r6
lis r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
mtspr SPRN_MAS2,r6
mtspr SPRN_MAS3,r8
tlbwe

/* 7. Jump to KERNELBASE mapping */
lis r6,(KERNELBASE & ~0xfff)@h
ori r6,r6,(KERNELBASE & ~0xfff)@l

#elif defined(ENTRY_MAPPING_KEXEC_SETUP)
/*
 * 6. Setup a 1:1 mapping in TLB1. Esel 0 is unused, 1 or 2 contains the tmp
 * mapping so we start at 3. We setup 8 mappings, each 256MiB in size. This
 * will cover the first 2GiB of memory.
 */

lis r10, (MAS1_VALID|MAS1_IPROT)@h
ori r10,r10, (MAS1_TSIZE(BOOK3E_PAGESZ_256M))@l
li r11, 0
li r0, 8
mtctr r0

next_tlb_setup:
addi r0, r11, 3
rlwinm r0, r0, 16, 4, 15 // Compute esel
rlwinm r9, r11, 28, 0, 3 // Compute [ER]PN
oris r0, r0, (MAS0_TLBSEL(1))@h
mtspr SPRN_MAS0,r0
mtspr SPRN_MAS1,r10
mtspr SPRN_MAS2,r9
ori r9, r9, (MAS3_SX|MAS3_SW|MAS3_SR)
mtspr SPRN_MAS3,r9
tlbwe
addi r11, r11, 1
bdnz+ next_tlb_setup
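/* Each pass programs TLB1 entry 3+r11 to map the 256MiB block at r11*256MiB onto itself (virtual == physical) with supervisor RWX permissions */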

/* 7. Jump to our 1:1 mapping */
li r6, 0

#else
#error You need to specify the mapping or not use this at all.
#endif

lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
bl 1f /* Find our address */
1: mflr r9
rlwimi r6,r9,0,20,31
addi r6,r6,(2f - 1b)
add r6, r6, r25
mtspr SPRN_SRR0,r6
mtspr SPRN_SRR1,r7
rfi /* start execution out of TLB1[0] entry */

/* 8. Clear out the temp mapping */
2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
tlbre
mfspr r8,SPRN_MAS1
rlwinm r8,r8,0,2,0 /* clear IPROT */
mtspr SPRN_MAS1,r8
tlbwe
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
TLBSYNC
@@ -94,204 +94,10 @@ _ENTRY(_start);
*/

_ENTRY(__early_start)
/* 1. Find the index of the entry we're executing in */
bl invstr /* Find our address */
invstr: mflr r6 /* Make it accessible */
mfmsr r7
rlwinm r4,r7,27,31,31 /* extract MSR[IS] */
mfspr r7, SPRN_PID0
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* search MSR[IS], SPID=PID0 */
mfspr r7,SPRN_MAS1
andis. r7,r7,MAS1_VALID@h
bne match_TLB

mfspr r7,SPRN_MMUCFG
rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
cmpwi r7,3
bne match_TLB /* skip if NPIDS != 3 */

mfspr r7,SPRN_PID1
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* search MSR[IS], SPID=PID1 */
mfspr r7,SPRN_MAS1
andis. r7,r7,MAS1_VALID@h
bne match_TLB
mfspr r7, SPRN_PID2
slwi r7,r7,16
or r7,r7,r4
mtspr SPRN_MAS6,r7
tlbsx 0,r6 /* Fall through, we had to match */

match_TLB:
mfspr r7,SPRN_MAS0
rlwinm r3,r7,16,20,31 /* Extract MAS0(Entry) */

mfspr r7,SPRN_MAS1 /* Insure IPROT set */
oris r7,r7,MAS1_IPROT@h
mtspr SPRN_MAS1,r7
tlbwe

/* 2. Invalidate all entries except the entry we're executing in */
mfspr r9,SPRN_TLB1CFG
andi. r9,r9,0xfff
li r6,0 /* Set Entry counter to 0 */
1: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r6,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r6) */
mtspr SPRN_MAS0,r7
tlbre
mfspr r7,SPRN_MAS1
rlwinm r7,r7,0,2,31 /* Clear MAS1 Valid and IPROT */
cmpw r3,r6
beq skpinv /* Dont update the current execution TLB */
mtspr SPRN_MAS1,r7
tlbwe
isync
skpinv: addi r6,r6,1 /* Increment */
cmpw r6,r9 /* Are we done? */
bne 1b /* If not, repeat */

/* Invalidate TLB0 */
li r6,0x04
tlbivax 0,r6
TLBSYNC
/* Invalidate TLB1 */
li r6,0x0c
tlbivax 0,r6
TLBSYNC

/* 3. Setup a temp mapping and jump to it */
andi. r5, r3, 0x1 /* Find an entry not used and is non-zero */
addi r5, r5, 0x1
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
mtspr SPRN_MAS0,r7
tlbre

/* grab and fixup the RPN */
mfspr r6,SPRN_MAS1 /* extract MAS1[SIZE] */
rlwinm r6,r6,25,27,31
li r8,-1
addi r6,r6,10
slw r6,r8,r6 /* convert to mask */

bl 1f /* Find our address */
1: mflr r7

mfspr r8,SPRN_MAS3
#ifdef CONFIG_PHYS_64BIT
mfspr r23,SPRN_MAS7
#endif
and r8,r6,r8
subfic r9,r6,-4096
and r9,r9,r7

or r25,r8,r9
ori r8,r25,(MAS3_SX|MAS3_SW|MAS3_SR)

/* Just modify the entry ID and EPN for the temp mapping */
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
xori r6,r4,1 /* Setup TMP mapping in the other Address space */
slwi r6,r6,12
oris r6,r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_4K))@l
mtspr SPRN_MAS1,r6
mfspr r6,SPRN_MAS2
li r7,0 /* temp EPN = 0 */
rlwimi r7,r6,0,20,31
mtspr SPRN_MAS2,r7
mtspr SPRN_MAS3,r8
tlbwe

xori r6,r4,1
slwi r6,r6,5 /* setup new context with other address space */
bl 1f /* Find our address */
1: mflr r9
rlwimi r7,r9,0,20,31
addi r7,r7,(2f - 1b)
mtspr SPRN_SRR0,r7
mtspr SPRN_SRR1,r6
rfi
2:
/* 4. Clear out PIDs & Search info */
li r6,0
mtspr SPRN_MAS6,r6
mtspr SPRN_PID0,r6

mfspr r7,SPRN_MMUCFG
rlwinm r7,r7,21,28,31 /* extract MMUCFG[NPIDS] */
cmpwi r7,3
bne 2f /* skip if NPIDS != 3 */

mtspr SPRN_PID1,r6
mtspr SPRN_PID2,r6

/* 5. Invalidate mapping we started in */
2:
lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r3,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r3) */
mtspr SPRN_MAS0,r7
tlbre
mfspr r6,SPRN_MAS1
rlwinm r6,r6,0,2,0 /* clear IPROT */
mtspr SPRN_MAS1,r6
tlbwe
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
TLBSYNC

/* The mapping only needs to be cache-coherent on SMP */
#ifdef CONFIG_SMP
#define M_IF_SMP MAS2_M
#else
#define M_IF_SMP 0
#endif

/* 6. Setup KERNELBASE mapping in TLB1[0] */
lis r6,0x1000 /* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
mtspr SPRN_MAS0,r6
lis r6,(MAS1_VALID|MAS1_IPROT)@h
ori r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
mtspr SPRN_MAS1,r6
lis r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@h
ori r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, M_IF_SMP)@l
mtspr SPRN_MAS2,r6
mtspr SPRN_MAS3,r8
tlbwe

/* 7. Jump to KERNELBASE mapping */
lis r6,(KERNELBASE & ~0xfff)@h
ori r6,r6,(KERNELBASE & ~0xfff)@l
lis r7,MSR_KERNEL@h
ori r7,r7,MSR_KERNEL@l
bl 1f /* Find our address */
1: mflr r9
rlwimi r6,r9,0,20,31
addi r6,r6,(2f - 1b)
mtspr SPRN_SRR0,r6
mtspr SPRN_SRR1,r7
rfi /* start execution out of TLB1[0] entry */

/* 8. Clear out the temp mapping */
2: lis r7,0x1000 /* Set MAS0(TLBSEL) = 1 */
rlwimi r7,r5,16,4,15 /* Setup MAS0 = TLBSEL | ESEL(r5) */
mtspr SPRN_MAS0,r7
tlbre
mfspr r8,SPRN_MAS1
rlwinm r8,r8,0,2,0 /* clear IPROT */
mtspr SPRN_MAS1,r8
tlbwe
/* Invalidate TLB1 */
li r9,0x0c
tlbivax 0,r9
TLBSYNC
#define ENTRY_MAPPING_BOOT_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_BOOT_SETUP
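/* The ~200 lines removed above are the early TLB setup that now lives in fsl_booke_entry_mapping.S and is pulled back in with ENTRY_MAPPING_BOOT_SETUP defined. */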

/* Establish the interrupt vector offsets */
SET_IVOR(0, CriticalInput);
@@ -378,17 +378,6 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
* single-stepped a copy of the instruction. The address of this
* copy is p->ainsn.insn.
*/
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
int ret;
unsigned int insn = *p->ainsn.insn;

regs->nip = (unsigned long)p->addr;
ret = emulate_step(regs, insn);
if (ret == 0)
regs->nip = (unsigned long)p->addr + 4;
}

static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
@@ -406,7 +395,8 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
cur->post_handler(cur, regs, 0);
}

resume_execution(cur, regs);
/* Adjust nip to after the single-stepped instruction */
regs->nip = (unsigned long)cur->addr + 4;
regs->msr |= kcb->kprobe_saved_msr;

/*Restore back the original saved kprobes variables and continue. */
@@ -711,6 +711,22 @@ relocate_new_kernel:
/* r4 = reboot_code_buffer */
/* r5 = start_address */

#ifdef CONFIG_FSL_BOOKE

mr r29, r3
mr r30, r4
mr r31, r5
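/* The included entry-mapping code clobbers the low registers, so the kexec arguments in r3-r5 are staged in r29-r31 here and restored once the 1:1 mapping is in place */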

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

mr r3, r29
mr r4, r30
mr r5, r31

li r0, 0
#else
li r0, 0

/*
@@ -727,6 +743,7 @@ relocate_new_kernel:
rfi

1:
#endif
/* from this point address translation is turned off */
/* and interrupts are disabled */
@@ -1309,6 +1309,7 @@ void pcibios_allocate_bus_resources(struct pci_bus *bus)
printk(KERN_WARNING "PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
clear_resource:
res->start = res->end = 0;
res->flags = 0;
}
@@ -101,7 +101,7 @@ EXPORT_SYMBOL(pci_dram_offset);
EXPORT_SYMBOL(start_thread);
EXPORT_SYMBOL(kernel_thread);

#ifndef CONFIG_BOOKE
#ifdef CONFIG_PPC_FPU
EXPORT_SYMBOL_GPL(cvt_df);
EXPORT_SYMBOL_GPL(cvt_fd);
#endif
arch/powerpc/kernel/swsusp_booke.S (new file, 193 lines)
@@ -0,0 +1,193 @@
/*
 * Based on swsusp_32.S, modified for FSL BookE by
 * Anton Vorontsov <avorontsov@ru.mvista.com>
 * Copyright (c) 2009-2010 MontaVista Software, LLC.
 */

#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>

/*
 * Structure for storing CPU registers on the save area.
 */
#define SL_SP 0
#define SL_PC 4
#define SL_MSR 8
#define SL_TCR 0xc
#define SL_SPRG0 0x10
#define SL_SPRG1 0x14
#define SL_SPRG2 0x18
#define SL_SPRG3 0x1c
#define SL_SPRG4 0x20
#define SL_SPRG5 0x24
#define SL_SPRG6 0x28
#define SL_SPRG7 0x2c
#define SL_TBU 0x30
#define SL_TBL 0x34
#define SL_R2 0x38
#define SL_CR 0x3c
#define SL_LR 0x40
#define SL_R12 0x44 /* r12 to r31 */
#define SL_SIZE (SL_R12 + 80)
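/* r12..r31 are 20 words (80 bytes), so SL_SIZE spans the whole register layout above */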

.section .data
.align 5

_GLOBAL(swsusp_save_area)
.space SL_SIZE


.section .text
.align 5

_GLOBAL(swsusp_arch_suspend)
lis r11,swsusp_save_area@h
ori r11,r11,swsusp_save_area@l

mflr r0
stw r0,SL_LR(r11)
mfcr r0
stw r0,SL_CR(r11)
stw r1,SL_SP(r11)
stw r2,SL_R2(r11)
stmw r12,SL_R12(r11)

/* Save MSR & TCR */
mfmsr r4
stw r4,SL_MSR(r11)
mfspr r4,SPRN_TCR
stw r4,SL_TCR(r11)

/* Get a stable timebase and save it */
1: mfspr r4,SPRN_TBRU
stw r4,SL_TBU(r11)
mfspr r5,SPRN_TBRL
stw r5,SL_TBL(r11)
mfspr r3,SPRN_TBRU
cmpw r3,r4
bne 1b
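/* Re-read TBU and loop until it is unchanged, so the saved TBU:TBL pair is a consistent 64-bit snapshot */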

/* Save SPRGs */
mfsprg r4,0
stw r4,SL_SPRG0(r11)
mfsprg r4,1
stw r4,SL_SPRG1(r11)
mfsprg r4,2
stw r4,SL_SPRG2(r11)
mfsprg r4,3
stw r4,SL_SPRG3(r11)
mfsprg r4,4
stw r4,SL_SPRG4(r11)
mfsprg r4,5
stw r4,SL_SPRG5(r11)
mfsprg r4,6
stw r4,SL_SPRG6(r11)
mfsprg r4,7
stw r4,SL_SPRG7(r11)

/* Call the low level suspend stuff (we should probably have made
 * a stackframe...)
 */
bl swsusp_save

/* Restore LR from the save area */
lis r11,swsusp_save_area@h
ori r11,r11,swsusp_save_area@l
lwz r0,SL_LR(r11)
mtlr r0

blr

_GLOBAL(swsusp_arch_resume)
sync

/* Load the pointer to the list of pages to copy into r3 */
lis r11,(restore_pblist)@h
ori r11,r11,restore_pblist@l
lwz r3,0(r11)

/* Copy the pages. This is a very basic implementation, to
 * be replaced by something more cache efficient */
1:
li r0,256
mtctr r0
lwz r5,pbe_address(r3) /* source */
lwz r6,pbe_orig_address(r3) /* destination */
2:
lwz r8,0(r5)
lwz r9,4(r5)
lwz r10,8(r5)
lwz r11,12(r5)
addi r5,r5,16
stw r8,0(r6)
stw r9,4(r6)
stw r10,8(r6)
stw r11,12(r6)
addi r6,r6,16
bdnz 2b
lwz r3,pbe_next(r3)
cmpwi 0,r3,0
bne 1b
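/* 256 iterations of 16 bytes copy one 4KiB page; pbe_next then leads to the next saved page in the list */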

bl flush_dcache_L1
bl flush_instruction_cache

lis r11,swsusp_save_area@h
ori r11,r11,swsusp_save_area@l

lwz r4,SL_SPRG0(r11)
mtsprg 0,r4
lwz r4,SL_SPRG1(r11)
mtsprg 1,r4
lwz r4,SL_SPRG2(r11)
mtsprg 2,r4
lwz r4,SL_SPRG3(r11)
mtsprg 3,r4
lwz r4,SL_SPRG4(r11)
mtsprg 4,r4
lwz r4,SL_SPRG5(r11)
mtsprg 5,r4
lwz r4,SL_SPRG6(r11)
mtsprg 6,r4
lwz r4,SL_SPRG7(r11)
mtsprg 7,r4

/* restore the MSR */
lwz r3,SL_MSR(r11)
mtmsr r3

/* Restore TB */
li r3,0
mtspr SPRN_TBWL,r3
lwz r3,SL_TBU(r11)
lwz r4,SL_TBL(r11)
mtspr SPRN_TBWU,r3
mtspr SPRN_TBWL,r4

/* Restore TCR and clear any pending bits in TSR. */
lwz r4,SL_TCR(r11)
mtspr SPRN_TCR,r4
lis r4, (TSR_ENW | TSR_WIS | TSR_DIS | TSR_FIS)@h
mtspr SPRN_TSR,r4

/* Kick decrementer */
li r0,1
mtdec r0

/* Restore the callee-saved registers and return */
lwz r0,SL_CR(r11)
mtcr r0
lwz r2,SL_R2(r11)
lmw r12,SL_R12(r11)
lwz r1,SL_SP(r11)
lwz r0,SL_LR(r11)
mtlr r0

li r3,0
blr
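For reference, the copy loop in swsusp_arch_resume above is the assembly counterpart of a plain C walk over restore_pblist (struct pbe and restore_pblist are the standard swsusp interfaces from <linux/suspend.h>); a minimal sketch, ignoring the cache flushing the assembly performs afterwards:

#include <linux/suspend.h>
#include <linux/string.h>
#include <asm/page.h>

static void copy_back_pages_sketch(void)
{
	struct pbe *p;

	/* Each pbe names a saved copy (address) and its original page (orig_address). */
	for (p = restore_pblist; p; p = p->next)
		memcpy(p->orig_address, p->address, PAGE_SIZE);
}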
@@ -1,5 +1,6 @@
/*
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright 2007-2010 Freescale Semiconductor, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
@@ -305,7 +306,7 @@ static inline int check_io_access(struct pt_regs *regs)
#ifndef CONFIG_FSL_BOOKE
#define get_mc_reason(regs) ((regs)->dsisr)
#else
#define get_mc_reason(regs) (mfspr(SPRN_MCSR) & MCSR_MASK)
#define get_mc_reason(regs) (mfspr(SPRN_MCSR))
#endif
#define REASON_FP ESR_FP
#define REASON_ILLEGAL (ESR_PIL | ESR_PUO)
@@ -421,6 +422,91 @@ int machine_check_47x(struct pt_regs *regs)
return 0;
}
#elif defined(CONFIG_E500)
int machine_check_e500mc(struct pt_regs *regs)
{
unsigned long mcsr = mfspr(SPRN_MCSR);
unsigned long reason = mcsr;
int recoverable = 1;

printk("Machine check in kernel mode.\n");
printk("Caused by (from MCSR=%lx): ", reason);

if (reason & MCSR_MCP)
printk("Machine Check Signal\n");

if (reason & MCSR_ICPERR) {
printk("Instruction Cache Parity Error\n");

/*
 * This is recoverable by invalidating the i-cache.
 */
mtspr(SPRN_L1CSR1, mfspr(SPRN_L1CSR1) | L1CSR1_ICFI);
while (mfspr(SPRN_L1CSR1) & L1CSR1_ICFI)
;

/*
 * This will generally be accompanied by an instruction
 * fetch error report -- only treat MCSR_IF as fatal
 * if it wasn't due to an L1 parity error.
 */
reason &= ~MCSR_IF;
}

if (reason & MCSR_DCPERR_MC) {
printk("Data Cache Parity Error\n");
recoverable = 0;
}

if (reason & MCSR_L2MMU_MHIT) {
printk("Hit on multiple TLB entries\n");
recoverable = 0;
}

if (reason & MCSR_NMI)
printk("Non-maskable interrupt\n");

if (reason & MCSR_IF) {
printk("Instruction Fetch Error Report\n");
recoverable = 0;
}

if (reason & MCSR_LD) {
printk("Load Error Report\n");
recoverable = 0;
}

if (reason & MCSR_ST) {
printk("Store Error Report\n");
recoverable = 0;
}

if (reason & MCSR_LDG) {
printk("Guarded Load Error Report\n");
recoverable = 0;
}

if (reason & MCSR_TLBSYNC)
printk("Simultaneous tlbsync operations\n");

if (reason & MCSR_BSL2_ERR) {
printk("Level 2 Cache Error\n");
recoverable = 0;
}

if (reason & MCSR_MAV) {
u64 addr;

addr = mfspr(SPRN_MCAR);
addr |= (u64)mfspr(SPRN_MCARU) << 32;

printk("Machine Check %s Address: %#llx\n",
reason & MCSR_MEA ? "Effective" : "Physical", addr);
}

mtspr(SPRN_MCSR, mcsr);
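/* Writing the saved MCSR value back clears the bits just handled; if MCSR is still non-zero a new error arrived in the meantime, so the check is not reported as recovered. */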
return mfspr(SPRN_MCSR) == 0 && recoverable;
}

int machine_check_e500(struct pt_regs *regs)
{
unsigned long reason = get_mc_reason(regs);
@@ -223,19 +223,17 @@ SECTIONS
#endif

/* The initial task and kernel stack */
.data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
INIT_TASK_DATA(THREAD_SIZE)
}
INIT_TASK_DATA_SECTION(THREAD_SIZE)

.data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
PAGE_ALIGNED_DATA(PAGE_SIZE)
}

.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
}

.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
READ_MOSTLY_DATA(L1_CACHE_BYTES)
}