Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/scottwood/linux into next
Freescale updates from Scott: "Highlights include 8xx optimizations, 32-bit checksum optimizations, 86xx consolidation, e5500/e6500 cpu hotplug, more fman and other dt bits, and minor fixes/cleanup."
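The e5500/e6500 cpu hotplug work in this merge hangs off a new per-CPU teardown hook: struct cpu_spec gains a cpu_down_flush callback (see the CPU_DOWN_FLUSH asm-offset and the .cpu_down_flush table entries in the diff below), implemented by cpu_down_flush_e500v2/e500mc/e5500/e6500 to flush the L1 d-cache and, where present, the backside L2 before a core goes down. A minimal C sketch of how such a hook is typically wired up; the field and the flush routines come from the diff, while the helper name and call path shown here are illustrative assumptions, not the exact kernel code.

/*
 * Sketch only: cpu_down_flush and the e500* flush routines appear in the
 * diff below; invoking the hook from a platform cpu-offline step through
 * cur_cpu_spec is an illustrative assumption.
 */
struct cpu_spec {
	/* other fields (pvr_mask, cpu_setup, machine_check, ...) omitted */
	void (*cpu_down_flush)(void);	/* flush caches before the CPU is taken down */
};

extern struct cpu_spec *cur_cpu_spec;

static void example_cpu_offline_cache_flush(void)
{
	/* e500v2: L1 only; e500mc/e5500: L1 plus backside L2; e6500: no-op */
	if (cur_cpu_spec->cpu_down_flush)
		cur_cpu_spec->cpu_down_flush();
}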
@@ -376,6 +376,7 @@ int main(void)
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
DEFINE(CPU_SPEC_RESTORE, offsetof(struct cpu_spec, cpu_restore));
DEFINE(CPU_DOWN_FLUSH, offsetof(struct cpu_spec, cpu_down_flush));

DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));

@@ -13,11 +13,13 @@
*
*/

#include <asm/page.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/mmu-book3e.h>
#include <asm/asm-offsets.h>
#include <asm/mpc85xx.h>

_GLOBAL(__e500_icache_setup)
mfspr r0, SPRN_L1CSR1
@@ -233,3 +235,113 @@ _GLOBAL(__setup_cpu_e5500)
mtlr r5
blr
#endif

/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */
_GLOBAL(flush_dcache_L1)
mfmsr r10
wrteei 0

mfspr r3,SPRN_L1CFG0
rlwinm r5,r3,9,3 /* Extract cache block size */
twlgti r5,1 /* Only 32 and 64 byte cache blocks
* are currently defined.
*/
li r4,32
subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
* log2(number of ways)
*/
slw r5,r4,r5 /* r5 = cache block size */

rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
mulli r7,r7,13 /* An 8-way cache will require 13
* loads per set.
*/
slw r7,r7,r6

/* save off HID0 and set DCFA */
mfspr r8,SPRN_HID0
ori r9,r8,HID0_DCFA@l
mtspr SPRN_HID0,r9
isync

LOAD_REG_IMMEDIATE(r6, KERNELBASE)
mr r4, r6
mtctr r7

1: lwz r3,0(r4) /* Load... */
add r4,r4,r5
bdnz 1b

msync
mr r4, r6
mtctr r7

1: dcbf 0,r4 /* ...and flush. */
add r4,r4,r5
bdnz 1b

/* restore HID0 */
mtspr SPRN_HID0,r8
isync

wrtee r10

blr

has_L2_cache:
/* skip L2 cache on P2040/P2040E as they have no L2 cache */
mfspr r3, SPRN_SVR
/* shift right by 8 bits and clear E bit of SVR */
rlwinm r4, r3, 24, ~0x800

lis r3, SVR_P2040@h
ori r3, r3, SVR_P2040@l
cmpw r4, r3
beq 1f

li r3, 1
blr
1:
li r3, 0
blr

/* flush backside L2 cache */
flush_backside_L2_cache:
mflr r10
bl has_L2_cache
mtlr r10
cmpwi r3, 0
beq 2f

/* Flush the L2 cache */
mfspr r3, SPRN_L2CSR0
ori r3, r3, L2CSR0_L2FL@l
msync
isync
mtspr SPRN_L2CSR0,r3
isync

/* check if it is complete */
1: mfspr r3,SPRN_L2CSR0
andi. r3, r3, L2CSR0_L2FL@l
bne 1b
2:
blr

_GLOBAL(cpu_down_flush_e500v2)
mflr r0
bl flush_dcache_L1
mtlr r0
blr

_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
mflr r0
bl flush_dcache_L1
bl flush_backside_L2_cache
mtlr r0
blr

/* L1 Data Cache of e6500 contains no modified data, no flush is required */
_GLOBAL(cpu_down_flush_e6500)
blr

@@ -2050,6 +2050,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_e500v2,
.machine_check = machine_check_e500,
.platform = "ppc8548",
.cpu_down_flush = cpu_down_flush_e500v2,
},
#else
{ /* e500mc */
@@ -2069,6 +2070,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_setup = __setup_cpu_e500mc,
.machine_check = machine_check_e500mc,
.platform = "ppce500mc",
.cpu_down_flush = cpu_down_flush_e500mc,
},
#endif /* CONFIG_PPC_E500MC */
#endif /* CONFIG_PPC32 */
@@ -2093,6 +2095,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
.cpu_down_flush = cpu_down_flush_e5500,
},
{ /* e6500 */
.pvr_mask = 0xffff0000,
@@ -2115,6 +2118,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
#endif
.machine_check = machine_check_e500mc,
.platform = "ppce6500",
.cpu_down_flush = cpu_down_flush_e6500,
},
#endif /* CONFIG_PPC_E500MC */
#ifdef CONFIG_PPC32

@@ -40,6 +40,7 @@
#include <asm/kvm_book3s_asm.h>
#include <asm/ptrace.h>
#include <asm/hw_irq.h>
#include <asm/cputhreads.h>

/* The physical memory is laid out such that the secondary processor
* spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -181,6 +182,64 @@ exception_marker:
#endif

#ifdef CONFIG_PPC_BOOK3E
/*
* The booting_thread_hwid holds the thread id we want to boot in cpu
* hotplug case. It is set by cpu hotplug code, and is invalid by default.
* The thread id is the same as the initial value of SPRN_PIR[THREAD_ID]
* bit field.
*/
.globl booting_thread_hwid
booting_thread_hwid:
.long INVALID_THREAD_HWID
.align 3
/*
* start a thread in the same core
* input parameters:
* r3 = the thread physical id
* r4 = the entry point where thread starts
*/
_GLOBAL(book3e_start_thread)
LOAD_REG_IMMEDIATE(r5, MSR_KERNEL)
cmpi 0, r3, 0
beq 10f
cmpi 0, r3, 1
beq 11f
/* If the thread id is invalid, just exit. */
b 13f
10:
mttmr TMRN_IMSR0, r5
mttmr TMRN_INIA0, r4
b 12f
11:
mttmr TMRN_IMSR1, r5
mttmr TMRN_INIA1, r4
12:
isync
li r6, 1
sld r6, r6, r3
mtspr SPRN_TENS, r6
13:
blr

/*
* stop a thread in the same core
* input parameter:
* r3 = the thread physical id
*/
_GLOBAL(book3e_stop_thread)
cmpi 0, r3, 0
beq 10f
cmpi 0, r3, 1
beq 10f
/* If the thread id is invalid, just exit. */
b 13f
10:
li r4, 1
sld r4, r4, r3
mtspr SPRN_TENC, r4
13:
blr

_GLOBAL(fsl_secondary_thread_init)
mfspr r4,SPRN_BUCSR

@@ -261,6 +320,44 @@ _GLOBAL(generic_secondary_smp_init)
mr r3,r24
mr r4,r25
bl book3e_secondary_core_init

/*
* After common core init has finished, check if the current thread is the
* one we wanted to boot. If not, start the specified thread and stop the
* current thread.
*/
LOAD_REG_ADDR(r4, booting_thread_hwid)
lwz r3, 0(r4)
li r5, INVALID_THREAD_HWID
cmpw r3, r5
beq 20f

/*
* The value of booting_thread_hwid has been stored in r3,
* so make it invalid.
*/
stw r5, 0(r4)

/*
* Get the current thread id and check if it is the one we wanted.
* If not, start the one specified in booting_thread_hwid and stop
* the current thread.
*/
mfspr r8, SPRN_TIR
cmpw r3, r8
beq 20f

/* start the specified thread */
LOAD_REG_ADDR(r5, fsl_secondary_thread_init)
ld r4, 0(r5)
bl book3e_start_thread

/* stop the current thread */
mr r3, r8
bl book3e_stop_thread
10:
b 10b
20:
#endif

generic_secondary_common_init:

@@ -329,7 +329,7 @@ InstructionTLBMiss:
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
#ifdef CONFIG_MODULES
#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
/* Only modules will cause ITLB Misses as we always
* pin the first 8MB of kernel memory */
mfspr r11, SPRN_SRR0 /* Get effective address of fault */
@@ -385,27 +385,26 @@ InstructionTLBMiss:

. = 0x1200
DataStoreTLBMiss:
#ifdef CONFIG_8xx_CPU6
mtspr SPRN_SPRG_SCRATCH2, r3
#endif
EXCEPTION_PROLOG_0
mfcr r10
mfcr r3

/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
mfspr r11, SPRN_MD_EPN
IS_KERNEL(r11, r11)
mfspr r10, SPRN_MD_EPN
IS_KERNEL(r11, r10)
mfspr r11, SPRN_M_TW /* Get level 1 table */
BRANCH_UNLESS_KERNEL(3f)
lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
mtcr r10
mfspr r10, SPRN_MD_EPN

/* Insert level 1 index */
rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
mtcr r11
bt- 28,DTLBMiss8M /* bit 28 = Large page (8M) */
mtcr r3

/* We have a pte table, so load fetch the pte from the table.
*/
@@ -453,13 +452,34 @@ DataStoreTLBMiss:
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */

/* Restore registers */
#ifdef CONFIG_8xx_CPU6
mfspr r3, SPRN_SPRG_SCRATCH2
#endif
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi

DTLBMiss8M:
mtcr r3
ori r11, r11, MD_SVALID
MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
#ifdef CONFIG_PPC_16K_PAGES
/*
* In 16k pages mode, each PGD entry defines a 64M block.
* Here we select the 8M page within the block.
*/
rlwimi r11, r10, 0, 0x03800000
#endif
rlwinm r10, r11, 0, 0xff800000
ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
_PAGE_PRESENT
MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */

li r11, RPN_PATTERN
mfspr r3, SPRN_SPRG_SCRATCH2
mtspr SPRN_DAR, r11 /* Tag DAR */
EXCEPTION_EPILOG_0
rfi

/* This is an instruction TLB error on the MPC8xx. This could be due
* to many reasons, such as executing guarded memory or illegal instruction
* addresses. There is nothing to do but handle a big time error fault.
@@ -537,13 +557,15 @@ FixupDAR:/* Entry point for dcbx workaround. */
/* Insert level 1 index */
3: rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
lwz r11, (swapper_pg_dir-PAGE_OFFSET)@l(r11) /* Get the level 1 entry */
mtcr r11
bt 28,200f /* bit 28 = Large page (8M) */
rlwinm r11, r11,0,0,19 /* Extract page descriptor page address */
/* Insert level 2 index */
rlwimi r11, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
lwz r11, 0(r11) /* Get the pte */
/* concat physical page address(r11) and page offset(r10) */
rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
lwz r11,0(r11)
201: lwz r11,0(r11)
/* Check if it really is a dcbx instruction. */
/* dcbt and dcbtst does not generate DTLB Misses/Errors,
* no need to include them here */
@@ -562,6 +584,10 @@ FixupDAR:/* Entry point for dcbx workaround. */
141: mfspr r10,SPRN_SPRG_SCRATCH2
b DARFixed /* Nope, go back to normal TLB processing */

/* concat physical page address(r11) and page offset(r10) */
200: rlwimi r11, r10, 0, 32 - (PAGE_SHIFT << 1), 31
b 201b

144: mfspr r10, SPRN_DSISR
rlwinm r10, r10,0,7,5 /* Clear store bit for buggy dcbst insn */
mtspr SPRN_DSISR, r10
@@ -856,68 +882,6 @@ initial_mmu:
blr

/*
* Set up to use a given MMU context.
* r3 is context number, r4 is PGD pointer.
*
* We place the physical address of the new task page directory loaded
* into the MMU base register, and set the ASID compare register with
* the new "context."
*/
_GLOBAL(set_context)

#ifdef CONFIG_BDI_SWITCH
/* Context switch the PTE pointer for the Abatron BDI2000.
* The PGDIR is passed as second argument.
*/
lis r5, KERNELBASE@h
lwz r5, 0xf0(r5)
stw r4, 0x4(r5)
#endif

/* Register M_TW will contain base address of level 1 table minus the
* lower part of the kernel PGDIR base address, so that all accesses to
* level 1 table are done relative to lower part of kernel PGDIR base
* address.
*/
li r5, (swapper_pg_dir-PAGE_OFFSET)@l
sub r4, r4, r5
tophys (r4, r4)
#ifdef CONFIG_8xx_CPU6
lis r6, cpu6_errata_word@h
ori r6, r6, cpu6_errata_word@l
li r7, 0x3f80
stw r7, 12(r6)
lwz r7, 12(r6)
#endif
mtspr SPRN_M_TW, r4 /* Update pointer to level 1 table */
#ifdef CONFIG_8xx_CPU6
li r7, 0x3380
stw r7, 12(r6)
lwz r7, 12(r6)
#endif
mtspr SPRN_M_CASID, r3 /* Update context */
SYNC
blr

#ifdef CONFIG_8xx_CPU6
/* It's here because it is unique to the 8xx.
* It is important we get called with interrupts disabled. I used to
* do that, but it appears that all code that calls this already had
* interrupt disabled.
*/
.globl set_dec_cpu6
set_dec_cpu6:
lis r7, cpu6_errata_word@h
ori r7, r7, cpu6_errata_word@l
li r4, 0x2c00
stw r4, 8(r7)
lwz r4, 8(r7)
mtspr 22, r3 /* Update Decrementer */
SYNC
blr
#endif

/*
* We put a few things here that have to be page-aligned.
* This stuff goes at the beginning of the data segment,

@@ -1037,80 +1037,6 @@ _GLOBAL(set_context)
isync /* Force context change */
blr

_GLOBAL(flush_dcache_L1)
mfspr r3,SPRN_L1CFG0

rlwinm r5,r3,9,3 /* Extract cache block size */
twlgti r5,1 /* Only 32 and 64 byte cache blocks
* are currently defined.
*/
li r4,32
subfic r6,r5,2 /* r6 = log2(1KiB / cache block size) -
* log2(number of ways)
*/
slw r5,r4,r5 /* r5 = cache block size */

rlwinm r7,r3,0,0xff /* Extract number of KiB in the cache */
mulli r7,r7,13 /* An 8-way cache will require 13
* loads per set.
*/
slw r7,r7,r6

/* save off HID0 and set DCFA */
mfspr r8,SPRN_HID0
ori r9,r8,HID0_DCFA@l
mtspr SPRN_HID0,r9
isync

lis r4,KERNELBASE@h
mtctr r7

1: lwz r3,0(r4) /* Load... */
add r4,r4,r5
bdnz 1b

msync
lis r4,KERNELBASE@h
mtctr r7

1: dcbf 0,r4 /* ...and flush. */
add r4,r4,r5
bdnz 1b

/* restore HID0 */
mtspr SPRN_HID0,r8
isync

blr

/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
_GLOBAL(__flush_disable_L1)
mflr r10
bl flush_dcache_L1 /* Flush L1 d-cache */
mtlr r10

mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
li r5, 2
rlwimi r4, r5, 0, 3

msync
isync
mtspr SPRN_L1CSR0, r4
isync

1: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
andi. r4, r4, 2
bne 1b

mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
li r5, 2
rlwimi r4, r5, 0, 3

mtspr SPRN_L1CSR1, r4
isync

blr

#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start

@@ -91,17 +91,16 @@ _GLOBAL(mulhdu)
addc r7,r0,r7
addze r4,r4
1: beqlr cr1 /* all done if high part of A is 0 */
mr r10,r3
mullw r9,r3,r5
mulhwu r3,r3,r5
mulhwu r10,r3,r5
beq 2f
mullw r0,r10,r6
mulhwu r8,r10,r6
mullw r0,r3,r6
mulhwu r8,r3,r6
addc r7,r0,r7
adde r4,r4,r8
addze r3,r3
addze r10,r10
2: addc r4,r4,r9
addze r3,r3
addze r3,r10
blr

/*
@@ -296,12 +295,9 @@ _GLOBAL(real_writeb)
* Flush instruction cache.
* This is a no-op on the 601.
*/
#ifndef CONFIG_PPC_8xx
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
isync
lis r5, IDC_INVALL@h
mtspr SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#if defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
li r3, 512
mtctr r3
@@ -334,9 +330,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
mfspr r3,SPRN_HID0
ori r3,r3,HID0_ICFI
mtspr SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
#endif /* CONFIG_4xx */
isync
blr
#endif /* CONFIG_PPC_8xx */

/*
* Write any modified data cache blocks out to memory
@@ -350,10 +347,9 @@ BEGIN_FTR_SECTION
PURGE_PREFETCHED_INS
blr /* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
rlwinm r3,r3,0,0,31 - L1_CACHE_SHIFT
subf r4,r3,r4
add r4,r4,r5
addi r4,r4,L1_CACHE_BYTES - 1
srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4
@@ -376,71 +372,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
sync /* additional sync needed on g4 */
isync
blr
/*
* Write any modified data cache blocks out to memory.
* Does not invalidate the corresponding cache lines (especially for
* any corresponding instruction cache).
*
* clean_dcache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(clean_dcache_range)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
add r4,r4,r5
srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4

1: dcbst 0,r3
addi r3,r3,L1_CACHE_BYTES
bdnz 1b
sync /* wait for dcbst's to get to ram */
blr

/*
* Write any modified data cache blocks out to memory and invalidate them.
* Does not invalidate the corresponding instruction cache blocks.
*
* flush_dcache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(flush_dcache_range)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
add r4,r4,r5
srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4

1: dcbf 0,r3
addi r3,r3,L1_CACHE_BYTES
bdnz 1b
sync /* wait for dcbst's to get to ram */
blr

/*
* Like above, but invalidate the D-cache. This is used by the 8xx
* to invalidate the cache so the PPC core doesn't get stale data
* from the CPM (no cache snooping here :-).
*
* invalidate_dcache_range(unsigned long start, unsigned long stop)
*/
_GLOBAL(invalidate_dcache_range)
li r5,L1_CACHE_BYTES-1
andc r3,r3,r5
subf r4,r3,r4
add r4,r4,r5
srwi. r4,r4,L1_CACHE_SHIFT
beqlr
mtctr r4

1: dcbi 0,r3
addi r3,r3,L1_CACHE_BYTES
bdnz 1b
sync /* wait for dcbi's to get to ram */
blr

/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
@@ -518,22 +449,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
blr
#endif /* CONFIG_BOOKE */

/*
* Clear pages using the dcbz instruction, which doesn't cause any
* memory traffic (except to write out any cache lines which get
* displaced). This only works on cacheable memory.
*
* void clear_pages(void *page, int order) ;
*/
_GLOBAL(clear_pages)
li r0,PAGE_SIZE/L1_CACHE_BYTES
slw r0,r0,r4
mtctr r0
1: dcbz 0,r3
addi r3,r3,L1_CACHE_BYTES
bdnz 1b
blr

/*
* Copy a whole page. We use the dcbz instruction on the destination
* to reduce memory traffic (it eliminates the unnecessary reads of

@@ -6,7 +6,9 @@
#include <asm/cacheflush.h>
#include <asm/epapr_hcalls.h>

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(flush_dcache_range);
#endif
EXPORT_SYMBOL(flush_icache_range);

EXPORT_SYMBOL(empty_zero_page);

@@ -10,7 +10,6 @@
#include <asm/pgtable.h>
#include <asm/dcr.h>

EXPORT_SYMBOL(clear_pages);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
EXPORT_SYMBOL(DMA_MODE_WRITE);

@@ -427,7 +427,7 @@ void generic_cpu_die(unsigned int cpu)
for (i = 0; i < 100; i++) {
smp_rmb();
if (per_cpu(cpu_state, cpu) == CPU_DEAD)
if (is_cpu_dead(cpu))
return;
msleep(100);
}
@@ -454,6 +454,11 @@ int generic_check_cpu_restart(unsigned int cpu)
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
}

int is_cpu_dead(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_DEAD;
}

static bool secondaries_inhibited(void)
{
return kvm_hv_mode_active();
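For the thread-level hotplug path added in head_64.S above: booting_thread_hwid records which hardware thread should come up, and generic_secondary_smp_init() compares it against SPRN_TIR, starting the requested thread via book3e_start_thread (IMSR/INIA TMRs plus TENS) and parking itself with book3e_stop_thread (TENC) on a mismatch. A rough C-side sketch of how a platform might use that handshake; the variable and the behaviour described come from the diff, while the declaration and the kick helper here are illustrative assumptions.

/*
 * Sketch only: booting_thread_hwid and the start/stop entry points are in
 * the head_64.S hunk above; this C-side wrapper is an illustrative
 * assumption of how a platform could request a specific thread.
 */
extern int booting_thread_hwid;		/* INVALID_THREAD_HWID when unused */

static void example_kick_secondary_thread(int thread_hwid)
{
	/*
	 * Record the wanted thread before releasing the core; the secondary
	 * start path reads this and, if it is running on the other thread,
	 * calls book3e_start_thread() for the requested one and
	 * book3e_stop_thread() on itself.
	 */
	booting_thread_hwid = thread_hwid;
	/* ... then release/wake the core as the platform normally does ... */
}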