Merge remote-tracking branch 'kumar/next' into next

Benjamin Herrenschmidt
2012-09-18 16:04:33 +10:00
101 changed files with 2422 additions and 1025 deletions

arch/powerpc/kernel/cpu_setup_fsl_booke.S

@@ -16,6 +16,8 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/ppc_asm.h>
#include <asm/mmu-book3e.h>
#include <asm/asm-offsets.h>
_GLOBAL(__e500_icache_setup)
mfspr r0, SPRN_L1CSR1
@@ -73,27 +75,81 @@ _GLOBAL(__setup_cpu_e500v2)
mtlr r4
blr
_GLOBAL(__setup_cpu_e500mc)
mr r5, r4
mflr r4
_GLOBAL(__setup_cpu_e5500)
mflr r5
bl __e500_icache_setup
bl __e500_dcache_setup
bl __setup_e500mc_ivors
mtlr r4
/*
* We only want to touch IVOR38-41 if we're running on hardware
* that supports category E.HV. The architectural way to determine
* this is MMUCFG[LPIDSIZE].
*/
mfspr r3, SPRN_MMUCFG
rlwinm. r3, r3, 0, MMUCFG_LPIDSIZE
beq 1f
bl __setup_ehv_ivors
b 2f
1:
lwz r3, CPU_SPEC_FEATURES(r4)
/* We need this check as cpu_setup is also called for
* the secondary cores. So, if we have already cleared
* the feature on the primary core, avoid doing it on the
* secondary core.
*/
andis. r6, r3, CPU_FTR_EMB_HV@h
beq 2f
rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
stw r3, CPU_SPEC_FEATURES(r4)
2:
mtlr r5
blr
#endif
/* Right now, restore and setup are the same thing */
#ifdef CONFIG_PPC_BOOK3E_64
_GLOBAL(__restore_cpu_e5500)
_GLOBAL(__setup_cpu_e5500)
mflr r4
bl __e500_icache_setup
bl __e500_dcache_setup
#ifdef CONFIG_PPC_BOOK3E_64
bl .__setup_base_ivors
bl .setup_perfmon_ivor
bl .setup_doorbell_ivors
/*
* We only want to touch IVOR38-41 if we're running on hardware
* that supports category E.HV. The architectural way to determine
* this is MMUCFG[LPIDSIZE].
*/
mfspr r10,SPRN_MMUCFG
rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
beq 1f
bl .setup_ehv_ivors
#else
bl __setup_e500mc_ivors
#endif
1:
mtlr r4
blr
_GLOBAL(__setup_cpu_e5500)
mflr r5
bl __e500_icache_setup
bl __e500_dcache_setup
bl .__setup_base_ivors
bl .setup_perfmon_ivor
bl .setup_doorbell_ivors
/*
* We only want to touch IVOR38-41 if we're running on hardware
* that supports category E.HV. The architectural way to determine
* this is MMUCFG[LPIDSIZE].
*/
mfspr r10,SPRN_MMUCFG
rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
beq 1f
bl .setup_ehv_ivors
b 2f
1:
ld r10,CPU_SPEC_FEATURES(r4)
LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
andc r10,r10,r9
std r10,CPU_SPEC_FEATURES(r4)
2:
mtlr r5
blr
#endif
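
The 32-bit (__setup_cpu_e500mc) and 64-bit (__setup_cpu_e5500) paths above now make the same decision: probe MMUCFG[LPIDSIZE] once, then either program the E.HV IVORs or strip CPU_FTR_EMB_HV from the cpu_spec so later code never assumes hypervisor support; the andis. guard keeps secondary cores from repeating a clear the boot core already did. A pseudo-C sketch of that fixup (illustrative only: mfspr(), SPRN_MMUCFG, MMUCFG_LPIDSIZE, struct cpu_spec and CPU_FTR_EMB_HV are the kernel's names, fixup_emb_hv is made up):

    /* Hypothetical C mirror of the assembly above. */
    static void fixup_emb_hv(struct cpu_spec *spec)
    {
            if (!(mfspr(SPRN_MMUCFG) & MMUCFG_LPIDSIZE)) {
                    /* No LPID space means no category E.HV (e.g. we are
                     * running as a guest): hide the feature bit so
                     * nothing else trusts it. */
                    spec->cpu_features &= ~CPU_FTR_EMB_HV;
            }
    }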

arch/powerpc/kernel/cputable.c

@@ -2016,7 +2016,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc/e500mc",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
#endif
.machine_check = machine_check_e500mc,
.platform = "ppce5500",
},
@@ -2034,7 +2036,9 @@ static struct cpu_spec __initdata cpu_specs[] = {
.oprofile_cpu_type = "ppc/e6500",
.oprofile_type = PPC_OPROFILE_FSL_EMB,
.cpu_setup = __setup_cpu_e5500,
#ifndef CONFIG_PPC32
.cpu_restore = __restore_cpu_e5500,
#endif
.machine_check = machine_check_e500mc,
.platform = "ppce6500",
},
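
The guards matter because __restore_cpu_e5500 now lives inside the CONFIG_PPC_BOOK3E_64 block of cpu_setup_fsl_booke.S, so a 32-bit build has no such symbol; an unguarded initializer would keep a reference like the line below and fail at link time (illustrative fragment, not from the diff):

    .cpu_restore = __restore_cpu_e5500,  /* undefined on 32-bit: link error */

hence the #ifndef CONFIG_PPC32 around .cpu_restore in both entries.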

arch/powerpc/kernel/dma-swiotlb.c

@@ -105,3 +105,23 @@ int __init swiotlb_setup_bus_notifier(void)
&ppc_swiotlb_plat_bus_notifier);
return 0;
}
void swiotlb_detect_4g(void)
{
if ((memblock_end_of_DRAM() - 1) > 0xffffffff)
ppc_swiotlb_enable = 1;
}
static int __init swiotlb_late_init(void)
{
if (ppc_swiotlb_enable) {
swiotlb_print_info();
set_pci_dma_ops(&swiotlb_dma_ops);
ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
} else {
swiotlb_free();
}
return 0;
}
subsys_initcall(swiotlb_late_init);
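
swiotlb_detect_4g() centralizes the check for RAM above the 32-bit boundary: memblock_end_of_DRAM() is exclusive, so the -1 tests the address of the last byte, and exactly 4 GiB of RAM does not turn bounce buffering on. Once ppc_swiotlb_enable is set, the late initcall installs swiotlb_dma_ops for PCI; otherwise the bounce pool is freed. A minimal usage sketch (the board function is hypothetical):

    /* Hypothetical platform code: run from setup_arch(), before PCI
     * probing, so the initcall above sees the final decision. */
    static void __init my_board_setup_arch(void)
    {
    #ifdef CONFIG_SWIOTLB
            swiotlb_detect_4g();    /* sets ppc_swiotlb_enable if needed */
    #endif
    }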

arch/powerpc/kernel/exceptions-64e.S

@@ -1356,25 +1356,11 @@ _GLOBAL(setup_perfmon_ivor)
_GLOBAL(setup_doorbell_ivors)
SET_IVOR(36, 0x280) /* Processor Doorbell */
SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */
/* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */
mfspr r10,SPRN_MMUCFG
rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
beqlr
SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
_GLOBAL(setup_ehv_ivors)
/*
* We may be running as a guest and lack E.HV even on a chip
* that normally has it.
*/
mfspr r10,SPRN_MMUCFG
rlwinm. r10,r10,0,MMUCFG_LPIDSIZE
beqlr
SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */
SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */
SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */
SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */
blr
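
The per-helper MMUCFG[LPIDSIZE] probes are gone: setup_doorbell_ivors now touches only the always-present IVOR36/37, the guest doorbells join IVOR40/41 in setup_ehv_ivors, and the single E.HV test lives in the caller (the __setup_cpu_e5500/__restore_cpu_e5500 hunks above). In rough pseudo-C (the helper names match the assembly entry points; the wrapper is made up):

    /* Rough C rendering of the caller's flow. */
    static void setup_book3e_ivors(void)
    {
            setup_doorbell_ivors();         /* IVOR36/37: always safe */
            if (mfspr(SPRN_MMUCFG) & MMUCFG_LPIDSIZE)
                    setup_ehv_ivors();      /* IVOR38-41: E.HV only */
    }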

arch/powerpc/kernel/head_fsl_booke.S

@@ -895,15 +895,11 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR36,r3
li r3,CriticalDoorbell@l
mtspr SPRN_IVOR37,r3
sync
blr
/*
* We only want to touch IVOR38-41 if we're running on hardware
* that supports category E.HV. The architectural way to determine
* this is MMUCFG[LPIDSIZE].
*/
mfspr r3, SPRN_MMUCFG
andis. r3, r3, MMUCFG_LPIDSIZE@h
beq no_hv
/* setup ehv ivors for */
_GLOBAL(__setup_ehv_ivors)
li r3,GuestDoorbell@l
mtspr SPRN_IVOR38,r3
li r3,CriticalGuestDoorbell@l
@@ -912,14 +908,8 @@ _GLOBAL(__setup_e500mc_ivors)
mtspr SPRN_IVOR40,r3
li r3,Ehvpriv@l
mtspr SPRN_IVOR41,r3
skip_hv_ivors:
sync
blr
no_hv:
lwz r3, CPU_SPEC_FEATURES(r5)
rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
stw r3, CPU_SPEC_FEATURES(r5)
b skip_hv_ivors
#ifdef CONFIG_SPE
/*
@@ -1043,6 +1033,34 @@ _GLOBAL(flush_dcache_L1)
blr
/* Flush L1 d-cache, invalidate and disable d-cache and i-cache */
_GLOBAL(__flush_disable_L1)
mflr r10
bl flush_dcache_L1 /* Flush L1 d-cache */
mtlr r10
mfspr r4, SPRN_L1CSR0 /* Invalidate and disable d-cache */
li r5, 2
rlwimi r4, r5, 0, 3
msync
isync
mtspr SPRN_L1CSR0, r4
isync
1: mfspr r4, SPRN_L1CSR0 /* Wait for the invalidate to finish */
andi. r4, r4, 2
bne 1b
mfspr r4, SPRN_L1CSR1 /* Invalidate and disable i-cache */
li r5, 2
rlwimi r4, r5, 0, 3
mtspr SPRN_L1CSR1, r4
isync
blr
#ifdef CONFIG_SMP
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
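
In the new __flush_disable_L1 above, li r5,2 / rlwimi r4,r5,0,3 rewrites the low two bits of L1CSRn in one instruction: bit 1 (flash invalidate) is set while bit 0 (cache enable) is cleared, and the andi./bne loop spins until hardware clears the invalidate bit again. The d-cache half in sketch C (illustrative; the msync/isync ordering in the assembly has no direct C equivalent):

    /* Sketch of the L1CSR0 sequence in __flush_disable_L1. */
    static void l1_dcache_invalidate_and_disable(void)
    {
            u32 csr = mfspr(SPRN_L1CSR0);

            csr = (csr & ~3u) | 2u;         /* DCFI = 1, DCE = 0 */
            mtspr(SPRN_L1CSR0, csr);
            while (mfspr(SPRN_L1CSR0) & 2u)
                    ;                       /* wait for invalidate */
    }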

arch/powerpc/kernel/smp.c

@@ -102,7 +102,7 @@ int __devinit smp_generic_kick_cpu(int nr)
* Ok it's not there, so it might be soft-unplugged, let's
* try to bring it back
*/
per_cpu(cpu_state, nr) = CPU_UP_PREPARE;
generic_set_cpu_up(nr);
smp_wmb();
smp_send_reschedule(nr);
#endif /* CONFIG_HOTPLUG_CPU */
@@ -413,6 +413,16 @@ void generic_set_cpu_dead(unsigned int cpu)
per_cpu(cpu_state, cpu) = CPU_DEAD;
}
/*
* The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
* the cpu_state is always CPU_DEAD after calling generic_set_cpu_dead(),
* which makes the delay in generic_cpu_die() not happen.
*/
void generic_set_cpu_up(unsigned int cpu)
{
per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
}
int generic_check_cpu_restart(unsigned int cpu)
{
return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE;
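
The new helper closes a hotplug corner case: generic_set_cpu_dead() leaves cpu_state at CPU_DEAD, and if kick_cpu() never moved it back to CPU_UP_PREPARE, the next offline would observe CPU_DEAD immediately and skip the wait even though the CPU had not actually parked yet. For context, the waiting side looks roughly like this (simplified sketch of the existing generic_cpu_die()):

    /* Simplified sketch: poll until the dying CPU reports CPU_DEAD. */
    void generic_cpu_die(unsigned int cpu)
    {
            int i;

            for (i = 0; i < 100; i++) {
                    smp_rmb();
                    if (per_cpu(cpu_state, cpu) == CPU_DEAD)
                            return;
                    msleep(100);
            }
            printk(KERN_ERR "CPU%u didn't die...\n", cpu);
    }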