Merge branch 'topic/paca' into next
Bring in yet another series that touches KVM code, and might need to be merged into the kvm-ppc branch to resolve conflicts. This required some changes in pnv_power9_force_smt4_catch/release() due to the paca array becoming an array of pointers.
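The mechanical source of those conflicts is the change in the paca's shape: the single flat `paca` array is replaced by `paca_ptrs`, an array of per-CPU pointers whose entries are allocated individually (and, where possible, node-local). The sketch below is a standalone illustration, not code from this merge — `struct paca_struct` and its fields here are simplified stand-ins — showing why every `paca[i].field` access in the diff becomes `paca_ptrs[i]->field`:

/* Standalone sketch (assumed, simplified types -- not kernel code). */
#include <stdio.h>
#include <stdlib.h>

struct paca_struct {		/* stand-in for the real paca_struct */
	int hw_cpu_id;
	unsigned long kexec_state;
};

#define NR_CPUS 4

/* Before: one flat array, sized for NR_CPUS up front. */
struct paca_struct paca_flat[NR_CPUS];

/* After: an array of pointers; each paca is allocated separately, so it
 * can come from memory local to that CPU's NUMA node. */
struct paca_struct *paca_ptrs[NR_CPUS];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		paca_ptrs[cpu] = calloc(1, sizeof(struct paca_struct));
		/* old style: paca_flat[cpu].hw_cpu_id = cpu; */
		paca_ptrs[cpu]->hw_cpu_id = cpu;
	}
	printf("cpu 2 hw id: %d\n", paca_ptrs[2]->hw_cpu_id);
	return 0;
}

The extra pointer dereference is the cost; the benefit, which the series' new alloc_paca_data() exploits, is that each entry can be placed and freed independently instead of reserving one contiguous NR_CPUS-sized block.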
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -221,12 +221,17 @@ int main(void)
 	OFFSET(PACA_EXMC, paca_struct, exmc);
 	OFFSET(PACA_EXSLB, paca_struct, exslb);
 	OFFSET(PACA_EXNMI, paca_struct, exnmi);
+#ifdef CONFIG_PPC_PSERIES
 	OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
+#endif
 	OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
 	OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
 	OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
 	OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
 	OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
+#endif
 	OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
 	OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
 	OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -238,7 +238,7 @@ static void __maybe_unused crash_kexec_wait_realmode(int cpu)
 		if (i == cpu)
 			continue;
 
-		while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) {
+		while (paca_ptrs[i]->kexec_state < KEXEC_STATE_REAL_MODE) {
 			barrier();
 			if (!cpu_possible(i) || !cpu_online(i) || (msecs <= 0))
 				break;
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -392,19 +392,20 @@ generic_secondary_common_init:
 	 * physical cpu id in r24, we need to search the pacas to find
 	 * which logical id maps to our physical one.
 	 */
-	LOAD_REG_ADDR(r13, paca)	/* Load paca pointer		 */
-	ld	r13,0(r13)		/* Get base vaddr of paca array	 */
 #ifndef CONFIG_SMP
-	addi	r13,r13,PACA_SIZE	/* know r13 if used accidentally */
 	b	kexec_wait		/* wait for next kernel if !SMP	 */
 #else
+	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointe	 */
+	ld	r8,0(r8)		/* Get base vaddr of array	 */
 	LOAD_REG_ADDR(r7, nr_cpu_ids)	/* Load nr_cpu_ids address       */
 	lwz	r7,0(r7)		/* also the max paca allocated   */
 	li	r5,0			/* logical cpu id                */
-1:	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
+1:
+	sldi	r9,r5,3			/* get paca_ptrs[] index from cpu id */
+	ldx	r13,r9,r8		/* r13 = paca_ptrs[cpu id] */
+	lhz	r6,PACAHWCPUID(r13)	/* Load HW procid from paca      */
 	cmpw	r6,r24			/* Compare to our id             */
 	beq	2f
-	addi	r13,r13,PACA_SIZE	/* Loop to next PACA on miss     */
 	addi	r5,r5,1
 	cmpw	r5,r7			/* Check if more pacas exist     */
 	blt	1b
@@ -756,10 +757,10 @@ _GLOBAL(pmac_secondary_start)
 	mtmsrd	r3			/* RI on */
 
 	/* Set up a paca value for this processor. */
-	LOAD_REG_ADDR(r4,paca)		/* Load paca pointer		*/
-	ld	r4,0(r4)		/* Get base vaddr of paca array	*/
-	mulli	r13,r24,PACA_SIZE	/* Calculate vaddr of right paca */
-	add	r13,r13,r4		/* for this processor.		*/
+	LOAD_REG_ADDR(r4,paca_ptrs)	/* Load paca pointer		*/
+	ld	r4,0(r4)		/* Get base vaddr of paca_ptrs array */
+	sldi	r5,r24,3		/* get paca_ptrs[] index from cpu id */
+	ldx	r13,r5,r4		/* r13 = paca_ptrs[cpu id] */
 	SET_PACA(r13)			/* Save vaddr of paca in an SPRG*/
 
 	/* Mark interrupts soft and hard disabled (they might be enabled
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -168,24 +168,25 @@ static void kexec_prepare_cpus_wait(int wait_state)
 	 * are correctly onlined.  If somehow we start a CPU on boot with RTAS
 	 * start-cpu, but somehow that CPU doesn't write callin_cpu_map[] in
 	 * time, the boot CPU will timeout.  If it does eventually execute
-	 * stuff, the secondary will start up (paca[].cpu_start was written) and
-	 * get into a peculiar state.  If the platform supports
-	 * smp_ops->take_timebase(), the secondary CPU will probably be spinning
-	 * in there.  If not (i.e. pseries), the secondary will continue on and
-	 * try to online itself/idle/etc. If it survives that, we need to find
-	 * these possible-but-not-online-but-should-be CPUs and chaperone them
-	 * into kexec_smp_wait().
+	 * stuff, the secondary will start up (paca_ptrs[]->cpu_start was
+	 * written) and get into a peculiar state.
+	 * If the platform supports smp_ops->take_timebase(), the secondary CPU
+	 * will probably be spinning in there.  If not (i.e. pseries), the
+	 * secondary will continue on and try to online itself/idle/etc. If it
+	 * survives that, we need to find these
+	 * possible-but-not-online-but-should-be CPUs and chaperone them into
+	 * kexec_smp_wait().
 	 */
 	for_each_online_cpu(i) {
 		if (i == my_cpu)
 			continue;
 
-		while (paca[i].kexec_state < wait_state) {
+		while (paca_ptrs[i]->kexec_state < wait_state) {
 			barrier();
 			if (i != notified) {
 				printk(KERN_INFO "kexec: waiting for cpu %d "
 				       "(physical %d) to enter %i state\n",
-				       i, paca[i].hw_cpu_id, wait_state);
+				       i, paca_ptrs[i]->hw_cpu_id, wait_state);
 				notified = i;
 			}
 		}
@@ -322,18 +323,24 @@ void default_machine_kexec(struct kimage *image)
 	kexec_stack.thread_info.cpu = current_thread_info()->cpu;
 
 	/* We need a static PACA, too; copy this CPU's PACA over and switch to
-	 * it.  Also poison per_cpu_offset to catch anyone using non-static
-	 * data.
+	 * it. Also poison per_cpu_offset and NULL lppaca to catch anyone using
+	 * non-static data.
 	 */
 	memcpy(&kexec_paca, get_paca(), sizeof(struct paca_struct));
 	kexec_paca.data_offset = 0xedeaddeadeeeeeeeUL;
-	paca = (struct paca_struct *)RELOC_HIDE(&kexec_paca, 0) -
-		kexec_paca.paca_index;
+#ifdef CONFIG_PPC_PSERIES
+	kexec_paca.lppaca_ptr = NULL;
+#endif
+	paca_ptrs[kexec_paca.paca_index] = &kexec_paca;
+
 	setup_paca(&kexec_paca);
 
-	/* XXX: If anyone does 'dynamic lppacas' this will also need to be
-	 * switched to a static version!
+	/*
+	 * The lppaca should be unregistered at this point so the HV won't
+	 * touch it. In the case of a crash, none of the lppacas are
+	 * unregistered so there is not much we can do about it here.
 	 */
+
 	/*
 	 * On Book3S, the copy must happen with the MMU off if we are either
 	 * using Radix page tables or we are not in an LPAR since we can
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -20,116 +20,105 @@
 
 #include "setup.h"
 
-#ifdef CONFIG_PPC_BOOK3S
+#ifndef CONFIG_SMP
+#define boot_cpuid 0
+#endif
+
+static void *__init alloc_paca_data(unsigned long size, unsigned long align,
+				unsigned long limit, int cpu)
+{
+	unsigned long pa;
+	int nid;
+
+	/*
+	 * boot_cpuid paca is allocated very early before cpu_to_node is up.
+	 * Set bottom-up mode, because the boot CPU should be on node-0,
+	 * which will put its paca in the right place.
+	 */
+	if (cpu == boot_cpuid) {
+		nid = -1;
+		memblock_set_bottom_up(true);
+	} else {
+		nid = early_cpu_to_node(cpu);
+	}
+
+	pa = memblock_alloc_base_nid(size, align, limit, nid, MEMBLOCK_NONE);
+	if (!pa) {
+		pa = memblock_alloc_base(size, align, limit);
+		if (!pa)
+			panic("cannot allocate paca data");
+	}
+
+	if (cpu == boot_cpuid)
+		memblock_set_bottom_up(false);
+
+	return __va(pa);
+}
+
+#ifdef CONFIG_PPC_PSERIES
 
 /*
- * The structure which the hypervisor knows about - this structure
- * should not cross a page boundary.  The vpa_init/register_vpa call
- * is now known to fail if the lppaca structure crosses a page
- * boundary.  The lppaca is also used on POWER5 pSeries boxes.
- * The lppaca is 640 bytes long, and cannot readily
- * change since the hypervisor knows its layout, so a 1kB alignment
- * will suffice to ensure that it doesn't cross a page boundary.
  * See asm/lppaca.h for more detail.
+ *
+ * lppaca structures must must be 1kB in size, L1 cache line aligned,
+ * and not cross 4kB boundary. A 1kB size and 1kB alignment will satisfy
+ * these requirements.
  */
-struct lppaca lppaca[] = {
-	[0 ... (NR_LPPACAS-1)] = {
+static inline void init_lppaca(struct lppaca *lppaca)
+{
+	BUILD_BUG_ON(sizeof(struct lppaca) != 640);
+
+	*lppaca = (struct lppaca) {
 		.desc = cpu_to_be32(0xd397d781),	/* "LpPa" */
-		.size = cpu_to_be16(sizeof(struct lppaca)),
+		.size = cpu_to_be16(0x400),
 		.fpregs_in_use = 1,
 		.slb_count = cpu_to_be16(64),
 		.vmxregs_in_use = 0,
-		.page_ins = 0,
-	},
+		.page_ins = 0, };
 };
 
-static struct lppaca *extra_lppacas;
-static long __initdata lppaca_size;
-
-static void __init allocate_lppacas(int nr_cpus, unsigned long limit)
-{
-	if (nr_cpus <= NR_LPPACAS)
-		return;
-
-	lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) *
-				 (nr_cpus - NR_LPPACAS));
-	extra_lppacas = __va(memblock_alloc_base(lppaca_size,
-						 PAGE_SIZE, limit));
-}
-
-static struct lppaca * __init new_lppaca(int cpu)
+static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
 {
 	struct lppaca *lp;
+	size_t size = 0x400;
 
-	if (cpu < NR_LPPACAS)
-		return &lppaca[cpu];
+	BUILD_BUG_ON(size < sizeof(struct lppaca));
 
-	lp = extra_lppacas + (cpu - NR_LPPACAS);
-	*lp = lppaca[0];
+	if (early_cpu_has_feature(CPU_FTR_HVMODE))
+		return NULL;
 
+	lp = alloc_paca_data(size, 0x400, limit, cpu);
+	init_lppaca(lp);
+
 	return lp;
 }
-
-static void __init free_lppacas(void)
-{
-	long new_size = 0, nr;
-
-	if (!lppaca_size)
-		return;
-	nr = num_possible_cpus() - NR_LPPACAS;
-	if (nr > 0)
-		new_size = PAGE_ALIGN(nr * sizeof(struct lppaca));
-	if (new_size >= lppaca_size)
-		return;
-
-	memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size);
-	lppaca_size = new_size;
-}
-
-#else
-
-static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { }
-static inline void free_lppacas(void) { }
-
-#endif /* CONFIG_PPC_BOOK3S */
+#endif /* CONFIG_PPC_PSERIES */
 
 #ifdef CONFIG_PPC_BOOK3S_64
 
 /*
- * 3 persistent SLBs are registered here.  The buffer will be zero
+ * 3 persistent SLBs are allocated here.  The buffer will be zero
  * initially, hence will all be invaild until we actually write them.
  *
  * If you make the number of persistent SLB entries dynamic, please also
  * update PR KVM to flush and restore them accordingly.
  */
-static struct slb_shadow * __initdata slb_shadow;
-
-static void __init allocate_slb_shadows(int nr_cpus, int limit)
-{
-	int size = PAGE_ALIGN(sizeof(struct slb_shadow) * nr_cpus);
-
-	if (early_radix_enabled())
-		return;
-
-	slb_shadow = __va(memblock_alloc_base(size, PAGE_SIZE, limit));
-	memset(slb_shadow, 0, size);
-}
-
-static struct slb_shadow * __init init_slb_shadow(int cpu)
+static struct slb_shadow * __init new_slb_shadow(int cpu, unsigned long limit)
 {
 	struct slb_shadow *s;
 
-	if (early_radix_enabled())
-		return NULL;
+	if (cpu != boot_cpuid) {
+		/*
+		 * Boot CPU comes here before early_radix_enabled
+		 * is parsed (e.g., for disable_radix). So allocate
+		 * always and this will be fixed up in free_unused_pacas.
+		 */
+		if (early_radix_enabled())
+			return NULL;
+	}
 
-	s = &slb_shadow[cpu];
-
-	/*
-	 * When we come through here to initialise boot_paca, the slb_shadow
-	 * buffers are not allocated yet. That's OK, we'll get one later in
-	 * boot, but make sure we don't corrupt memory at 0.
-	 */
-	if (!slb_shadow)
-		return NULL;
+	s = alloc_paca_data(sizeof(*s), L1_CACHE_BYTES, limit, cpu);
+	memset(s, 0, sizeof(*s));
 
 	s->persistent = cpu_to_be32(SLB_NUM_BOLTED);
 	s->buffer_length = cpu_to_be32(sizeof(*s));
@@ -137,10 +126,6 @@ static struct slb_shadow * __init init_slb_shadow(int cpu)
 	return s;
 }
 
-#else /* !CONFIG_PPC_BOOK3S_64 */
-
-static void __init allocate_slb_shadows(int nr_cpus, int limit) { }
-
 #endif /* CONFIG_PPC_BOOK3S_64 */
 
 /* The Paca is an array with one entry per processor.  Each contains an
@@ -152,14 +137,15 @@ static void __init allocate_slb_shadows(int nr_cpus, int limit) { }
  * processors.  The processor VPD array needs one entry per physical
  * processor (not thread).
  */
-struct paca_struct *paca;
-EXPORT_SYMBOL(paca);
+struct paca_struct **paca_ptrs __read_mostly;
+EXPORT_SYMBOL(paca_ptrs);
 
 void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 {
-#ifdef CONFIG_PPC_BOOK3S
-	new_paca->lppaca_ptr = new_lppaca(cpu);
-#else
+#ifdef CONFIG_PPC_PSERIES
+	new_paca->lppaca_ptr = NULL;
+#endif
+#ifdef CONFIG_PPC_BOOK3E
 	new_paca->kernel_pgd = swapper_pg_dir;
 #endif
 	new_paca->lock_token = 0x8000;
@@ -173,7 +159,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu)
 	new_paca->__current = &init_task;
 	new_paca->data_offset = 0xfeeeeeeeeeeeeeeeULL;
 #ifdef CONFIG_PPC_BOOK3S_64
-	new_paca->slb_shadow_ptr = init_slb_shadow(cpu);
+	new_paca->slb_shadow_ptr = NULL;
 #endif
 
 #ifdef CONFIG_PPC_BOOK3E
@@ -203,12 +189,25 @@ void setup_paca(struct paca_struct *new_paca)
 
 }
 
-static int __initdata paca_size;
 static int __initdata paca_nr_cpu_ids;
+static int __initdata paca_ptrs_size;
+static int __initdata paca_struct_size;
 
-void __init allocate_pacas(void)
+void __init allocate_paca_ptrs(void)
+{
+	paca_nr_cpu_ids = nr_cpu_ids;
+
+	paca_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
+	paca_ptrs = __va(memblock_alloc(paca_ptrs_size, 0));
+	memset(paca_ptrs, 0x88, paca_ptrs_size);
+}
+
+void __init allocate_paca(int cpu)
 {
 	u64 limit;
-	int cpu;
+	struct paca_struct *paca;
+
+	BUG_ON(cpu >= paca_nr_cpu_ids);
 
 #ifdef CONFIG_PPC_BOOK3S_64
 	/*
@@ -220,40 +219,44 @@ void __init allocate_pacas(void)
 	limit = ppc64_rma_size;
 #endif
 
-	paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
-
-	paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit));
-	memset(paca, 0, paca_size);
-
-	printk(KERN_DEBUG "Allocated %u bytes for %u pacas at %p\n",
-		paca_size, nr_cpu_ids, paca);
-
-	allocate_lppacas(nr_cpu_ids, limit);
-
-	allocate_slb_shadows(nr_cpu_ids, limit);
+	paca = alloc_paca_data(sizeof(struct paca_struct), L1_CACHE_BYTES,
+				limit, cpu);
+	paca_ptrs[cpu] = paca;
+	memset(paca, 0, sizeof(struct paca_struct));
 
-	/* Can't use for_each_*_cpu, as they aren't functional yet */
-	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		initialise_paca(&paca[cpu], cpu);
+	initialise_paca(paca, cpu);
+#ifdef CONFIG_PPC_PSERIES
+	paca->lppaca_ptr = new_lppaca(cpu, limit);
+#endif
+#ifdef CONFIG_PPC_BOOK3S_64
+	paca->slb_shadow_ptr = new_slb_shadow(cpu, limit);
+#endif
+	paca_struct_size += sizeof(struct paca_struct);
 }
 
 void __init free_unused_pacas(void)
 {
-	int new_size;
-
-	new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
-
-	if (new_size >= paca_size)
-		return;
-
-	memblock_free(__pa(paca) + new_size, paca_size - new_size);
-
-	printk(KERN_DEBUG "Freed %u bytes for unused pacas\n",
-		paca_size - new_size);
+	int new_ptrs_size;
 
-	paca_size = new_size;
+	new_ptrs_size = sizeof(struct paca_struct *) * nr_cpu_ids;
+	if (new_ptrs_size < paca_ptrs_size)
+		memblock_free(__pa(paca_ptrs) + new_ptrs_size,
+					paca_ptrs_size - new_ptrs_size);
+
+	paca_nr_cpu_ids = nr_cpu_ids;
+	paca_ptrs_size = new_ptrs_size;
+
+#ifdef CONFIG_PPC_BOOK3S_64
+	if (early_radix_enabled()) {
+		/* Ugly fixup, see new_slb_shadow() */
+		memblock_free(__pa(paca_ptrs[boot_cpuid]->slb_shadow_ptr),
+					sizeof(struct slb_shadow));
+		paca_ptrs[boot_cpuid]->slb_shadow_ptr = NULL;
+	}
+#endif
 
-	free_lppacas();
+	printk(KERN_DEBUG "Allocated %u bytes for %u pacas\n",
+			paca_ptrs_size + paca_struct_size, nr_cpu_ids);
 }
 
 void copy_mm_to_paca(struct mm_struct *mm)
|
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -365,7 +365,6 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 	DBG("boot cpu: logical %d physical %d\n", found,
 	    be32_to_cpu(intserv[found_thread]));
 	boot_cpuid = found;
-	set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread]));
 
 	/*
 	 * PAPR defines "logical" PVR values for cpus that
@@ -403,7 +402,9 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
 		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
 	else if (!dt_cpu_ftrs_in_use())
 		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
+	allocate_paca(boot_cpuid);
 #endif
+	set_hard_smp_processor_id(found, be32_to_cpu(intserv[found_thread]));
 
 	return 0;
 }
@@ -744,7 +745,7 @@ void __init early_init_devtree(void *params)
 	 * FIXME .. and the initrd too? */
 	move_device_tree();
 
-	allocate_pacas();
+	allocate_paca_ptrs();
 
 	DBG("Scanning CPUs ...\n");
 
@@ -874,5 +875,15 @@ EXPORT_SYMBOL(cpu_to_chip_id);
 
 bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 {
+#ifdef CONFIG_SMP
+	/*
+	 * Early firmware scanning must use this rather than
+	 * get_hard_smp_processor_id because we don't have pacas allocated
+	 * until memory topology is discovered.
+	 */
+	if (cpu_to_phys_id != NULL)
+		return (int)phys_id == cpu_to_phys_id[cpu];
+#endif
+
 	return (int)phys_id == get_hard_smp_processor_id(cpu);
 }
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -437,6 +437,8 @@ static void __init cpu_init_thread_core_maps(int tpc)
 }
 
 
+u32 *cpu_to_phys_id = NULL;
+
 /**
  * setup_cpu_maps - initialize the following cpu maps:
  *                  cpu_possible_mask
@@ -463,6 +465,10 @@ void __init smp_setup_cpu_maps(void)
 
 	DBG("smp_setup_cpu_maps()\n");
 
+	cpu_to_phys_id = __va(memblock_alloc(nr_cpu_ids * sizeof(u32),
+							__alignof__(u32)));
+	memset(cpu_to_phys_id, 0, nr_cpu_ids * sizeof(u32));
+
 	for_each_node_by_type(dn, "cpu") {
 		const __be32 *intserv;
 		__be32 cpu_be;
@@ -480,6 +486,7 @@ void __init smp_setup_cpu_maps(void)
 			intserv = of_get_property(dn, "reg", &len);
 			if (!intserv) {
 				cpu_be = cpu_to_be32(cpu);
+				/* XXX: what is this? uninitialized?? */
 				intserv = &cpu_be;	/* assume logical == phys */
 				len = 4;
 			}
@@ -499,8 +506,8 @@ void __init smp_setup_cpu_maps(void)
 						"enable-method", "spin-table");
 
 			set_cpu_present(cpu, avail);
-			set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
 			set_cpu_possible(cpu, true);
+			cpu_to_phys_id[cpu] = be32_to_cpu(intserv[j]);
 			cpu++;
 		}
 
@@ -835,6 +842,23 @@ static __init void print_system_info(void)
 	pr_info("-----------------------------------------------------\n");
 }
 
+#ifdef CONFIG_SMP
+static void smp_setup_pacas(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		if (cpu == smp_processor_id())
+			continue;
+		allocate_paca(cpu);
+		set_hard_smp_processor_id(cpu, cpu_to_phys_id[cpu]);
+	}
+
+	memblock_free(__pa(cpu_to_phys_id), nr_cpu_ids * sizeof(u32));
+	cpu_to_phys_id = NULL;
+}
+#endif
+
 /*
  * Called into from start_kernel this initializes memblock, which is used
  * to manage page allocation until mem_init is called.
@@ -888,6 +912,9 @@ void __init setup_arch(char **cmdline_p)
 	/* Check the SMT related command line arguments (ppc64). */
 	check_smt_enabled();
 
+	/* Parse memory topology */
+	mem_topology_setup();
+
 	/* On BookE, setup per-core TLB data structures. */
 	setup_tlb_core_data();
 
@@ -899,6 +926,7 @@ void __init setup_arch(char **cmdline_p)
 	 * so smp_release_cpus() does nothing for them.
 	 */
 #ifdef CONFIG_SMP
+	smp_setup_pacas();
 	smp_release_cpus();
 #endif
 
--- a/arch/powerpc/kernel/setup.h
+++ b/arch/powerpc/kernel/setup.h
@@ -45,14 +45,11 @@ void emergency_stack_init(void);
 static inline void emergency_stack_init(void) { };
 #endif
 
-#ifdef CONFIG_PPC64
-void record_spr_defaults(void);
-#else
-static inline void record_spr_defaults(void) { };
-#endif
-
 #ifdef CONFIG_PPC64
 u64 ppc64_bolted_size(void);
+
+/* Default SPR values from firmware/kexec */
+extern unsigned long spr_default_dscr;
 #endif
 
 /*
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -110,7 +110,7 @@ void __init setup_tlb_core_data(void)
 		if (cpu_first_thread_sibling(boot_cpuid) == first)
 			first = boot_cpuid;
 
-		paca[cpu].tcd_ptr = &paca[first].tcd;
+		paca_ptrs[cpu]->tcd_ptr = &paca_ptrs[first]->tcd;
 
 		/*
 		 * If we have threads, we need either tlbsrx.
@@ -254,6 +254,14 @@ static void cpu_ready_for_interrupts(void)
 	get_paca()->kernel_msr = MSR_KERNEL;
 }
 
+unsigned long spr_default_dscr = 0;
+
+void __init record_spr_defaults(void)
+{
+	if (early_cpu_has_feature(CPU_FTR_DSCR))
+		spr_default_dscr = mfspr(SPRN_DSCR);
+}
+
 /*
  * Early initialization entry point. This is called by head.S
  * with MMU translation disabled. We rely on the "feature" of
@@ -304,7 +312,11 @@ void __init early_setup(unsigned long dt_ptr)
 	early_init_devtree(__va(dt_ptr));
 
 	/* Now we know the logical id of our boot cpu, setup the paca. */
-	setup_paca(&paca[boot_cpuid]);
+	if (boot_cpuid != 0) {
+		/* Poison paca_ptrs[0] again if it's not the boot cpu */
+		memset(&paca_ptrs[0], 0x88, sizeof(paca_ptrs[0]));
+	}
+	setup_paca(paca_ptrs[boot_cpuid]);
 	fixup_boot_paca();
 
 	/*
@@ -599,6 +611,21 @@ __init u64 ppc64_bolted_size(void)
 #endif
 }
 
+static void *__init alloc_stack(unsigned long limit, int cpu)
+{
+	unsigned long pa;
+
+	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
+					early_cpu_to_node(cpu), MEMBLOCK_NONE);
+	if (!pa) {
+		pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+		if (!pa)
+			panic("cannot allocate stacks");
+	}
+
+	return __va(pa);
+}
+
 void __init irqstack_early_init(void)
 {
 	u64 limit = ppc64_bolted_size();
@@ -610,12 +637,8 @@ void __init irqstack_early_init(void)
 	 * accessed in realmode.
 	 */
 	for_each_possible_cpu(i) {
-		softirq_ctx[i] = (struct thread_info *)
-			__va(memblock_alloc_base(THREAD_SIZE,
-					THREAD_SIZE, limit));
-		hardirq_ctx[i] = (struct thread_info *)
-			__va(memblock_alloc_base(THREAD_SIZE,
-					THREAD_SIZE, limit));
+		softirq_ctx[i] = alloc_stack(limit, i);
+		hardirq_ctx[i] = alloc_stack(limit, i);
 	}
 }
 
@@ -623,20 +646,21 @@ void __init irqstack_early_init(void)
 void __init exc_lvl_early_init(void)
 {
 	unsigned int i;
-	unsigned long sp;
 
 	for_each_possible_cpu(i) {
-		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
-		critirq_ctx[i] = (struct thread_info *)__va(sp);
-		paca[i].crit_kstack = __va(sp + THREAD_SIZE);
+		void *sp;
+
+		sp = alloc_stack(ULONG_MAX, i);
+		critirq_ctx[i] = sp;
+		paca_ptrs[i]->crit_kstack = sp + THREAD_SIZE;
 
-		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
-		dbgirq_ctx[i] = (struct thread_info *)__va(sp);
-		paca[i].dbg_kstack = __va(sp + THREAD_SIZE);
+		sp = alloc_stack(ULONG_MAX, i);
+		dbgirq_ctx[i] = sp;
+		paca_ptrs[i]->dbg_kstack = sp + THREAD_SIZE;
 
-		sp = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
-		mcheckirq_ctx[i] = (struct thread_info *)__va(sp);
-		paca[i].mc_kstack = __va(sp + THREAD_SIZE);
+		sp = alloc_stack(ULONG_MAX, i);
+		mcheckirq_ctx[i] = sp;
+		paca_ptrs[i]->mc_kstack = sp + THREAD_SIZE;
 	}
 
 	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
@@ -690,23 +714,24 @@ void __init emergency_stack_init(void)
 
 	for_each_possible_cpu(i) {
 		struct thread_info *ti;
-		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+
+		ti = alloc_stack(limit, i);
 		memset(ti, 0, THREAD_SIZE);
 		emerg_stack_init_thread_info(ti, i);
-		paca[i].emergency_sp = (void *)ti + THREAD_SIZE;
+		paca_ptrs[i]->emergency_sp = (void *)ti + THREAD_SIZE;
 
 #ifdef CONFIG_PPC_BOOK3S_64
 		/* emergency stack for NMI exception handling. */
-		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		ti = alloc_stack(limit, i);
 		memset(ti, 0, THREAD_SIZE);
 		emerg_stack_init_thread_info(ti, i);
-		paca[i].nmi_emergency_sp = (void *)ti + THREAD_SIZE;
+		paca_ptrs[i]->nmi_emergency_sp = (void *)ti + THREAD_SIZE;
 
 		/* emergency stack for machine check exception handling. */
-		ti = __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit));
+		ti = alloc_stack(limit, i);
 		memset(ti, 0, THREAD_SIZE);
 		emerg_stack_init_thread_info(ti, i);
-		paca[i].mc_emergency_sp = (void *)ti + THREAD_SIZE;
+		paca_ptrs[i]->mc_emergency_sp = (void *)ti + THREAD_SIZE;
 #endif
 	}
 }
@@ -762,7 +787,7 @@ void __init setup_per_cpu_areas(void)
 	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
 	for_each_possible_cpu(cpu) {
 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
-		paca[cpu].data_offset = __per_cpu_offset[cpu];
+		paca_ptrs[cpu]->data_offset = __per_cpu_offset[cpu];
 	}
 }
 #endif
@@ -876,8 +901,9 @@ static void init_fallback_flush(void)
 	memset(l1d_flush_fallback_area, 0, l1d_size * 2);
 
 	for_each_possible_cpu(cpu) {
-		paca[cpu].rfi_flush_fallback_area = l1d_flush_fallback_area;
-		paca[cpu].l1d_flush_size = l1d_size;
+		struct paca_struct *paca = paca_ptrs[cpu];
+		paca->rfi_flush_fallback_area = l1d_flush_fallback_area;
+		paca->l1d_flush_size = l1d_size;
 	}
 }
 
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -123,8 +123,8 @@ int smp_generic_kick_cpu(int nr)
 	 * cpu_start field to become non-zero After we set cpu_start,
 	 * the processor will continue on to secondary_start
 	 */
-	if (!paca[nr].cpu_start) {
-		paca[nr].cpu_start = 1;
+	if (!paca_ptrs[nr]->cpu_start) {
+		paca_ptrs[nr]->cpu_start = 1;
 		smp_mb();
 		return 0;
 	}
@@ -657,7 +657,7 @@ void smp_prepare_boot_cpu(void)
 {
 	BUG_ON(smp_processor_id() != boot_cpuid);
 #ifdef CONFIG_PPC64
-	paca[boot_cpuid].__current = current;
+	paca_ptrs[boot_cpuid]->__current = current;
 #endif
 	set_numa_node(numa_cpu_lookup_table[boot_cpuid]);
 	current_set[boot_cpuid] = task_thread_info(current);
@@ -748,8 +748,8 @@ static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle)
 	struct thread_info *ti = task_thread_info(idle);
 
 #ifdef CONFIG_PPC64
-	paca[cpu].__current = idle;
-	paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
+	paca_ptrs[cpu]->__current = idle;
+	paca_ptrs[cpu]->kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD;
 #endif
 	ti->cpu = cpu;
 	secondary_ti = current_set[cpu] = ti;
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -20,6 +20,7 @@
 #include <asm/firmware.h>
 
 #include "cacheinfo.h"
+#include "setup.h"
 
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
@@ -588,21 +589,18 @@ static DEVICE_ATTR(dscr_default, 0600,
 
 static void sysfs_create_dscr_default(void)
 {
-	int err = 0;
-	if (cpu_has_feature(CPU_FTR_DSCR))
-		err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
-}
-
-void __init record_spr_defaults(void)
-{
-	int cpu;
-
 	if (cpu_has_feature(CPU_FTR_DSCR)) {
-		dscr_default = mfspr(SPRN_DSCR);
-		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-			paca[cpu].dscr_default = dscr_default;
+		int err = 0;
+		int cpu;
+
+		dscr_default = spr_default_dscr;
+		for_each_possible_cpu(cpu)
+			paca_ptrs[cpu]->dscr_default = dscr_default;
+
+		err = device_create_file(cpu_subsys.dev_root, &dev_attr_dscr_default);
 	}
 }
 
 #endif /* CONFIG_PPC64 */
 
 #ifdef HAS_PPC_PMC_PA6T