Merge branch 'devel-stable' into for-next

Conflicts:
        arch/arm/kernel/perf_event_cpu.c

--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -20,6 +20,126 @@
 #include <asm/cputype.h>
 #include <asm/suspend.h>
 
+/*
+ * The public API for this code is documented in arch/arm/include/asm/mcpm.h.
+ * For a comprehensive description of the main algorithm used here, please
+ * see Documentation/arm/cluster-pm-race-avoidance.txt.
+ */
+
+struct sync_struct mcpm_sync;
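
For reference, the state words driven by the helpers below are plain bytes in memory: one per CPU, one per cluster, and one for the inbound side. Their layout comes from arch/arm/include/asm/mcpm.h and is approximately the following (a sketch, not a verbatim copy); each field is padded out to its own write-back granule so that sync_cache_w()/sync_cache_r() can push or fetch one state without touching its neighbours:

        struct mcpm_sync_struct {
                /* individual CPU states */
                struct {
                        s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
                } cpus[MAX_CPUS_PER_CLUSTER];

                /* cluster state */
                s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

                /* inbound-side state */
                s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
        };

        struct sync_struct {
                struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
        };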
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ *    This must be called at the point of committing to teardown of a CPU.
+ *    The CPU cache (SCTLR.C bit) is expected to still be active.
+ */
+static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ *    cluster can be torn down without disrupting this CPU.
+ *    To avoid deadlocks, this must be called before a CPU is powered down.
+ *    The CPU cache (SCTLR.C bit) is expected to be off.
+ *    However L2 cache might or might not be active.
+ */
+static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+        dmb();
+        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+        sev();
+}
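
This is the publishing half of a handshake: dmb() orders the preceding teardown stores before the state update, sync_cache_w() pushes the new state out to memory past the (soon to be disabled) CPU cache, and sev() wakes any CPU parked in WFE. For illustration, a consumer of this notification (a hypothetical helper; the real waiter is the loop in __mcpm_outbound_enter_critical() below) would look like:

        /* Sketch: spin until the given CPU has published CPU_DOWN. */
        static void wait_for_cpu_down(unsigned int cpu, unsigned int cluster)
        {
                sync_cache_r(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
                while (mcpm_sync.clusters[cluster].cpus[cpu].cpu != CPU_DOWN) {
                        wfe();  /* woken by the sev() in __mcpm_cpu_down() */
                        sync_cache_r(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
                }
        }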
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+        dmb();
+        mcpm_sync.clusters[cluster].cluster = state;
+        sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+        sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+        unsigned int i;
+        struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+        /* Warn inbound CPUs that the cluster is being torn down: */
+        c->cluster = CLUSTER_GOING_DOWN;
+        sync_cache_w(&c->cluster);
+
+        /* Back out if the inbound cluster is already in the critical region: */
+        sync_cache_r(&c->inbound);
+        if (c->inbound == INBOUND_COMING_UP)
+                goto abort;
+
+        /*
+         * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+         * teardown is complete on each CPU before tearing down the cluster.
+         *
+         * If any CPU has been woken up again from the DOWN state, then we
+         * shouldn't be taking the cluster down at all: abort in that case.
+         */
+        sync_cache_r(&c->cpus);
+        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+                int cpustate;
+
+                if (i == cpu)
+                        continue;
+
+                while (1) {
+                        cpustate = c->cpus[i].cpu;
+                        if (cpustate != CPU_GOING_DOWN)
+                                break;
+
+                        wfe();
+                        sync_cache_r(&c->cpus[i].cpu);
+                }
+
+                switch (cpustate) {
+                case CPU_DOWN:
+                        continue;
+
+                default:
+                        goto abort;
+                }
+        }
+
+        return true;
+
+abort:
+        __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+        return false;
+}
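
The inbound counterpart of this critical section runs on a CPU whose MMU and caches may still be off, so it is implemented in assembly (arch/arm/common/mcpm_head.S). Rendered in C purely as a sketch, its side of the race is: advertise first, then check what the outbound side decided.

        /* Illustration only; the real inbound path is assembly in mcpm_head.S. */
        static void inbound_enter_sketch(unsigned int cluster)
        {
                struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

                c->inbound = INBOUND_COMING_UP;         /* advertise ourselves first */
                sync_cache_w(&c->inbound);

                /* Did an outbound last man already claim the teardown? */
                sync_cache_r(&c->cluster);
                while (c->cluster == CLUSTER_GOING_DOWN) {
                        wfe();  /* its sev() fires when it finishes or backs out */
                        sync_cache_r(&c->cluster);
                }
        }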
+
+static int __mcpm_cluster_state(unsigned int cluster)
+{
+        sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+        return mcpm_sync.clusters[cluster].cluster;
+}
+
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
 void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
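
The body of mcpm_set_entry_vector() is unchanged here and elided by the context cut; it follows the same publish idiom as the helpers above, approximately (a sketch; see the full file for the authoritative body):

        void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
        {
                unsigned long val = ptr ? virt_to_phys(ptr) : 0;

                mcpm_entry_vectors[cluster][cpu] = val;
                sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
        }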
@@ -78,16 +198,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
         bool cpu_is_down, cluster_is_down;
         int ret = 0;
 
+        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
         if (!platform_ops)
                 return -EUNATCH; /* try not to shadow power_up errors */
         might_sleep();
 
-        /* backward compatibility callback */
-        if (platform_ops->power_up)
-                return platform_ops->power_up(cpu, cluster);
-
-        pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
-
         /*
          * Since this is called with IRQs enabled, and no arch_spin_lock_irq
          * variant exists, we need to disable IRQs manually here.
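
The hunk cuts this comment short. The idiom it describes, sketched here assuming the lock guards the core's CPU and cluster use counts, pairs local_irq_save() with the raw arch spinlock, since arch_spin_lock() has no _irqsave variant:

        /* Sketch of the locking idiom the comment describes: */
        unsigned long flags;

        local_irq_save(flags);
        arch_spin_lock(&mcpm_lock);
        /* ... inspect and update the CPU/cluster use counts ... */
        arch_spin_unlock(&mcpm_lock);
        local_irq_restore(flags);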
@@ -128,29 +243,17 @@ void mcpm_cpu_power_down(void)
         bool cpu_going_down, last_man;
         phys_reset_t phys_reset;
 
-        if (WARN_ON_ONCE(!platform_ops))
-                return;
-        BUG_ON(!irqs_disabled());
-
-        /*
-         * Do this before calling into the power_down method,
-         * as it might not always be safe to do afterwards.
-         */
-        setup_mm_for_reboot();
-
-        /* backward compatibility callback */
-        if (platform_ops->power_down) {
-                platform_ops->power_down();
-                goto not_dead;
-        }
-
         mpidr = read_cpuid_mpidr();
         cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
         cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
         pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
+        if (WARN_ON_ONCE(!platform_ops))
+                return;
+        BUG_ON(!irqs_disabled());
+
+        setup_mm_for_reboot();
+
         __mcpm_cpu_going_down(cpu, cluster);
-
         arch_spin_lock(&mcpm_lock);
         BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
 
@@ -187,7 +290,6 @@ void mcpm_cpu_power_down(void)
         if (cpu_going_down)
                 wfi();
 
-not_dead:
         /*
          * It is possible for a power_up request to happen concurrently
          * with a power_down request for the same CPU. In this case the
@@ -219,22 +321,11 @@ int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
         return ret;
 }
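
The deletions that follow share one theme: the transitional "backward compatibility callback" paths (power_up, power_down, suspend, powered_up) go away now that the core sequences the finer-grained methods itself. The resulting split in struct mcpm_platform_ops is approximately the sketch below; arch/arm/include/asm/mcpm.h has the authoritative list:

        struct mcpm_platform_ops {
                /* fine-grained methods the core code drives itself */
                int (*cpu_powerup)(unsigned int cpu, unsigned int cluster);
                int (*cluster_powerup)(unsigned int cluster);
                void (*cpu_suspend_prepare)(unsigned int cpu, unsigned int cluster);
                void (*cpu_powerdown_prepare)(unsigned int cpu, unsigned int cluster);
                void (*cluster_powerdown_prepare)(unsigned int cluster);
                void (*cpu_cache_disable)(void);
                void (*cluster_cache_disable)(void);
                void (*cpu_is_up)(unsigned int cpu, unsigned int cluster);
                void (*cluster_is_up)(unsigned int cluster);
                int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
                /* deprecated one-shot callbacks, removed by this series */
                int (*power_up)(unsigned int cpu, unsigned int cluster);
                void (*power_down)(void);
                void (*suspend)(u64 expected_residency);
                void (*powered_up)(void);
        };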
 
-void mcpm_cpu_suspend(u64 expected_residency)
+void mcpm_cpu_suspend(void)
 {
         if (WARN_ON_ONCE(!platform_ops))
                 return;
 
-        /* backward compatibility callback */
-        if (platform_ops->suspend) {
-                phys_reset_t phys_reset;
-                BUG_ON(!irqs_disabled());
-                setup_mm_for_reboot();
-                platform_ops->suspend(expected_residency);
-                phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
-                phys_reset(virt_to_phys(mcpm_entry_point));
-                BUG();
-        }
-
         /* Some platforms might have to enable special resume modes, etc. */
         if (platform_ops->cpu_suspend_prepare) {
                 unsigned int mpidr = read_cpuid_mpidr();
@@ -256,12 +347,6 @@ int mcpm_cpu_powered_up(void)
         if (!platform_ops)
                 return -EUNATCH;
 
-        /* backward compatibility callback */
-        if (platform_ops->powered_up) {
-                platform_ops->powered_up();
-                return 0;
-        }
-
         mpidr = read_cpuid_mpidr();
         cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
         cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
@@ -334,120 +419,6 @@ int __init mcpm_loopback(void (*cache_disable)(void))
 
 #endif
-
-struct sync_struct mcpm_sync;
-
-/*
- * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
- *    This must be called at the point of committing to teardown of a CPU.
- *    The CPU cache (SCTLR.C bit) is expected to still be active.
- */
-void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
-{
-        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
-        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-}
-
-/*
- * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
- *    cluster can be torn down without disrupting this CPU.
- *    To avoid deadlocks, this must be called before a CPU is powered down.
- *    The CPU cache (SCTLR.C bit) is expected to be off.
- *    However L2 cache might or might not be active.
- */
-void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
-{
-        dmb();
-        mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
-        sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
-        sev();
-}
-
-/*
- * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
- * @state: the final state of the cluster:
- *     CLUSTER_UP: no destructive teardown was done and the cluster has been
- *         restored to the previous state (CPU cache still active); or
- *     CLUSTER_DOWN: the cluster has been torn down, ready for power-off
- *         (CPU cache disabled, L2 cache either enabled or disabled).
- */
-void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
-{
-        dmb();
-        mcpm_sync.clusters[cluster].cluster = state;
-        sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
-        sev();
-}
-
-/*
- * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
- * This function should be called by the last man, after local CPU teardown
- * is complete.  CPU cache expected to be active.
- *
- * Returns:
- *     false: the critical section was not entered because an inbound CPU was
- *         observed, or the cluster is already being set up;
- *     true: the critical section was entered: it is now safe to tear down the
- *         cluster.
- */
-bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
-{
-        unsigned int i;
-        struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
-
-        /* Warn inbound CPUs that the cluster is being torn down: */
-        c->cluster = CLUSTER_GOING_DOWN;
-        sync_cache_w(&c->cluster);
-
-        /* Back out if the inbound cluster is already in the critical region: */
-        sync_cache_r(&c->inbound);
-        if (c->inbound == INBOUND_COMING_UP)
-                goto abort;
-
-        /*
-         * Wait for all CPUs to get out of the GOING_DOWN state, so that local
-         * teardown is complete on each CPU before tearing down the cluster.
-         *
-         * If any CPU has been woken up again from the DOWN state, then we
-         * shouldn't be taking the cluster down at all: abort in that case.
-         */
-        sync_cache_r(&c->cpus);
-        for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
-                int cpustate;
-
-                if (i == cpu)
-                        continue;
-
-                while (1) {
-                        cpustate = c->cpus[i].cpu;
-                        if (cpustate != CPU_GOING_DOWN)
-                                break;
-
-                        wfe();
-                        sync_cache_r(&c->cpus[i].cpu);
-                }
-
-                switch (cpustate) {
-                case CPU_DOWN:
-                        continue;
-
-                default:
-                        goto abort;
-                }
-        }
-
-        return true;
-
-abort:
-        __mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
-        return false;
-}
-
-int __mcpm_cluster_state(unsigned int cluster)
-{
-        sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
-        return mcpm_sync.clusters[cluster].cluster;
-}
 
 extern unsigned long mcpm_power_up_setup_phys;
 
 int __init mcpm_sync_init(