cpufreq: Simplify __cpufreq_remove_dev()

__cpufreq_remove_dev() is called on multiple occasions: cpufreq driver
unregistration and CPU removal.

The current implementation of this routine is more complex than it needs to be.
If the CPU being removed is policy->cpu, we first remove the policy, re-add all
the other CPUs from policy->cpus, and then call __cpufreq_remove_dev() again to
remove the CPU we actually wanted to delete.

There is a simpler way to remove a CPU:
- keep using the old policy structure,
- update its fields (policy->cpu, etc.),
- notify any cpufreq users that depend on policy->cpu changing
  (see the sketch below).
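
A rough standalone sketch of that idea (illustration only, not the kernel code:
the struct below is a simplified stand-in for struct cpufreq_policy, and
notify_policy_cpu_update() stands in for the CPUFREQ_UPDATE_POLICY_CPU
notification this patch introduces):

#include <stdio.h>

#define MAX_CPUS 8

/* simplified stand-in for struct cpufreq_policy */
struct policy {
	unsigned int cpu;	/* CPU that currently owns the policy */
	unsigned int last_cpu;	/* previous owner, kept for notification users */
	int cpus[MAX_CPUS];	/* 1 if that CPU is managed by this policy */
};

/* stand-in for the CPUFREQ_UPDATE_POLICY_CPU notification: users such as
 * cpufreq_stats migrate their per-CPU data from last_cpu to cpu here */
static void notify_policy_cpu_update(struct policy *p)
{
	printf("policy->cpu moved from %u to %u\n", p->last_cpu, p->cpu);
}

/* remove one CPU: drop it from policy->cpus and, if it owned the policy,
 * hand ownership to a remaining sibling instead of rebuilding the policy */
static void remove_cpu(struct policy *p, unsigned int cpu)
{
	p->cpus[cpu] = 0;

	if (cpu != p->cpu)
		return;

	for (unsigned int i = 0; i < MAX_CPUS; i++) {
		if (p->cpus[i]) {
			p->last_cpu = p->cpu;
			p->cpu = i;
			notify_policy_cpu_update(p);
			return;
		}
	}
	/* no CPUs left: the caller would free the policy here */
}

int main(void)
{
	struct policy p = { .cpu = 0, .cpus = { [0] = 1, [1] = 1 } };

	remove_cpu(&p, 0);	/* ownership migrates to CPU 1 */
	remove_cpu(&p, 1);	/* last CPU gone; policy would be freed */
	return 0;
}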

Hence this patch, which implements the above approach. It has been tested on an
ARM big.LITTLE TC2 SoC, which has 5 cores (2 A15s and 3 A7s). Both A15s share
one policy structure and all three A7s share another.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Authored by Viresh Kumar on 2013-01-14 13:23:03 +00:00, committed by Rafael J. Wysocki
parent f85178048c
commit b8eed8af94
4 changed files with 120 additions and 105 deletions


@@ -170,11 +170,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
 static void cpufreq_stats_free_table(unsigned int cpu)
 {
 	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
+
 	if (stat) {
+		pr_debug("%s: Free stat table\n", __func__);
 		kfree(stat->time_in_state);
 		kfree(stat);
+		per_cpu(cpufreq_stats_table, cpu) = NULL;
 	}
-	per_cpu(cpufreq_stats_table, cpu) = NULL;
 }
 
 /* must be called early in the CPU removal sequence (before
@@ -183,8 +185,10 @@ static void cpufreq_stats_free_table(unsigned int cpu)
 static void cpufreq_stats_free_sysfs(unsigned int cpu)
 {
 	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
-	if (policy && policy->cpu == cpu)
+	if (policy && (cpumask_weight(policy->cpus) == 1)) {
+		pr_debug("%s: Free sysfs stat\n", __func__);
 		sysfs_remove_group(&policy->kobj, &stats_attr_group);
+	}
 	if (policy)
 		cpufreq_cpu_put(policy);
 }
@@ -262,6 +266,19 @@ error_get_fail:
 	return ret;
 }
 
+static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
+{
+	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
+			policy->last_cpu);
+
+	pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
+			policy->cpu, policy->last_cpu);
+	per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
+			policy->last_cpu);
+	per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
+	stat->cpu = policy->cpu;
+}
+
 static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 		unsigned long val, void *data)
 {
@@ -269,6 +286,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
 	struct cpufreq_policy *policy = data;
 	struct cpufreq_frequency_table *table;
 	unsigned int cpu = policy->cpu;
+
+	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
+		cpufreq_stats_update_policy_cpu(policy);
+		return 0;
+	}
+
 	if (val != CPUFREQ_NOTIFY)
 		return 0;
 	table = cpufreq_frequency_get_table(cpu);