Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
  x86: setup_per_cpu_areas() cleanup
  cpumask: fix compile error when CONFIG_NR_CPUS is not defined
  cpumask: use alloc_cpumask_var_node where appropriate
  cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
  x86: use cpumask_var_t in acpi/boot.c
  x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
  sched: put back some stack hog changes that were undone in kernel/sched.c
  x86: enable cpus display of kernel_max and offlined cpus
  ia64: cpumask fix for is_affinity_mask_valid()
  cpumask: convert RCU implementations, fix
  xtensa: define __fls
  mn10300: define __fls
  m32r: define __fls
  h8300: define __fls
  frv: define __fls
  cris: define __fls
  cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
  cpumask: zero extra bits in alloc_cpumask_var_node
  cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
  cpumask: convert mm/
  ...
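Most of this series converts fixed-size cpumask_t values, frequently declared on the stack, to cpumask_var_t, which becomes a heap-allocated pointer when CONFIG_CPUMASK_OFFSTACK=y so that NR_CPUS=4096 masks (512 bytes each) stop eating kernel stacks. A minimal sketch of the lifecycle the converted code follows; demo_mask_walk() is illustrative, not a function from this series:

	#include <linux/kernel.h>
	#include <linux/gfp.h>
	#include <linux/cpumask.h>

	static int demo_mask_walk(void)
	{
		cpumask_var_t mask;	/* pointer-sized if CONFIG_CPUMASK_OFFSTACK=y */
		int cpu;

		/* Allocates off-stack storage; a no-op stack alias otherwise. */
		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(mask, cpu_online_mask);
		for_each_cpu(cpu, mask)		/* scans only up to nr_cpu_ids */
			pr_info("cpu %d is online\n", cpu);

		free_cpumask_var(mask);
		return 0;
	}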
@@ -538,9 +538,10 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
 	struct acpi_madt_local_apic *lapic;
-	cpumask_t tmp_map, new_map;
+	cpumask_var_t tmp_map, new_map;
 	u8 physid;
 	int cpu;
+	int retval = -ENOMEM;
 
 	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
 		return -EINVAL;
@@ -569,23 +570,37 @@ static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 	buffer.length = ACPI_ALLOCATE_BUFFER;
 	buffer.pointer = NULL;
 
-	tmp_map = cpu_present_map;
+	if (!alloc_cpumask_var(&tmp_map, GFP_KERNEL))
+		goto out;
+
+	if (!alloc_cpumask_var(&new_map, GFP_KERNEL))
+		goto free_tmp_map;
+
+	cpumask_copy(tmp_map, cpu_present_mask);
 	acpi_register_lapic(physid, lapic->lapic_flags & ACPI_MADT_ENABLED);
 
 	/*
 	 * If mp_register_lapic successfully generates a new logical cpu
 	 * number, then the following will get us exactly what was mapped
 	 */
-	cpus_andnot(new_map, cpu_present_map, tmp_map);
-	if (cpus_empty(new_map)) {
+	cpumask_andnot(new_map, cpu_present_mask, tmp_map);
+	if (cpumask_empty(new_map)) {
 		printk ("Unable to map lapic to logical cpu number\n");
-		return -EINVAL;
+		retval = -EINVAL;
+		goto free_new_map;
 	}
 
-	cpu = first_cpu(new_map);
+	cpu = cpumask_first(new_map);
 
 	*pcpu = cpu;
-	return 0;
+	retval = 0;
+
+free_new_map:
+	free_cpumask_var(new_map);
+free_tmp_map:
+	free_cpumask_var(tmp_map);
+out:
+	return retval;
 }
 
 /* wrapper to silence section mismatch warning */
@@ -598,7 +613,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
 int acpi_unmap_lsapic(int cpu)
 {
 	per_cpu(x86_cpu_to_apicid, cpu) = -1;
-	cpu_clear(cpu, cpu_present_map);
+	set_cpu_present(cpu, false);
 	num_processors--;
 
 	return (0);
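The _acpi_map_lsapic() conversion above trades two on-stack masks for two allocations and unwinds them with goto labels so that every exit path frees exactly what was allocated. A condensed sketch of that unwind shape, with the work elided (demo_two_masks() is illustrative):

	#include <linux/gfp.h>
	#include <linux/cpumask.h>

	static int demo_two_masks(void)
	{
		cpumask_var_t a, b;
		int retval = -ENOMEM;

		if (!alloc_cpumask_var(&a, GFP_KERNEL))
			goto out;
		if (!alloc_cpumask_var(&b, GFP_KERNEL))
			goto free_a;

		/* ... work; failures set retval and fall through ... */
		retval = 0;

		free_cpumask_var(b);
	free_a:
		free_cpumask_var(a);
	out:
		return retval;
	}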
@@ -140,7 +140,7 @@ static int lapic_next_event(unsigned long delta,
 			    struct clock_event_device *evt);
 static void lapic_timer_setup(enum clock_event_mode mode,
 			      struct clock_event_device *evt);
-static void lapic_timer_broadcast(const cpumask_t *mask);
+static void lapic_timer_broadcast(const struct cpumask *mask);
 static void apic_pm_activate(void);
 
 /*
@@ -453,7 +453,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 /*
  * Local APIC timer broadcast function
  */
-static void lapic_timer_broadcast(const cpumask_t *mask)
+static void lapic_timer_broadcast(const struct cpumask *mask)
 {
 #ifdef CONFIG_SMP
 	send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
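cpumask_t is just a typedef for struct cpumask, so this hunk changes no ABI; spelling prototypes as `const struct cpumask *` is the forward-compatible form and documents that the callee only reads the mask. A hypothetical reader in that style:

	#include <linux/cpumask.h>

	/* Never writes the mask, so any caller's mask representation works. */
	static unsigned int demo_count_online(const struct cpumask *mask)
	{
		unsigned int n = 0;
		int cpu;

		for_each_cpu(cpu, mask)
			if (cpu_online(cpu))
				n++;
		return n;
	}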
@@ -355,7 +355,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
 	} else if (smp_num_siblings > 1) {
 
-		if (smp_num_siblings > NR_CPUS) {
+		if (smp_num_siblings > nr_cpu_ids) {
 			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
 			       smp_num_siblings);
 			smp_num_siblings = 1;
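nr_cpu_ids is the runtime number of valid CPU ids, usually far below the compile-time NR_CPUS ceiling, so comparing against it rejects ids that exist in no per-cpu structure. The same bound check recurs in the cpuid and msr hunks below; a hypothetical standalone form:

	#include <linux/errno.h>
	#include <linux/cpumask.h>

	/* Validate a user-supplied cpu id against the runtime limit. */
	static int demo_check_cpu(unsigned int cpu)
	{
		if (cpu >= nr_cpu_ids || !cpu_online(cpu))
			return -ENXIO;	/* no such CPU */
		return 0;
	}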
@@ -517,6 +517,17 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
 	}
 }
 
+static void free_acpi_perf_data(void)
+{
+	unsigned int i;
+
+	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
+	for_each_possible_cpu(i)
+		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
+				 ->shared_cpu_map);
+	free_percpu(acpi_perf_data);
+}
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -527,6 +538,7 @@ acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
  */
 static int __init acpi_cpufreq_early_init(void)
 {
+	unsigned int i;
 	dprintk("acpi_cpufreq_early_init\n");
 
 	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
@@ -534,6 +546,16 @@ static int __init acpi_cpufreq_early_init(void)
 		dprintk("Memory allocation error for acpi_perf_data.\n");
 		return -ENOMEM;
 	}
+	for_each_possible_cpu(i) {
+		if (!alloc_cpumask_var_node(
+			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
+			GFP_KERNEL, cpu_to_node(i))) {
+
+			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
+			free_acpi_perf_data();
+			return -ENOMEM;
+		}
+	}
 
 	/* Do initialization in ACPI core */
 	acpi_processor_preregister_performance(acpi_perf_data);
@@ -604,9 +626,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	 */
 	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
 	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
-		policy->cpus = perf->shared_cpu_map;
+		cpumask_copy(&policy->cpus, perf->shared_cpu_map);
 	}
-	policy->related_cpus = perf->shared_cpu_map;
+	cpumask_copy(&policy->related_cpus, perf->shared_cpu_map);
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
@@ -795,7 +817,7 @@ static int __init acpi_cpufreq_init(void)
 
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
-		free_percpu(acpi_perf_data);
+		free_acpi_perf_data();
 
 	return ret;
 }
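With shared_cpu_map now a cpumask_var_t inside struct acpi_processor_performance, every per-cpu instance needs an explicit, preferably node-local, allocation, and free_acpi_perf_data() can free unconditionally because alloc_percpu() zeroes and free_cpumask_var(NULL) is a no-op. A trimmed sketch of the allocate-or-roll-back loop, mirroring the hunk above and relying on its acpi_perf_data and free_acpi_perf_data() (not a drop-in):

	static int demo_alloc_shared_maps(void)
	{
		unsigned int i;

		for_each_possible_cpu(i) {
			if (!alloc_cpumask_var_node(
					&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
					GFP_KERNEL, cpu_to_node(i))) {
				free_acpi_perf_data();	/* NULL-safe teardown */
				return -ENOMEM;
			}
		}
		return 0;
	}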
@@ -310,6 +310,12 @@ static int powernow_acpi_init(void)
 		goto err0;
 	}
 
+	if (!alloc_cpumask_var(&acpi_processor_perf->shared_cpu_map,
+			       GFP_KERNEL)) {
+		retval = -ENOMEM;
+		goto err05;
+	}
+
 	if (acpi_processor_register_performance(acpi_processor_perf, 0)) {
 		retval = -EIO;
 		goto err1;
@@ -412,6 +418,8 @@ static int powernow_acpi_init(void)
 err2:
 	acpi_processor_unregister_performance(acpi_processor_perf, 0);
 err1:
+	free_cpumask_var(acpi_processor_perf->shared_cpu_map);
+err05:
 	kfree(acpi_processor_perf);
 err0:
 	printk(KERN_WARNING PFX "ACPI perflib can not be used in this platform\n");
@@ -652,6 +660,7 @@ static int powernow_cpu_exit (struct cpufreq_policy *policy) {
 #ifdef CONFIG_X86_POWERNOW_K7_ACPI
 	if (acpi_processor_perf) {
 		acpi_processor_unregister_performance(acpi_processor_perf, 0);
+		free_cpumask_var(acpi_processor_perf->shared_cpu_map);
 		kfree(acpi_processor_perf);
 	}
 #endif
@@ -766,7 +766,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned
 static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 {
 	struct cpufreq_frequency_table *powernow_table;
-	int ret_val;
+	int ret_val = -ENODEV;
 
 	if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) {
 		dprintk("register performance failed: bad ACPI data\n");
@@ -815,6 +815,13 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
 
+	if (!alloc_cpumask_var(&data->acpi_data.shared_cpu_map, GFP_KERNEL)) {
+		printk(KERN_ERR PFX
+				"unable to alloc powernow_k8_data cpumask\n");
+		ret_val = -ENOMEM;
+		goto err_out_mem;
+	}
+
 	return 0;
 
 err_out_mem:
@@ -826,7 +833,7 @@ err_out:
 	/* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */
 	data->acpi_data.state_count = 0;
 
-	return -ENODEV;
+	return ret_val;
 }
 
 static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table)
@@ -929,6 +936,7 @@ static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data)
 {
 	if (data->acpi_data.state_count)
 		acpi_processor_unregister_performance(&data->acpi_data, data->cpu);
	free_cpumask_var(data->acpi_data.shared_cpu_map);
 }
 
 #else
@@ -1134,7 +1142,8 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	data->cpu = pol->cpu;
 	data->currpstate = HW_PSTATE_INVALID;
 
-	if (powernow_k8_cpu_init_acpi(data)) {
+	rc = powernow_k8_cpu_init_acpi(data);
+	if (rc) {
 		/*
 		 * Use the PSB BIOS structure. This is only availabe on
 		 * an UP version, and is deprecated by AMD.
@@ -1152,20 +1161,17 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 			       "ACPI maintainers and complain to your BIOS "
 			       "vendor.\n");
 #endif
-			kfree(data);
-			return -ENODEV;
+			goto err_out;
 		}
 		if (pol->cpu != 0) {
 			printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
 			       "CPU other than CPU0. Complain to your BIOS "
 			       "vendor.\n");
-			kfree(data);
-			return -ENODEV;
+			goto err_out;
 		}
 		rc = find_psb_table(data);
 		if (rc) {
-			kfree(data);
-			return -ENODEV;
+			goto err_out;
 		}
 	}
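powernow_k8_cpu_init_acpi() now seeds ret_val with -ENODEV and overrides it with -ENOMEM on allocation failure, so the single error exit can report which of the two happened; powernowk8_cpu_init() likewise funnels its three kfree-and-return sites through one err_out label. A minimal sketch of that idiom (demo_init() is illustrative):

	#include <linux/gfp.h>
	#include <linux/cpumask.h>

	static int demo_init(void)
	{
		cpumask_var_t map;
		int ret_val = -ENODEV;	/* default cause */

		if (!alloc_cpumask_var(&map, GFP_KERNEL)) {
			ret_val = -ENOMEM;	/* more specific cause */
			goto err_out;
		}
		/* ... success path ... */
		free_cpumask_var(map);
		return 0;

	err_out:
		return ret_val;	/* callers can tell the failures apart */
	}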
@@ -534,7 +534,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(cpuid4_info, cpu) = NULL;
 }
 
-static void get_cpu_leaves(void *_retval)
+static void __cpuinit get_cpu_leaves(void *_retval)
 {
 	int j, *retval = _retval, cpu = smp_processor_id();
 
@@ -121,7 +121,7 @@ static int cpuid_open(struct inode *inode, struct file *file)
 	lock_kernel();
 
 	cpu = iminor(file->f_path.dentry->d_inode);
-	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+	if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 		ret = -ENXIO;	/* No such CPU */
 		goto out;
 	}
@@ -214,11 +214,11 @@ static struct irq_cfg *get_one_free_irq_cfg(int cpu)
 
 	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
 	if (cfg) {
-		/* FIXME: needs alloc_cpumask_var_node() */
-		if (!alloc_cpumask_var(&cfg->domain, GFP_ATOMIC)) {
+		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
 			kfree(cfg);
 			cfg = NULL;
-		} else if (!alloc_cpumask_var(&cfg->old_domain, GFP_ATOMIC)) {
+		} else if (!alloc_cpumask_var_node(&cfg->old_domain,
+						   GFP_ATOMIC, node)) {
 			free_cpumask_var(cfg->domain);
 			kfree(cfg);
 			cfg = NULL;
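alloc_cpumask_var_node() resolves the FIXME by placing both IRQ masks on the same node as the irq_cfg that owns them. A hedged sketch of the pattern with a hypothetical struct (demo_cfg and demo_alloc_cfg are illustrative):

	#include <linux/slab.h>
	#include <linux/cpumask.h>

	struct demo_cfg { cpumask_var_t domain, old_domain; };

	static struct demo_cfg *demo_alloc_cfg(int node)
	{
		struct demo_cfg *cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);

		if (!cfg)
			return NULL;
		/* Keep the masks on the node that services the interrupt. */
		if (!alloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node))
			goto free_cfg;
		if (!alloc_cpumask_var_node(&cfg->old_domain, GFP_ATOMIC, node))
			goto free_domain;
		return cfg;

	free_domain:
		free_cpumask_var(cfg->domain);
	free_cfg:
		kfree(cfg);
		return NULL;
	}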
@@ -136,7 +136,7 @@ static int msr_open(struct inode *inode, struct file *file)
 	lock_kernel();
 	cpu = iminor(file->f_path.dentry->d_inode);
 
-	if (cpu >= NR_CPUS || !cpu_online(cpu)) {
+	if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
 		ret = -ENXIO;	/* No such CPU */
 		goto out;
 	}
@@ -501,7 +501,7 @@ void native_machine_shutdown(void)
 
 #ifdef CONFIG_X86_32
 	/* See if there has been given a command line override */
-	if ((reboot_cpu != -1) && (reboot_cpu < NR_CPUS) &&
+	if ((reboot_cpu != -1) && (reboot_cpu < nr_cpu_ids) &&
 	    cpu_online(reboot_cpu))
 		reboot_cpu_id = reboot_cpu;
 #endif
@@ -511,7 +511,7 @@ void native_machine_shutdown(void)
 		reboot_cpu_id = smp_processor_id();
 
 	/* Make certain I only run on the appropriate processor */
-	set_cpus_allowed_ptr(current, &cpumask_of_cpu(reboot_cpu_id));
+	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
 	/* O.K Now that I'm on the appropriate processor,
 	 * stop all of the others.
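cpumask_of(cpu) returns a const pointer to a shared single-bit mask, where the old cpumask_of_cpu() produced a full cpumask_t value that then had to live somewhere addressable. The usage shape, as a hypothetical wrapper:

	#include <linux/sched.h>
	#include <linux/cpumask.h>

	/* Pin a task to one CPU without materializing a mask copy. */
	static int demo_pin(struct task_struct *p, int cpu)
	{
		return set_cpus_allowed_ptr(p, cpumask_of(cpu));
	}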
@@ -153,12 +153,10 @@ void __init setup_per_cpu_areas(void)
 	align = max_t(unsigned long, PAGE_SIZE, align);
 	size = roundup(old_size, align);
 
-	printk(KERN_INFO
-		"NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
+	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
 		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);
 
-	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
-			  size);
+	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);
 
 	for_each_possible_cpu(cpu) {
 #ifndef CONFIG_NEED_MULTIPLE_NODES
@@ -169,22 +167,15 @@ void __init setup_per_cpu_areas(void)
 		if (!node_online(node) || !NODE_DATA(node)) {
 			ptr = __alloc_bootmem(size, align,
 					 __pa(MAX_DMA_ADDRESS));
-			printk(KERN_INFO
-			       "cpu %d has no node %d or node-local memory\n",
+			pr_info("cpu %d has no node %d or node-local memory\n",
 				cpu, node);
-			if (ptr)
-				printk(KERN_DEBUG
-					"per cpu data for cpu%d at %016lx\n",
-					cpu, __pa(ptr));
-		}
-		else {
+			pr_debug("per cpu data for cpu%d at %016lx\n",
+				 cpu, __pa(ptr));
+		} else {
 			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
 							__pa(MAX_DMA_ADDRESS));
-			if (ptr)
-				printk(KERN_DEBUG
-					"per cpu data for cpu%d on node%d "
-					"at %016lx\n",
-					cpu, node, __pa(ptr));
+			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
+				 cpu, node, __pa(ptr));
 		}
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
@@ -339,25 +330,25 @@ static const cpumask_t cpu_mask_none;
 /*
  * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
-const cpumask_t *_node_to_cpumask_ptr(int node)
+const cpumask_t *cpumask_of_node(int node)
 {
 	if (node_to_cpumask_map == NULL) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
 			node);
 		dump_stack();
 		return (const cpumask_t *)&cpu_online_map;
 	}
 	if (node >= nr_node_ids) {
 		printk(KERN_WARNING
-			"_node_to_cpumask_ptr(%d): node > nr_node_ids(%d)\n",
+			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
 			node, nr_node_ids);
 		dump_stack();
 		return &cpu_mask_none;
 	}
 	return &node_to_cpumask_map[node];
 }
-EXPORT_SYMBOL(_node_to_cpumask_ptr);
+EXPORT_SYMBOL(cpumask_of_node);
 
 /*
  * Returns a bitmask of CPUs on Node 'node'.
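The printk(KERN_INFO ...)/printk(KERN_DEBUG ...) pairs collapse to the pr_info()/pr_debug() shorthands; pr_debug() additionally compiles out unless debugging is enabled, which is why the old if (ptr) guards around the KERN_DEBUG prints could be dropped along with the string reflow. Equivalent shorthand, illustratively:

	#include <linux/kernel.h>

	static void demo_log(int cpu, unsigned long pa)
	{
		pr_info("cpu %d initialized\n", cpu);	/* printk(KERN_INFO ...) */
		pr_debug("per cpu data for cpu%d at %016lx\n",	/* elided unless DEBUG */
			 cpu, pa);
	}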
@@ -496,7 +496,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 }
 
 /* maps the cpu to the sched domain representing multi-core */
-cpumask_t cpu_coregroup_map(int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	/*
@@ -504,9 +504,14 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return per_cpu(cpu_core_map, cpu);
+		return &per_cpu(cpu_core_map, cpu);
 	else
-		return c->llc_shared_map;
+		return &c->llc_shared_map;
+}
+
+cpumask_t cpu_coregroup_map(int cpu)
+{
+	return *cpu_coregroup_mask(cpu);
 }
 
 static void impress_friends(void)
@@ -1149,7 +1154,7 @@ static void __init smp_cpu_index_default(void)
 	for_each_possible_cpu(i) {
 		c = &cpu_data(i);
 		/* mark all to hotplug */
-		c->cpu_index = NR_CPUS;
+		c->cpu_index = nr_cpu_ids;
 	}
 }
 
@@ -1293,6 +1298,8 @@ __init void prefill_possible_map(void)
 	else
 		possible = setup_possible_cpus;
 
+	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
+
 	if (possible > CONFIG_NR_CPUS) {
 		printk(KERN_WARNING
 			"%d Processors exceeds NR_CPUS limit of %d\n",
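Returning `const struct cpumask *` from cpu_coregroup_mask() avoids copying a cpumask_t by value, which is 512 bytes per call at NR_CPUS=4096; the by-value cpu_coregroup_map() wrapper survives only for callers not yet converted. The accessor shape, with cpumask_of() standing in for a real topology mask (demo_group_mask() is illustrative):

	#include <linux/cpumask.h>

	/* Returns a pointer to a long-lived mask the caller must not modify. */
	static const struct cpumask *demo_group_mask(int cpu)
	{
		return cpumask_of(cpu);	/* stand-in for a topology mask */
	}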