Merge ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86
* ssh://master.kernel.org/pub/scm/linux/kernel/git/tglx/linux-2.6-x86: (33 commits)
  x86: convert cpuinfo_x86 array to a per_cpu array
  x86: introduce frame_pointer() and stack_pointer()
  x86 & generic: change to __builtin_prefetch()
  i386: do not BUG_ON() when MSR is unknown
  x86: acpi use cpu_physical_id
  x86: convert cpu_llc_id to be a per cpu variable
  x86: convert cpu_to_apicid to be a per cpu variable
  i386: introduce "used_vectors" bitmap which can be used to reserve vectors.
  x86: use raw locks during oopses
  x86: honor _PAGE_PSE bit on page walks
  i386: do cpuid_device_create() in CPU_UP_PREPARE instead of CPU_ONLINE.
  x86: implement missing x86_64 function smp_call_function_mask()
  x86: use descriptor's functions instead of inline assembly
  i386: consolidate show_regs and show_registers for i386
  i386: make callgraph use dump_trace() on i386/x86_64
  x86: enable iommu_merge by default
  i386: i386 add AMD64 Barcelona PMU MSR definitions to msr.h
  x86: Unify i386 and x86-64 early quirks
  x86: enable HPET on ICH3 and ICH4
  x86: force enable HPET on VT8235/8237 chipsets
  ...

Manually fix trivial conflict with task pid container helper changes in
arch/x86/kernel/process_32.c
This commit is contained in:
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#

extra-y := head_32.o init_task_32.o vmlinux.lds
extra-y := head_32.o init_task.o vmlinux.lds

obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
@@ -17,6 +17,7 @@ obj-$(CONFIG_MCA) += mca_32.o
obj-$(CONFIG_X86_MSR) += msr.o
obj-$(CONFIG_X86_CPUID) += cpuid.o
obj-$(CONFIG_MICROCODE) += microcode.o
obj-$(CONFIG_PCI) += early-quirks.o
obj-$(CONFIG_APM) += apm_32.o
obj-$(CONFIG_X86_SMP) += smp_32.o smpboot_32.o tsc_sync.o
obj-$(CONFIG_SMP) += smpcommon_32.o
@@ -2,7 +2,7 @@
# Makefile for the linux kernel.
#

extra-y := head_64.o head64.o init_task_64.o vmlinux.lds
extra-y := head_64.o head64.o init_task.o vmlinux.lds
EXTRA_AFLAGS := -traditional
obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
@@ -39,7 +39,7 @@ obj-$(CONFIG_K8_NB) += k8.o
obj-$(CONFIG_AUDIT) += audit_64.o

obj-$(CONFIG_MODULES) += module_64.o
obj-$(CONFIG_PCI) += early-quirks_64.o
obj-$(CONFIG_PCI) += early-quirks.o

obj-y += topology.o
obj-y += intel_cacheinfo.o
@@ -1,7 +1,4 @@
obj-$(CONFIG_ACPI) += boot.o
ifneq ($(CONFIG_PCI),)
obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o
endif
obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o

ifneq ($(CONFIG_ACPI_PROCESSOR),)
@@ -555,7 +555,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);

int acpi_unmap_lsapic(int cpu)
{
x86_cpu_to_apicid[cpu] = -1;
per_cpu(x86_cpu_to_apicid, cpu) = -1;
cpu_clear(cpu, cpu_present_map);
num_processors--;
@@ -29,7 +29,7 @@
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
unsigned int cpu)
{
struct cpuinfo_x86 *c = cpu_data + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);

flags->bm_check = 0;
if (num_online_cpus() == 1)
@@ -72,7 +72,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
struct cstate_entry *percpu_entry;
struct cpuinfo_x86 *c = cpu_data + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);

cpumask_t saved_mask;
int retval;
@@ -1,84 +0,0 @@
/*
* Do early PCI probing for bug detection when the main PCI subsystem is
* not up yet.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/acpi.h>

#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>

#ifdef CONFIG_ACPI

static int __init nvidia_hpet_check(struct acpi_table_header *header)
{
return 0;
}
#endif

static int __init check_bridge(int vendor, int device)
{
#ifdef CONFIG_ACPI
static int warned;
/* According to Nvidia all timer overrides are bogus unless HPET
is enabled. */
if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
if (!warned && acpi_table_parse(ACPI_SIG_HPET,
nvidia_hpet_check)) {
warned = 1;
acpi_skip_timer_override = 1;
printk(KERN_INFO "Nvidia board "
"detected. Ignoring ACPI "
"timer override.\n");
printk(KERN_INFO "If you got timer trouble "
"try acpi_use_timer_override\n");

}
}
#endif
if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
timer_over_8254 = 0;
printk(KERN_INFO "ATI board detected. Disabling timer routing "
"over 8254.\n");
}
return 0;
}

void __init check_acpi_pci(void)
{
int num, slot, func;

/* Assume the machine supports type 1. If not it will
always read ffffffff and should not have any side effect.
Actually a few buggy systems can machine check. Allow the user
to disable it by command line option at least -AK */
if (!early_pci_allowed())
return;

/* Poor man's PCI discovery */
for (num = 0; num < 32; num++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
u32 class;
u32 vendor;
class = read_pci_config(num, slot, func,
PCI_CLASS_REVISION);
if (class == 0xffffffff)
break;

if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
continue;

vendor = read_pci_config(num, slot, func,
PCI_VENDOR_ID);

if (check_bridge(vendor & 0xffff, vendor >> 16))
return;
}

}
}
}
@@ -63,7 +63,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
{
unsigned int cpu = pr->id;
struct cpuinfo_x86 *c = cpu_data + cpu;
struct cpuinfo_x86 *c = &cpu_data(cpu);

pr->pdc = NULL;
if (c->x86_vendor == X86_VENDOR_INTEL)
@@ -357,14 +357,14 @@ void alternatives_smp_switch(int smp)
if (smp) {
printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
list_for_each_entry(mod, &smp_alt_modules, next)
alternatives_smp_lock(mod->locks, mod->locks_end,
mod->text, mod->text_end);
} else {
printk(KERN_INFO "SMP alternatives: switching to UP code\n");
set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
list_for_each_entry(mod, &smp_alt_modules, next)
alternatives_smp_unlock(mod->locks, mod->locks_end,
mod->text, mod->text_end);
@@ -432,7 +432,7 @@ void __init alternative_instructions(void)
if (1 == num_possible_cpus()) {
printk(KERN_INFO "SMP alternatives: switching to UP code\n");
set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability);
set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
alternatives_smp_unlock(__smp_locks, __smp_locks_end,
_text, _etext);
}
@@ -19,7 +19,7 @@ config X86_POWERNOW_K8
To compile this driver as a module, choose M here: the
module will be called powernow-k8.

For details, take a look at <file:Documentation/cpu-freq/>.
For details, take a look at <file:Documentation/cpu-freq/>.

If in doubt, say N.
@@ -77,7 +77,7 @@ static unsigned int acpi_pstate_strict;

static int check_est_cpu(unsigned int cpuid)
{
struct cpuinfo_x86 *cpu = &cpu_data[cpuid];
struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

if (cpu->x86_vendor != X86_VENDOR_INTEL ||
!cpu_has(cpu, X86_FEATURE_EST))
@@ -560,7 +560,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
unsigned int cpu = policy->cpu;
struct acpi_cpufreq_data *data;
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;

dprintk("acpi_cpufreq_cpu_init\n");
@@ -305,7 +305,7 @@ static struct cpufreq_driver eps_driver = {

static int __init eps_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);

/* This driver will work only on Centaur C7 processors with
* Enhanced SpeedStep/PowerSaver registers */
@@ -199,7 +199,7 @@ static int elanfreq_target (struct cpufreq_policy *policy,

static int elanfreq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int i;
int result;

@@ -280,7 +280,7 @@ static struct cpufreq_driver elanfreq_driver = {

static int __init elanfreq_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);

/* Test if we have the right hardware */
if ((c->x86_vendor != X86_VENDOR_AMD) ||
@@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void)

static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);
char *cpuname=NULL;
int ret;
u32 lo, hi;
@@ -959,7 +959,7 @@ static struct cpufreq_driver longhaul_driver = {

static int __init longhaul_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);

if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
return -ENODEV;
@@ -172,7 +172,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
u32 save_lo, save_hi;
u32 eax, ebx, ecx, edx;
u32 try_hi;
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);

if (!low_freq || !high_freq)
return -EINVAL;
@@ -298,7 +298,7 @@ static struct cpufreq_driver longrun_driver = {
*/
static int __init longrun_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);

if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
!cpu_has(c, X86_FEATURE_LONGRUN))
@@ -195,7 +195,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)

static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
int cpuid = 0;
unsigned int i;

@@ -279,7 +279,7 @@ static struct cpufreq_driver p4clockmod_driver = {

static int __init cpufreq_p4_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);
int ret;

/*
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
*/
static int __init powernow_k6_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);

if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
((c->x86_model != 12) && (c->x86_model != 13)))
@@ -114,7 +114,7 @@ static int check_fsb(unsigned int fsbspeed)

static int check_powernow(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);
unsigned int maxei, eax, ebx, ecx, edx;

if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
@@ -102,7 +102,7 @@ static int sc520_freq_target (struct cpufreq_policy *policy,

static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);
int result;

/* capability check */
@@ -151,7 +151,7 @@ static struct cpufreq_driver sc520_freq_driver = {

static int __init sc520_freq_init(void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);
int err;

/* Test if we have the right hardware */
@@ -230,7 +230,7 @@ static struct cpu_model models[] =

static int centrino_cpu_init_table(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
struct cpu_model *model;

for(model = models; model->cpu_id != NULL; model++)
@@ -340,7 +340,7 @@ static unsigned int get_cur_freq(unsigned int cpu)

static int centrino_cpu_init(struct cpufreq_policy *policy)
{
struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu];
struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
unsigned freq;
unsigned l, h;
int ret;
@@ -612,7 +612,7 @@ static struct cpufreq_driver centrino_driver = {
*/
static int __init centrino_init(void)
{
struct cpuinfo_x86 *cpu = cpu_data;
struct cpuinfo_x86 *cpu = &cpu_data(0);

if (!cpu_has(cpu, X86_FEATURE_EST))
return -ENODEV;
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);

unsigned int speedstep_detect_processor (void)
{
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(0);
u32 ebx, msr_lo, msr_hi;

dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
@@ -295,7 +295,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_X86_HT
unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data);
unsigned int cpu = c->cpu_index;
#endif

if (c->cpuid_level > 3) {
@@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
if (new_l2) {
l2 = new_l2;
#ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l2_id;
per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
}

if (new_l3) {
l3 = new_l3;
#ifdef CONFIG_X86_HT
cpu_llc_id[cpu] = l3_id;
per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
}

@@ -459,7 +459,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
struct _cpuid4_info *this_leaf, *sibling_leaf;
unsigned long num_threads_sharing;
int index_msb, i;
struct cpuinfo_x86 *c = cpu_data;
struct cpuinfo_x86 *c = &cpu_data(cpu);

this_leaf = CPUID4_INFO_IDX(cpu, index);
num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -470,8 +470,8 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
index_msb = get_count_order(num_threads_sharing);

for_each_online_cpu(i) {
if (c[i].apicid >> index_msb ==
c[cpu].apicid >> index_msb) {
if (cpu_data(i).apicid >> index_msb ==
c->apicid >> index_msb) {
cpu_set(i, this_leaf->shared_cpu_map);
if (i != cpu && cpuid4_info[i]) {
sibling_leaf = CPUID4_INFO_IDX(i, index);
@@ -120,7 +120,9 @@ int reserve_perfctr_nmi(unsigned int msr)
unsigned int counter;

counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
/* register not managed by the allocator? */
if (counter > NMI_MAX_COUNTER_BITS)
return 1;

if (!test_and_set_bit(counter, perfctr_nmi_owner))
return 1;
@@ -132,7 +134,9 @@ void release_perfctr_nmi(unsigned int msr)
unsigned int counter;

counter = nmi_perfctr_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
/* register not managed by the allocator? */
if (counter > NMI_MAX_COUNTER_BITS)
return;

clear_bit(counter, perfctr_nmi_owner);
}
@@ -142,7 +146,9 @@ int reserve_evntsel_nmi(unsigned int msr)
unsigned int counter;

counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
/* register not managed by the allocator? */
if (counter > NMI_MAX_COUNTER_BITS)
return 1;

if (!test_and_set_bit(counter, evntsel_nmi_owner))
return 1;
@@ -154,7 +160,9 @@ void release_evntsel_nmi(unsigned int msr)
unsigned int counter;

counter = nmi_evntsel_msr_to_bit(msr);
BUG_ON(counter > NMI_MAX_COUNTER_BITS);
/* register not managed by the allocator? */
if (counter > NMI_MAX_COUNTER_BITS)
return;

clear_bit(counter, evntsel_nmi_owner);
}
@@ -85,12 +85,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
/* nothing */
|
||||
};
|
||||
struct cpuinfo_x86 *c = v;
|
||||
int i, n = c - cpu_data;
|
||||
int i, n = 0;
|
||||
int fpu_exception;
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (!cpu_online(n))
|
||||
return 0;
|
||||
n = c->cpu_index;
|
||||
#endif
|
||||
seq_printf(m, "processor\t: %d\n"
|
||||
"vendor_id\t: %s\n"
|
||||
@@ -175,11 +176,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
|
||||
static void *c_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
return *pos < NR_CPUS ? cpu_data + *pos : NULL;
|
||||
if (*pos == 0) /* just in case, cpu 0 is not the first */
|
||||
*pos = first_cpu(cpu_possible_map);
|
||||
if ((*pos) < NR_CPUS && cpu_possible(*pos))
|
||||
return &cpu_data(*pos);
|
||||
return NULL;
|
||||
}
|
||||
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
{
|
||||
++*pos;
|
||||
*pos = next_cpu(*pos, cpu_possible_map);
|
||||
return c_start(m, pos);
|
||||
}
|
||||
static void c_stop(struct seq_file *m, void *v)
|
||||
|
@@ -114,7 +114,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
|
||||
static int cpuid_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned int cpu = iminor(file->f_path.dentry->d_inode);
|
||||
struct cpuinfo_x86 *c = &(cpu_data)[cpu];
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
|
||||
if (cpu >= NR_CPUS || !cpu_online(cpu))
|
||||
return -ENXIO; /* No such CPU */
|
||||
@@ -134,15 +134,18 @@ static const struct file_operations cpuid_fops = {
|
||||
.open = cpuid_open,
|
||||
};
|
||||
|
||||
static int __cpuinit cpuid_device_create(int i)
|
||||
static __cpuinit int cpuid_device_create(int cpu)
|
||||
{
|
||||
int err = 0;
|
||||
struct device *dev;
|
||||
|
||||
dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i);
|
||||
if (IS_ERR(dev))
|
||||
err = PTR_ERR(dev);
|
||||
return err;
|
||||
dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
|
||||
"cpu%d", cpu);
|
||||
return IS_ERR(dev) ? PTR_ERR(dev) : 0;
|
||||
}
|
||||
|
||||
static void cpuid_device_destroy(int cpu)
|
||||
{
|
||||
device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
|
||||
}
|
||||
|
||||
static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
|
||||
@@ -150,18 +153,21 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
|
||||
void *hcpu)
|
||||
{
|
||||
unsigned int cpu = (unsigned long)hcpu;
|
||||
int err = 0;
|
||||
|
||||
switch (action) {
|
||||
case CPU_ONLINE:
|
||||
case CPU_ONLINE_FROZEN:
|
||||
cpuid_device_create(cpu);
|
||||
case CPU_UP_PREPARE:
|
||||
case CPU_UP_PREPARE_FROZEN:
|
||||
err = cpuid_device_create(cpu);
|
||||
break;
|
||||
case CPU_UP_CANCELED:
|
||||
case CPU_UP_CANCELED_FROZEN:
|
||||
case CPU_DEAD:
|
||||
case CPU_DEAD_FROZEN:
|
||||
device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
|
||||
cpuid_device_destroy(cpu);
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
return err ? NOTIFY_BAD : NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
|
||||
@@ -198,7 +204,7 @@ static int __init cpuid_init(void)
|
||||
out_class:
|
||||
i = 0;
|
||||
for_each_online_cpu(i) {
|
||||
device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i));
|
||||
cpuid_device_destroy(i);
|
||||
}
|
||||
class_destroy(cpuid_class);
|
||||
out_chrdev:
|
||||
@@ -212,7 +218,7 @@ static void __exit cpuid_exit(void)
|
||||
int cpu = 0;
|
||||
|
||||
for_each_online_cpu(cpu)
|
||||
device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
|
||||
cpuid_device_destroy(cpu);
|
||||
class_destroy(cpuid_class);
|
||||
unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
|
||||
unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
|
||||
|
@@ -13,9 +13,13 @@
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/pci_ids.h>
|
||||
#include <asm/pci-direct.h>
|
||||
#include <asm/proto.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/io_apic.h>
|
||||
#include <asm/apic.h>
|
||||
|
||||
#ifdef CONFIG_IOMMU
|
||||
#include <asm/iommu.h>
|
||||
#endif
|
||||
|
||||
static void __init via_bugs(void)
|
||||
{
|
||||
@@ -23,7 +27,8 @@ static void __init via_bugs(void)
|
||||
if ((end_pfn > MAX_DMA32_PFN || force_iommu) &&
|
||||
!iommu_aperture_allowed) {
|
||||
printk(KERN_INFO
|
||||
"Looks like a VIA chipset. Disabling IOMMU. Override with iommu=allowed\n");
|
||||
"Looks like a VIA chipset. Disabling IOMMU."
|
||||
" Override with iommu=allowed\n");
|
||||
iommu_aperture_disabled = 1;
|
||||
}
|
||||
#endif
|
||||
@@ -40,6 +45,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header)
|
||||
static void __init nvidia_bugs(void)
|
||||
{
|
||||
#ifdef CONFIG_ACPI
|
||||
#ifdef CONFIG_X86_IO_APIC
|
||||
/*
|
||||
* All timer overrides on Nvidia are
|
||||
* wrong unless HPET is enabled.
|
||||
@@ -58,6 +64,7 @@ static void __init nvidia_bugs(void)
|
||||
printk(KERN_INFO "If you got timer trouble "
|
||||
"try acpi_use_timer_override\n");
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
/* RED-PEN skip them on mptables too? */
|
||||
|
||||
@@ -65,11 +72,13 @@ static void __init nvidia_bugs(void)
|
||||
|
||||
static void __init ati_bugs(void)
|
||||
{
|
||||
#ifdef CONFIG_X86_IO_APIC
|
||||
if (timer_over_8254 == 1) {
|
||||
timer_over_8254 = 0;
|
||||
printk(KERN_INFO
|
||||
"ATI board detected. Disabling timer routing over 8254.\n");
|
||||
"ATI board detected. Disabling timer routing over 8254.\n");
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
struct chipset {
|
||||
@@ -104,7 +113,7 @@ void __init early_quirks(void)
|
||||
if (class == 0xffffffff)
|
||||
break;
|
||||
|
||||
if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
|
||||
if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
|
||||
continue;
|
||||
|
||||
vendor = read_pci_config(num, slot, func,
|
@@ -24,10 +24,19 @@
|
||||
#include <acpi/acpi_bus.h>
|
||||
#endif
|
||||
|
||||
/* which logical CPU number maps to which CPU (physical APIC ID) */
|
||||
u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly
|
||||
/*
|
||||
* which logical CPU number maps to which CPU (physical APIC ID)
|
||||
*
|
||||
* The following static array is used during kernel startup
|
||||
* and the x86_cpu_to_apicid_ptr contains the address of the
|
||||
* array during this time. Is it zeroed when the per_cpu
|
||||
* data area is removed.
|
||||
*/
|
||||
u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
|
||||
= { [0 ... NR_CPUS-1] = BAD_APICID };
|
||||
EXPORT_SYMBOL(x86_cpu_to_apicid);
|
||||
void *x86_cpu_to_apicid_ptr;
|
||||
DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
|
||||
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
|
||||
|
||||
struct genapic __read_mostly *genapic = &apic_flat;
|
||||
|
||||
|
@@ -172,7 +172,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
|
||||
*/
|
||||
cpu = first_cpu(cpumask);
|
||||
if ((unsigned)cpu < NR_CPUS)
|
||||
return x86_cpu_to_apicid[cpu];
|
||||
return per_cpu(x86_cpu_to_apicid, cpu);
|
||||
else
|
||||
return BAD_APICID;
|
||||
}
|
||||
|
@@ -58,7 +58,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
|
||||
|
||||
for (i = 0; i < IDT_ENTRIES; i++)
|
||||
set_intr_gate(i, early_idt_handler);
|
||||
asm volatile("lidt %0" :: "m" (idt_descr));
|
||||
load_idt((const struct desc_ptr *)&idt_descr);
|
||||
|
||||
early_printk("Kernel alive\n");
|
||||
|
||||
|
@@ -69,12 +69,15 @@ static inline void hpet_clear_mapping(void)
|
||||
* HPET command line enable / disable
|
||||
*/
|
||||
static int boot_hpet_disable;
|
||||
int hpet_force_user;
|
||||
|
||||
static int __init hpet_setup(char* str)
|
||||
{
|
||||
if (str) {
|
||||
if (!strncmp("disable", str, 7))
|
||||
boot_hpet_disable = 1;
|
||||
if (!strncmp("force", str, 5))
|
||||
hpet_force_user = 1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
@@ -403,7 +403,8 @@ void __init native_init_IRQ(void)
|
||||
int vector = FIRST_EXTERNAL_VECTOR + i;
|
||||
if (i >= NR_IRQS)
|
||||
break;
|
||||
if (vector != SYSCALL_VECTOR)
|
||||
/* SYSCALL_VECTOR was reserved in trap_init. */
|
||||
if (!test_bit(vector, used_vectors))
|
||||
set_intr_gate(vector, interrupt[i]);
|
||||
}
|
||||
|
||||
|
@@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES;
|
||||
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
|
||||
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
|
||||
struct mm_struct init_mm = INIT_MM(init_mm);
|
||||
|
||||
EXPORT_SYMBOL(init_mm);
|
||||
|
||||
/*
|
||||
@@ -25,7 +24,7 @@ EXPORT_SYMBOL(init_mm);
|
||||
* way process stacks are handled. This is done by having a special
|
||||
* "init_task" linker map entry..
|
||||
*/
|
||||
union thread_union init_thread_union
|
||||
union thread_union init_thread_union
|
||||
__attribute__((__section__(".data.init_task"))) =
|
||||
{ INIT_THREAD_INFO(init_task) };
|
||||
|
||||
@@ -35,12 +34,14 @@ union thread_union init_thread_union
|
||||
* All other task structs will be allocated on slabs in fork.c
|
||||
*/
|
||||
struct task_struct init_task = INIT_TASK(init_task);
|
||||
|
||||
EXPORT_SYMBOL(init_task);
|
||||
|
||||
/*
|
||||
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
|
||||
* no more per-task TSS's.
|
||||
*/
|
||||
* no more per-task TSS's. The TSS size is kept cacheline-aligned
|
||||
* so they are allowed to end up in the .data.cacheline_aligned
|
||||
* section. Since TSS's are completely CPU-local, we want them
|
||||
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
|
||||
*/
|
||||
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
|
||||
|
@@ -1,54 +0,0 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/init_task.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mqueue.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/desc.h>
|
||||
|
||||
static struct fs_struct init_fs = INIT_FS;
|
||||
static struct files_struct init_files = INIT_FILES;
|
||||
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
|
||||
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
|
||||
struct mm_struct init_mm = INIT_MM(init_mm);
|
||||
|
||||
EXPORT_SYMBOL(init_mm);
|
||||
|
||||
/*
|
||||
* Initial task structure.
|
||||
*
|
||||
* We need to make sure that this is 8192-byte aligned due to the
|
||||
* way process stacks are handled. This is done by having a special
|
||||
* "init_task" linker map entry..
|
||||
*/
|
||||
union thread_union init_thread_union
|
||||
__attribute__((__section__(".data.init_task"))) =
|
||||
{ INIT_THREAD_INFO(init_task) };
|
||||
|
||||
/*
|
||||
* Initial task structure.
|
||||
*
|
||||
* All other task structs will be allocated on slabs in fork.c
|
||||
*/
|
||||
struct task_struct init_task = INIT_TASK(init_task);
|
||||
|
||||
EXPORT_SYMBOL(init_task);
|
||||
/*
|
||||
* per-CPU TSS segments. Threads are completely 'soft' on Linux,
|
||||
* no more per-task TSS's. The TSS size is kept cacheline-aligned
|
||||
* so they are allowed to end up in the .data.cacheline_aligned
|
||||
* section. Since TSS's are completely CPU-local, we want them
|
||||
* on exact cacheline boundaries, to eliminate cacheline ping-pong.
|
||||
*/
|
||||
DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
|
||||
|
||||
/* Copies of the original ist values from the tss are only accessed during
|
||||
* debugging, no special alignment required.
|
||||
*/
|
||||
DEFINE_PER_CPU(struct orig_ist, orig_ist);
|
||||
|
||||
#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
|
@@ -1198,7 +1198,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
|
||||
static int __assign_irq_vector(int irq)
|
||||
{
|
||||
static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
|
||||
int vector, offset, i;
|
||||
int vector, offset;
|
||||
|
||||
BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
|
||||
|
||||
@@ -1215,11 +1215,8 @@ next:
|
||||
}
|
||||
if (vector == current_vector)
|
||||
return -ENOSPC;
|
||||
if (vector == SYSCALL_VECTOR)
|
||||
if (test_and_set_bit(vector, used_vectors))
|
||||
goto next;
|
||||
for (i = 0; i < NR_IRQ_VECTORS; i++)
|
||||
if (irq_vector[i] == vector)
|
||||
goto next;
|
||||
|
||||
current_vector = vector;
|
||||
current_offset = offset;
|
||||
@@ -2295,6 +2292,12 @@ static inline void __init check_timer(void)
|
||||
|
||||
void __init setup_IO_APIC(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Reserve all the system vectors. */
|
||||
for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
|
||||
set_bit(i, used_vectors);
|
||||
|
||||
enable_IO_APIC();
|
||||
|
||||
if (acpi_ioapic)
|
||||
|
@@ -799,7 +799,8 @@ static __cpuinit int mce_create_device(unsigned int cpu)
|
||||
{
|
||||
int err;
|
||||
int i;
|
||||
if (!mce_available(&cpu_data[cpu]))
|
||||
|
||||
if (!mce_available(&cpu_data(cpu)))
|
||||
return -EIO;
|
||||
|
||||
memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
|
||||
|
@@ -472,11 +472,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
||||
sprintf(name, "threshold_bank%i", bank);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */
|
||||
if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
|
||||
i = first_cpu(per_cpu(cpu_core_map, cpu));
|
||||
|
||||
/* first core not up yet */
|
||||
if (cpu_data[i].cpu_core_id)
|
||||
if (cpu_data(i).cpu_core_id)
|
||||
goto out;
|
||||
|
||||
/* already linked */
|
||||
|
@@ -132,7 +132,7 @@ static struct ucode_cpu_info {
|
||||
|
||||
static void collect_cpu_info(int cpu_num)
|
||||
{
|
||||
struct cpuinfo_x86 *c = cpu_data + cpu_num;
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu_num);
|
||||
struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
|
||||
unsigned int val[2];
|
||||
|
||||
@@ -522,7 +522,7 @@ static struct platform_device *microcode_pdev;
|
||||
static int cpu_request_microcode(int cpu)
|
||||
{
|
||||
char name[30];
|
||||
struct cpuinfo_x86 *c = cpu_data + cpu;
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
const struct firmware *firmware;
|
||||
void *buf;
|
||||
unsigned long size;
|
||||
@@ -570,7 +570,7 @@ static int cpu_request_microcode(int cpu)
|
||||
|
||||
static int apply_microcode_check_cpu(int cpu)
|
||||
{
|
||||
struct cpuinfo_x86 *c = cpu_data + cpu;
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
|
||||
cpumask_t old;
|
||||
unsigned int val[2];
|
||||
|
@@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0;
|
||||
|
||||
/* Processor that is doing the boot up */
|
||||
unsigned int boot_cpu_id = -1U;
|
||||
EXPORT_SYMBOL(boot_cpu_id);
|
||||
|
||||
/* Internal processor count */
|
||||
unsigned int num_processors __cpuinitdata = 0;
|
||||
|
||||
@@ -86,7 +88,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
|
||||
return sum & 0xFF;
|
||||
}
|
||||
|
||||
static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
|
||||
static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
|
||||
{
|
||||
int cpu;
|
||||
cpumask_t tmp_map;
|
||||
@@ -123,7 +125,18 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
|
||||
cpu = 0;
|
||||
}
|
||||
bios_cpu_apicid[cpu] = m->mpc_apicid;
|
||||
x86_cpu_to_apicid[cpu] = m->mpc_apicid;
|
||||
/*
|
||||
* We get called early in the the start_kernel initialization
|
||||
* process when the per_cpu data area is not yet setup, so we
|
||||
* use a static array that is removed after the per_cpu data
|
||||
* area is created.
|
||||
*/
|
||||
if (x86_cpu_to_apicid_ptr) {
|
||||
u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
|
||||
x86_cpu_to_apicid[cpu] = m->mpc_apicid;
|
||||
} else {
|
||||
per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
|
||||
}
|
||||
|
||||
cpu_set(cpu, cpu_possible_map);
|
||||
cpu_set(cpu, cpu_present_map);
|
||||
|
@@ -112,7 +112,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
|
||||
static int msr_open(struct inode *inode, struct file *file)
|
||||
{
|
||||
unsigned int cpu = iminor(file->f_path.dentry->d_inode);
|
||||
struct cpuinfo_x86 *c = &(cpu_data)[cpu];
|
||||
struct cpuinfo_x86 *c = &cpu_data(cpu);
|
||||
|
||||
if (cpu >= NR_CPUS || !cpu_online(cpu))
|
||||
return -ENXIO; /* No such CPU */
|
||||
|
@@ -11,7 +11,7 @@
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/calgary.h>
|
||||
|
||||
int iommu_merge __read_mostly = 0;
|
||||
int iommu_merge __read_mostly = 1;
|
||||
EXPORT_SYMBOL(iommu_merge);
|
||||
|
||||
dma_addr_t bad_dma_address __read_mostly;
|
||||
|
@@ -295,34 +295,52 @@ static int __init idle_setup(char *str)
|
||||
}
|
||||
early_param("idle", idle_setup);
|
||||
|
||||
void show_regs(struct pt_regs * regs)
|
||||
void __show_registers(struct pt_regs *regs, int all)
|
||||
{
|
||||
unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
|
||||
unsigned long d0, d1, d2, d3, d6, d7;
|
||||
unsigned long esp;
|
||||
unsigned short ss, gs;
|
||||
|
||||
if (user_mode_vm(regs)) {
|
||||
esp = regs->esp;
|
||||
ss = regs->xss & 0xffff;
|
||||
savesegment(gs, gs);
|
||||
} else {
|
||||
esp = (unsigned long) (&regs->esp);
|
||||
savesegment(ss, ss);
|
||||
savesegment(gs, gs);
|
||||
}
|
||||
|
||||
printk("\n");
|
||||
printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
|
||||
printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
|
||||
printk("Pid: %d, comm: %s %s (%s %.*s)\n",
|
||||
task_pid_nr(current), current->comm,
|
||||
print_tainted(), init_utsname()->release,
|
||||
(int)strcspn(init_utsname()->version, " "),
|
||||
init_utsname()->version);
|
||||
|
||||
printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
|
||||
0xffff & regs->xcs, regs->eip, regs->eflags,
|
||||
smp_processor_id());
|
||||
print_symbol("EIP is at %s\n", regs->eip);
|
||||
|
||||
if (user_mode_vm(regs))
|
||||
printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
|
||||
printk(" EFLAGS: %08lx %s (%s %.*s)\n",
|
||||
regs->eflags, print_tainted(), init_utsname()->release,
|
||||
(int)strcspn(init_utsname()->version, " "),
|
||||
init_utsname()->version);
|
||||
printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
|
||||
regs->eax,regs->ebx,regs->ecx,regs->edx);
|
||||
printk("ESI: %08lx EDI: %08lx EBP: %08lx",
|
||||
regs->esi, regs->edi, regs->ebp);
|
||||
printk(" DS: %04x ES: %04x FS: %04x\n",
|
||||
0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs);
|
||||
regs->eax, regs->ebx, regs->ecx, regs->edx);
|
||||
printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
|
||||
regs->esi, regs->edi, regs->ebp, esp);
|
||||
printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
|
||||
regs->xds & 0xffff, regs->xes & 0xffff,
|
||||
regs->xfs & 0xffff, gs, ss);
|
||||
|
||||
if (!all)
|
||||
return;
|
||||
|
||||
cr0 = read_cr0();
|
||||
cr2 = read_cr2();
|
||||
cr3 = read_cr3();
|
||||
cr4 = read_cr4_safe();
|
||||
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
|
||||
printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
|
||||
cr0, cr2, cr3, cr4);
|
||||
|
||||
get_debugreg(d0, 0);
|
||||
get_debugreg(d1, 1);
|
||||
@@ -330,10 +348,16 @@ void show_regs(struct pt_regs * regs)
|
||||
get_debugreg(d3, 3);
|
||||
printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
|
||||
d0, d1, d2, d3);
|
||||
|
||||
get_debugreg(d6, 6);
|
||||
get_debugreg(d7, 7);
|
||||
printk("DR6: %08lx DR7: %08lx\n", d6, d7);
|
||||
printk("DR6: %08lx DR7: %08lx\n",
|
||||
d6, d7);
|
||||
}
|
||||
|
||||
void show_regs(struct pt_regs *regs)
|
||||
{
|
||||
__show_registers(regs, 1);
|
||||
show_trace(NULL, regs, &regs->esp);
|
||||
}
|
||||
|
||||
|
@@ -45,9 +45,12 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
|
||||
if (!(config & 0x2))
|
||||
pci_write_config_byte(dev, 0xf4, config);
|
||||
}
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
|
||||
quirk_intel_irqbalance);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
|
||||
quirk_intel_irqbalance);
|
||||
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
|
||||
quirk_intel_irqbalance);
|
||||
#endif
|
||||
|
||||
#if defined(CONFIG_HPET_TIMER)
|
||||
@@ -56,7 +59,8 @@ unsigned long force_hpet_address;
|
||||
static enum {
|
||||
NONE_FORCE_HPET_RESUME,
|
||||
OLD_ICH_FORCE_HPET_RESUME,
|
||||
ICH_FORCE_HPET_RESUME
|
||||
ICH_FORCE_HPET_RESUME,
|
||||
VT8237_FORCE_HPET_RESUME
|
||||
} force_hpet_resume_type;
|
||||
|
||||
static void __iomem *rcba_base;
|
||||
@@ -146,17 +150,17 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
|
||||
}
|
||||
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
|
||||
ich_force_enable_hpet);
|
||||
ich_force_enable_hpet);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
|
||||
ich_force_enable_hpet);
|
||||
ich_force_enable_hpet);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
|
||||
ich_force_enable_hpet);
|
||||
ich_force_enable_hpet);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
|
||||
ich_force_enable_hpet);
|
||||
ich_force_enable_hpet);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
|
||||
ich_force_enable_hpet);
|
||||
ich_force_enable_hpet);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
|
||||
ich_force_enable_hpet);
|
||||
ich_force_enable_hpet);
|
||||
|
||||
|
||||
static struct pci_dev *cached_dev;
|
||||
@@ -232,10 +236,91 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev)
|
||||
printk(KERN_DEBUG "Failed to force enable HPET\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Undocumented chipset features. Make sure that the user enforced
|
||||
* this.
|
||||
*/
|
||||
static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
|
||||
{
|
||||
if (hpet_force_user)
|
||||
old_ich_force_enable_hpet(dev);
|
||||
}
|
||||
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
|
||||
old_ich_force_enable_hpet_user);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
|
||||
old_ich_force_enable_hpet_user);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
|
||||
old_ich_force_enable_hpet_user);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
|
||||
old_ich_force_enable_hpet_user);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
|
||||
old_ich_force_enable_hpet);
|
||||
old_ich_force_enable_hpet);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
|
||||
old_ich_force_enable_hpet);
|
||||
old_ich_force_enable_hpet);
|
||||
|
||||
|
||||
static void vt8237_force_hpet_resume(void)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
if (!force_hpet_address || !cached_dev)
|
||||
return;
|
||||
|
||||
val = 0xfed00000 | 0x80;
|
||||
pci_write_config_dword(cached_dev, 0x68, val);
|
||||
|
||||
pci_read_config_dword(cached_dev, 0x68, &val);
|
||||
if (val & 0x80)
|
||||
printk(KERN_DEBUG "Force enabled HPET at resume\n");
|
||||
else
|
||||
BUG();
|
||||
}
|
||||
|
||||
static void vt8237_force_enable_hpet(struct pci_dev *dev)
|
||||
{
|
||||
u32 uninitialized_var(val);
|
||||
|
||||
if (!hpet_force_user || hpet_address || force_hpet_address)
|
||||
return;
|
||||
|
||||
pci_read_config_dword(dev, 0x68, &val);
|
||||
/*
|
||||
* Bit 7 is HPET enable bit.
|
||||
* Bit 31:10 is HPET base address (contrary to what datasheet claims)
|
||||
*/
|
||||
if (val & 0x80) {
|
||||
force_hpet_address = (val & ~0x3ff);
|
||||
printk(KERN_DEBUG "HPET at base address 0x%lx\n",
|
||||
force_hpet_address);
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* HPET is disabled. Trying enabling at FED00000 and check
|
||||
* whether it sticks
|
||||
*/
|
||||
val = 0xfed00000 | 0x80;
|
||||
pci_write_config_dword(dev, 0x68, val);
|
||||
|
||||
pci_read_config_dword(dev, 0x68, &val);
|
||||
if (val & 0x80) {
|
||||
force_hpet_address = (val & ~0x3ff);
|
||||
printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
|
||||
force_hpet_address);
|
||||
cached_dev = dev;
|
||||
force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
|
||||
return;
|
||||
}
|
||||
|
||||
printk(KERN_DEBUG "Failed to force enable HPET\n");
|
||||
}
|
||||
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
|
||||
vt8237_force_enable_hpet);
|
||||
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
|
||||
vt8237_force_enable_hpet);
|
||||
|
||||
|
||||
void force_hpet_resume(void)
|
||||
{
|
||||
@@ -246,6 +331,9 @@ void force_hpet_resume(void)
|
||||
case OLD_ICH_FORCE_HPET_RESUME:
|
||||
return old_ich_force_hpet_resume();
|
||||
|
||||
case VT8237_FORCE_HPET_RESUME:
|
||||
return vt8237_force_hpet_resume();
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@@ -11,6 +11,7 @@
|
||||
#include <linux/sched.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/desc.h>
|
||||
#include <asm/hw_irq.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/pgtable.h>
|
||||
@@ -136,7 +137,7 @@ void machine_emergency_restart(void)
|
||||
}
|
||||
|
||||
case BOOT_TRIPLE:
|
||||
__asm__ __volatile__("lidt (%0)": :"r" (&no_idt));
|
||||
load_idt((const struct desc_ptr *)&no_idt);
|
||||
__asm__ __volatile__("int3");
|
||||
|
||||
reboot_type = BOOT_KBD;
|
||||
|
@@ -12,6 +12,7 @@
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/reboot_fixups.h>
|
||||
#include <asm/msr.h>
|
||||
#include <asm/geode.h>
|
||||
|
||||
static void cs5530a_warm_reset(struct pci_dev *dev)
|
||||
{
|
||||
@@ -24,11 +25,8 @@ static void cs5530a_warm_reset(struct pci_dev *dev)
|
||||
|
||||
static void cs5536_warm_reset(struct pci_dev *dev)
|
||||
{
|
||||
/*
|
||||
* 6.6.2.12 Soft Reset (DIVIL_SOFT_RESET)
|
||||
* writing 1 to the LSB of this MSR causes a hard reset.
|
||||
*/
|
||||
wrmsrl(0x51400017, 1ULL);
|
||||
/* writing 1 to the LSB of this MSR causes a hard reset */
|
||||
wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
|
||||
udelay(50); /* shouldn't get here but be safe and spin a while */
|
||||
}
|
||||
|
||||
|
@@ -184,6 +184,12 @@ void __cpuinit check_efer(void)
|
||||
|
||||
unsigned long kernel_eflags;
|
||||
|
||||
/*
|
||||
* Copies of the original ist values from the tss are only accessed during
|
||||
* debugging, no special alignment required.
|
||||
*/
|
||||
DEFINE_PER_CPU(struct orig_ist, orig_ist);
|
||||
|
||||
/*
|
||||
* cpu_init() initializes state that is per-CPU. Some data is already
|
||||
* initialized (naturally) in the bootstrap process, such as the GDT
|
||||
@@ -224,8 +230,8 @@ void __cpuinit cpu_init (void)
|
||||
memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
|
||||
|
||||
cpu_gdt_descr[cpu].size = GDT_SIZE;
|
||||
asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu]));
|
||||
asm volatile("lidt %0" :: "m" (idt_descr));
|
||||
load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
|
||||
load_idt((const struct desc_ptr *)&idt_descr);
|
||||
|
||||
memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
|
||||
syscall_init();
|
||||
|
@@ -661,9 +661,7 @@ void __init setup_arch(char **cmdline_p)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PCI
|
||||
#ifdef CONFIG_X86_IO_APIC
|
||||
check_acpi_pci(); /* Checks more than just ACPI actually */
|
||||
#endif
|
||||
early_quirks();
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
|
@@ -302,6 +302,11 @@ void __init setup_arch(char **cmdline_p)
|
||||
|
||||
dmi_scan_machine();
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
/* setup to use the static apicid table during kernel startup */
|
||||
x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
/*
|
||||
* Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
|
||||
@@ -554,7 +559,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
|
||||
but in the same order as the HT nodeids.
|
||||
If that doesn't result in a usable node fall back to the
|
||||
path for the previous case. */
|
||||
int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
|
||||
int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
|
||||
if (ht_nodeid >= 0 &&
|
||||
apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
|
||||
node = apicid_to_node[ht_nodeid];
|
||||
@@ -878,6 +883,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
|
||||
c->cpu_index = 0;
|
||||
#endif
|
||||
}
|
||||
|
||||
@@ -984,6 +990,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
|
||||
static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
{
|
||||
struct cpuinfo_x86 *c = v;
|
||||
int cpu = 0;
|
||||
|
||||
/*
|
||||
* These flag bits must match the definitions in <asm/cpufeature.h>.
|
||||
@@ -1062,8 +1069,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (!cpu_online(c-cpu_data))
|
||||
if (!cpu_online(c->cpu_index))
|
||||
return 0;
|
||||
cpu = c->cpu_index;
|
||||
#endif
|
||||
|
||||
seq_printf(m,"processor\t: %u\n"
|
||||
@@ -1071,7 +1079,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
"cpu family\t: %d\n"
|
||||
"model\t\t: %d\n"
|
||||
"model name\t: %s\n",
|
||||
(unsigned)(c-cpu_data),
|
||||
(unsigned)cpu,
|
||||
c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
|
||||
c->x86,
|
||||
(int)c->x86_model,
|
||||
@@ -1083,7 +1091,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
seq_printf(m, "stepping\t: unknown\n");
|
||||
|
||||
if (cpu_has(c,X86_FEATURE_TSC)) {
|
||||
unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
|
||||
unsigned int freq = cpufreq_quick_get((unsigned)cpu);
|
||||
if (!freq)
|
||||
freq = cpu_khz;
|
||||
seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
|
||||
@@ -1096,7 +1104,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (smp_num_siblings * c->x86_max_cores > 1) {
|
||||
int cpu = c - cpu_data;
|
||||
seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
|
||||
seq_printf(m, "siblings\t: %d\n",
|
||||
cpus_weight(per_cpu(cpu_core_map, cpu)));
|
||||
@@ -1154,12 +1161,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
|
||||
|
||||
static void *c_start(struct seq_file *m, loff_t *pos)
|
||||
{
|
||||
return *pos < NR_CPUS ? cpu_data + *pos : NULL;
|
||||
if (*pos == 0) /* just in case, cpu 0 is not the first */
|
||||
*pos = first_cpu(cpu_possible_map);
|
||||
if ((*pos) < NR_CPUS && cpu_possible(*pos))
|
||||
return &cpu_data(*pos);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
|
||||
{
|
||||
++*pos;
|
||||
*pos = next_cpu(*pos, cpu_possible_map);
|
||||
return c_start(m, pos);
|
||||
}
|
||||
|
||||
|
@@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy)
|
||||
*/
|
||||
cpu_clear(smp_processor_id(), cpu_online_map);
|
||||
disable_local_APIC();
|
||||
if (cpu_data[smp_processor_id()].hlt_works_ok)
|
||||
if (cpu_data(smp_processor_id()).hlt_works_ok)
|
||||
for(;;) halt();
|
||||
for (;;);
|
||||
}
|
||||
@@ -676,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id)
|
||||
int i;
|
||||
|
||||
for (i = 0; i < NR_CPUS; i++) {
|
||||
if (x86_cpu_to_apicid[i] == apic_id)
|
||||
if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
|
||||
return i;
|
||||
}
|
||||
return -1;
|
||||
|
@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void)
|
||||
}
|
||||
|
||||
/*
|
||||
* this function sends a 'generic call function' IPI to one other CPU
|
||||
* in the system.
|
||||
*
|
||||
* cpu is a standard Linux logical CPU number.
|
||||
* this function sends a 'generic call function' IPI to all other CPU
|
||||
* of the system defined in the mask.
|
||||
*/
|
||||
static void
|
||||
__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
|
||||
int nonatomic, int wait)
|
||||
|
||||
static int
|
||||
__smp_call_function_mask(cpumask_t mask,
|
||||
void (*func)(void *), void *info,
|
||||
int wait)
|
||||
{
|
||||
struct call_data_struct data;
|
||||
int cpus = 1;
|
||||
cpumask_t allbutself;
|
||||
int cpus;
|
||||
|
||||
allbutself = cpu_online_map;
|
||||
cpu_clear(smp_processor_id(), allbutself);
|
||||
|
||||
cpus_and(mask, mask, allbutself);
|
||||
cpus = cpus_weight(mask);
|
||||
|
||||
if (!cpus)
|
||||
return 0;
|
||||
|
||||
data.func = func;
|
||||
data.info = info;
|
||||
@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
|
||||
|
||||
call_data = &data;
|
||||
wmb();
|
||||
/* Send a message to all other CPUs and wait for them to respond */
|
||||
send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
|
||||
|
||||
/* Send a message to other CPUs */
|
||||
if (cpus_equal(mask, allbutself))
|
||||
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
|
||||
else
|
||||
send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
|
||||
|
||||
/* Wait for response */
|
||||
while (atomic_read(&data.started) != cpus)
|
||||
cpu_relax();
|
||||
|
||||
if (!wait)
|
||||
return;
|
||||
return 0;
|
||||
|
||||
while (atomic_read(&data.finished) != cpus)
|
||||
cpu_relax();
|
||||
|
||||
return 0;
|
||||
}
|
||||
/**
|
||||
* smp_call_function_mask(): Run a function on a set of other CPUs.
|
||||
* @mask: The set of cpus to run on. Must not include the current cpu.
|
||||
* @func: The function to run. This must be fast and non-blocking.
|
||||
* @info: An arbitrary pointer to pass to the function.
|
||||
* @wait: If true, wait (atomically) until function has completed on other CPUs.
|
||||
*
|
||||
* Returns 0 on success, else a negative status code.
|
||||
*
|
||||
* If @wait is true, then returns once @func has returned; otherwise
|
||||
* it returns just before the target cpu calls @func.
|
||||
*
|
||||
* You must not call this function with disabled interrupts or from a
|
||||
* hardware interrupt handler or from a bottom half handler.
|
||||
*/
|
||||
int smp_call_function_mask(cpumask_t mask,
|
||||
void (*func)(void *), void *info,
|
||||
int wait)
|
||||
{
|
||||
int ret;
|
||||
|
||||
/* Can deadlock when called with interrupts disabled */
|
||||
WARN_ON(irqs_disabled());
|
||||
|
||||
spin_lock(&call_lock);
|
||||
ret = __smp_call_function_mask(mask, func, info, wait);
|
||||
spin_unlock(&call_lock);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(smp_call_function_mask);
|
||||
|
||||
/*
|
||||
* smp_call_function_single - Run a function on a specific CPU
|
||||
@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
|
||||
int nonatomic, int wait)
|
||||
{
|
||||
/* prevent preemption and reschedule on another processor */
|
||||
int ret;
|
||||
int me = get_cpu();
|
||||
|
||||
/* Can deadlock when called with interrupts disabled */
|
||||
@@ -387,50 +434,13 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
|
||||
return 0;
|
||||
}
|
||||
|
||||
spin_lock(&call_lock);
|
||||
__smp_call_function_single(cpu, func, info, nonatomic, wait);
|
||||
spin_unlock(&call_lock);
|
||||
ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
|
||||
|
||||
put_cpu();
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(smp_call_function_single);
|
||||
|
||||
/*
|
||||
* this function sends a 'generic call function' IPI to all other CPUs
|
||||
* in the system.
|
||||
*/
|
||||
static void __smp_call_function (void (*func) (void *info), void *info,
|
||||
int nonatomic, int wait)
|
||||
{
|
||||
struct call_data_struct data;
|
||||
int cpus = num_online_cpus()-1;
|
||||
|
||||
if (!cpus)
|
||||
return;
|
||||
|
||||
data.func = func;
|
||||
data.info = info;
|
||||
atomic_set(&data.started, 0);
|
||||
data.wait = wait;
|
||||
if (wait)
|
||||
atomic_set(&data.finished, 0);
|
||||
|
||||
call_data = &data;
|
||||
wmb();
|
||||
/* Send a message to all other CPUs and wait for them to respond */
|
||||
send_IPI_allbutself(CALL_FUNCTION_VECTOR);
|
||||
|
||||
/* Wait for response */
|
||||
while (atomic_read(&data.started) != cpus)
|
||||
cpu_relax();
|
||||
|
||||
if (!wait)
|
||||
return;
|
||||
|
||||
while (atomic_read(&data.finished) != cpus)
|
||||
cpu_relax();
|
||||
}
|
||||
|
||||
/*
|
||||
* smp_call_function - run a function on all other CPUs.
|
||||
* @func: The function to run. This must be fast and non-blocking.
|
||||
@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info,
|
||||
int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
|
||||
int wait)
|
||||
{
|
||||
spin_lock(&call_lock);
|
||||
__smp_call_function(func,info,nonatomic,wait);
|
||||
spin_unlock(&call_lock);
|
||||
return 0;
|
||||
return smp_call_function_mask(cpu_online_map, func, info, wait);
|
||||
}
|
||||
EXPORT_SYMBOL(smp_call_function);
|
||||
|
||||
@@ -479,7 +486,7 @@ void smp_send_stop(void)
|
||||
/* Don't deadlock on the call lock in panic */
|
||||
nolock = !spin_trylock(&call_lock);
|
||||
local_irq_save(flags);
|
||||
__smp_call_function(stop_this_cpu, NULL, 0, 0);
|
||||
__smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
|
||||
if (!nolock)
|
||||
spin_unlock(&call_lock);
|
||||
disable_local_APIC();
|
||||
|
@@ -67,7 +67,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -89,12 +89,20 @@ EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly =
			{ [0 ... NR_CPUS-1] = 0xff };
EXPORT_SYMBOL(x86_cpu_to_apicid);
/*
 * The following static array is used during kernel startup
 * and the x86_cpu_to_apicid_ptr contains the address of the
 * array during this time.  Is it zeroed when the per_cpu
 * data area is removed.
 */
u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
			{ [0 ... NR_CPUS-1] = BAD_APICID };
void *x86_cpu_to_apicid_ptr;
DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);

u8 apicid_2_node[MAX_APICID];

@@ -150,9 +158,10 @@ void __init smp_alloc_memory(void)

void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id!=0)
		identify_secondary_cpu(c);
	/*
@@ -294,7 +303,7 @@ static int cpucount;
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
@@ -311,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map;
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c[cpu].llc_shared_map);
	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c[cpu].booted_cores = 1;
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
@@ -357,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c[cpu].booted_cores++;
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
@@ -804,7 +813,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)

	irq_ctx_init(cpu);

	x86_cpu_to_apicid[cpu] = apicid;
	per_cpu(x86_cpu_to_apicid, cpu) = apicid;
	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
@@ -844,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
		/* number CPUs logically, starting from 1 (BSP is 0) */
		Dprintk("OK.\n");
		printk("CPU%d: ", cpu);
		print_cpu_info(&cpu_data[cpu]);
		print_cpu_info(&cpu_data(cpu));
		Dprintk("CPU has booted.\n");
	} else {
		boot_error= 1;
@@ -866,7 +875,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
		cpu_clear(cpu, cpu_initialized);	/* was set by cpu_init() */
		cpucount--;
	} else {
		x86_cpu_to_apicid[cpu] = apicid;
		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
		cpu_set(cpu, cpu_present_map);
	}

@@ -915,7 +924,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
	struct warm_boot_cpu_info info;
	int	apicid, ret;

	apicid = x86_cpu_to_apicid[cpu];
	apicid = per_cpu(x86_cpu_to_apicid, cpu);
	if (apicid == BAD_APICID) {
		ret = -ENODEV;
		goto exit;
@@ -961,11 +970,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
	 */
	smp_store_cpu_info(0); /* Final full version of the data */
	printk("CPU%d: ", 0);
	print_cpu_info(&cpu_data[0]);
	print_cpu_info(&cpu_data(0));

	boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
	boot_cpu_logical_apicid = logical_smp_processor_id();
	x86_cpu_to_apicid[0] = boot_cpu_physical_apicid;
	per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;

	current_thread_info()->cpu = 0;

@@ -1008,6 +1017,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		map_cpu_to_logical_apicid();
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, per_cpu(cpu_core_map, 0));
		return;
@@ -1029,6 +1039,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
		}
		smpboot_clear_io_apic_irqs();
		phys_cpu_present_map = physid_mask_of_physid(0);
		map_cpu_to_logical_apicid();
		cpu_set(0, per_cpu(cpu_sibling_map, 0));
		cpu_set(0, per_cpu(cpu_core_map, 0));
		return;
@@ -1082,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
	Dprintk("Before bogomips.\n");
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data[cpu].loops_per_jiffy;
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		cpucount+1,
@@ -1152,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void)
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -1160,15 +1171,15 @@ void remove_siblinginfo(int cpu)
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			c[sibling].booted_cores--;
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

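The cpu_data[], x86_cpu_to_apicid[] and cpu_llc_id[] arrays above become per-CPU variables, reached through per_cpu() or the cpu_data(cpu) wrapper. A minimal sketch of that pattern, with a made-up variable name:

	#include <linux/percpu.h>

	DEFINE_PER_CPU(u8, my_value) = 0xff;	/* one instance per possible CPU */
	EXPORT_PER_CPU_SYMBOL(my_value);

	/* read or write a specific CPU's instance */
	per_cpu(my_value, cpu) = 42;

	/* the running CPU's instance (caller must keep preemption disabled) */
	__get_cpu_var(my_value)++;
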
@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;

/* Bitmask of currently online CPUs */
cpumask_t cpu_online_map __read_mostly;
@@ -84,8 +84,8 @@ cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* Per CPU bogomips and other parameters */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_data);
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Set when the idlers are all forked */
int smp_threads_ready;
@@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void)

static void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = cpu_data + id;
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	identify_cpu(c);
	print_cpu_info(c);
}
@@ -237,7 +238,7 @@ void __cpuinit smp_callin(void)
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = cpu_data + cpu;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
@@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map;
static inline void set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = cpu_data;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c[cpu].llc_shared_map);
				cpu_set(cpu, c[i].llc_shared_map);
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c[cpu].llc_shared_map);
	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c[cpu].booted_cores = 1;
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (cpu_llc_id[cpu] != BAD_APICID &&
		    cpu_llc_id[cpu] == cpu_llc_id[i]) {
			cpu_set(i, c[cpu].llc_shared_map);
			cpu_set(cpu, c[i].llc_shared_map);
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
@@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu)
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c[cpu].booted_cores++;
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					c[i].booted_cores++;
			} else if (i != cpu && !c[cpu].booted_cores)
				c[cpu].booted_cores = c[i].booted_cores;
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}
@@ -694,7 +695,7 @@ do_rest:
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
		cpu_clear(cpu, cpu_present_map);
		cpu_clear(cpu, cpu_possible_map);
		x86_cpu_to_apicid[cpu] = BAD_APICID;
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
		return -EIO;
	}

@@ -840,6 +841,26 @@ static int __init smp_sanity_check(unsigned max_cpus)
	return 0;
}

/*
 * Copy apicid's found by MP_processor_info from initial array to the per cpu
 * data area.  The x86_cpu_to_apicid_init array is then expendable and the
 * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
 * longer available.
 */
void __init smp_set_apicids(void)
{
	int cpu;

	for_each_cpu_mask(cpu, cpu_possible_map) {
		if (per_cpu_offset(cpu))
			per_cpu(x86_cpu_to_apicid, cpu) =
						x86_cpu_to_apicid_init[cpu];
	}

	/* indicate the static array will be going away soon */
	x86_cpu_to_apicid_ptr = NULL;
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
@@ -849,6 +870,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
	nmi_watchdog_default();
	current_cpu_data = boot_cpu_data;
	current_thread_info()->cpu = 0;  /* needed? */
	smp_set_apicids();
	set_cpu_sibling_map(0);

	if (smp_sanity_check(max_cpus) < 0) {
@@ -968,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = cpu_data;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -976,15 +998,15 @@ static void remove_siblinginfo(int cpu)
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			c[sibling].booted_cores--;
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c[cpu].phys_proc_id = 0;
	c[cpu].cpu_core_id = 0;
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

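On 64-bit, the APIC IDs found by MP_processor_info() have to live in a static __initdata array until the per-CPU areas exist; smp_set_apicids() then copies them over, as the hunk above adds. In outline:

	/* early boot, before the per-CPU areas are set up */
	x86_cpu_to_apicid_init[cpu] = apicid;

	/* later, once per_cpu_offset(cpu) is valid */
	per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
	x86_cpu_to_apicid_ptr = NULL;	/* the __initdata copy is about to go away */
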
@@ -32,9 +32,9 @@ void __save_processor_state(struct saved_context *ctxt)
	/*
	 * descriptor tables
	 */
	asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit));
	asm volatile ("sidt %0" : "=m" (ctxt->idt_limit));
	asm volatile ("str %0" : "=m" (ctxt->tr));
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
@@ -91,8 +91,9 @@ void __restore_processor_state(struct saved_context *ctxt)
	 * now restore the descriptor tables to their proper values
	 * ltr is done i fix_processor_context().
	 */
	asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit));
	asm volatile ("lidt %0" :: "m" (ctxt->idt_limit));
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);


	/*
	 * segment registers

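The suspend/resume path drops its open-coded sgdt/sidt/lgdt/lidt and uses the descriptor helpers instead. A rough sketch of the save/restore pairing, assuming a locally declared struct desc_ptr (the real code reuses the saved_context fields, as the hunks show):

	#include <asm/desc.h>

	struct desc_ptr gdt_desc, idt_desc;

	store_gdt(&gdt_desc);	/* save */
	store_idt(&idt_desc);

	load_gdt(&gdt_desc);	/* restore */
	load_idt(&idt_desc);
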
@@ -63,6 +63,9 @@

int panic_on_unrecovered_nmi;

DECLARE_BITMAP(used_vectors, NR_VECTORS);
EXPORT_SYMBOL_GPL(used_vectors);

asmlinkage int system_call(void);

/* Do we ignore FPU interrupts ? */
@@ -288,33 +291,9 @@ EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
	int i;
	int in_kernel = 1;
	unsigned long esp;
	unsigned short ss, gs;

	esp = (unsigned long) (&regs->esp);
	savesegment(ss, ss);
	savesegment(gs, gs);
	if (user_mode_vm(regs)) {
		in_kernel = 0;
		esp = regs->esp;
		ss = regs->xss & 0xffff;
	}
	print_modules();
	printk(KERN_EMERG "CPU: %d\n"
		KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
		KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
		smp_processor_id(), 0xffff & regs->xcs, regs->eip,
		print_tainted(), regs->eflags, init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
	printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		regs->eax, regs->ebx, regs->ecx, regs->edx);
	printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		regs->esi, regs->edi, regs->ebp, esp);
	printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
		regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
	__show_registers(regs, 0);
	printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
		TASK_COMM_LEN, current->comm, task_pid_nr(current),
		current_thread_info(), current, task_thread_info(current));
@@ -322,14 +301,14 @@ void show_registers(struct pt_regs *regs)
	 * When in-kernel, we also print out the stack and code at the
	 * time of the fault..
	 */
	if (in_kernel) {
	if (!user_mode_vm(regs)) {
		u8 *eip;
		unsigned int code_prologue = code_bytes * 43 / 64;
		unsigned int code_len = code_bytes;
		unsigned char c;

		printk("\n" KERN_EMERG "Stack: ");
		show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
		show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);

		printk(KERN_EMERG "Code: ");

@@ -374,11 +353,11 @@ int is_valid_bugaddr(unsigned long eip)
void die(const char * str, struct pt_regs * regs, long err)
{
	static struct {
		spinlock_t lock;
		raw_spinlock_t lock;
		u32 lock_owner;
		int lock_owner_depth;
	} die = {
		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
		.lock = __RAW_SPIN_LOCK_UNLOCKED,
		.lock_owner = -1,
		.lock_owner_depth = 0
	};
@@ -389,13 +368,14 @@ void die(const char * str, struct pt_regs * regs, long err)

	if (die.lock_owner != raw_smp_processor_id()) {
		console_verbose();
		spin_lock_irqsave(&die.lock, flags);
		__raw_spin_lock(&die.lock);
		raw_local_save_flags(flags);
		die.lock_owner = smp_processor_id();
		die.lock_owner_depth = 0;
		bust_spinlocks(1);
	}
	else
		local_save_flags(flags);
		raw_local_save_flags(flags);

	if (++die.lock_owner_depth < 3) {
		unsigned long esp;
@@ -439,7 +419,8 @@ void die(const char * str, struct pt_regs * regs, long err)
	bust_spinlocks(0);
	die.lock_owner = -1;
	add_taint(TAINT_DIE);
	spin_unlock_irqrestore(&die.lock, flags);
	__raw_spin_unlock(&die.lock);
	raw_local_irq_restore(flags);

	if (!regs)
		return;
@@ -1142,6 +1123,8 @@ static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)

void __init trap_init(void)
{
	int i;

#ifdef CONFIG_EISA
	void __iomem *p = ioremap(0x0FFFD9, 4);
	if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
@@ -1201,6 +1184,11 @@ void __init trap_init(void)

	set_system_gate(SYSCALL_VECTOR,&system_call);

	/* Reserve all the builtin and the syscall vector. */
	for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
		set_bit(i, used_vectors);
	set_bit(SYSCALL_VECTOR, used_vectors);

	/*
	 * Should be a barrier for any external CPU state.
	 */

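trap_init() now marks every vector it installs in the new used_vectors bitmap, so other code can reserve a vector without colliding with a built-in one. The bitmap calls it relies on, in a small sketch (vector 0xf3 is an arbitrary example):

	#include <linux/bitops.h>

	DECLARE_BITMAP(used_vectors, NR_VECTORS);

	if (!test_bit(0xf3, used_vectors))
		set_bit(0xf3, used_vectors);	/* claim the vector */
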
@@ -462,7 +462,7 @@ void out_of_line_bug(void)
EXPORT_SYMBOL(out_of_line_bug);
#endif

static DEFINE_SPINLOCK(die_lock);
static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

@@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void)
	oops_enter();

	/* racy, but better than risking deadlock. */
	local_irq_save(flags);
	raw_local_irq_save(flags);
	cpu = smp_processor_id();
	if (!spin_trylock(&die_lock)) {
	if (!__raw_spin_trylock(&die_lock)) {
		if (cpu == die_owner)
			/* nested oops. should stop eventually */;
		else
			spin_lock(&die_lock);
			__raw_spin_lock(&die_lock);
	}
	die_nest_count++;
	die_owner = cpu;
@@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags)
	die_owner = -1;
	bust_spinlocks(0);
	die_nest_count--;
	if (die_nest_count)
		/* We still own the lock */
		local_irq_restore(flags);
	else
	if (!die_nest_count)
		/* Nest count reaches zero, release the lock. */
		spin_unlock_irqrestore(&die_lock, flags);
		__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
	if (panic_on_oops)
		panic("Fatal exception");
	oops_exit();

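Both the i386 die() and the x86-64 oops_begin()/oops_end() now take a raw spinlock with raw IRQ flags, keeping lockdep and the preemption machinery out of a path that runs while the kernel is already broken. The resulting locking pattern, in outline:

	static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
	unsigned long flags;

	raw_local_irq_save(flags);
	__raw_spin_lock(&die_lock);
	/* ... dump registers, stack and code bytes ... */
	__raw_spin_unlock(&die_lock);
	raw_local_irq_restore(flags);
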
@@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void)
	if (cpu_has_tsc) {
		cpu_khz = calculate_cpu_khz();
		tsc_khz = cpu_khz;
		cpu_data[0].loops_per_jiffy =
			cpufreq_scale(cpu_data[0].loops_per_jiffy,
		cpu_data(0).loops_per_jiffy =
			cpufreq_scale(cpu_data(0).loops_per_jiffy,
					cpu_khz_old, cpu_khz);
		return 0;
	} else
@@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
			return 0;
		}
		ref_freq = freq->old;
		loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
		loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
		cpu_khz_ref = cpu_khz;
	}
@@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
	    (val == CPUFREQ_RESUMECHANGE)) {
		if (!(freq->flags & CPUFREQ_CONST_LOOPS))
			cpu_data[freq->cpu].loops_per_jiffy =
			cpu_data(freq->cpu).loops_per_jiffy =
				cpufreq_scale(loops_per_jiffy_ref,
						ref_freq, freq->new);

@@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
	struct cpufreq_freqs *freq = data;
	unsigned long *lpj, dummy;

	if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
	if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;

	lpj = &dummy;
	if (!(freq->flags & CPUFREQ_CONST_LOOPS))
#ifdef CONFIG_SMP
		lpj = &cpu_data[freq->cpu].loops_per_jiffy;
		lpj = &cpu_data(freq->cpu).loops_per_jiffy;
#else
		lpj = &boot_cpu_data.loops_per_jiffy;
#endif

@@ -48,7 +48,7 @@
	({unsigned long v; \
	extern char __vsyscall_0; \
	asm("" : "=r" (v) : "0" (x)); \
	((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
	((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })

/*
 * vsyscall_gtod_data contains data that is :
@@ -291,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP))
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/* Store cpu number in limit so that it can be loaded quickly