Merge branch 'linus' into x86/urgent
Merge reason: we want to queue up a dependent fix.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -5,6 +5,7 @@
# Don't trace early stages of a secondary CPU boot
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_common.o = -pg
+CFLAGS_REMOVE_perf_event.o = -pg
endif

# Make sure load_percpu_segment has no stackprotector

@@ -535,7 +535,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
}
}

-display_cacheinfo(c);
+cpu_detect_cache_sizes(c);

/* Multi core CPU? */
if (c->extended_cpuid_level >= 0x80000008) {

@@ -294,7 +294,7 @@ static void __cpuinit init_c3(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_REP_GOOD);
}

-display_cacheinfo(c);
+cpu_detect_cache_sizes(c);
}

enum {

@@ -61,7 +61,7 @@ void __init setup_cpu_local_masks(void)
static void __cpuinit default_init(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
-display_cacheinfo(c);
+cpu_detect_cache_sizes(c);
#else
/* Not much we can do here... */
/* Check if at least it has cpuid */

@@ -383,7 +383,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
}
}

-void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
{
unsigned int n, dummy, ebx, ecx, edx, l2size;

@@ -391,8 +391,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)

if (n >= 0x80000005) {
cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
-printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
-edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
c->x86_cache_size = (ecx>>24) + (edx>>24);
#ifdef CONFIG_X86_64
/* On K8 L1 TLB is inclusive, so don't count it */

@@ -422,9 +420,6 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
#endif

c->x86_cache_size = l2size;

-printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-l2size, ecx & 0xFF);
}

void __cpuinit detect_ht(struct cpuinfo_x86 *c)

@@ -659,24 +654,31 @@ void __init early_cpu_init(void)
const struct cpu_dev *const *cdev;
int count = 0;

+#ifdef PROCESSOR_SELECT
printk(KERN_INFO "KERNEL supported cpus:\n");
+#endif

for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
const struct cpu_dev *cpudev = *cdev;
-unsigned int j;

if (count >= X86_VENDOR_NUM)
break;
cpu_devs[count] = cpudev;
count++;

-for (j = 0; j < 2; j++) {
-if (!cpudev->c_ident[j])
-continue;
-printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
-cpudev->c_ident[j]);
-}
-}
+#ifdef PROCESSOR_SELECT
+{
+unsigned int j;

+for (j = 0; j < 2; j++) {
+if (!cpudev->c_ident[j])
+continue;
+printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
+cpudev->c_ident[j]);
+}
+}
+#endif
+}
early_identify_cpu(&boot_cpu_data);
}

@@ -837,10 +839,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
}

-#ifdef CONFIG_X86_MCE
/* Init Machine Check Exception if available. */
-mcheck_init(c);
-#endif
+mcheck_cpu_init(c);

select_idle_routine(c);

@@ -32,6 +32,6 @@ struct cpu_dev {
extern const struct cpu_dev *const __x86_cpu_dev_start[],
*const __x86_cpu_dev_end[];

-extern void display_cacheinfo(struct cpuinfo_x86 *c);
+extern void cpu_detect_cache_sizes(struct cpuinfo_x86 *c);

#endif

@@ -526,15 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
-/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+/* Intel Xeon Processor 7100 Series Specification Update
+ * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
 * AL30: A Machine Check Exception (MCE) Occurring during an
 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-* Both Processor Cores to Lock Up when HT is enabled*/
+ * Both Processor Cores to Lock Up. */
if (c->x86_vendor == X86_VENDOR_INTEL) {
if ((c->x86 == 15) &&
(c->x86_model == 6) &&
-(c->x86_mask == 8) && smt_capable())
+(c->x86_mask == 8)) {
+printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+"Xeon(R) 7100 Errata AL30, processors may "
+"lock up on frequency changes: disabling "
+"acpi-cpufreq.\n");
return -ENODEV;
+}
}
return 0;
}

@@ -549,13 +555,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
unsigned int result = 0;
struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+static int blacklisted;
+#endif

dprintk("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
-result = acpi_cpufreq_blacklist(c);
-if (result)
-return result;
+if (blacklisted)
+return blacklisted;
+blacklisted = acpi_cpufreq_blacklist(c);
+if (blacklisted)
+return blacklisted;
#endif

data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);

@@ -813,7 +813,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
break;
case 1 ... 15:
-longhaul_version = TYPE_LONGHAUL_V1;
+longhaul_version = TYPE_LONGHAUL_V2;
if (c->x86_mask < 8) {
cpu_model = CPU_SAMUEL2;
cpuname = "C3 'Samuel 2' [C5B]";

@@ -1022,7 +1022,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
* set it to 1 to avoid problems in the future.
* For all others it's a BIOS bug.
*/
-if (!boot_cpu_data.x86 == 0x11)
+if (boot_cpu_data.x86 != 0x11)
printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
"latency\n");
max_latency = 1;

@@ -232,28 +232,23 @@ static unsigned int speedstep_detect_chipset(void)
return 0;
}

-struct get_freq_data {
-unsigned int speed;
-unsigned int processor;
-};

-static void get_freq_data(void *_data)
+static void get_freq_data(void *_speed)
{
-struct get_freq_data *data = _data;
+unsigned int *speed = _speed;

-data->speed = speedstep_get_frequency(data->processor);
+*speed = speedstep_get_frequency(speedstep_processor);
}

static unsigned int speedstep_get(unsigned int cpu)
{
-struct get_freq_data data = { .processor = cpu };
+unsigned int speed;

/* You're supposed to ensure CPU is online. */
-if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
BUG();

-dprintk("detected %u kHz as current frequency\n", data.speed);
-return data.speed;
+dprintk("detected %u kHz as current frequency\n", speed);
+return speed;
}

/**

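The speedstep hunk above goes back to passing a plain pointer through smp_call_function_single() and letting the callback fill it in on the target CPU. A minimal sketch of that pattern outside this driver; the module, function names, and the use of smp_processor_id() as the value being read are illustrative assumptions, not part of the patch:

/* Illustrative sketch only -- not part of this commit. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/smp.h>

/* Runs on the target CPU; writes its result through the 'info' pointer. */
static void read_cpu_id(void *info)
{
	unsigned int *id = info;

	*id = smp_processor_id();
}

static int __init sketch_init(void)
{
	unsigned int id = 0;

	/* Caller must ensure CPU 0 is online; wait = 1 blocks until done. */
	if (smp_call_function_single(0, read_cpu_id, &id, 1) != 0)
		return -ENODEV;

	pr_info("callback ran on CPU %u\n", id);
	return 0;
}

static void __exit sketch_exit(void)
{
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");
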
@@ -373,7 +373,7 @@ static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
/* Handle the GX (Formally known as the GX2) */

if (c->x86 == 5 && c->x86_model == 5)
-display_cacheinfo(c);
+cpu_detect_cache_sizes(c);
else
init_cyrix(c);
}

@@ -491,22 +491,6 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
#endif
}

-if (trace)
-printk(KERN_INFO "CPU: Trace cache: %dK uops", trace);
-else if (l1i)
-printk(KERN_INFO "CPU: L1 I cache: %dK", l1i);

-if (l1d)
-printk(KERN_CONT ", L1 D cache: %dK\n", l1d);
-else
-printk(KERN_CONT "\n");

-if (l2)
-printk(KERN_INFO "CPU: L2 cache: %dK\n", l2);

-if (l3)
-printk(KERN_INFO "CPU: L3 cache: %dK\n", l3);

c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

return l2;

@@ -46,6 +46,9 @@

#include "mce-internal.h"

+#define CREATE_TRACE_POINTS
+#include <trace/events/mce.h>

int mce_disabled __read_mostly;

#define MISC_MCELOG_MINOR 227

@@ -85,18 +88,26 @@ static DECLARE_WAIT_QUEUE_HEAD(mce_wait);
static DEFINE_PER_CPU(struct mce, mces_seen);
static int cpu_missing;

-static void default_decode_mce(struct mce *m)
+/*
+ * CPU/chipset specific EDAC code can register a notifier call here to print
+ * MCE errors in a human-readable form.
+ */
+ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+EXPORT_SYMBOL_GPL(x86_mce_decoder_chain);

+static int default_decode_mce(struct notifier_block *nb, unsigned long val,
+void *data)
{
pr_emerg("No human readable MCE decoding support on this CPU type.\n");
pr_emerg("Run the message through 'mcelog --ascii' to decode.\n");

+return NOTIFY_STOP;
}

-/*
- * CPU/chipset specific EDAC code can register a callback here to print
- * MCE errors in a human-readable form:
- */
-void (*x86_mce_decode_callback)(struct mce *m) = default_decode_mce;
-EXPORT_SYMBOL(x86_mce_decode_callback);
+static struct notifier_block mce_dec_nb = {
+.notifier_call = default_decode_mce,
+.priority = -1,
+};

/* MCA banks polled by the period polling timer for corrected events */
DEFINE_PER_CPU(mce_banks_t, mce_poll_banks) = {

@@ -141,6 +152,9 @@ void mce_log(struct mce *mce)
{
unsigned next, entry;

+/* Emit the trace record: */
+trace_mce_record(mce);

mce->finished = 0;
wmb();
for (;;) {

@@ -204,9 +218,9 @@ static void print_mce(struct mce *m)

/*
* Print out human-readable details about the MCE error,
-* (if the CPU has an implementation for that):
+* (if the CPU has an implementation for that)
*/
-x86_mce_decode_callback(m);
+atomic_notifier_call_chain(&x86_mce_decoder_chain, 0, m);
}

static void print_mce_head(void)

@@ -1122,7 +1136,7 @@ static int check_interval = 5 * 60; /* 5 minutes */
static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */
static DEFINE_PER_CPU(struct timer_list, mce_timer);

-static void mcheck_timer(unsigned long data)
+static void mce_start_timer(unsigned long data)
{
struct timer_list *t = &per_cpu(mce_timer, data);
int *n;

@@ -1187,7 +1201,7 @@ int mce_notify_irq(void)
}
EXPORT_SYMBOL_GPL(mce_notify_irq);

-static int mce_banks_init(void)
+static int __cpuinit __mcheck_cpu_mce_banks_init(void)
{
int i;

@@ -1206,7 +1220,7 @@ static int mce_banks_init(void)
/*
* Initialize Machine Checks for a CPU.
*/
-static int __cpuinit mce_cap_init(void)
+static int __cpuinit __mcheck_cpu_cap_init(void)
{
unsigned b;
u64 cap;

@@ -1228,7 +1242,7 @@ static int __cpuinit mce_cap_init(void)
WARN_ON(banks != 0 && b != banks);
banks = b;
if (!mce_banks) {
-int err = mce_banks_init();
+int err = __mcheck_cpu_mce_banks_init();

if (err)
return err;

@@ -1244,7 +1258,7 @@ static int __cpuinit mce_cap_init(void)
return 0;
}

-static void mce_init(void)
+static void __mcheck_cpu_init_generic(void)
{
mce_banks_t all_banks;
u64 cap;

@@ -1273,7 +1287,7 @@ static void mce_init(void)
}

/* Add per CPU specific workarounds here */
-static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
+static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
{
if (c->x86_vendor == X86_VENDOR_UNKNOWN) {
pr_info("MCE: unknown CPU type - not enabling MCE support.\n");

@@ -1341,7 +1355,7 @@ static int __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
return 0;
}

-static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
+static void __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
{
if (c->x86 != 5)
return;

@@ -1355,7 +1369,7 @@ static void __cpuinit mce_ancient_init(struct cpuinfo_x86 *c)
}
}

-static void mce_cpu_features(struct cpuinfo_x86 *c)
+static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
{
switch (c->x86_vendor) {
case X86_VENDOR_INTEL:

@@ -1369,7 +1383,7 @@ static void mce_cpu_features(struct cpuinfo_x86 *c)
}
}

-static void mce_init_timer(void)
+static void __mcheck_cpu_init_timer(void)
{
struct timer_list *t = &__get_cpu_var(mce_timer);
int *n = &__get_cpu_var(mce_next_interval);

@@ -1380,7 +1394,7 @@ static void mce_init_timer(void)
*n = check_interval * HZ;
if (!*n)
return;
-setup_timer(t, mcheck_timer, smp_processor_id());
+setup_timer(t, mce_start_timer, smp_processor_id());
t->expires = round_jiffies(jiffies + *n);
add_timer_on(t, smp_processor_id());
}

@@ -1400,27 +1414,28 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
* Called for each booted CPU to set up machine checks.
* Must be called with preempt off:
*/
-void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
+void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
{
if (mce_disabled)
return;

-mce_ancient_init(c);
+__mcheck_cpu_ancient_init(c);

if (!mce_available(c))
return;

-if (mce_cap_init() < 0 || mce_cpu_quirks(c) < 0) {
+if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) {
mce_disabled = 1;
return;
}

machine_check_vector = do_machine_check;

-mce_init();
-mce_cpu_features(c);
-mce_init_timer();
+__mcheck_cpu_init_generic();
+__mcheck_cpu_init_vendor(c);
+__mcheck_cpu_init_timer();
INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);

}

@@ -1640,6 +1655,15 @@ static int __init mcheck_enable(char *str)
}
__setup("mce", mcheck_enable);

+int __init mcheck_init(void)
+{
+atomic_notifier_chain_register(&x86_mce_decoder_chain, &mce_dec_nb);

+mcheck_intel_therm_init();

+return 0;
+}

/*
* Sysfs support
*/

@@ -1648,7 +1672,7 @@ __setup("mce", mcheck_enable);
* Disable machine checks on suspend and shutdown. We can't really handle
* them later.
*/
-static int mce_disable(void)
+static int mce_disable_error_reporting(void)
{
int i;

@@ -1663,12 +1687,12 @@ static int mce_disable(void)

static int mce_suspend(struct sys_device *dev, pm_message_t state)
{
-return mce_disable();
+return mce_disable_error_reporting();
}

static int mce_shutdown(struct sys_device *dev)
{
-return mce_disable();
+return mce_disable_error_reporting();
}

/*

@@ -1678,8 +1702,8 @@ static int mce_shutdown(struct sys_device *dev)
*/
static int mce_resume(struct sys_device *dev)
{
-mce_init();
-mce_cpu_features(&current_cpu_data);
+__mcheck_cpu_init_generic();
+__mcheck_cpu_init_vendor(&current_cpu_data);

return 0;
}

@@ -1689,8 +1713,8 @@ static void mce_cpu_restart(void *data)
del_timer_sync(&__get_cpu_var(mce_timer));
if (!mce_available(&current_cpu_data))
return;
-mce_init();
-mce_init_timer();
+__mcheck_cpu_init_generic();
+__mcheck_cpu_init_timer();
}

/* Reinit MCEs after user configuration changes */

@@ -1716,7 +1740,7 @@ static void mce_enable_ce(void *all)
cmci_reenable();
cmci_recheck();
if (all)
-mce_init_timer();
+__mcheck_cpu_init_timer();
}

static struct sysdev_class mce_sysclass = {

@@ -1929,13 +1953,14 @@ static __cpuinit void mce_remove_device(unsigned int cpu)
}

/* Make sure there are no machine checks on offlined CPUs. */
-static void mce_disable_cpu(void *h)
+static void __cpuinit mce_disable_cpu(void *h)
{
unsigned long action = *(unsigned long *)h;
int i;

if (!mce_available(&current_cpu_data))
return;

if (!(action & CPU_TASKS_FROZEN))
cmci_clear();
for (i = 0; i < banks; i++) {

@@ -1946,7 +1971,7 @@ static void mce_disable_cpu(void *h)
}
}

-static void mce_reenable_cpu(void *h)
+static void __cpuinit mce_reenable_cpu(void *h)
{
unsigned long action = *(unsigned long *)h;
int i;

@@ -2027,7 +2052,7 @@ static __init void mce_init_banks(void)
}
}

-static __init int mce_init_device(void)
+static __init int mcheck_init_device(void)
{
int err;
int i = 0;

@@ -2055,7 +2080,7 @@ static __init int mce_init_device(void)
return err;
}

-device_initcall(mce_init_device);
+device_initcall(mcheck_init_device);

/*
* Old style boot options parsing. Only for compatibility.

@@ -2103,7 +2128,7 @@ static int fake_panic_set(void *data, u64 val)
DEFINE_SIMPLE_ATTRIBUTE(fake_panic_fops, fake_panic_get,
fake_panic_set, "%llu\n");

-static int __init mce_debugfs_init(void)
+static int __init mcheck_debugfs_init(void)
{
struct dentry *dmce, *ffake_panic;

@@ -2117,5 +2142,5 @@ static int __init mce_debugfs_init(void)

return 0;
}
-late_initcall(mce_debugfs_init);
+late_initcall(mcheck_debugfs_init);
#endif

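The mce.c hunks above replace the single x86_mce_decode_callback function pointer with an atomic notifier chain, x86_mce_decoder_chain, which print_mce() now walks via atomic_notifier_call_chain(). A hedged sketch of how a decoder (for example an EDAC driver) could hook that chain; the module and function names below are made up for illustration and are not part of the patch:

/* Illustrative sketch only -- not part of this commit. */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/mce.h>

/* Chain declared and exported by mce.c in the hunks above. */
extern struct atomic_notifier_head x86_mce_decoder_chain;

static int sketch_decode_mce(struct notifier_block *nb, unsigned long val,
			     void *data)
{
	struct mce *m = data;

	pr_emerg("decoded MCE: bank %d status 0x%llx\n",
		 m->bank, (unsigned long long)m->status);

	/* Stop the chain so the generic "run mcelog --ascii" fallback is skipped. */
	return NOTIFY_STOP;
}

static struct notifier_block sketch_mce_dec_nb = {
	.notifier_call	= sketch_decode_mce,
};

static int __init sketch_init(void)
{
	atomic_notifier_chain_register(&x86_mce_decoder_chain, &sketch_mce_dec_nb);
	return 0;
}

static void __exit sketch_exit(void)
{
	atomic_notifier_chain_unregister(&x86_mce_decoder_chain, &sketch_mce_dec_nb);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");

Because the default decoder registers with priority -1, a decoder registered this way runs first and can suppress the generic message by returning NOTIFY_STOP, as the patch's own default_decode_mce() does.
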
@@ -49,6 +49,8 @@ static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en = ATOMIC_INIT(0);

+static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name) \
static SYSDEV_ATTR(_name, 0444, therm_throt_sysdev_show_##_name, NULL)

@@ -254,6 +256,18 @@ asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
ack_APIC_irq();
}

+void __init mcheck_intel_therm_init(void)
+{
+/*
+ * This function is only called on boot CPU. Save the init thermal
+ * LVT value on BSP and use that value to restore APs' thermal LVT
+ * entry BIOS programmed later
+ */
+if (cpu_has(&boot_cpu_data, X86_FEATURE_ACPI) &&
+cpu_has(&boot_cpu_data, X86_FEATURE_ACC))
+lvtthmr_init = apic_read(APIC_LVTTHMR);
+}

void intel_init_thermal(struct cpuinfo_x86 *c)
{
unsigned int cpu = smp_processor_id();

@@ -270,7 +284,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
* since it might be delivered via SMI already:
*/
rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-h = apic_read(APIC_LVTTHMR);

+/*
+ * The initial value of thermal LVT entries on all APs always reads
+ * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
+ * sequence to them and LVT registers are reset to 0s except for
+ * the mask bits which are set to 1s when APs receive INIT IPI.
+ * Always restore the value that BIOS has programmed on AP based on
+ * BSP's info we saved since BIOS is always setting the same value
+ * for all threads/cores
+ */
+apic_write(APIC_LVTTHMR, lvtthmr_init);

+h = lvtthmr_init;

if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
printk(KERN_DEBUG
"CPU%d: Thermal monitoring handled by SMI\n", cpu);

@@ -77,6 +77,18 @@ struct cpu_hw_events {
struct debug_store *ds;
};

+struct event_constraint {
+unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+int code;
+};

+#define EVENT_CONSTRAINT(c, m) { .code = (c), .idxmsk[0] = (m) }
+#define EVENT_CONSTRAINT_END { .code = 0, .idxmsk[0] = 0 }

+#define for_each_event_constraint(e, c) \
+for ((e) = (c); (e)->idxmsk[0]; (e)++)


/*
* struct x86_pmu - generic x86 pmu
*/

@@ -102,6 +114,8 @@ struct x86_pmu {
u64 intel_ctrl;
void (*enable_bts)(u64 config);
void (*disable_bts)(void);
+int (*get_event_idx)(struct cpu_hw_events *cpuc,
+struct hw_perf_event *hwc);
};

static struct x86_pmu x86_pmu __read_mostly;

@@ -110,6 +124,8 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
.enabled = 1,
};

+static const struct event_constraint *event_constraints;

/*
* Not sure about some of these
*/

@@ -155,6 +171,16 @@ static u64 p6_pmu_raw_event(u64 hw_event)
return hw_event & P6_EVNTSEL_MASK;
}

+static const struct event_constraint intel_p6_event_constraints[] =
+{
+EVENT_CONSTRAINT(0xc1, 0x1), /* FLOPS */
+EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
+EVENT_CONSTRAINT(0x11, 0x1), /* FP_ASSIST */
+EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
+EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
+EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
+EVENT_CONSTRAINT_END
+};

/*
* Intel PerfMon v3. Used on Core2 and later.

@@ -170,6 +196,35 @@ static const u64 intel_perfmon_event_map[] =
[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};

+static const struct event_constraint intel_core_event_constraints[] =
+{
+EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
+EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
+EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
+EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
+EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
+EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
+EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
+EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
+EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
+EVENT_CONSTRAINT_END
+};

+static const struct event_constraint intel_nehalem_event_constraints[] =
+{
+EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
+EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
+EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
+EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
+EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
+EVENT_CONSTRAINT(0x4c, 0x3), /* LOAD_HIT_PRE */
+EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
+EVENT_CONSTRAINT(0x52, 0x3), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
+EVENT_CONSTRAINT(0x53, 0x3), /* L1D_CACHE_LOCK_FB_HIT */
+EVENT_CONSTRAINT(0xc5, 0x3), /* CACHE_LOCK_CYCLES */
+EVENT_CONSTRAINT_END
+};

static u64 intel_pmu_event_map(int hw_event)
{
return intel_perfmon_event_map[hw_event];

@@ -190,7 +245,7 @@ static u64 __read_mostly hw_cache_event_ids
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];

-static const u64 nehalem_hw_cache_event_ids
+static __initconst u64 nehalem_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@@ -281,7 +336,7 @@ static const u64 nehalem_hw_cache_event_ids
},
};

-static const u64 core2_hw_cache_event_ids
+static __initconst u64 core2_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@@ -372,7 +427,7 @@ static const u64 core2_hw_cache_event_ids
},
};

-static const u64 atom_hw_cache_event_ids
+static __initconst u64 atom_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@@ -469,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event)
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK 0xFF000000ULL

#define CORE_EVNTSEL_MASK \
(CORE_EVNTSEL_EVENT_MASK | \

@@ -481,7 +536,7 @@ static u64 intel_pmu_raw_event(u64 hw_event)
return hw_event & CORE_EVNTSEL_MASK;
}

-static const u64 amd_hw_cache_event_ids
+static __initconst u64 amd_hw_cache_event_ids
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX] =

@@ -932,6 +987,8 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
hwc->config = ARCH_PERFMON_EVENTSEL_INT;

+hwc->idx = -1;

/*
* Count user and OS events unless requested not to.
*/

@@ -1334,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
x86_pmu_enable_event(hwc, idx);
}

-static int
-fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
+static int fixed_mode_idx(struct hw_perf_event *hwc)
{
unsigned int hw_event;

@@ -1349,6 +1405,12 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
if (!x86_pmu.num_events_fixed)
return -1;

+/*
+ * fixed counters do not take all possible filters
+ */
+if (hwc->config & ARCH_PERFMON_EVENT_FILTER_MASK)
+return -1;

if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
return X86_PMC_IDX_FIXED_INSTRUCTIONS;
if (unlikely(hw_event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))

@@ -1360,22 +1422,57 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
}

/*
-* Find a PMC slot for the freshly enabled / scheduled in event:
+* generic counter allocator: get next free counter
*/
-static int x86_pmu_enable(struct perf_event *event)
+static int
+gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
{
-struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-struct hw_perf_event *hwc = &event->hw;
int idx;

-idx = fixed_mode_idx(event, hwc);
+idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
+return idx == x86_pmu.num_events ? -1 : idx;
+}

+/*
+ * intel-specific counter allocator: check event constraints
+ */
+static int
+intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+const struct event_constraint *event_constraint;
+int i, code;

+if (!event_constraints)
+goto skip;

+code = hwc->config & CORE_EVNTSEL_EVENT_MASK;

+for_each_event_constraint(event_constraint, event_constraints) {
+if (code == event_constraint->code) {
+for_each_bit(i, event_constraint->idxmsk, X86_PMC_IDX_MAX) {
+if (!test_and_set_bit(i, cpuc->used_mask))
+return i;
+}
+return -1;
+}
+}
+skip:
+return gen_get_event_idx(cpuc, hwc);
+}

+static int
+x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+int idx;

+idx = fixed_mode_idx(hwc);
if (idx == X86_PMC_IDX_FIXED_BTS) {
/* BTS is already occupied. */
if (test_and_set_bit(idx, cpuc->used_mask))
return -EAGAIN;

hwc->config_base = 0;
-hwc->event_base = 0;
+hwc->event_base = 0;
hwc->idx = idx;
} else if (idx >= 0) {
/*

@@ -1396,20 +1493,35 @@ static int x86_pmu_enable(struct perf_event *event)
} else {
idx = hwc->idx;
/* Try to get the previous generic event again */
-if (test_and_set_bit(idx, cpuc->used_mask)) {
+if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
-idx = find_first_zero_bit(cpuc->used_mask,
-x86_pmu.num_events);
-if (idx == x86_pmu.num_events)
+idx = x86_pmu.get_event_idx(cpuc, hwc);
+if (idx == -1)
return -EAGAIN;

set_bit(idx, cpuc->used_mask);
hwc->idx = idx;
}
-hwc->config_base = x86_pmu.eventsel;
-hwc->event_base = x86_pmu.perfctr;
+hwc->config_base = x86_pmu.eventsel;
+hwc->event_base = x86_pmu.perfctr;
}

+return idx;
+}

+/*
+ * Find a PMC slot for the freshly enabled / scheduled in event:
+ */
+static int x86_pmu_enable(struct perf_event *event)
+{
+struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+struct hw_perf_event *hwc = &event->hw;
+int idx;

+idx = x86_schedule_event(cpuc, hwc);
+if (idx < 0)
+return idx;

perf_events_lapic_init();

x86_pmu.disable(hwc, idx);

@@ -1852,7 +1964,7 @@ static __read_mostly struct notifier_block perf_event_nmi_notifier = {
.priority = 1
};

-static struct x86_pmu p6_pmu = {
+static __initconst struct x86_pmu p6_pmu = {
.name = "p6",
.handle_irq = p6_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,

@@ -1877,9 +1989,10 @@ static struct x86_pmu p6_pmu = {
*/
.event_bits = 32,
.event_mask = (1ULL << 32) - 1,
+.get_event_idx = intel_get_event_idx,
};

-static struct x86_pmu intel_pmu = {
+static __initconst struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
.disable_all = intel_pmu_disable_all,

@@ -1900,9 +2013,10 @@ static struct x86_pmu intel_pmu = {
.max_period = (1ULL << 31) - 1,
.enable_bts = intel_pmu_enable_bts,
.disable_bts = intel_pmu_disable_bts,
+.get_event_idx = intel_get_event_idx,
};

-static struct x86_pmu amd_pmu = {
+static __initconst struct x86_pmu amd_pmu = {
.name = "AMD",
.handle_irq = amd_pmu_handle_irq,
.disable_all = amd_pmu_disable_all,

@@ -1920,9 +2034,10 @@ static struct x86_pmu amd_pmu = {
.apic = 1,
/* use highest bit to detect overflow */
.max_period = (1ULL << 47) - 1,
+.get_event_idx = gen_get_event_idx,
};

-static int p6_pmu_init(void)
+static __init int p6_pmu_init(void)
{
switch (boot_cpu_data.x86_model) {
case 1:

@@ -1932,10 +2047,12 @@ static int p6_pmu_init(void)
case 7:
case 8:
case 11: /* Pentium III */
+event_constraints = intel_p6_event_constraints;
break;
case 9:
case 13:
/* Pentium M */
+event_constraints = intel_p6_event_constraints;
break;
default:
pr_cont("unsupported p6 CPU model %d ",

@@ -1954,7 +2071,7 @@ static int p6_pmu_init(void)
return 0;
}

-static int intel_pmu_init(void)
+static __init int intel_pmu_init(void)
{
union cpuid10_edx edx;
union cpuid10_eax eax;

@@ -2007,12 +2124,14 @@ static int intel_pmu_init(void)
sizeof(hw_cache_event_ids));

pr_cont("Core2 events, ");
+event_constraints = intel_core_event_constraints;
break;
default:
case 26:
memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
sizeof(hw_cache_event_ids));

+event_constraints = intel_nehalem_event_constraints;
pr_cont("Nehalem/Corei7 events, ");
break;
case 28:

@@ -2025,7 +2144,7 @@ static int intel_pmu_init(void)
return 0;
}

-static int amd_pmu_init(void)
+static __init int amd_pmu_init(void)
{
/* Performance-monitoring supported from K7 and later: */
if (boot_cpu_data.x86 < 6)

@@ -2105,11 +2224,47 @@ static const struct pmu pmu = {
.unthrottle = x86_pmu_unthrottle,
};

+static int
+validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+struct hw_perf_event fake_event = event->hw;

+if (event->pmu && event->pmu != &pmu)
+return 0;

+return x86_schedule_event(cpuc, &fake_event) >= 0;
+}

+static int validate_group(struct perf_event *event)
+{
+struct perf_event *sibling, *leader = event->group_leader;
+struct cpu_hw_events fake_pmu;

+memset(&fake_pmu, 0, sizeof(fake_pmu));

+if (!validate_event(&fake_pmu, leader))
+return -ENOSPC;

+list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+if (!validate_event(&fake_pmu, sibling))
+return -ENOSPC;
+}

+if (!validate_event(&fake_pmu, event))
+return -ENOSPC;

+return 0;
+}

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
int err;

err = __hw_perf_event_init(event);
+if (!err) {
+if (event->group_leader != event)
+err = validate_group(event);
+}
if (err) {
if (event->destroy)
event->destroy(event);

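The perf_event.c changes above route counter assignment through the new x86_pmu.get_event_idx() hook and, on Intel, consult a table of struct event_constraint entries mapping an event select code to a bitmask of counters allowed to host it. A small user-space C sketch of that lookup logic; the table contents, counter count, and names below are illustrative assumptions, only the EVENT_CONSTRAINT idea is taken from the patch:

/* Illustrative sketch only -- not part of this commit. */
#include <stdio.h>

#define NUM_COUNTERS 4

struct event_constraint {
	unsigned long	idxmsk;	/* bitmask of counters the event may use */
	int		code;	/* event select code */
};

#define EVENT_CONSTRAINT(c, m)	{ .code = (c), .idxmsk = (m) }
#define EVENT_CONSTRAINT_END	{ .code = 0, .idxmsk = 0 }

/* Hypothetical table: event 0x12 may only run on counters 0 and 1. */
static const struct event_constraint constraints[] = {
	EVENT_CONSTRAINT(0x12, 0x3),
	EVENT_CONSTRAINT_END,
};

/* Mirrors the intel_get_event_idx() idea: pick the first free counter the
 * constraint allows, or -1; unconstrained events take any free counter. */
static int get_event_idx(unsigned long *used_mask, int code)
{
	const struct event_constraint *c;
	int i;

	for (c = constraints; c->idxmsk; c++) {
		if (c->code != code)
			continue;
		for (i = 0; i < NUM_COUNTERS; i++) {
			if ((c->idxmsk & (1UL << i)) && !(*used_mask & (1UL << i))) {
				*used_mask |= 1UL << i;
				return i;
			}
		}
		return -1;
	}
	for (i = 0; i < NUM_COUNTERS; i++) {	/* generic fallback */
		if (!(*used_mask & (1UL << i))) {
			*used_mask |= 1UL << i;
			return i;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long used = 0;

	printf("0x12 -> counter %d\n", get_event_idx(&used, 0x12));
	printf("0x12 -> counter %d\n", get_event_idx(&used, 0x12));
	printf("0x12 -> counter %d\n", get_event_idx(&used, 0x12)); /* -1: both allowed counters busy */
	printf("0x3c -> counter %d\n", get_event_idx(&used, 0x3c)); /* unconstrained: next free counter */
	return 0;
}
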
@@ -712,7 +712,7 @@ static void probe_nmi_watchdog(void)
switch (boot_cpu_data.x86_vendor) {
case X86_VENDOR_AMD:
if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 &&
-boot_cpu_data.x86 != 16)
+boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17)
return;
wd_ops = &k7_wd_ops;
break;

@@ -26,7 +26,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)

early_init_transmeta(c);

-display_cacheinfo(c);
+cpu_detect_cache_sizes(c);

/* Print CMS and CPU revision */
max = cpuid_eax(0x80860000);