Merge branch 'linus' into tracing/core
Conflicts:
	kernel/trace/trace_events_filter.c

We use the tracing/core version.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -3793,6 +3793,9 @@ int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
mmr_pnode = uv_blade_to_pnode(mmr_blade);
uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

if (cfg->move_in_progress)
send_cleanup_vector(cfg);

return irq;
}
@@ -17,11 +17,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return x2apic_enabled();
}

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

/*
* need to use more than cpu 0, because we need more vectors when
* MSI-X are used.
*/
static const struct cpumask *x2apic_target_cpus(void)
{
return cpumask_of(0);
return cpu_online_mask;
}

/*

@@ -170,7 +172,7 @@ static unsigned long set_apic_id(unsigned int id)

static int x2apic_cluster_phys_pkg_id(int initial_apicid, int index_msb)
{
return current_cpu_data.initial_apicid >> index_msb;
return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)
@@ -27,11 +27,13 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
return 0;
}

/* Start with all IRQs pointing to boot CPU. IRQ balancing will shift them. */

/*
* need to use more than cpu 0, because we need more vectors when
* MSI-X are used.
*/
static const struct cpumask *x2apic_target_cpus(void)
{
return cpumask_of(0);
return cpu_online_mask;
}

static void x2apic_vector_allocation_domain(int cpu, struct cpumask *retmask)

@@ -162,7 +164,7 @@ static unsigned long set_apic_id(unsigned int id)

static int x2apic_phys_pkg_id(int initial_apicid, int index_msb)
{
return current_cpu_data.initial_apicid >> index_msb;
return initial_apicid >> index_msb;
}

static void x2apic_send_IPI_self(int vector)
@@ -261,7 +261,7 @@ struct apic apic_x2apic_uv_x = {
.apic_id_registered = uv_apic_id_registered,

.irq_delivery_mode = dest_Fixed,
.irq_dest_mode = 1, /* logical */
.irq_dest_mode = 0, /* physical */

.target_cpus = uv_target_cpus,
.disable_esr = 0,

@@ -362,12 +362,6 @@ static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
BUG();
}

static __init void map_low_mmrs(void)
{
init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE);
init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE);
}

enum map_type {map_wb, map_uc};

static __init void map_high(char *id, unsigned long base, int shift,

@@ -395,26 +389,6 @@ static __init void map_gru_high(int max_pnode)
map_high("GRU", gru.s.base, shift, max_pnode, map_wb);
}

static __init void map_config_high(int max_pnode)
{
union uvh_rh_gam_cfg_overlay_config_mmr_u cfg;
int shift = UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR_BASE_SHFT;

cfg.v = uv_read_local_mmr(UVH_RH_GAM_CFG_OVERLAY_CONFIG_MMR);
if (cfg.s.enable)
map_high("CONFIG", cfg.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmr_high(int max_pnode)
{
union uvh_rh_gam_mmr_overlay_config_mmr_u mmr;
int shift = UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR_BASE_SHFT;

mmr.v = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR);
if (mmr.s.enable)
map_high("MMR", mmr.s.base, shift, max_pnode, map_uc);
}

static __init void map_mmioh_high(int max_pnode)
{
union uvh_rh_gam_mmioh_overlay_config_mmr_u mmioh;

@@ -566,8 +540,6 @@ void __init uv_system_init(void)
unsigned long mmr_base, present, paddr;
unsigned short pnode_mask;

map_low_mmrs();

m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
m_val = m_n_config.s.m_skt;
n_val = m_n_config.s.n_skt;

@@ -591,6 +563,8 @@ void __init uv_system_init(void)
bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
uv_blade_info = kmalloc(bytes, GFP_KERNEL);
BUG_ON(!uv_blade_info);
for (blade = 0; blade < uv_num_possible_blades(); blade++)
uv_blade_info[blade].memory_nid = -1;

get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);

@@ -629,6 +603,9 @@ void __init uv_system_init(void)
lcpu = uv_blade_info[blade].nr_possible_cpus;
uv_blade_info[blade].nr_possible_cpus++;

/* Any node on the blade, else will contain -1. */
uv_blade_info[blade].memory_nid = nid;

uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base;
uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_size;
uv_cpu_hub_info(cpu)->m_val = m_val;

@@ -662,11 +639,10 @@ void __init uv_system_init(void)
pnode = (paddr >> m_val) & pnode_mask;
blade = boot_pnode_to_blade(pnode);
uv_node_to_blade[nid] = blade;
max_pnode = max(pnode, max_pnode);
}

map_gru_high(max_pnode);
map_mmr_high(max_pnode);
map_config_high(max_pnode);
map_mmioh_high(max_pnode);

uv_cpu_init();
@@ -811,7 +811,7 @@ static int apm_do_idle(void)
u8 ret = 0;
int idled = 0;
int polling;
int err;
int err = 0;

polling = !!(current_thread_info()->status & TS_POLLING);
if (polling) {
@@ -356,7 +356,7 @@ static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
#endif
#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
/* check CPU config space for extended APIC ID */
if (c->x86 >= 0xf) {
if (cpu_has_apic && c->x86 >= 0xf) {
unsigned int val;
val = read_pci_config(0, 24, 0, 0x68);
if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
@@ -1692,17 +1692,15 @@ static ssize_t set_trigger(struct sys_device *s, struct sysdev_attribute *attr,
const char *buf, size_t siz)
{
char *p;
int len;

strncpy(mce_helper, buf, sizeof(mce_helper));
mce_helper[sizeof(mce_helper)-1] = 0;
len = strlen(mce_helper);
p = strchr(mce_helper, '\n');

if (*p)
if (p)
*p = 0;

return len;
return strlen(mce_helper) + !!p;
}

static ssize_t set_ignore_ce(struct sys_device *s,
@@ -65,6 +65,52 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
.enabled = 1,
};

/*
* Not sure about some of these
*/
static const u64 p6_perfmon_event_map[] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0000,
[PERF_COUNT_HW_CACHE_MISSES] = 0x0000,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
[PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
};

static u64 p6_pmu_event_map(int event)
{
return p6_perfmon_event_map[event];
}

/*
* Counter setting that is specified not to count anything.
* We use this to effectively disable a counter.
*
* L2_RQSTS with 0 MESI unit mask.
*/
#define P6_NOP_COUNTER 0x0000002EULL

static u64 p6_pmu_raw_event(u64 event)
{
#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
#define P6_EVNTSEL_INV_MASK 0x00800000ULL
#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL

#define P6_EVNTSEL_MASK \
(P6_EVNTSEL_EVENT_MASK | \
P6_EVNTSEL_UNIT_MASK | \
P6_EVNTSEL_EDGE_MASK | \
P6_EVNTSEL_INV_MASK | \
P6_EVNTSEL_COUNTER_MASK)

return event & P6_EVNTSEL_MASK;
}

/*
* Intel PerfMon v3. Used on Core2 and later.
*/

@@ -666,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
{
struct perf_counter_attr *attr = &counter->attr;
struct hw_perf_counter *hwc = &counter->hw;
u64 config;
int err;

if (!x86_pmu_initialized())
@@ -718,14 +765,40 @@ static int __hw_perf_counter_init(struct perf_counter *counter)

if (attr->config >= x86_pmu.max_events)
return -EINVAL;

/*
* The generic map:
*/
hwc->config |= x86_pmu.event_map(attr->config);
config = x86_pmu.event_map(attr->config);

if (config == 0)
return -ENOENT;

if (config == -1LL)
return -EINVAL;

hwc->config |= config;

return 0;
}

static void p6_pmu_disable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
u64 val;

if (!cpuc->enabled)
return;

cpuc->enabled = 0;
barrier();

/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

@@ -767,6 +840,23 @@ void hw_perf_disable(void)
return x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
unsigned long val;

if (cpuc->enabled)
return;

cpuc->enabled = 1;
barrier();

/* p6 only has one enable register */
rdmsrl(MSR_P6_EVNTSEL0, val);
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

@@ -784,13 +874,13 @@ static void amd_pmu_enable_all(void)
barrier();

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
struct perf_counter *counter = cpuc->counters[idx];
u64 val;

if (!test_bit(idx, cpuc->active_mask))
continue;
rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
continue;

val = counter->hw.config;
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
}

@@ -819,16 +909,13 @@ static inline void intel_pmu_ack_status(u64 ack)

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
int err;
err = checking_wrmsrl(hwc->config_base + idx,
(void)checking_wrmsrl(hwc->config_base + idx,
hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
int err;
err = checking_wrmsrl(hwc->config_base + idx,
hwc->config);
(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}

static inline void

@@ -836,13 +923,24 @@ intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
int idx = __idx - X86_PMC_IDX_FIXED;
u64 ctrl_val, mask;
int err;

mask = 0xfULL << (idx * 4);

rdmsrl(hwc->config_base, ctrl_val);
ctrl_val &= ~mask;
err = checking_wrmsrl(hwc->config_base, ctrl_val);
(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
u64 val = P6_NOP_COUNTER;

if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static inline void

@@ -943,6 +1041,19 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
u64 val;

val = hwc->config;
if (cpuc->enabled)
val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {

@@ -959,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)

if (cpuc->enabled)
x86_pmu_enable_counter(hwc, idx);
else
x86_pmu_disable_counter(hwc, idx);
}

static int
@@ -1176,6 +1285,49 @@ static void intel_pmu_reset(void)
local_irq_restore(flags);
}

static int p6_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_counters *cpuc;
struct perf_counter *counter;
struct hw_perf_counter *hwc;
int idx, handled = 0;
u64 val;

data.regs = regs;
data.addr = 0;

cpuc = &__get_cpu_var(cpu_hw_counters);

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))
continue;

counter = cpuc->counters[idx];
hwc = &counter->hw;

val = x86_perf_counter_update(counter, hwc, idx);
if (val & (1ULL << (x86_pmu.counter_bits - 1)))
continue;

/*
* counter overflow
*/
handled = 1;
data.period = counter->hw.last_period;

if (!x86_perf_counter_set_period(counter, hwc, idx))
continue;

if (perf_counter_overflow(counter, 1, &data))
p6_pmu_disable_counter(hwc, idx);
}

if (handled)
inc_irq_stat(apic_perf_irqs);

return handled;
}

/*
* This handler is triggered by the local APIC, so the APIC IRQ handling

@@ -1185,14 +1337,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
{
struct perf_sample_data data;
struct cpu_hw_counters *cpuc;
int bit, cpu, loops;
int bit, loops;
u64 ack, status;

data.regs = regs;
data.addr = 0;

cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_counters, cpu);
cpuc = &__get_cpu_var(cpu_hw_counters);

perf_disable();
status = intel_pmu_get_status();

@@ -1249,14 +1400,13 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
struct cpu_hw_counters *cpuc;
struct perf_counter *counter;
struct hw_perf_counter *hwc;
int cpu, idx, handled = 0;
int idx, handled = 0;
u64 val;

data.regs = regs;
data.addr = 0;

cpu = smp_processor_id();
cpuc = &per_cpu(cpu_hw_counters, cpu);
cpuc = &__get_cpu_var(cpu_hw_counters);

for (idx = 0; idx < x86_pmu.num_counters; idx++) {
if (!test_bit(idx, cpuc->active_mask))

@@ -1353,6 +1503,32 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
.priority = 1
};

static struct x86_pmu p6_pmu = {
.name = "p6",
.handle_irq = p6_pmu_handle_irq,
.disable_all = p6_pmu_disable_all,
.enable_all = p6_pmu_enable_all,
.enable = p6_pmu_enable_counter,
.disable = p6_pmu_disable_counter,
.eventsel = MSR_P6_EVNTSEL0,
.perfctr = MSR_P6_PERFCTR0,
.event_map = p6_pmu_event_map,
.raw_event = p6_pmu_raw_event,
.max_events = ARRAY_SIZE(p6_perfmon_event_map),
.max_period = (1ULL << 31) - 1,
.version = 0,
.num_counters = 2,
/*
* Counters have 40 bits implemented. However they are designed such
* that bits [32-39] are sign extensions of bit 31. As such the
* effective width of a counter for P6-like PMU is 32 bits only.
*
* See IA-32 Intel Architecture Software developer manual Vol 3B
*/
.counter_bits = 32,
.counter_mask = (1ULL << 32) - 1,
};

static struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,

@@ -1392,6 +1568,37 @@ static struct x86_pmu amd_pmu = {
.max_period = (1ULL << 47) - 1,
};

static int p6_pmu_init(void)
{
switch (boot_cpu_data.x86_model) {
case 1:
case 3: /* Pentium Pro */
case 5:
case 6: /* Pentium II */
case 7:
case 8:
case 11: /* Pentium III */
break;
case 9:
case 13:
/* Pentium M */
break;
default:
pr_cont("unsupported p6 CPU model %d ",
boot_cpu_data.x86_model);
return -ENODEV;
}

if (!cpu_has_apic) {
pr_info("no Local APIC, try rebooting with lapic");
return -ENODEV;
}

x86_pmu = p6_pmu;

return 0;
}

static int intel_pmu_init(void)
{
union cpuid10_edx edx;

@@ -1400,8 +1607,14 @@ static int intel_pmu_init(void)
unsigned int ebx;
int version;

if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
/* check for P6 processor family */
if (boot_cpu_data.x86 == 6) {
return p6_pmu_init();
} else {
return -ENODEV;
}
}

/*
* Check whether the Architectural PerfMon supports
@@ -354,7 +354,7 @@ void __init efi_init(void)
*/
c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2);
if (c16) {
for (i = 0; i < sizeof(vendor) && *c16; ++i)
for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = *c16++;
vendor[i] = '\0';
} else

@@ -512,7 +512,7 @@ void __init efi_enter_virtual_mode(void)
&& end_pfn <= max_pfn_mapped))
va = __va(md->phys_addr);
else
va = efi_ioremap(md->phys_addr, size);
va = efi_ioremap(md->phys_addr, size, md->type);

md->virt_addr = (u64) (unsigned long) va;
@@ -98,10 +98,14 @@ void __init efi_call_phys_epilog(void)
early_runtime_code_mapping_set_exec(0);
}

void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size)
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type)
{
unsigned long last_map_pfn;

if (type == EFI_MEMORY_MAPPED_IO)
return ioremap(phys_addr, size);

last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size)
return NULL;
@@ -602,7 +602,11 @@ ignore_int:
#endif
iret

.section .cpuinit.data,"wa"
#ifndef CONFIG_HOTPLUG_CPU
__CPUINITDATA
#else
__REFDATA
#endif
.align 4
ENTRY(initial_code)
.long i386_start_kernel
@@ -187,7 +187,7 @@ static void __init apic_intr_init(void)
#ifdef CONFIG_X86_THERMAL_VECTOR
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_THRESHOLD
#ifdef CONFIG_X86_MCE_THRESHOLD
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
#if defined(CONFIG_X86_NEW_MCE) && defined(CONFIG_X86_LOCAL_APIC)
@@ -347,7 +347,7 @@ static irqreturn_t mfgpt_tick(int irq, void *dev_id)

static struct irqaction mfgptirq = {
.handler = mfgpt_tick,
.flags = IRQF_DISABLED | IRQF_NOBALANCING,
.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
.name = "mfgpt-timer"
};
@@ -3,6 +3,7 @@
#include <linux/init.h>
#include <linux/pm.h>
#include <linux/efi.h>
#include <linux/dmi.h>
#include <acpi/reboot.h>
#include <asm/io.h>
#include <asm/apic.h>

@@ -17,7 +18,6 @@
#include <asm/cpu.h>

#ifdef CONFIG_X86_32
# include <linux/dmi.h>
# include <linux/ctype.h>
# include <linux/mc146818rtc.h>
#else

@@ -249,6 +249,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-Z540N"),
},
},
{ /* Handle problems with rebooting on CompuLab SBC-FITPC2 */
.callback = set_bios_reboot,
.ident = "CompuLab SBC-FITPC2",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "CompuLab"),
DMI_MATCH(DMI_PRODUCT_NAME, "SBC-FITPC2"),
},
},
{ }
};

@@ -396,6 +404,46 @@ EXPORT_SYMBOL(machine_real_restart);

#endif /* CONFIG_X86_32 */

/*
* Some Apple MacBook and MacBookPro's needs reboot=p to be able to reboot
*/
static int __init set_pci_reboot(const struct dmi_system_id *d)
{
if (reboot_type != BOOT_CF9) {
reboot_type = BOOT_CF9;
printk(KERN_INFO "%s series board detected. "
"Selecting PCI-method for reboots.\n", d->ident);
}
return 0;
}

static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
{ /* Handle problems with rebooting on Apple MacBook5,2 */
.callback = set_pci_reboot,
.ident = "Apple MacBook",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
},
},
{ /* Handle problems with rebooting on Apple MacBookPro5,1 */
.callback = set_pci_reboot,
.ident = "Apple MacBookPro5,1",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"),
},
},
{ }
};

static int __init pci_reboot_init(void)
{
dmi_check_system(pci_reboot_dmi_table);
return 0;
}
core_initcall(pci_reboot_init);

static inline void kb_wait(void)
{
int i;
@@ -672,6 +672,19 @@ static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies"),
},
},
{
/*
* AMI BIOS with low memory corruption was found on Intel DG45ID board.
* It hase different DMI_BIOS_VENDOR = "Intel Corp.", for now we will
* match only DMI_BOARD_NAME and see if there is more bad products
* with this vendor.
*/
.callback = dmi_low_memory_corruption,
.ident = "AMI BIOS",
.matches = {
DMI_MATCH(DMI_BOARD_NAME, "DG45ID"),
},
},
#endif
{}
};
@@ -275,15 +275,20 @@ static unsigned long pit_calibrate_tsc(u32 latch, unsigned long ms, int loopmin)
* use the TSC value at the transitions to calculate a pretty
* good value for the TSC frequencty.
*/
static inline int pit_verify_msb(unsigned char val)
{
/* Ignore LSB */
inb(0x42);
return inb(0x42) == val;
}

static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
{
int count;
u64 tsc = 0;

for (count = 0; count < 50000; count++) {
/* Ignore LSB */
inb(0x42);
if (inb(0x42) != val)
if (!pit_verify_msb(val))
break;
tsc = get_cycles();
}

@@ -336,8 +341,7 @@ static unsigned long quick_pit_calibrate(void)
* to do that is to just read back the 16-bit counter
* once from the PIT.
*/
inb(0x42);
inb(0x42);
pit_verify_msb(0);

if (pit_expect_msb(0xff, &tsc, &d1)) {
for (i = 1; i <= MAX_QUICK_PIT_ITERATIONS; i++) {

@@ -348,8 +352,19 @@ static unsigned long quick_pit_calibrate(void)
* Iterate until the error is less than 500 ppm
*/
delta -= tsc;
if (d1+d2 < delta >> 11)
goto success;
if (d1+d2 >= delta >> 11)
continue;

/*
* Check the PIT one more time to verify that
* all TSC reads were stable wrt the PIT.
*
* This also guarantees serialization of the
* last cycle read ('d2') in pit_expect_msb.
*/
if (!pit_verify_msb(0xfe - i))
break;
goto success;
}
}
printk("Fast TSC calibration failed\n");
@@ -441,7 +441,7 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
ap.ds = __USER_DS;
ap.es = __USER_DS;
ap.fs = __KERNEL_PERCPU;
ap.gs = 0;
ap.gs = __KERNEL_STACK_CANARY;

ap.eflags = 0;
@@ -112,11 +112,6 @@ SECTIONS
_sdata = .;
DATA_DATA
CONSTRUCTORS

#ifdef CONFIG_X86_64
/* End of data section */
_edata = .;
#endif
} :data

#ifdef CONFIG_X86_32

@@ -156,10 +151,8 @@ SECTIONS
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
*(.data.read_mostly)

#ifdef CONFIG_X86_32
/* End of data section */
_edata = .;
#endif
}

#ifdef CONFIG_X86_64

@@ -400,8 +393,8 @@ SECTIONS

#ifdef CONFIG_X86_32
ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE")
. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");
#else
/*
* Per-cpu symbols which need to be offset from __per_cpu_load

@@ -414,12 +407,12 @@ INIT_PER_CPU(irq_stack_union);
/*
* Build-time check on the image size:
*/
ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE")
. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
"kernel image bigger than KERNEL_IMAGE_SIZE");

#ifdef CONFIG_SMP
ASSERT((per_cpu__irq_stack_union == 0),
"irq_stack_union is not at start of per-cpu area");
. = ASSERT((per_cpu__irq_stack_union == 0),
"irq_stack_union is not at start of per-cpu area");
#endif

#endif /* CONFIG_X86_32 */

@@ -427,7 +420,7 @@ ASSERT((per_cpu__irq_stack_union == 0),
#ifdef CONFIG_KEXEC
#include <asm/kexec.h>

ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
"kexec control code size is too big")
. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
"kexec control code size is too big");
#endif