Merge remote-tracking branch 'tip/perf/urgent' into perf/core

Merge reason: We are going to queue up a dependent patch:

"perf tools: Move parse event automated tests to separated object"

That depends on:

commit e7c72d8
perf tools: Add 'G' and 'H' modifiers to event parsing

Conflicts:
	tools/perf/builtin-stat.c

The conflict is with the recent 'perf_target' patches, in the code that
checks the result of the perf_evsel open routines to see whether a retry
is needed to cope with older kernels where the exclude guest/host
perf_event_attr bits were not used.
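
The fallback in question looks roughly like this (an illustrative,
self-contained sketch, not the actual builtin-stat.c code; the
open_counter_with_fallback() helper is made up):

  /*
   * Sketch only: open a hardware counter with the new exclude_guest
   * bit set (the 'H' modifier, i.e. host-only counting) and, when an
   * older kernel rejects the then-unknown attr bits with EINVAL,
   * clear them and retry once.
   */
  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <unistd.h>
  #include <string.h>
  #include <errno.h>

  static int open_counter_with_fallback(pid_t pid, int cpu)
  {
  	struct perf_event_attr attr;
  	int fd;

  	memset(&attr, 0, sizeof(attr));
  	attr.size = sizeof(attr);
  	attr.type = PERF_TYPE_HARDWARE;
  	attr.config = PERF_COUNT_HW_CPU_CYCLES;
  	attr.exclude_guest = 1;

  	fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
  	if (fd < 0 && errno == EINVAL) {
  		/* Older kernel: drop the guest/host bits and retry. */
  		attr.exclude_guest = 0;
  		attr.exclude_host = 0;
  		fd = syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
  	}
  	return fd;
  }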

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>

arch/x86/kernel/acpi/sleep.c

@@ -24,6 +24,10 @@ unsigned long acpi_realmode_flags;
 static char temp_stack[4096];
 #endif
 
+asmlinkage void acpi_enter_s3(void)
+{
+	acpi_enter_sleep_state(3, wake_sleep_flags);
+}
 /**
  * acpi_suspend_lowlevel - save kernel state
  *

arch/x86/kernel/acpi/sleep.h

@@ -3,12 +3,16 @@
  */
 
 #include <asm/trampoline.h>
+#include <linux/linkage.h>
 
 extern unsigned long saved_video_mode;
 extern long saved_magic;
 
 extern int wakeup_pmode_return;
 
+extern u8 wake_sleep_flags;
+extern asmlinkage void acpi_enter_s3(void);
+
 extern unsigned long acpi_copy_wakeup_routine(unsigned long);
 extern void wakeup_long64(void);

arch/x86/kernel/acpi/wakeup_32.S

@@ -74,9 +74,7 @@ restore_registers:
 ENTRY(do_suspend_lowlevel)
 	call	save_processor_state
 	call	save_registers
-	pushl	$3
-	call	acpi_enter_sleep_state
-	addl	$4, %esp
+	call	acpi_enter_s3
 
 # In case of S3 failure, we'll emerge here.  Jump
 #	to ret_point to recover

arch/x86/kernel/acpi/wakeup_64.S

@@ -71,9 +71,7 @@ ENTRY(do_suspend_lowlevel)
 	movq	%rsi, saved_rsi
 
 	addq	$8, %rsp
-	movl	$3, %edi
-	xorl	%eax, %eax
-	call	acpi_enter_sleep_state
+	call	acpi_enter_s3
 
 	/* in case something went wrong, restore the machine status and go on */
 	jmp	resume_point

arch/x86/kernel/apic/apic.c

@@ -1637,9 +1637,11 @@ static int __init apic_verify(void)
 		mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
 
 	/* The BIOS may have set up the APIC at some other address */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (l & MSR_IA32_APICBASE_ENABLE)
-		mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (l & MSR_IA32_APICBASE_ENABLE)
+			mp_lapic_addr = l & MSR_IA32_APICBASE_BASE;
+	}
 
 	pr_info("Found and enabled local APIC!\n");
 	return 0;
@@ -1657,13 +1659,15 @@ int __init apic_force_enable(unsigned long addr)
 	 * MSR. This can only be done in software for Intel P6 or later
 	 * and AMD K7 (Model > 1) or later.
 	 */
-	rdmsr(MSR_IA32_APICBASE, l, h);
-	if (!(l & MSR_IA32_APICBASE_ENABLE)) {
-		pr_info("Local APIC disabled by BIOS -- reenabling.\n");
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
-		enabled_via_apicbase = 1;
+	if (boot_cpu_data.x86 >= 6) {
+		rdmsr(MSR_IA32_APICBASE, l, h);
+		if (!(l & MSR_IA32_APICBASE_ENABLE)) {
+			pr_info("Local APIC disabled by BIOS -- reenabling.\n");
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+			enabled_via_apicbase = 1;
+		}
 	}
 	return apic_verify();
 }
@@ -2209,10 +2213,12 @@ static void lapic_resume(void)
 		 * FIXME! This will be wrong if we ever support suspend on
 		 * SMP! We'll need to do this as part of the CPU restore!
 		 */
-		rdmsr(MSR_IA32_APICBASE, l, h);
-		l &= ~MSR_IA32_APICBASE_BASE;
-		l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
-		wrmsr(MSR_IA32_APICBASE, l, h);
+		if (boot_cpu_data.x86 >= 6) {
+			rdmsr(MSR_IA32_APICBASE, l, h);
+			l &= ~MSR_IA32_APICBASE_BASE;
+			l |= MSR_IA32_APICBASE_ENABLE | mp_lapic_addr;
+			wrmsr(MSR_IA32_APICBASE, l, h);
+		}
 	}
 
 	maxlvt = lapic_get_maxlvt();

arch/x86/kernel/apic/apic_numachip.c

@@ -207,8 +207,11 @@ static void __init map_csrs(void)
 
 static void fixup_cpu_id(struct cpuinfo_x86 *c, int node)
 {
-	c->phys_proc_id = node;
-	per_cpu(cpu_llc_id, smp_processor_id()) = node;
+	if (c->phys_proc_id != node) {
+		c->phys_proc_id = node;
+		per_cpu(cpu_llc_id, smp_processor_id()) = node;
+	}
 }
 
 static int __init numachip_system_init(void)

arch/x86/kernel/apic/x2apic_phys.c

@@ -24,6 +24,12 @@ static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
 {
 	if (x2apic_phys)
 		return x2apic_enabled();
+	else if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
+		(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL) &&
+		x2apic_enabled()) {
+		printk(KERN_DEBUG "System requires x2apic physical mode\n");
+		return 1;
+	}
 	else
 		return 0;
 }

arch/x86/kernel/cpu/amd.c

@@ -26,7 +26,8 @@
  *	contact AMD for precise details and a CPU swap.
  *
  *	See	http://www.multimania.com/poulot/k6bug.html
- *		http://www.amd.com/K6/k6docs/revgd.html
+ *	and	section 2.6.2 of "AMD-K6 Processor Revision Guide - Model 6"
+ *		(Publication # 21266  Issue Date: August 1998)
  *
  *	The following test is erm.. interesting. AMD neglected to up
  *	the chip setting when fixing the bug but they also tweaked some
@@ -94,7 +95,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 			"system stability may be impaired when more than 32 MB are used.\n");
 		else
 			printk(KERN_CONT "probably OK (after B9730xxxx).\n");
-		printk(KERN_INFO "Please see http://membres.lycos.fr/poulot/k6bug.html\n");
 	}
 
 	/* K6 with old style WHCR */
@@ -353,10 +353,11 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	node = per_cpu(cpu_llc_id, cpu);
 
 	/*
-	 * If core numbers are inconsistent, it's likely a multi-fabric platform,
-	 * so invoke platform-specific handler
+	 * On multi-fabric platform (e.g. Numascale NumaChip) a
+	 * platform-specific handler needs to be called to fixup some
+	 * IDs of the CPU.
 	 */
-	if (c->phys_proc_id != node)
+	if (x86_cpuinit.fixup_cpu_id)
 		x86_cpuinit.fixup_cpu_id(c, node);
 
 	if (!node_online(node)) {

arch/x86/kernel/cpu/common.c

@@ -1162,15 +1162,6 @@ static void dbg_restore_debug_regs(void)
 #define dbg_restore_debug_regs()
 #endif /* ! CONFIG_KGDB */
 
-/*
- * Prints an error where the NUMA and configured core-number mismatch and the
- * platform didn't override this to fix it up
- */
-void __cpuinit x86_default_fixup_cpu_id(struct cpuinfo_x86 *c, int node)
-{
-	pr_err("NUMA core number %d differs from configured core number %d\n", node, c->phys_proc_id);
-}
-
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
  * initialized (naturally) in the bootstrap process, such as the GDT

arch/x86/kernel/cpu/intel_cacheinfo.c

@@ -433,14 +433,14 @@ int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu, unsigned slot,
 	/* check if @slot is already used or the index is already disabled */
 	ret = amd_get_l3_disable_slot(nb, slot);
 	if (ret >= 0)
-		return -EINVAL;
+		return -EEXIST;
 
 	if (index > nb->l3_cache.indices)
 		return -EINVAL;
 
 	/* check whether the other slot has disabled the same index already */
 	if (index == amd_get_l3_disable_slot(nb, !slot))
-		return -EINVAL;
+		return -EEXIST;
 
 	amd_l3_disable_index(nb, cpu, slot, index);
@@ -468,8 +468,8 @@ static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
 	err = amd_set_l3_disable_slot(this_leaf->base.nb, cpu, slot, val);
 	if (err) {
 		if (err == -EEXIST)
-			printk(KERN_WARNING "L3 disable slot %d in use!\n",
-			       slot);
+			pr_warning("L3 slot %d in use/index already disabled!\n",
+				   slot);
 		return err;
 	}
 	return count;

arch/x86/kernel/i387.c

@@ -235,6 +235,7 @@ int init_fpu(struct task_struct *tsk)
 	if (tsk_used_math(tsk)) {
 		if (HAVE_HWFP && tsk == current)
 			unlazy_fpu(tsk);
+		tsk->thread.fpu.last_cpu = ~0;
 		return 0;
 	}

arch/x86/kernel/microcode_amd.c

@@ -82,11 +82,6 @@ static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
 {
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
-		pr_warning("CPU%d: family %d not supported\n", cpu, c->x86);
-		return -1;
-	}
-
 	csig->rev = c->microcode;
 	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);
@@ -380,6 +375,13 @@ static struct microcode_ops microcode_amd_ops = {
 struct microcode_ops * __init init_amd_microcode(void)
 {
 	struct cpuinfo_x86 *c = &cpu_data(0);
+
+	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
+		pr_warning("AMD CPU family 0x%x not supported\n", c->x86);
+		return NULL;
+	}
+
 	patch = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!patch)
 		return NULL;

arch/x86/kernel/microcode_core.c

@@ -419,10 +419,8 @@ static int mc_device_add(struct device *dev, struct subsys_interface *sif)
 	if (err)
 		return err;
 
-	if (microcode_init_cpu(cpu) == UCODE_ERROR) {
-		sysfs_remove_group(&dev->kobj, &mc_attr_group);
+	if (microcode_init_cpu(cpu) == UCODE_ERROR)
 		return -EINVAL;
-	}
 
 	return err;
 }
@@ -528,11 +526,11 @@ static int __init microcode_init(void)
 		microcode_ops = init_intel_microcode();
 	else if (c->x86_vendor == X86_VENDOR_AMD)
 		microcode_ops = init_amd_microcode();
-
-	if (!microcode_ops) {
+	else
 		pr_err("no support for this CPU vendor\n");
+
+	if (!microcode_ops)
 		return -ENODEV;
-	}
 
 	microcode_pdev = platform_device_register_simple("microcode", -1,
 						  NULL, 0);

arch/x86/kernel/x86_init.c

@@ -93,7 +93,6 @@ struct x86_init_ops x86_init __initdata = {
 struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
 	.early_percpu_clock_init	= x86_init_noop,
 	.setup_percpu_clockev		= setup_secondary_APIC_clock,
-	.fixup_cpu_id			= x86_default_fixup_cpu_id,
 };
 
 static void default_nmi_init(void) { };