Merge branch 'linus' into x86/cpu, to resolve conflicts
Conflicts:
	tools/power/x86/turbostat/turbostat.c

Recent turbostat changes conflicted with a pending rename of x86 model
names in tip:x86/cpu, sort it out.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -38,6 +38,7 @@ REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -ffreestanding)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -fno-stack-protector)
+REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), -Wno-address-of-packed-member)
 REALMODE_CFLAGS += $(call __cc-option, $(CC), $(REALMODE_CFLAGS), $(cc_stack_align4))
 export REALMODE_CFLAGS
@@ -72,7 +72,7 @@ static unsigned long find_trampoline_placement(void)
 
 	/* Find the first usable memory region under bios_start. */
 	for (i = boot_params->e820_entries - 1; i >= 0; i--) {
-		unsigned long new;
+		unsigned long new = bios_start;
 
 		entry = &boot_params->e820_table[i];
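The fix above is a plain uninitialized-variable bug: if no e820 entry below bios_start qualifies, the loop body never assigns `new`, and the function returns stack garbage. A minimal user-space sketch of the same shape — the entry layout and all names here are invented for illustration, not kernel code:

#include <stdio.h>

/* Invented stand-in for a boot-time memory map entry. */
struct fake_e820_entry {
	unsigned long addr;
	int usable;
};

static unsigned long find_placement(const struct fake_e820_entry *tbl,
				    int n, unsigned long bios_start)
{
	unsigned long new = bios_start;	/* the fix: a sane default */
	int i;

	for (i = n - 1; i >= 0; i--) {
		if (!tbl[i].usable || tbl[i].addr >= bios_start)
			continue;	/* may reject every entry ... */
		new = tbl[i].addr;
		break;
	}
	return new;	/* ... so `new` must not be left uninitialized */
}

int main(void)
{
	/* A map with no usable entry under bios_start. */
	struct fake_e820_entry tbl[] = { { 0x100000, 0 } };

	printf("placement: %#lx\n", find_placement(tbl, 1, 0x9f000));
	return 0;
}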
@@ -661,10 +661,17 @@ fail:
 	throttle = perf_event_overflow(event, &data, &regs);
 out:
-	if (throttle)
+	if (throttle) {
 		perf_ibs_stop(event, 0);
-	else
-		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);
+	} else {
+		period >>= 4;
+
+		if ((ibs_caps & IBS_CAPS_RDWROPCNT) &&
+		    (*config & IBS_OP_CNT_CTL))
+			period |= *config & IBS_OP_CUR_CNT_RAND;
+
+		perf_ibs_enable_event(perf_ibs, hwc, period);
+	}
 
 	perf_event_update_userpage(event);
@@ -3572,6 +3572,11 @@ static u64 bdw_limit_period(struct perf_event *event, u64 left)
 	return left;
 }
 
+static u64 nhm_limit_period(struct perf_event *event, u64 left)
+{
+	return max(left, 32ULL);
+}
+
 PMU_FORMAT_ATTR(event,	"config:0-7"	);
 PMU_FORMAT_ATTR(umask,	"config:8-15"	);
 PMU_FORMAT_ATTR(edge,	"config:18"	);
@@ -4606,6 +4611,7 @@ __init int intel_pmu_init(void)
 		x86_pmu.pebs_constraints = intel_nehalem_pebs_event_constraints;
 		x86_pmu.enable_all = intel_pmu_nhm_enable_all;
 		x86_pmu.extra_regs = intel_nehalem_extra_regs;
+		x86_pmu.limit_period = nhm_limit_period;
 
 		mem_attr = nhm_mem_events_attrs;
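For context on the two hunks above: x86_pmu.limit_period is a per-CPU hook the perf core calls to adjust the remaining count before it is programmed into the counter; on Nehalem, periods under 32 get clamped. A stand-alone sketch of that contract (illustrative only, not kernel code):

#include <stdio.h>

typedef unsigned long long u64;

/* Mirrors nhm_limit_period(): never program a period below 32. */
static u64 nhm_limit_period(u64 left)
{
	return left > 32ULL ? left : 32ULL;	/* i.e. max(left, 32ULL) */
}

int main(void)
{
	printf("%llu %llu\n", nhm_limit_period(5), nhm_limit_period(1000));	/* 32 1000 */
	return 0;
}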
@@ -16,7 +16,6 @@
 #define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
 
 #ifndef __ASSEMBLY__
-extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
 extern void __fentry__(void);
@@ -252,16 +252,20 @@ struct pebs_lbr {
 #define IBSCTL_LVT_OFFSET_VALID	(1ULL<<8)
 #define IBSCTL_LVT_OFFSET_MASK	0x0F
 
-/* ibs fetch bits/masks */
+/* IBS fetch bits/masks */
 #define IBS_FETCH_RAND_EN	(1ULL<<57)
 #define IBS_FETCH_VAL		(1ULL<<49)
 #define IBS_FETCH_ENABLE	(1ULL<<48)
 #define IBS_FETCH_CNT		0xFFFF0000ULL
 #define IBS_FETCH_MAX_CNT	0x0000FFFFULL
 
-/* ibs op bits/masks */
-/* lower 4 bits of the current count are ignored: */
-#define IBS_OP_CUR_CNT		(0xFFFF0ULL<<32)
+/*
+ * IBS op bits/masks
+ * The lower 7 bits of the current count are random bits
+ * preloaded by hardware and ignored in software
+ */
+#define IBS_OP_CUR_CNT		(0xFFF80ULL<<32)
+#define IBS_OP_CUR_CNT_RAND	(0x0007FULL<<32)
 #define IBS_OP_CNT_CTL		(1ULL<<19)
 #define IBS_OP_VAL		(1ULL<<18)
 #define IBS_OP_ENABLE		(1ULL<<17)
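These mask changes pair with the ibs.c hunk earlier: instead of zeroing the whole CurCnt field, the kernel now keeps the seven hardware-randomized low bits when re-arming the counter. A stand-alone rendering of that arithmetic using the mask from this header — the config value is made up; this is an editorial illustration, not kernel code:

#include <stdio.h>

#define IBS_OP_CUR_CNT_RAND	(0x0007FULL << 32)

int main(void)
{
	unsigned long long config = 0x0000002500000000ULL;	/* fake MSR image */
	unsigned long long period = 0x10000ULL;			/* requested period */

	period >>= 4;				/* the MaxCnt field holds period / 16 */
	period |= config & IBS_OP_CUR_CNT_RAND;	/* keep the randomized CurCnt bits */

	printf("programmed value: %#llx\n", period);
	return 0;
}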
@@ -1179,6 +1179,10 @@ void clear_local_APIC(void)
 	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
 	v = apic_read(APIC_LVT1);
 	apic_write(APIC_LVT1, v | APIC_LVT_MASKED);
+	if (!x2apic_enabled()) {
+		v = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
+		apic_write(APIC_LDR, v);
+	}
 	if (maxlvt >= 4) {
 		v = apic_read(APIC_LVTPC);
 		apic_write(APIC_LVTPC, v | APIC_LVT_MASKED);
@@ -38,32 +38,12 @@ static int bigsmp_early_logical_apicid(int cpu)
 	return early_per_cpu(x86_cpu_to_apicid, cpu);
 }
 
-static inline unsigned long calculate_ldr(int cpu)
-{
-	unsigned long val, id;
-
-	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
-	id = per_cpu(x86_bios_cpu_apicid, cpu);
-	val |= SET_APIC_LOGICAL_ID(id);
-
-	return val;
-}
-
 /*
- * Set up the logical destination ID.
- *
- * Intel recommends to set DFR, LDR and TPR before enabling
- * an APIC.  See e.g. "AP-388 82489DX User's Manual" (Intel
- * document number 292116).  So here it goes...
+ * bigsmp enables physical destination mode
+ * and doesn't use LDR and DFR
 */
 static void bigsmp_init_apic_ldr(void)
 {
-	unsigned long val;
-	int cpu = smp_processor_id();
-
-	apic_write(APIC_DFR, APIC_DFR_FLAT);
-	val = calculate_ldr(cpu);
-	apic_write(APIC_LDR, val);
 }
 
 static void bigsmp_setup_apic_routing(void)
@@ -2438,7 +2438,13 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
 	 * dmar_alloc_hwirq() may be called before setup_IO_APIC(), so use
 	 * gsi_top if ioapic_dynirq_base hasn't been initialized yet.
 	 */
-	return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
+	if (!ioapic_initialized)
+		return gsi_top;
+	/*
+	 * For DT enabled machines ioapic_dynirq_base is irrelevant and not
+	 * updated. So simply return @from if ioapic_dynirq_base == 0.
+	 */
+	return ioapic_dynirq_base ? : from;
 }
 
 #ifdef CONFIG_X86_32
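A side note on the new return statement: `ioapic_dynirq_base ? : from` is GNU C's binary conditional, where `a ?: b` yields `a` if it is non-zero (evaluating it only once) and `b` otherwise. A compilable demonstration of the semantics (gcc/clang extension; names invented for the sketch):

#include <stdio.h>

static unsigned int lower_bound(unsigned int dynirq_base, unsigned int from)
{
	return dynirq_base ?: from;	/* same as: dynirq_base ? dynirq_base : from */
}

int main(void)
{
	/* base == 0 (DT machine): fall back to `from`; otherwise use base. */
	printf("%u %u\n", lower_bound(0, 24), lower_bound(48, 24));	/* 24 48 */
	return 0;
}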
@@ -508,9 +508,12 @@ struct uprobe_xol_ops {
 	void	(*abort)(struct arch_uprobe *, struct pt_regs *);
 };
 
-static inline int sizeof_long(void)
+static inline int sizeof_long(struct pt_regs *regs)
 {
-	return in_ia32_syscall() ? 4 : 8;
+	/*
+	 * Check registers for mode as in_xxx_syscall() does not apply here.
+	 */
+	return user_64bit_mode(regs) ? 8 : 4;
 }
 
 static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
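The sizeof_long() change replaces a current-task test with a test of the interrupted context's own registers: a uprobe may fire outside any syscall, where in_ia32_syscall() gives no meaningful answer, while the saved CS selector always reflects the mode the CPU was in. A toy model of the distinction — the selector values are assumptions for the sketch (0x33/0x23 are the usual x86_64 user code segments), not the kernel's constants:

#include <stdio.h>

struct fake_pt_regs {
	unsigned long cs;	/* saved user code-segment selector */
};

#define FAKE_USER_CS64	0x33	/* assumed 64-bit user CS */

static int fake_user_64bit_mode(const struct fake_pt_regs *regs)
{
	return regs->cs == FAKE_USER_CS64;
}

static int sizeof_long(const struct fake_pt_regs *regs)
{
	return fake_user_64bit_mode(regs) ? 8 : 4;
}

int main(void)
{
	struct fake_pt_regs r64 = { .cs = 0x33 }, r32 = { .cs = 0x23 };

	printf("%d %d\n", sizeof_long(&r64), sizeof_long(&r32));	/* 8 4 */
	return 0;
}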
@@ -521,9 +524,9 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 
 static int emulate_push_stack(struct pt_regs *regs, unsigned long val)
 {
-	unsigned long new_sp = regs->sp - sizeof_long();
+	unsigned long new_sp = regs->sp - sizeof_long(regs);
 
-	if (copy_to_user((void __user *)new_sp, &val, sizeof_long()))
+	if (copy_to_user((void __user *)new_sp, &val, sizeof_long(regs)))
 		return -EFAULT;
 
 	regs->sp = new_sp;
@@ -556,7 +559,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs
 		long correction = utask->vaddr - utask->xol_vaddr;
 		regs->ip += correction;
 	} else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
-		regs->sp += sizeof_long(); /* Pop incorrect return address */
+		regs->sp += sizeof_long(regs); /* Pop incorrect return address */
 		if (emulate_push_stack(regs, utask->vaddr + auprobe->defparam.ilen))
 			return -ERESTART;
 	}
@@ -675,7 +678,7 @@ static int branch_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
 	 * "call" insn was executed out-of-line. Just restore ->sp and restart.
 	 * We could also restore ->ip and try to call branch_emulate_op() again.
 	 */
-	regs->sp += sizeof_long();
+	regs->sp += sizeof_long(regs);
 	return -ERESTART;
 }
 
@@ -1056,7 +1059,7 @@ bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
 unsigned long
 arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs)
 {
-	int rasize = sizeof_long(), nleft;
+	int rasize = sizeof_long(regs), nleft;
 	unsigned long orig_ret_vaddr = 0; /* clear high bits for 32-bit apps */
 
 	if (copy_from_user(&orig_ret_vaddr, (void __user *)regs->sp, rasize))
@@ -1781,7 +1781,7 @@ int kvm_vm_ioctl_hv_eventfd(struct kvm *kvm, struct kvm_hyperv_eventfd *args)
 int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 				struct kvm_cpuid_entry2 __user *entries)
 {
-	uint16_t evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+	uint16_t evmcs_ver = 0;
 	struct kvm_cpuid_entry2 cpuid_entries[] = {
 		{ .function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS },
 		{ .function = HYPERV_CPUID_INTERFACE },
@@ -1793,6 +1793,9 @@ int kvm_vcpu_ioctl_get_hv_cpuid(struct kvm_vcpu *vcpu, struct kvm_cpuid2 *cpuid,
 	};
 	int i, nent = ARRAY_SIZE(cpuid_entries);
 
+	if (kvm_x86_ops->nested_get_evmcs_version)
+		evmcs_ver = kvm_x86_ops->nested_get_evmcs_version(vcpu);
+
 	/* Skip NESTED_FEATURES if eVMCS is not supported */
 	if (!evmcs_ver)
 		--nent;
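The KVM hunks in this merge replace an always-present stub callback with a NULL entry plus a call-site check, so "not implemented" is expressed by the ops table itself. The general pattern, sketched with invented types (not KVM's actual structures):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct fake_ops {
	uint16_t (*get_version)(void);	/* optional: may be NULL */
};

/* Caller-side guard replacing the old "stub that returns 0". */
static uint16_t version_or_zero(const struct fake_ops *ops)
{
	uint16_t ver = 0;

	if (ops->get_version)
		ver = ops->get_version();
	return ver;
}

static uint16_t vmx_version(void) { return 1; }

int main(void)
{
	struct fake_ops svm = { .get_version = NULL };		/* unsupported */
	struct fake_ops vmx = { .get_version = vmx_version };

	printf("%u %u\n", version_or_zero(&svm), version_or_zero(&vmx));	/* 0 1 */
	return 0;
}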
@@ -7128,12 +7128,6 @@ failed:
 	return ret;
 }
 
-static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
-{
-	/* Not supported */
-	return 0;
-}
-
 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			       uint16_t *vmcs_version)
 {
@@ -7332,7 +7326,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.mem_enc_unreg_region = svm_unregister_enc_region,
 
 	.nested_enable_evmcs = nested_enable_evmcs,
-	.nested_get_evmcs_version = nested_get_evmcs_version,
+	.nested_get_evmcs_version = NULL,
 
 	.need_emulation_on_page_fault = svm_need_emulation_on_page_fault,
 };
@@ -7797,6 +7797,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.set_nested_state = NULL,
 	.get_vmcs12_pages = NULL,
 	.nested_enable_evmcs = NULL,
+	.nested_get_evmcs_version = NULL,
 	.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
 };
 
@@ -6594,12 +6594,13 @@ restart:
 		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
-		kvm_rip_write(vcpu, ctxt->eip);
-		if (r == EMULATE_DONE && ctxt->tf)
-			kvm_vcpu_do_singlestep(vcpu, &r);
 		if (!ctxt->have_exception ||
-		    exception_type(ctxt->exception.vector) == EXCPT_TRAP)
+		    exception_type(ctxt->exception.vector) == EXCPT_TRAP) {
+			kvm_rip_write(vcpu, ctxt->eip);
+			if (r == EMULATE_DONE && ctxt->tf)
+				kvm_vcpu_do_singlestep(vcpu, &r);
 			__kvm_set_rflags(vcpu, ctxt->eflags);
+		}
 
 		/*
 		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
@@ -516,7 +516,7 @@ static inline void check_conflict(int warnlvl, pgprot_t prot, pgprotval_t val,
  */
 static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
 					  unsigned long pfn, unsigned long npg,
-					  int warnlvl)
+					  unsigned long lpsize, int warnlvl)
 {
 	pgprotval_t forbidden, res;
 	unsigned long end;
@@ -535,9 +535,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
 	check_conflict(warnlvl, prot, res, start, end, pfn, "Text NX");
 	forbidden = res;
 
-	res = protect_kernel_text_ro(start, end);
-	check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
-	forbidden |= res;
+	/*
+	 * Special case to preserve a large page. If the change spawns the
+	 * full large page mapping then there is no point to split it
+	 * up. Happens with ftrace and is going to be removed once ftrace
+	 * switched to text_poke().
+	 */
+	if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) {
+		res = protect_kernel_text_ro(start, end);
+		check_conflict(warnlvl, prot, res, start, end, pfn, "Text RO");
+		forbidden |= res;
+	}
 
 	/* Check the PFN directly */
 	res = protect_pci_bios(pfn, pfn + npg - 1);
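The condition added above skips the RO clamp only when the change covers exactly one aligned large page, so such a mapping is not needlessly split. A sketch of the predicate with stand-in constants (illustrative only, not kernel code):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(2UL << 20)	/* 2M large page */

/* Inverse of: lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1)) */
static int preserves_large_page(unsigned long start, unsigned long npg,
				unsigned long lpsize)
{
	return lpsize == npg * PAGE_SIZE && !(start & (lpsize - 1));
}

int main(void)
{
	/* Full, aligned 2M range: preserved (1). */
	printf("%d\n", preserves_large_page(0x200000, 512, PMD_SIZE));
	/* Partial range: falls through to the RO check (0). */
	printf("%d\n", preserves_large_page(0x200000, 16, PMD_SIZE));
	return 0;
}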
@@ -819,7 +827,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	 * extra conditional required here.
 	 */
 	chk_prot = static_protections(old_prot, lpaddr, old_pfn, numpages,
-				      CPA_CONFLICT);
+				      psize, CPA_CONFLICT);
 
 	if (WARN_ON_ONCE(pgprot_val(chk_prot) != pgprot_val(old_prot))) {
 		/*
@@ -855,7 +863,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	 * protection requirement in the large page.
 	 */
 	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
-				      CPA_DETECT);
+				      psize, CPA_DETECT);
 
 	/*
 	 * If there is a conflict, split the large page.
@@ -906,7 +914,8 @@ static void split_set_pte(struct cpa_data *cpa, pte_t *pte, unsigned long pfn,
 	if (!cpa->force_static_prot)
 		goto set;
 
-	prot = static_protections(ref_prot, address, pfn, npg, CPA_PROTECT);
+	/* Hand in lpsize = 0 to enforce the protection mechanism */
+	prot = static_protections(ref_prot, address, pfn, npg, 0, CPA_PROTECT);
 
 	if (pgprot_val(prot) == pgprot_val(ref_prot))
 		goto set;
@@ -1503,7 +1512,8 @@ repeat:
 	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
 
 	cpa_inc_4k_install();
-	new_prot = static_protections(new_prot, address, pfn, 1,
+	/* Hand in lpsize = 0 to enforce the protection mechanism */
+	new_prot = static_protections(new_prot, address, pfn, 1, 0,
 				      CPA_PROTECT);
 
 	new_prot = pgprot_clear_protnone_bits(new_prot);