Backmerge tag 'v4.9-rc8' into drm-next

Linux 4.9-rc8

Daniel requested this so we could apply some follow-on fixes cleanly to -next.
This commit is contained in:
Dave Airlie
2016-12-05 17:11:48 +10:00
Commit f03ee46be9
698 changed files with 7648 additions and 3618 deletions

View File

@@ -40,8 +40,8 @@ GCOV_PROFILE := n
UBSAN_SANITIZE :=n
LDFLAGS := -m elf_$(UTS_MACHINE)
ifeq ($(CONFIG_RELOCATABLE),y)
# If kernel is relocatable, build compressed kernel as PIE.
# Compressed kernel should be built as PIE since it may be loaded at any
# address by the bootloader.
ifeq ($(CONFIG_X86_32),y)
LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
else
@@ -51,7 +51,6 @@ else
LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
endif
endif
LDFLAGS_vmlinux := -T
hostprogs-y := mkpiggy

View File

@@ -87,6 +87,12 @@ int validate_cpu(void)
return -1;
}
if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
!has_eflag(X86_EFLAGS_ID)) {
printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
return -1;
}
if (err_flags) {
puts("This kernel requires the following features "
"not present on the CPU:\n");

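For context, has_eflag(X86_EFLAGS_ID) probes whether the CPU implements CPUID at all: only CPUID-capable parts allow bit 21 of EFLAGS to be toggled. A minimal sketch of that probe, modelled on the kernel's helper in arch/x86/boot/cpuflags.c (the function name here is illustrative):

/* Returns true if the EFLAGS.ID bit (bit 21) can be flipped,
 * i.e. the CPU supports the CPUID instruction. */
static bool cpu_can_toggle_eflags_id(void)
{
	unsigned long f0, f1, mask = 1UL << 21;	/* X86_EFLAGS_ID */

	asm volatile("pushf\n\t"
		     "pushf\n\t"
		     "pop %0\n\t"		/* f0 = current EFLAGS */
		     "mov %0,%1\n\t"
		     "xor %2,%1\n\t"		/* flip the ID bit */
		     "push %1\n\t"
		     "popf\n\t"			/* try to install it */
		     "pushf\n\t"
		     "pop %1\n\t"		/* f1 = what actually stuck */
		     "popf"			/* restore original EFLAGS */
		     : "=&r" (f0), "=&r" (f1)
		     : "ri" (mask));

	return !!((f0 ^ f1) & mask);
}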
View File

@@ -662,7 +662,13 @@ static int __init amd_core_pmu_init(void)
pr_cont("Fam15h ");
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
break;
case 0x17:
pr_cont("Fam17h ");
/*
* In family 17h, there are no event constraints in the PMC hardware.
* We fall back to using the default amd_get_event_constraints.
*/
break;
default:
pr_err("core perfctr but no constraints; unknown hardware!\n");
return -ENODEV;

View File

@@ -2352,7 +2352,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
frame.next_frame = 0;
frame.return_address = 0;
if (!access_ok(VERIFY_READ, fp, 8))
if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
@@ -2362,9 +2362,6 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *ent
if (bytes != 0)
break;
if (!valid_user_frame(fp, sizeof(frame)))
break;
perf_callchain_store(entry, cs_base + frame.return_address);
fp = compat_ptr(ss_base + frame.next_frame);
}
@@ -2413,7 +2410,7 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
frame.next_frame = NULL;
frame.return_address = 0;
if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
@@ -2423,9 +2420,6 @@ perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs
if (bytes != 0)
break;
if (!valid_user_frame(fp, sizeof(frame)))
break;
perf_callchain_store(entry, frame.return_address);
fp = (void __user *)frame.next_frame;
}
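For reference, the valid_user_frame() checks that replace access_ok() above boil down to a range test against TASK_SIZE; the helper is defined elsewhere in arch/x86/events/core.c roughly as:

static inline int valid_user_frame(const void __user *fp, unsigned long size)
{
	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
}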

View File

@@ -1108,20 +1108,20 @@ static void setup_pebs_sample_data(struct perf_event *event,
}
/*
* We use the interrupt regs as a base because the PEBS record
* does not contain a full regs set, specifically it seems to
* lack segment descriptors, which get used by things like
* user_mode().
* We use the interrupt regs as a base because the PEBS record does not
* contain a full regs set, specifically it seems to lack segment
* descriptors, which get used by things like user_mode().
*
* In the simple case fix up only the IP and BP,SP regs, for
* PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
* A possible PERF_SAMPLE_REGS will have to transfer all regs.
* In the simple case fix up only the IP for PERF_SAMPLE_IP.
*
* We must however always use BP,SP from iregs for the unwinder to stay
* sane; the record BP,SP can point into thin air when the record is
* from a previous PMI context or an (I)RET happened between the record
* and PMI.
*/
*regs = *iregs;
regs->flags = pebs->flags;
set_linear_ip(regs, pebs->ip);
regs->bp = pebs->bp;
regs->sp = pebs->sp;
if (sample_type & PERF_SAMPLE_REGS_INTR) {
regs->ax = pebs->ax;
@@ -1130,10 +1130,21 @@ static void setup_pebs_sample_data(struct perf_event *event,
regs->dx = pebs->dx;
regs->si = pebs->si;
regs->di = pebs->di;
regs->bp = pebs->bp;
regs->sp = pebs->sp;
regs->flags = pebs->flags;
/*
* Per the above; only set BP,SP if we don't need callchains.
*
* XXX: does this make sense?
*/
if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
regs->bp = pebs->bp;
regs->sp = pebs->sp;
}
/*
* Preserve PERF_EFLAGS_VM from set_linear_ip().
*/
regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;
regs->r9 = pebs->r9;

View File

@@ -319,9 +319,9 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
*/
static int uncore_pmu_event_init(struct perf_event *event);
static bool is_uncore_event(struct perf_event *event)
static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
return event->pmu->event_init == uncore_pmu_event_init;
return &box->pmu->pmu == event->pmu;
}
static int
@@ -340,7 +340,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
n = box->n_events;
if (is_uncore_event(leader)) {
if (is_box_event(box, leader)) {
box->event_list[n] = leader;
n++;
}
@@ -349,7 +349,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
if (!is_uncore_event(event) ||
if (!is_box_event(box, event) ||
event->state <= PERF_EVENT_STATE_OFF)
continue;

View File

@@ -8,8 +8,12 @@
#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
#define PCI_DEVICE_ID_INTEL_BDW_IMC 0x1604
#define PCI_DEVICE_ID_INTEL_SKL_IMC 0x191f
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x190c
#define PCI_DEVICE_ID_INTEL_SKL_U_IMC 0x1904
#define PCI_DEVICE_ID_INTEL_SKL_Y_IMC 0x190c
#define PCI_DEVICE_ID_INTEL_SKL_HD_IMC 0x1900
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
@@ -486,24 +490,12 @@ static int snb_uncore_imc_event_add(struct perf_event *event, int flags)
snb_uncore_imc_event_start(event, 0);
box->n_events++;
return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
struct intel_uncore_box *box = uncore_event_to_box(event);
int i;
snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
for (i = 0; i < box->n_events; i++) {
if (event == box->event_list[i]) {
--box->n_events;
break;
}
}
}
int snb_pci2phy_map_init(int devid)
@@ -616,13 +608,29 @@ static const struct pci_device_id bdw_uncore_pci_ids[] = {
static const struct pci_device_id skl_uncore_pci_ids[] = {
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_IMC),
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_Y_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_U_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HD_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_HQ_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SD_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* IMC */
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
{ /* end: all zeroes */ },
};
@@ -666,8 +674,12 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver), /* 5th Gen Core U */
IMC_DEV(SKL_IMC, &skl_uncore_pci_driver), /* 6th Gen Core */
IMC_DEV(SKL_Y_IMC, &skl_uncore_pci_driver), /* 6th Gen Core Y */
IMC_DEV(SKL_U_IMC, &skl_uncore_pci_driver), /* 6th Gen Core U */
IMC_DEV(SKL_HD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Dual Core */
IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
{ /* end marker */ }
};

View File

@@ -113,7 +113,7 @@ struct debug_store {
* Per register state.
*/
struct er_account {
raw_spinlock_t lock; /* per-core: protect structure */
raw_spinlock_t lock; /* per-core: protect structure */
u64 config; /* extra MSR config */
u64 reg; /* extra MSR number */
atomic_t ref; /* reference count */

View File

@@ -17,6 +17,7 @@
extern int intel_mid_pci_init(void);
extern int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state);
extern pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev);
extern void intel_mid_pwr_power_off(void);

View File

@@ -347,7 +347,6 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
#ifdef CONFIG_SMP
unsigned bits;
int cpu = smp_processor_id();
unsigned int socket_id, core_complex_id;
bits = c->x86_coreid_bits;
/* Low order bits define the core id (index of core in socket) */
@@ -365,10 +364,7 @@ static void amd_detect_cmp(struct cpuinfo_x86 *c)
if (c->x86 != 0x17 || !cpuid_edx(0x80000006))
return;
socket_id = (c->apicid >> bits) - 1;
core_complex_id = (c->apicid & ((1 << bits) - 1)) >> 3;
per_cpu(cpu_llc_id, cpu) = (socket_id << 3) | core_complex_id;
per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
#endif
}
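The arithmetic behind the one-liner, as a worked example with a hypothetical APIC id: on Fam17h the L3 is per CCX, and a CCX holds up to four cores with two threads each, so APIC ids within one CCX differ only in their low three bits:

/* Hypothetical: APIC ids 24..31 (0x18..0x1f) all share one L3 */
unsigned int apicid = 0x1a;
unsigned int llc_id = apicid >> 3;	/* = 3 */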

View File

@@ -978,6 +978,35 @@ static void x86_init_cache_qos(struct cpuinfo_x86 *c)
}
}
/*
* The physical to logical package id mapping is initialized from the
* acpi/mptables information. Make sure that CPUID actually agrees with
* that.
*/
static void sanitize_package_id(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
unsigned int pkg, apicid, cpu = smp_processor_id();
apicid = apic->cpu_present_to_apicid(cpu);
pkg = apicid >> boot_cpu_data.x86_coreid_bits;
if (apicid != c->initial_apicid) {
pr_err(FW_BUG "CPU%u: APIC id mismatch. Firmware: %x CPUID: %x\n",
cpu, apicid, c->initial_apicid);
c->initial_apicid = apicid;
}
if (pkg != c->phys_proc_id) {
pr_err(FW_BUG "CPU%u: Using firmware package id %u instead of %u\n",
cpu, pkg, c->phys_proc_id);
c->phys_proc_id = pkg;
}
c->logical_proc_id = topology_phys_to_logical_pkg(pkg);
#else
c->logical_proc_id = 0;
#endif
}
/*
* This does the hard work of actually picking apart the CPU stuff...
*/
@@ -1103,8 +1132,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
#ifdef CONFIG_NUMA
numa_add_cpu(smp_processor_id());
#endif
/* The boot/hotplug time assignment got cleared, restore it */
c->logical_proc_id = topology_phys_to_logical_pkg(c->phys_proc_id);
sanitize_package_id(c);
}
/*

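As a worked example of the CPUID-derived package id (hypothetical numbers): with x86_coreid_bits == 4, the low nibble of the APIC id selects the core and the remaining bits the package:

/* Hypothetical: apicid 0x23 = core 3 in package 2 */
unsigned int apicid = 0x23;
unsigned int pkg = apicid >> 4;	/* = 2, checked against phys_proc_id */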
View File

@@ -112,7 +112,7 @@ void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
for (; stack < stack_info.end; stack++) {
unsigned long real_addr;
int reliable = 0;
unsigned long addr = *stack;
unsigned long addr = READ_ONCE_NOCHECK(*stack);
unsigned long *ret_addr_p =
unwind_get_return_address_ptr(&state);

View File

@@ -521,14 +521,14 @@ void fpu__clear(struct fpu *fpu)
{
WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
/* FPU state will be reallocated lazily at the first use. */
fpu__drop(fpu);
} else {
if (!fpu->fpstate_active) {
fpu__activate_curr(fpu);
user_fpu_begin();
}
fpu__drop(fpu);
/*
* Make sure fpstate is cleared and initialized.
*/
if (static_cpu_has(X86_FEATURE_FPU)) {
fpu__activate_curr(fpu);
user_fpu_begin();
copy_init_fpstate_to_fpregs();
}
}

View File

@@ -665,14 +665,17 @@ __PAGE_ALIGNED_BSS
initial_pg_pmd:
.fill 1024*KPMDS,4,0
#else
ENTRY(initial_page_table)
.globl initial_page_table
initial_page_table:
.fill 1024,4,0
#endif
initial_pg_fixmap:
.fill 1024,4,0
ENTRY(empty_zero_page)
.globl empty_zero_page
empty_zero_page:
.fill 4096,1,0
ENTRY(swapper_pg_dir)
.globl swapper_pg_dir
swapper_pg_dir:
.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)

View File

@@ -66,13 +66,36 @@ __init int create_simplefb(const struct screen_info *si,
{
struct platform_device *pd;
struct resource res;
unsigned long len;
u64 base, size;
u32 length;
/* don't use lfb_size as it may contain the whole VMEM instead of only
* the part that is occupied by the framebuffer */
len = mode->height * mode->stride;
len = PAGE_ALIGN(len);
if (len > (u64)si->lfb_size << 16) {
/*
* If the 64BIT_BASE capability is set, ext_lfb_base will contain the
* upper half of the base address. Assemble the address, then make sure
* it is valid and we can actually access it.
*/
base = si->lfb_base;
if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
base |= (u64)si->ext_lfb_base << 32;
if (!base || (u64)(resource_size_t)base != base) {
printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
return -EINVAL;
}
/*
* Don't use lfb_size as IORESOURCE size, since it may contain the
* entire VMEM, and thus require huge mappings. Use just the part we
* need, that is, the part where the framebuffer is located. But verify
* that it does not exceed the advertised VMEM.
* Note that in case of VBE, the lfb_size is shifted by 16 bits for
* historical reasons.
*/
size = si->lfb_size;
if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
size <<= 16;
length = mode->height * mode->stride;
length = PAGE_ALIGN(length);
if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
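A worked example of the VBE quirk noted above (hypothetical mode): for VESA linear framebuffers (VIDEO_TYPE_VLFB), lfb_size is reported in 64 KiB units, hence the 16-bit shift:

/* Hypothetical: 1024x768 at 4 bytes/px, stride padded to 4096 bytes */
u64 size = 0x80;			/* si->lfb_size in 64 KiB units */
size <<= 16;				/* = 8 MiB of advertised VMEM */
u32 length = PAGE_ALIGN(768 * 4096);	/* = 3 MiB used: fits */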
@@ -81,8 +104,8 @@ __init int create_simplefb(const struct screen_info *si,
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res.name = simplefb_resname;
res.start = si->lfb_base;
res.end = si->lfb_base + len - 1;
res.start = base;
res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;

View File

@@ -7,11 +7,13 @@
unsigned long unwind_get_return_address(struct unwind_state *state)
{
unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
if (unwind_done(state))
return 0;
return ftrace_graph_ret_addr(state->task, &state->graph_idx,
*state->sp, state->sp);
addr, state->sp);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
@@ -23,8 +25,10 @@ bool unwind_next_frame(struct unwind_state *state)
return false;
do {
unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
for (state->sp++; state->sp < info->end; state->sp++)
if (__kernel_text_address(*state->sp))
if (__kernel_text_address(addr))
return true;
state->sp = info->next_sp;

View File

@@ -2105,16 +2105,10 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned short sel, old_sel;
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
unsigned short sel;
struct desc_struct new_desc;
u8 cpl = ctxt->ops->cpl(ctxt);
/* Assignment of RIP may only fail in 64-bit mode */
if (ctxt->mode == X86EMUL_MODE_PROT64)
ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
VCPU_SREG_CS);
memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
@@ -2124,12 +2118,10 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
return rc;
rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
/* assigning eip failed; restore the old cs */
ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
return rc;
}
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}
@@ -2189,14 +2181,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
{
int rc;
unsigned long eip, cs;
u16 old_cs;
int cpl = ctxt->ops->cpl(ctxt);
struct desc_struct old_desc, new_desc;
const struct x86_emulate_ops *ops = ctxt->ops;
if (ctxt->mode == X86EMUL_MODE_PROT64)
ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
VCPU_SREG_CS);
struct desc_struct new_desc;
rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
@@ -2213,10 +2199,10 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
rc = assign_eip_far(ctxt, eip, &new_desc);
if (rc != X86EMUL_CONTINUE) {
WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
}
/* Error handling is not implemented. */
if (rc != X86EMUL_CONTINUE)
return X86EMUL_UNHANDLEABLE;
return rc;
}

View File

@@ -94,7 +94,7 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

View File

@@ -42,13 +42,13 @@ struct kvm_vcpu;
struct dest_map {
/* vcpu bitmap where IRQ has been sent */
DECLARE_BITMAP(map, KVM_MAX_VCPUS);
DECLARE_BITMAP(map, KVM_MAX_VCPU_ID);
/*
* Vector sent to a given vcpu, only valid when
* the vcpu's bit in map is set
*/
u8 vectors[KVM_MAX_VCPUS];
u8 vectors[KVM_MAX_VCPU_ID];
};
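The distinction matters because dest_map is indexed by vcpu id, not by vcpu index, and the two limits differ. The values below are cited from memory of the 4.9-era arch/x86/include/asm/kvm_host.h and should be treated as an assumption:

/* KVM_MAX_VCPUS   = 288  -- how many vcpus a VM may have
 * KVM_MAX_VCPU_ID = 1023 -- largest vcpu id an interrupt can target
 * With the old sizing, an EOI for vcpu_id 512 would touch bit 512
 * of a 288-bit map, i.e. memory past the end of the bitmap. */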

View File

@@ -41,6 +41,15 @@ static int kvm_set_pic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_pic *pic = pic_irqchip(kvm);
/*
* XXX: rejecting pic routes when pic isn't in use would be better,
* but the default routing table is installed while kvm->arch.vpic is
* NULL and KVM_CREATE_IRQCHIP can race with KVM_IRQ_LINE.
*/
if (!pic)
return -1;
return kvm_pic_set_irq(pic, e->irqchip.pin, irq_source_id, level);
}
@@ -49,6 +58,10 @@ static int kvm_set_ioapic_irq(struct kvm_kernel_irq_routing_entry *e,
bool line_status)
{
struct kvm_ioapic *ioapic = kvm->arch.vioapic;
if (!ioapic)
return -1;
return kvm_ioapic_set_irq(ioapic, e->irqchip.pin, irq_source_id, level,
line_status);
}
@@ -156,6 +169,16 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
}
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
if (!level)
return -1;
return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
@@ -163,18 +186,26 @@ int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
struct kvm_lapic_irq irq;
int r;
if (unlikely(e->type != KVM_IRQ_ROUTING_MSI))
return -EWOULDBLOCK;
switch (e->type) {
case KVM_IRQ_ROUTING_HV_SINT:
return kvm_hv_set_sint(e, kvm, irq_source_id, level,
line_status);
if (kvm_msi_route_invalid(kvm, e))
return -EINVAL;
case KVM_IRQ_ROUTING_MSI:
if (kvm_msi_route_invalid(kvm, e))
return -EINVAL;
kvm_set_msi_irq(kvm, e, &irq);
kvm_set_msi_irq(kvm, e, &irq);
if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
return r;
else
return -EWOULDBLOCK;
if (kvm_irq_delivery_to_apic_fast(kvm, NULL, &irq, &r, NULL))
return r;
break;
default:
break;
}
return -EWOULDBLOCK;
}
int kvm_request_irq_source_id(struct kvm *kvm)
@@ -254,16 +285,6 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
static int kvm_hv_set_sint(struct kvm_kernel_irq_routing_entry *e,
struct kvm *kvm, int irq_source_id, int level,
bool line_status)
{
if (!level)
return -1;
return kvm_hv_synic_set_irq(kvm, e->hv_sint.vcpu, e->hv_sint.sint);
}
int kvm_set_routing_entry(struct kvm *kvm,
struct kvm_kernel_irq_routing_entry *e,
const struct kvm_irq_routing_entry *ue)
@@ -423,18 +444,6 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
srcu_read_unlock(&kvm->irq_srcu, idx);
}
int kvm_arch_set_irq(struct kvm_kernel_irq_routing_entry *irq, struct kvm *kvm,
int irq_source_id, int level, bool line_status)
{
switch (irq->type) {
case KVM_IRQ_ROUTING_HV_SINT:
return kvm_hv_set_sint(irq, kvm, irq_source_id, level,
line_status);
default:
return -EWOULDBLOCK;
}
}
void kvm_arch_irq_routing_update(struct kvm *kvm)
{
kvm_hv_irq_routing_update(kvm);

View File

@@ -138,7 +138,7 @@ static inline bool kvm_apic_map_get_logical_dest(struct kvm_apic_map *map,
*mask = dest_id & 0xff;
return true;
case KVM_APIC_MODE_XAPIC_CLUSTER:
*cluster = map->xapic_cluster_map[dest_id >> 4];
*cluster = map->xapic_cluster_map[(dest_id >> 4) & 0xf];
*mask = dest_id & 0xf;
return true;
default:

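The new mask matters because the cluster map has one slot per 4-bit cluster id (16 in total). A small illustration with a hypothetical malformed dest_id:

/* Without the mask, dest_id 0x123 indexes slot 0x12 = 18 of a
 * 16-entry array; the mask clamps the index into range. */
u32 dest_id = 0x123;
unsigned int idx_bad = dest_id >> 4;		/* 18: out of bounds */
unsigned int idx_ok  = (dest_id >> 4) & 0xf;	/* 2: in range */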
View File

@@ -210,7 +210,18 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
struct kvm_shared_msrs *locals
= container_of(urn, struct kvm_shared_msrs, urn);
struct kvm_shared_msr_values *values;
unsigned long flags;
/*
* Disabling irqs at this point since the following code could be
* interrupted and executed through kvm_arch_hardware_disable()
*/
local_irq_save(flags);
if (locals->registered) {
locals->registered = false;
user_return_notifier_unregister(urn);
}
local_irq_restore(flags);
for (slot = 0; slot < shared_msrs_global.nr; ++slot) {
values = &locals->values[slot];
if (values->host != values->curr) {
@@ -218,8 +229,6 @@ static void kvm_on_user_return(struct user_return_notifier *urn)
values->curr = values->host;
}
}
locals->registered = false;
user_return_notifier_unregister(urn);
}
static void shared_msr_update(unsigned slot, u32 msr)
@@ -1724,18 +1733,23 @@ static void kvm_gen_update_masterclock(struct kvm *kvm)
static u64 __get_kvmclock_ns(struct kvm *kvm)
{
struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, 0);
struct kvm_arch *ka = &kvm->arch;
s64 ns;
struct pvclock_vcpu_time_info hv_clock;
if (vcpu->arch.hv_clock.flags & PVCLOCK_TSC_STABLE_BIT) {
u64 tsc = kvm_read_l1_tsc(vcpu, rdtsc());
ns = __pvclock_read_cycles(&vcpu->arch.hv_clock, tsc);
} else {
ns = ktime_get_boot_ns() + ka->kvmclock_offset;
spin_lock(&ka->pvclock_gtod_sync_lock);
if (!ka->use_master_clock) {
spin_unlock(&ka->pvclock_gtod_sync_lock);
return ktime_get_boot_ns() + ka->kvmclock_offset;
}
return ns;
hv_clock.tsc_timestamp = ka->master_cycle_now;
hv_clock.system_time = ka->master_kernel_ns + ka->kvmclock_offset;
spin_unlock(&ka->pvclock_gtod_sync_lock);
kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
&hv_clock.tsc_shift,
&hv_clock.tsc_to_system_mul);
return __pvclock_read_cycles(&hv_clock, rdtsc());
}
u64 get_kvmclock_ns(struct kvm *kvm)
@@ -2596,7 +2610,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_PIT_STATE2:
case KVM_CAP_SET_IDENTITY_MAP_ADDR:
case KVM_CAP_XEN_HVM:
case KVM_CAP_ADJUST_CLOCK:
case KVM_CAP_VCPU_EVENTS:
case KVM_CAP_HYPERV:
case KVM_CAP_HYPERV_VAPIC:
@@ -2623,6 +2636,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
#endif
r = 1;
break;
case KVM_CAP_ADJUST_CLOCK:
r = KVM_CLOCK_TSC_STABLE;
break;
case KVM_CAP_X86_SMM:
/* SMBASE is usually relocated above 1M on modern chipsets,
* and SMM handlers might indeed rely on 4G segment limits,
@@ -3415,6 +3431,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
};
case KVM_SET_VAPIC_ADDR: {
struct kvm_vapic_addr va;
int idx;
r = -EINVAL;
if (!lapic_in_kernel(vcpu))
@@ -3422,7 +3439,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
idx = srcu_read_lock(&vcpu->kvm->srcu);
r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
srcu_read_unlock(&vcpu->kvm->srcu, idx);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -4103,9 +4122,11 @@ long kvm_arch_vm_ioctl(struct file *filp,
struct kvm_clock_data user_ns;
u64 now_ns;
now_ns = get_kvmclock_ns(kvm);
local_irq_disable();
now_ns = __get_kvmclock_ns(kvm);
user_ns.clock = now_ns;
user_ns.flags = 0;
user_ns.flags = kvm->arch.use_master_clock ? KVM_CLOCK_TSC_STABLE : 0;
local_irq_enable();
memset(&user_ns.pad, 0, sizeof(user_ns.pad));
r = -EFAULT;

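On the ioctl side, userspace can now tell whether the returned value came from the stable master clock. A minimal consumer sketch (vm_fd is a hypothetical VM file descriptor; KVM_GET_CLOCK and struct kvm_clock_data are existing UAPI):

#include <linux/kvm.h>
#include <sys/ioctl.h>

struct kvm_clock_data data;

if (ioctl(vm_fd, KVM_GET_CLOCK, &data) == 0 &&
    (data.flags & KVM_CLOCK_TSC_STABLE)) {
	/* data.clock came from the master clock and can be restored
	 * on the destination without the guest seeing a jump */
}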
View File

@@ -135,7 +135,12 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
if (early_recursion_flag > 2)
goto halt_loop;
if (regs->cs != __KERNEL_CS)
/*
* Old CPUs leave the high bits of CS on the stack
* undefined. I'm not sure which CPUs do this, but at least
* the 486 DX works this way.
*/
if ((regs->cs & 0xFFFF) != __KERNEL_CS)
goto fail;
/*

View File

@@ -861,7 +861,7 @@ static void __init __efi_enter_virtual_mode(void)
int count = 0, pg_shift = 0;
void *new_memmap = NULL;
efi_status_t status;
phys_addr_t pa;
unsigned long pa;
efi.systab = NULL;

View File

@@ -31,6 +31,7 @@
#include <linux/io.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/ucs2_string.h>
#include <asm/setup.h>
#include <asm/page.h>
@@ -211,6 +212,35 @@ void efi_sync_low_kernel_mappings(void)
memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
}
/*
* Wrapper for slow_virt_to_phys() that handles NULL addresses.
*/
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
bool bad_size;
if (!va)
return 0;
if (virt_addr_valid(va))
return virt_to_phys(va);
/*
* A fully aligned variable on the stack is guaranteed not to
* cross a page boundary. Try to catch strings on the stack by
* checking that 'size' is a power of two.
*/
bad_size = size > PAGE_SIZE || !is_power_of_2(size);
WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
return slow_virt_to_phys(va);
}
#define virt_to_phys_or_null(addr) \
virt_to_phys_or_null_size((addr), sizeof(*(addr)))
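The alignment test above relies on a simple property: an object aligned to its own power-of-two size (at most PAGE_SIZE) sits inside one size-aligned block and therefore cannot straddle a page. A sketch of the property (illustrative helper, not part of the patch):

/* True if [va, va + size) spans a page boundary. For va % size == 0
 * with size a power of two <= PAGE_SIZE, this is always false. */
static bool crosses_page(unsigned long va, unsigned long size)
{
	return (va >> PAGE_SHIFT) != ((va + size - 1) >> PAGE_SHIFT);
}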
int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
{
unsigned long pfn, text;
@@ -494,8 +524,8 @@ static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
spin_lock(&rtc_lock);
phys_tm = virt_to_phys(tm);
phys_tc = virt_to_phys(tc);
phys_tm = virt_to_phys_or_null(tm);
phys_tc = virt_to_phys_or_null(tc);
status = efi_thunk(get_time, phys_tm, phys_tc);
@@ -511,7 +541,7 @@ static efi_status_t efi_thunk_set_time(efi_time_t *tm)
spin_lock(&rtc_lock);
phys_tm = virt_to_phys(tm);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_time, phys_tm);
@@ -529,9 +559,9 @@ efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
spin_lock(&rtc_lock);
phys_enabled = virt_to_phys(enabled);
phys_pending = virt_to_phys(pending);
phys_tm = virt_to_phys(tm);
phys_enabled = virt_to_phys_or_null(enabled);
phys_pending = virt_to_phys_or_null(pending);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(get_wakeup_time, phys_enabled,
phys_pending, phys_tm);
@@ -549,7 +579,7 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
spin_lock(&rtc_lock);
phys_tm = virt_to_phys(tm);
phys_tm = virt_to_phys_or_null(tm);
status = efi_thunk(set_wakeup_time, enabled, phys_tm);
@@ -558,6 +588,10 @@ efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
return status;
}
static unsigned long efi_name_size(efi_char16_t *name)
{
return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
}
static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
@@ -567,11 +601,11 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
phys_data_size = virt_to_phys(data_size);
phys_vendor = virt_to_phys(vendor);
phys_name = virt_to_phys(name);
phys_attr = virt_to_phys(attr);
phys_data = virt_to_phys(data);
phys_data_size = virt_to_phys_or_null(data_size);
phys_vendor = virt_to_phys_or_null(vendor);
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
phys_attr = virt_to_phys_or_null(attr);
phys_data = virt_to_phys_or_null_size(data, *data_size);
status = efi_thunk(get_variable, phys_name, phys_vendor,
phys_attr, phys_data_size, phys_data);
@@ -586,9 +620,9 @@ efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
phys_name = virt_to_phys(name);
phys_vendor = virt_to_phys(vendor);
phys_data = virt_to_phys(data);
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
phys_vendor = virt_to_phys_or_null(vendor);
phys_data = virt_to_phys_or_null_size(data, data_size);
/* If data_size is > sizeof(u32) we've got problems */
status = efi_thunk(set_variable, phys_name, phys_vendor,
@@ -605,9 +639,9 @@ efi_thunk_get_next_variable(unsigned long *name_size,
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
phys_name_size = virt_to_phys(name_size);
phys_vendor = virt_to_phys(vendor);
phys_name = virt_to_phys(name);
phys_name_size = virt_to_phys_or_null(name_size);
phys_vendor = virt_to_phys_or_null(vendor);
phys_name = virt_to_phys_or_null_size(name, *name_size);
status = efi_thunk(get_next_variable, phys_name_size,
phys_name, phys_vendor);
@@ -621,7 +655,7 @@ efi_thunk_get_next_high_mono_count(u32 *count)
efi_status_t status;
u32 phys_count;
phys_count = virt_to_phys(count);
phys_count = virt_to_phys_or_null(count);
status = efi_thunk(get_next_high_mono_count, phys_count);
return status;
@@ -633,7 +667,7 @@ efi_thunk_reset_system(int reset_type, efi_status_t status,
{
u32 phys_data;
phys_data = virt_to_phys(data);
phys_data = virt_to_phys_or_null_size(data, data_size);
efi_thunk(reset_system, reset_type, status, data_size, phys_data);
}
@@ -661,9 +695,9 @@ efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
return EFI_UNSUPPORTED;
phys_storage = virt_to_phys(storage_space);
phys_remaining = virt_to_phys(remaining_space);
phys_max = virt_to_phys(max_variable_size);
phys_storage = virt_to_phys_or_null(storage_space);
phys_remaining = virt_to_phys_or_null(remaining_space);
phys_max = virt_to_phys_or_null(max_variable_size);
status = efi_thunk(query_variable_info, attr, phys_storage,
phys_remaining, phys_max);

View File

@@ -28,4 +28,4 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o

View File

@@ -1,5 +1,5 @@
/*
* platform_wdt.c: Watchdog platform library file
* Intel Merrifield watchdog platform device library file
*
* (C) Copyright 2014 Intel Corporation
* Author: David Cohen <david.a.cohen@linux.intel.com>
@@ -14,7 +14,9 @@
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/platform_data/intel-mid_wdt.h>
#include <asm/intel-mid.h>
#include <asm/intel_scu_ipc.h>
#include <asm/io_apic.h>
#define TANGIER_EXT_TIMER0_MSI 15
@@ -50,14 +52,34 @@ static struct intel_mid_wdt_pdata tangier_pdata = {
.probe = tangier_probe,
};
static int __init register_mid_wdt(void)
static int wdt_scu_status_change(struct notifier_block *nb,
unsigned long code, void *data)
{
if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
wdt_dev.dev.platform_data = &tangier_pdata;
return platform_device_register(&wdt_dev);
if (code == SCU_DOWN) {
platform_device_unregister(&wdt_dev);
return 0;
}
return -ENODEV;
return platform_device_register(&wdt_dev);
}
static struct notifier_block wdt_scu_notifier = {
.notifier_call = wdt_scu_status_change,
};
static int __init register_mid_wdt(void)
{
if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
return -ENODEV;
wdt_dev.dev.platform_data = &tangier_pdata;
/*
* We need to be sure that the SCU IPC is ready before watchdog device
* can be registered:
*/
intel_scu_notifier_add(&wdt_scu_notifier);
return 0;
}
rootfs_initcall(register_mid_wdt);

View File

@@ -272,6 +272,25 @@ int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
}
EXPORT_SYMBOL_GPL(intel_mid_pci_set_power_state);
pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
{
struct mid_pwr *pwr = midpwr;
int id, reg, bit;
u32 power;
if (!pwr || !pwr->available)
return PCI_UNKNOWN;
id = intel_mid_pwr_get_lss_id(pdev);
if (id < 0)
return PCI_UNKNOWN;
reg = (id * LSS_PWS_BITS) / 32;
bit = (id * LSS_PWS_BITS) % 32;
power = mid_pwr_get_state(pwr, reg);
return (__force pci_power_t)((power >> bit) & 3);
}
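The index arithmetic packs LSS_PWS_BITS power-state bits per logical subsystem into consecutive 32-bit registers; LSS_PWS_BITS is two in this driver (stated here as an assumption). A hypothetical worked example:

/* Assuming LSS_PWS_BITS == 2, for LSS id 20: */
int reg = (20 * 2) / 32;	/* = 1: second power-state register */
int bit = (20 * 2) % 32;	/* = 8: offset within that register */
/* (power >> 8) & 3 then selects one of four power states (D0..D3) */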
void intel_mid_pwr_power_off(void)
{
struct mid_pwr *pwr = midpwr;

View File

@@ -16,6 +16,7 @@ KCOV_INSTRUMENT := n
KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large
KBUILD_CFLAGS += -m$(BITS)
KBUILD_CFLAGS += $(call cc-option,-fno-PIE)
$(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE
$(call if_changed,ld)