Merge 4.16-rc7 into char-misc-next

We want the hyperv fix in here for merging and testing.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Greg Kroah-Hartman
2018-03-28 12:27:35 +02:00
commit b24d0d5b12
769 changed files with 8090 additions and 5344 deletions

View File

@@ -105,7 +105,7 @@ static void probe_xeon_phi_r3mwait(struct cpuinfo_x86 *c)
/*
* Early microcode releases for the Spectre v2 mitigation were broken.
* Information taken from:
-* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/01/microcode-update-guidance.pdf
+* - https://newsroom.intel.com/wp-content/uploads/sites/11/2018/03/microcode-update-guidance.pdf
* - https://kb.vmware.com/s/article/52345
* - Microcode revisions observed in the wild
* - Release note from 20180108 microcode release
@@ -123,7 +123,6 @@ static const struct sku_microcode spectre_bad_microcodes[] = {
{ INTEL_FAM6_KABYLAKE_MOBILE, 0x09, 0x80 },
{ INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
{ INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
-{ INTEL_FAM6_SKYLAKE_DESKTOP, 0x03, 0xc2 },
{ INTEL_FAM6_BROADWELL_CORE, 0x04, 0x28 },
{ INTEL_FAM6_BROADWELL_GT3E, 0x01, 0x1b },
{ INTEL_FAM6_BROADWELL_XEON_D, 0x02, 0x14 },
@@ -144,6 +143,13 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
{
int i;
/*
* We know that the hypervisor lies to us on the microcode version so
* we may as well hope that it is running the correct version.
*/
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return false;
for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) {
if (c->x86_model == spectre_bad_microcodes[i].model &&
c->x86_stepping == spectre_bad_microcodes[i].stepping)
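
Aside: the quirk table these hunks touch is a plain linear scan keyed on (model, stepping); when a row matches, the running microcode revision is compared against the listed bad one (the comparison is cut off above; in the kernel it returns true when c->microcode is at or below the table entry). A minimal stand-alone sketch of the same pattern, with made-up table contents for illustration:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sku_microcode {
	uint8_t  model;
	uint8_t  stepping;
	uint32_t microcode;	/* highest known-bad revision */
};

/* Illustrative entries only; the real table lives in intel.c. */
static const struct sku_microcode spectre_bad_microcodes[] = {
	{ 0x4e, 0x03, 0xc2 },
	{ 0x55, 0x04, 0x0200003c },
};

static bool bad_spectre_microcode(uint8_t model, uint8_t stepping, uint32_t rev)
{
	for (size_t i = 0; i < sizeof(spectre_bad_microcodes) / sizeof(spectre_bad_microcodes[0]); i++) {
		if (model == spectre_bad_microcodes[i].model &&
		    stepping == spectre_bad_microcodes[i].stepping)
			return rev <= spectre_bad_microcodes[i].microcode;
	}
	return false;
}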

View File

@@ -56,6 +56,9 @@
static DEFINE_MUTEX(mce_log_mutex);
/* sysfs synchronization */
static DEFINE_MUTEX(mce_sysfs_mutex);
#define CREATE_TRACE_POINTS
#include <trace/events/mce.h>
@@ -130,6 +133,8 @@ void mce_setup(struct mce *m)
if (this_cpu_has(X86_FEATURE_INTEL_PPIN))
rdmsrl(MSR_PPIN, m->ppin);
m->microcode = boot_cpu_data.microcode;
}
DEFINE_PER_CPU(struct mce, injectm);
@@ -262,7 +267,7 @@ static void __print_mce(struct mce *m)
*/
pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n",
m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid,
-cpu_data(m->extcpu).microcode);
+m->microcode);
}
static void print_mce(struct mce *m)
@@ -2086,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s,
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.ignore_ce ^ !!new) {
if (new) {
/* disable ce features */
@@ -2098,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s,
on_each_cpu(mce_enable_ce, (void *)1, 1);
}
}
mutex_unlock(&mce_sysfs_mutex);
return size;
}
@@ -2110,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s,
if (kstrtou64(buf, 0, &new) < 0)
return -EINVAL;
mutex_lock(&mce_sysfs_mutex);
if (mca_cfg.cmci_disabled ^ !!new) {
if (new) {
/* disable cmci */
@@ -2121,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s,
on_each_cpu(mce_enable_ce, NULL, 1);
}
}
mutex_unlock(&mce_sysfs_mutex);
return size;
}
@@ -2128,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s,
struct device_attribute *attr,
const char *buf, size_t size)
{
-ssize_t ret = device_store_int(s, attr, buf, size);
+unsigned long old_check_interval = check_interval;
+ssize_t ret = device_store_ulong(s, attr, buf, size);
+if (check_interval == old_check_interval)
+return ret;
+if (check_interval < 1)
+check_interval = 1;
+mutex_lock(&mce_sysfs_mutex);
mce_restart();
+mutex_unlock(&mce_sysfs_mutex);
return ret;
}
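
All three stores above follow one shape: parse and validate the new value first, then do the actual reconfiguration under mce_sysfs_mutex so concurrent sysfs writers cannot interleave their toggles and restarts. A stripped-down user-space analogue of the new store_int_with_restart() flow (pthreads stand in for the kernel mutex; names here are illustrative):

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t mce_sysfs_mutex = PTHREAD_MUTEX_INITIALIZER;
static long check_interval = 300;	/* illustrative default, in seconds */

static void mce_restart(void) { /* re-arms the polling timers in the kernel */ }

static long store_int_with_restart(const char *buf)
{
	long old_check_interval = check_interval;

	check_interval = strtol(buf, NULL, 0);	/* device_store_ulong() in the hunk */
	if (check_interval == old_check_interval)
		return 0;			/* unchanged: skip the restart */

	if (check_interval < 1)			/* clamp, as the new code does */
		check_interval = 1;

	pthread_mutex_lock(&mce_sysfs_mutex);
	mce_restart();				/* serialized against other writers */
	pthread_mutex_unlock(&mce_sysfs_mutex);
	return 0;
}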

View File

@@ -339,7 +339,7 @@ int __init save_microcode_in_initrd_amd(unsigned int cpuid_1_eax)
return -EINVAL;
ret = load_microcode_amd(true, x86_family(cpuid_1_eax), desc.data, desc.size);
-if (ret != UCODE_OK)
+if (ret > UCODE_UPDATED)
return -EINVAL;
return 0;
@@ -683,27 +683,35 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
+struct ucode_patch *p;
enum ucode_state ret;
/* free old equiv table */
free_equiv_cpu_table();
ret = __load_microcode_amd(family, data, size);
-if (ret != UCODE_OK)
+if (ret != UCODE_OK) {
cleanup();
-#ifdef CONFIG_X86_32
-/* save BSP's matching patch for early load */
-if (save) {
-struct ucode_patch *p = find_patch(0);
-if (p) {
-memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
-memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
-PATCH_MAX_SIZE));
-}
+return ret;
}
-#endif
+p = find_patch(0);
+if (!p) {
+return ret;
+} else {
+if (boot_cpu_data.microcode == p->patch_id)
+return ret;
+ret = UCODE_NEW;
+}
+/* save BSP's matching patch for early load */
+if (!save)
+return ret;
+memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
+memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data), PATCH_MAX_SIZE));
return ret;
}
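
The reworked tail of load_microcode_amd() lets callers distinguish "the cache holds something newer than what the boot CPU runs" (UCODE_NEW) from "nothing to do" (UCODE_OK); comparisons elsewhere in the set, such as ret > UCODE_UPDATED above, rely on the enum ordering OK < NEW < UPDATED < NFOUND < ERROR. A compact sketch of that decision, with find_patch() and the boot revision as stand-ins:

enum ucode_state { UCODE_OK, UCODE_NEW, UCODE_UPDATED, UCODE_NFOUND, UCODE_ERROR };

struct ucode_patch { unsigned int patch_id; };

/* Stand-ins for the kernel's patch cache and boot_cpu_data.microcode. */
static struct ucode_patch cache0 = { .patch_id = 0x8001137 };
static unsigned int boot_rev = 0x8001129;

static struct ucode_patch *find_patch(unsigned int cpu) { return cpu == 0 ? &cache0 : 0; }

static enum ucode_state classify_cached_patch(void)
{
	struct ucode_patch *p = find_patch(0);

	if (!p)				/* nothing cached for the BSP */
		return UCODE_OK;
	if (boot_rev == p->patch_id)	/* already running this revision */
		return UCODE_OK;
	return UCODE_NEW;		/* worth attempting a late load */
}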

View File

@@ -22,13 +22,16 @@
#define pr_fmt(fmt) "microcode: " fmt
#include <linux/platform_device.h>
#include <linux/stop_machine.h>
#include <linux/syscore_ops.h>
#include <linux/miscdevice.h>
#include <linux/capability.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/fs.h>
#include <linux/mm.h>
@@ -64,6 +67,11 @@ LIST_HEAD(microcode_cache);
*/
static DEFINE_MUTEX(microcode_mutex);
/*
* Serialize late loading so that CPUs get updated one-by-one.
*/
static DEFINE_SPINLOCK(update_lock);
struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
struct cpu_info_ctx {
@@ -373,26 +381,23 @@ static int collect_cpu_info(int cpu)
return ret;
}
-struct apply_microcode_ctx {
-enum ucode_state err;
-};
static void apply_microcode_local(void *arg)
{
-struct apply_microcode_ctx *ctx = arg;
+enum ucode_state *err = arg;
-ctx->err = microcode_ops->apply_microcode(smp_processor_id());
+*err = microcode_ops->apply_microcode(smp_processor_id());
}
static int apply_microcode_on_target(int cpu)
{
-struct apply_microcode_ctx ctx = { .err = 0 };
+enum ucode_state err;
int ret;
-ret = smp_call_function_single(cpu, apply_microcode_local, &ctx, 1);
-if (!ret)
-ret = ctx.err;
+ret = smp_call_function_single(cpu, apply_microcode_local, &err, 1);
+if (!ret) {
+if (err == UCODE_ERROR)
+ret = 1;
+}
return ret;
}
@@ -489,19 +494,114 @@ static void __exit microcode_dev_exit(void)
/* fake device for request_firmware */
static struct platform_device *microcode_pdev;
-static enum ucode_state reload_for_cpu(int cpu)
+/*
+* Late loading dance. Why the heavy-handed stop_machine effort?
+*
+* - HT siblings must be idle and not execute other code while the other sibling
+* is loading microcode in order to avoid any negative interactions caused by
+* the loading.
+*
+* - In addition, microcode update on the cores must be serialized until this
+* requirement can be relaxed in the future. Right now, this is conservative
+* and good.
+*/
+#define SPINUNIT 100 /* 100 nsec */
+static int check_online_cpus(void)
{
-struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
-enum ucode_state ustate;
+if (num_online_cpus() == num_present_cpus())
+return 0;
-if (!uci->valid)
-return UCODE_OK;
+pr_err("Not all CPUs online, aborting microcode update.\n");
-ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, true);
-if (ustate != UCODE_OK)
-return ustate;
+return -EINVAL;
}
-return apply_microcode_on_target(cpu);
static atomic_t late_cpus_in;
static atomic_t late_cpus_out;
static int __wait_for_cpus(atomic_t *t, long long timeout)
{
int all_cpus = num_online_cpus();
atomic_inc(t);
while (atomic_read(t) < all_cpus) {
if (timeout < SPINUNIT) {
pr_err("Timeout while waiting for CPUs rendezvous, remaining: %d\n",
all_cpus - atomic_read(t));
return 1;
}
ndelay(SPINUNIT);
timeout -= SPINUNIT;
touch_nmi_watchdog();
}
return 0;
}
/*
* Returns:
* < 0 - on error
* 0 - no update done
* 1 - microcode was updated
*/
static int __reload_late(void *info)
{
int cpu = smp_processor_id();
enum ucode_state err;
int ret = 0;
/*
* Wait for all CPUs to arrive. A load will not be attempted unless all
* CPUs show up.
*/
if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
return -1;
spin_lock(&update_lock);
apply_microcode_local(&err);
spin_unlock(&update_lock);
if (err > UCODE_NFOUND) {
pr_warn("Error reloading microcode on CPU %d\n", cpu);
return -1;
/* siblings return UCODE_OK because their engine got updated already */
} else if (err == UCODE_UPDATED || err == UCODE_OK) {
ret = 1;
} else {
return ret;
}
/*
* Increase the wait timeout to a safe value here since we're
* serializing the microcode update and that could take a while on a
* large number of CPUs. And that is fine as the *actual* timeout will
* be determined by the last CPU finished updating and thus cut short.
*/
if (__wait_for_cpus(&late_cpus_out, NSEC_PER_SEC * num_online_cpus()))
panic("Timeout during microcode update!\n");
return ret;
}
/*
* Reload microcode late on all CPUs. Wait for a sec until they
* all gather together.
*/
static int microcode_reload_late(void)
{
int ret;
atomic_set(&late_cpus_in, 0);
atomic_set(&late_cpus_out, 0);
ret = stop_machine_cpuslocked(__reload_late, NULL, cpu_online_mask);
if (ret > 0)
microcode_check();
return ret;
}
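
The two counters above implement a two-phase rendezvous: no CPU may start until all have checked in through late_cpus_in, the actual writes are serialized by update_lock (a spinlock, since stop_machine context cannot sleep), and no CPU may leave until all have checked out through late_cpus_out. A user-space sketch of the same protocol with C11 atomics and pthreads; the timeout and NMI-watchdog handling are elided and all names are illustrative:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

static atomic_int late_cpus_in, late_cpus_out;
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static void wait_for_cpus(atomic_int *t)
{
	atomic_fetch_add(t, 1);
	while (atomic_load(t) < NCPUS)
		;	/* the kernel ndelay()s here and pets the NMI watchdog */
}

static void *reload_late(void *arg)
{
	int cpu = (int)(long)arg;

	wait_for_cpus(&late_cpus_in);		/* phase 1: everyone arrives */

	pthread_mutex_lock(&update_lock);
	printf("cpu %d: applying update (one at a time)\n", cpu);
	pthread_mutex_unlock(&update_lock);

	wait_for_cpus(&late_cpus_out);		/* phase 2: everyone finishes */
	return NULL;
}

int main(void)
{
	pthread_t th[NCPUS];

	for (long i = 0; i < NCPUS; i++)
		pthread_create(&th[i], NULL, reload_late, (void *)i);
	for (int i = 0; i < NCPUS; i++)
		pthread_join(th[i], NULL);
	return 0;
}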
static ssize_t reload_store(struct device *dev,
@@ -509,10 +609,9 @@ static ssize_t reload_store(struct device *dev,
const char *buf, size_t size)
{
enum ucode_state tmp_ret = UCODE_OK;
-bool do_callback = false;
+int bsp = boot_cpu_data.cpu_index;
unsigned long val;
ssize_t ret = 0;
-int cpu;
ret = kstrtoul(buf, 0, &val);
if (ret)
@@ -521,29 +620,24 @@ static ssize_t reload_store(struct device *dev,
if (val != 1)
return size;
+tmp_ret = microcode_ops->request_microcode_fw(bsp, &microcode_pdev->dev, true);
+if (tmp_ret != UCODE_NEW)
+return size;
get_online_cpus();
+ret = check_online_cpus();
+if (ret)
+goto put;
mutex_lock(&microcode_mutex);
-for_each_online_cpu(cpu) {
-tmp_ret = reload_for_cpu(cpu);
-if (tmp_ret > UCODE_NFOUND) {
-pr_warn("Error reloading microcode on CPU %d\n", cpu);
-/* set retval for the first encountered reload error */
-if (!ret)
-ret = -EINVAL;
-}
-if (tmp_ret == UCODE_UPDATED)
-do_callback = true;
-}
-if (!ret && do_callback)
-microcode_check();
+ret = microcode_reload_late();
mutex_unlock(&microcode_mutex);
+put:
put_online_cpus();
-if (!ret)
+if (ret >= 0)
ret = size;
return ret;
@@ -611,10 +705,8 @@ static enum ucode_state microcode_init_cpu(int cpu, bool refresh_fw)
if (system_state != SYSTEM_RUNNING)
return UCODE_NFOUND;
-ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev,
-refresh_fw);
-if (ustate == UCODE_OK) {
+ustate = microcode_ops->request_microcode_fw(cpu, &microcode_pdev->dev, refresh_fw);
+if (ustate == UCODE_NEW) {
pr_debug("CPU%d updated upon init\n", cpu);
apply_microcode_on_target(cpu);
}

View File

@@ -589,6 +589,23 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
if (!mc)
return 0;
/*
* Save us the MSR write below - which is a particularly expensive
* operation - when the other hyperthread has updated the microcode
* already.
*/
rev = intel_get_microcode_revision();
if (rev >= mc->hdr.rev) {
uci->cpu_sig.rev = rev;
return UCODE_OK;
}
/*
* Writeback and invalidate caches before updating microcode to avoid
* internal issues depending on what the microcode is updating.
*/
native_wbinvd();
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
@@ -774,9 +791,9 @@ static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
static enum ucode_state apply_microcode_intel(int cpu)
{
+struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
+struct cpuinfo_x86 *c = &cpu_data(cpu);
struct microcode_intel *mc;
-struct ucode_cpu_info *uci;
-struct cpuinfo_x86 *c;
static int prev_rev;
u32 rev;
@@ -784,15 +801,32 @@ static enum ucode_state apply_microcode_intel(int cpu)
if (WARN_ON(raw_smp_processor_id() != cpu))
return UCODE_ERROR;
-uci = ucode_cpu_info + cpu;
-mc = uci->mc;
+/* Look for a newer patch in our cache: */
+mc = find_patch(uci);
if (!mc) {
-/* Look for a newer patch in our cache: */
-mc = find_patch(uci);
+mc = uci->mc;
if (!mc)
return UCODE_NFOUND;
}
+/*
+* Save us the MSR write below - which is a particularly expensive
+* operation - when the other hyperthread has updated the microcode
+* already.
+*/
+rev = intel_get_microcode_revision();
+if (rev >= mc->hdr.rev) {
+uci->cpu_sig.rev = rev;
+c->microcode = rev;
+return UCODE_OK;
+}
+/*
+* Writeback and invalidate caches before updating microcode to avoid
+* internal issues depending on what the microcode is updating.
+*/
+native_wbinvd();
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
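
Both apply paths now share the same guard: read the current revision first, and skip the cache flush and the expensive MSR 0x79 write when a sibling hyperthread has already updated the shared microcode engine. In outline; the three helpers below are stand-ins for intel_get_microcode_revision(), native_wbinvd() and the wrmsrl() call:

#include <stdint.h>

static uint32_t core_rev = 0x84;	/* revision the core currently runs */

static uint32_t read_microcode_revision(void) { return core_rev; }
static void flush_caches(void) { }
static void write_microcode_msr(uint32_t rev) { core_rev = rev; }

/* Returns the revision in effect after the call. */
static uint32_t maybe_apply(uint32_t patch_rev)
{
	uint32_t rev = read_microcode_revision();

	if (rev >= patch_rev)		/* sibling already loaded it: skip */
		return rev;

	flush_caches();			/* wbinvd before the update, per the hunk */
	write_microcode_msr(patch_rev);
	return read_microcode_revision();
}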
@@ -813,8 +847,6 @@ static enum ucode_state apply_microcode_intel(int cpu)
prev_rev = rev;
}
-c = &cpu_data(cpu);
uci->cpu_sig.rev = rev;
c->microcode = rev;
@@ -830,6 +862,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
unsigned int leftover = size;
unsigned int curr_mc_size = 0, new_mc_size = 0;
unsigned int csig, cpf;
enum ucode_state ret = UCODE_OK;
while (leftover) {
struct microcode_header_intel mc_header;
@@ -871,6 +904,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
new_mc = mc;
new_mc_size = mc_size;
mc = NULL; /* trigger new vmalloc */
ret = UCODE_NEW;
}
ucode_ptr += mc_size;
@@ -900,7 +934,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
-return UCODE_OK;
+return ret;
}
static int get_ucode_fw(void *to, const void *from, size_t n)

View File

@@ -160,7 +160,6 @@ static const __initconst struct idt_data early_pf_idts[] = {
*/
static const __initconst struct idt_data dbg_idts[] = {
INTG(X86_TRAP_DB, debug),
-INTG(X86_TRAP_BP, int3),
};
#endif
@@ -183,7 +182,6 @@ gate_desc debug_idt_table[IDT_ENTRIES] __page_aligned_bss;
static const __initconst struct idt_data ist_idts[] = {
ISTG(X86_TRAP_DB, debug, DEBUG_STACK),
ISTG(X86_TRAP_NMI, nmi, NMI_STACK),
-SISTG(X86_TRAP_BP, int3, DEBUG_STACK),
ISTG(X86_TRAP_DF, double_fault, DOUBLEFAULT_STACK),
#ifdef CONFIG_X86_MCE
ISTG(X86_TRAP_MC, &machine_check, MCE_STACK),

View File

@@ -23,7 +23,7 @@
/*
* this changes the io permissions bitmap in the current task.
*/
-asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
+SYSCALL_DEFINE3(ioperm, unsigned long, from, unsigned long, num, int, turn_on)
{
struct thread_struct *t = &current->thread;
struct tss_struct *tss;

View File

@@ -1168,10 +1168,18 @@ NOKPROBE_SYMBOL(longjmp_break_handler);
bool arch_within_kprobe_blacklist(unsigned long addr)
{
bool is_in_entry_trampoline_section = false;
#ifdef CONFIG_X86_64
is_in_entry_trampoline_section =
(addr >= (unsigned long)__entry_trampoline_start &&
addr < (unsigned long)__entry_trampoline_end);
#endif
return (addr >= (unsigned long)__kprobes_text_start &&
addr < (unsigned long)__kprobes_text_end) ||
(addr >= (unsigned long)__entry_text_start &&
-addr < (unsigned long)__entry_text_end);
+addr < (unsigned long)__entry_text_end) ||
+is_in_entry_trampoline_section;
}
int __init arch_init_kprobes(void)

View File

@@ -37,7 +37,6 @@ static dma_addr_t nommu_map_page(struct device *dev, struct page *page,
WARN_ON(size == 0);
if (!check_addr("map_single", dev, bus, size))
return NOMMU_MAPPING_ERROR;
-flush_write_buffers();
return bus;
}
@@ -72,25 +71,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
return 0;
s->dma_length = s->length;
}
-flush_write_buffers();
return nents;
}
-static void nommu_sync_single_for_device(struct device *dev,
-dma_addr_t addr, size_t size,
-enum dma_data_direction dir)
-{
-flush_write_buffers();
-}
-static void nommu_sync_sg_for_device(struct device *dev,
-struct scatterlist *sg, int nelems,
-enum dma_data_direction dir)
-{
-flush_write_buffers();
-}
static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
return dma_addr == NOMMU_MAPPING_ERROR;
@@ -101,8 +84,6 @@ const struct dma_map_ops nommu_dma_ops = {
.free = dma_generic_free_coherent,
.map_sg = nommu_map_sg,
.map_page = nommu_map_page,
-.sync_single_for_device = nommu_sync_single_for_device,
-.sync_sg_for_device = nommu_sync_sg_for_device,
.is_phys = 1,
.mapping_error = nommu_mapping_error,
.dma_supported = x86_dma_supported,

View File

@@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void)
BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int));
#define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name))
BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0);
BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4);
BUILD_BUG_ON(offsetof(siginfo_t, si_code) != 8);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code) != 8);
/*
* Ensure that the size of each si_field never changes.
* If it does, it is a sign that the
@@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void)
CHECK_CSI_SIZE (_kill, 2*sizeof(int));
CHECK_SI_SIZE (_kill, 2*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
CHECK_CSI_OFFSET(_timer);
CHECK_CSI_SIZE (_timer, 3*sizeof(int));
CHECK_SI_SIZE (_timer, 6*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
CHECK_CSI_OFFSET(_rt);
CHECK_CSI_SIZE (_rt, 3*sizeof(int));
CHECK_SI_SIZE (_rt, 4*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14);
CHECK_CSI_OFFSET(_sigchld);
CHECK_CSI_SIZE (_sigchld, 5*sizeof(int));
CHECK_SI_SIZE (_sigchld, 8*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14);
BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x20);
BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x28);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime) != 0x1C);
#ifdef CONFIG_X86_X32_ABI
CHECK_CSI_OFFSET(_sigchld_x32);
CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int));
/* no _sigchld_x32 in the generic siginfo_t */
BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) != 0x20);
#endif
CHECK_CSI_OFFSET(_sigfault);
CHECK_CSI_SIZE (_sigfault, 4*sizeof(int));
CHECK_SI_SIZE (_sigfault, 8*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C);
BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20);
BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14);
CHECK_CSI_OFFSET(_sigpoll);
CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int));
CHECK_SI_SIZE (_sigpoll, 4*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x18);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd) != 0x10);
CHECK_CSI_OFFSET(_sigsys);
CHECK_CSI_SIZE (_sigsys, 3*sizeof(int));
CHECK_SI_SIZE (_sigsys, 4*sizeof(int));
BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10);
BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x18);
BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x1C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall) != 0x10);
BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch) != 0x14);
/* any new si_fields should be added here */
}
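
The BUILD_BUG_ON() wall above pins the user-visible siginfo ABI at compile time: if a field ever moves, the build breaks rather than userspace. The same trick is available outside the kernel with C11 static_assert; the struct below is a made-up miniature, not the real siginfo layout:

#include <assert.h>
#include <stddef.h>

struct mini_siginfo {
	int si_signo;
	int si_errno;
	int si_code;
	union {
		int _pad[4];
		struct { int pid; int uid; } kill;
	} _sifields;
};

/* Layout contract: any change to these offsets fails the build. */
static_assert(offsetof(struct mini_siginfo, si_signo) == 0, "ABI drift");
static_assert(offsetof(struct mini_siginfo, si_errno) == 4, "ABI drift");
static_assert(offsetof(struct mini_siginfo, si_code) == 8, "ABI drift");
static_assert(offsetof(struct mini_siginfo, _sifields) == 12, "ABI drift");
static_assert(offsetof(struct mini_siginfo, _sifields.kill.uid) == 16, "ABI drift");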

View File

@@ -577,7 +577,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
}
NOKPROBE_SYMBOL(do_general_protection);
-/* May run on IST stack. */
dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
{
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -592,6 +591,13 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
if (poke_int3_handler(regs))
return;
/*
* Use ist_enter despite the fact that we don't use an IST stack.
* We can be called from a kprobe in non-CONTEXT_KERNEL kernel
* mode or even during context tracking state changes.
*
* This means that we can't schedule. That's okay.
*/
ist_enter(regs);
RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
@@ -609,15 +615,10 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
SIGTRAP) == NOTIFY_STOP)
goto exit;
-/*
-* Let others (NMI) know that the debug stack is in use
-* as we may switch to the interrupt stack.
-*/
-debug_stack_usage_inc();
cond_local_irq_enable(regs);
do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
cond_local_irq_disable(regs);
-debug_stack_usage_dec();
exit:
ist_exit(regs);
}

View File

@@ -727,7 +727,8 @@ void handle_vm86_fault(struct kernel_vm86_regs *regs, long error_code)
return;
check_vip:
-if (VEFLAGS & X86_EFLAGS_VIP) {
+if ((VEFLAGS & (X86_EFLAGS_VIP | X86_EFLAGS_VIF)) ==
+(X86_EFLAGS_VIP | X86_EFLAGS_VIF)) {
save_v86_state(regs, VM86_STI);
return;
}

View File

@@ -118,9 +118,11 @@ SECTIONS
#ifdef CONFIG_X86_64
. = ALIGN(PAGE_SIZE);
+VMLINUX_SYMBOL(__entry_trampoline_start) = .;
_entry_trampoline = .;
*(.entry_trampoline)
. = ALIGN(PAGE_SIZE);
+VMLINUX_SYMBOL(__entry_trampoline_end) = .;
ASSERT(. - _entry_trampoline == PAGE_SIZE, "entry trampoline is too big");
#endif