Merge branch 'linus' into stackprotector
Conflicts:
	arch/x86/kernel/Makefile
	include/asm-x86/pda.h
@@ -10,6 +10,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/mmiotrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -35,6 +36,7 @@
 #include <asm/tlbflush.h>
 #include <asm/proto.h>
 #include <asm-generic/sections.h>
+#include <asm/traps.h>
 
 /*
  * Page fault error code bits
@@ -50,17 +52,23 @@
 #define PF_RSVD		(1<<3)
 #define PF_INSTR	(1<<4)
 
+static inline int kmmio_fault(struct pt_regs *regs, unsigned long addr)
+{
+#ifdef CONFIG_MMIOTRACE_HOOKS
+	if (unlikely(is_kmmio_active()))
+		if (kmmio_handler(regs, addr) == 1)
+			return -1;
+#endif
+	return 0;
+}
+
 static inline int notify_page_fault(struct pt_regs *regs)
 {
 #ifdef CONFIG_KPROBES
 	int ret = 0;
 
 	/* kprobe_running() needs smp_processor_id() */
-#ifdef CONFIG_X86_32
 	if (!user_mode_vm(regs)) {
-#else
-	if (!user_mode(regs)) {
-#endif
 		preempt_disable();
 		if (kprobe_running() && kprobe_fault_handler(regs, 14))
 			ret = 1;
@@ -351,8 +359,6 @@ static int is_errata100(struct pt_regs *regs, unsigned long address)
 	return 0;
 }
 
-void do_invalid_op(struct pt_regs *, unsigned long);
-
 static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
 {
 #ifdef CONFIG_X86_F00F_BUG
@@ -397,11 +403,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 		printk(KERN_CONT "NULL pointer dereference");
 	else
 		printk(KERN_CONT "paging request");
-#ifdef CONFIG_X86_32
-	printk(KERN_CONT " at %08lx\n", address);
-#else
-	printk(KERN_CONT " at %016lx\n", address);
-#endif
+	printk(KERN_CONT " at %p\n", (void *) address);
 	printk(KERN_ALERT "IP:");
 	printk_address(regs->ip, 1);
 	dump_pagetable(address);
@@ -593,11 +595,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	unsigned long flags;
 #endif
 
-	/*
-	 * We can fault from pretty much anywhere, with unknown IRQ state.
-	 */
-	trace_hardirqs_fixup();
-
 	tsk = current;
 	mm = tsk->mm;
 	prefetchw(&mm->mmap_sem);
@@ -609,6 +606,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 
 	if (notify_page_fault(regs))
 		return;
+	if (unlikely(kmmio_fault(regs, address)))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -803,14 +802,10 @@ bad_area_nosemaphore:
 	if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
 	    printk_ratelimit()) {
 		printk(
-#ifdef CONFIG_X86_32
-		"%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
-#else
-		"%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
-#endif
+		"%s%s[%d]: segfault at %lx ip %p sp %p error %lx",
 		task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
-		tsk->comm, task_pid_nr(tsk), address, regs->ip,
-		regs->sp, error_code);
+		tsk->comm, task_pid_nr(tsk), address,
+		(void *) regs->ip, (void *) regs->sp, error_code);
 		print_vma_addr(" in ", regs->ip);
 		printk("\n");
 	}
@@ -921,72 +916,45 @@ LIST_HEAD(pgd_list);
 
 void vmalloc_sync_all(void)
 {
-#ifdef CONFIG_X86_32
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = TASK_SIZE;
 	unsigned long address;
 
+#ifdef CONFIG_X86_32
 	if (SHARED_KERNEL_PMD)
 		return;
 
-	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
-	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			unsigned long flags;
-			struct page *page;
+	for (address = VMALLOC_START & PMD_MASK;
+	     address >= TASK_SIZE && address < FIXADDR_TOP;
+	     address += PMD_SIZE) {
+		unsigned long flags;
+		struct page *page;
 
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				if (!vmalloc_sync_one(page_address(page),
-						      address))
-					break;
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			if (!page)
-				set_bit(pgd_index(address), insync);
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			if (!vmalloc_sync_one(page_address(page),
+					      address))
+				break;
 		}
-		if (address == start && test_bit(pgd_index(address), insync))
-			start = address + PGDIR_SIZE;
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #else /* CONFIG_X86_64 */
-	/*
-	 * Note that races in the updates of insync and start aren't
-	 * problematic: insync can only get set bits added, and updates to
-	 * start are only improving performance (without affecting correctness
-	 * if undone).
-	 */
-	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
-	static unsigned long start = VMALLOC_START & PGDIR_MASK;
-	unsigned long address;
+	for (address = VMALLOC_START & PGDIR_MASK; address <= VMALLOC_END;
+	     address += PGDIR_SIZE) {
+		const pgd_t *pgd_ref = pgd_offset_k(address);
+		unsigned long flags;
+		struct page *page;
 
-	for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
-		if (!test_bit(pgd_index(address), insync)) {
-			const pgd_t *pgd_ref = pgd_offset_k(address);
-			unsigned long flags;
-			struct page *page;
-
-			if (pgd_none(*pgd_ref))
-				continue;
-			spin_lock_irqsave(&pgd_lock, flags);
-			list_for_each_entry(page, &pgd_list, lru) {
-				pgd_t *pgd;
-				pgd = (pgd_t *)page_address(page) + pgd_index(address);
-				if (pgd_none(*pgd))
-					set_pgd(pgd, *pgd_ref);
-				else
-					BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
-			}
-			spin_unlock_irqrestore(&pgd_lock, flags);
-			set_bit(pgd_index(address), insync);
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock_irqsave(&pgd_lock, flags);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd_t *pgd;
+			pgd = (pgd_t *)page_address(page) + pgd_index(address);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+			else
+				BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
 		}
-		if (address == start)
-			start = address + PGDIR_SIZE;
+		spin_unlock_irqrestore(&pgd_lock, flags);
 	}
 #endif
 }