Merge branches 'x86/apic', 'x86/cleanups', 'x86/cpufeature', 'x86/crashdump', 'x86/debug', 'x86/defconfig', 'x86/detect-hyper', 'x86/doc', 'x86/dumpstack', 'x86/early-printk', 'x86/fpu', 'x86/idle', 'x86/io', 'x86/memory-corruption-check', 'x86/microcode', 'x86/mm', 'x86/mtrr', 'x86/nmi-watchdog', 'x86/pat2', 'x86/pci-ioapic-boot-irq-quirks', 'x86/ptrace', 'x86/quirks', 'x86/reboot', 'x86/setup-memory', 'x86/signal', 'x86/sparse-fixes', 'x86/time', 'x86/uv' and 'x86/xen' into x86/core

parent
3d44cc3e01
1ccedb7cdb
34945ede31
d437797406
c415b3dce3
beeb4195cb
f269b07e86
4e42ebd57b
e1286f2c68
878719e831
fd28a5b58d
adf77bac05
8f2466f45f
93093d099e
bb5574608a
f34a10bd9f
b6fd6f2673
30604bb410
5b9a0e14eb
67bac792cd
7a9787e1eb
f4166c54bf
69b88afa8d
8daa19051e
3e1e9002aa
8403295e0f
4db646b1af
205516c12d
c8182f0016
ecbf29cdb3
commit fa623d1b02
@@ -413,6 +413,7 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 				 unsigned long error_code)
 {
 	unsigned long flags = oops_begin();
+	int sig = SIGKILL;
 	struct task_struct *tsk;
 
 	printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
@@ -423,8 +424,8 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 	tsk->thread.trap_no = 14;
 	tsk->thread.error_code = error_code;
 	if (__die("Bad pagetable", regs, error_code))
-		regs = NULL;
-	oops_end(flags, regs, SIGKILL);
+		sig = 0;
+	oops_end(flags, regs, sig);
 }
 #endif
 
@@ -590,6 +591,7 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	int fault;
 #ifdef CONFIG_X86_64
 	unsigned long flags;
+	int sig;
 #endif
 
 	tsk = current;
@@ -849,11 +851,12 @@ no_context:
 	bust_spinlocks(0);
 	do_exit(SIGKILL);
 #else
+	sig = SIGKILL;
 	if (__die("Oops", regs, error_code))
-		regs = NULL;
+		sig = 0;
 	/* Executive summary in case the body of the oops scrolled away */
 	printk(KERN_EMERG "CR2: %016lx\n", address);
-	oops_end(flags, regs, SIGKILL);
+	oops_end(flags, regs, sig);
 #endif
 
 /*
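The pgtable_bad()/do_page_fault() hunks above replace the old "set regs to NULL so oops_end() delivers no signal" trick with an explicit signal variable: when __die() returns nonzero (the oops was already handled), sig is downgraded from SIGKILL to 0 before oops_end() runs. Below is a minimal userspace sketch of that control flow; stub_die() and stub_oops_end() are invented stand-ins for the kernel's __die()/oops_end(), not the real API.

#include <signal.h>
#include <stdio.h>

/* Stand-ins for __die()/oops_end(); stubs for illustration only. */
static int stub_die(const char *msg)
{
	printf("Oops: %s\n", msg);
	return 1;	/* nonzero: the oops was already handled elsewhere */
}

static void stub_oops_end(int sig)
{
	if (sig)
		printf("would deliver signal %d to the current task\n", sig);
	else
		printf("oops handled, no signal delivered\n");
}

int main(void)
{
	int sig = SIGKILL;	/* default, as in the reworked pgtable_bad() */

	if (stub_die("Bad pagetable"))
		sig = 0;	/* replaces the old regs = NULL trick */
	stub_oops_end(sig);
	return 0;
}

The same sig logic appears in both the pgtable_bad() and the 64-bit no_context paths above.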
@@ -67,7 +67,7 @@ static unsigned long __meminitdata table_top;
 
 static int __initdata after_init_bootmem;
 
-static __init void *alloc_low_page(unsigned long *phys)
+static __init void *alloc_low_page(void)
 {
 	unsigned long pfn = table_end++;
 	void *adr;
@@ -77,7 +77,6 @@ static __init void *alloc_low_page(unsigned long *phys)
 
 	adr = __va(pfn * PAGE_SIZE);
 	memset(adr, 0, PAGE_SIZE);
-	*phys = pfn * PAGE_SIZE;
 	return adr;
 }
 
@@ -92,16 +91,17 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 	pmd_t *pmd_table;
 
 #ifdef CONFIG_X86_PAE
-	unsigned long phys;
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 		if (after_init_bootmem)
 			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 		else
-			pmd_table = (pmd_t *)alloc_low_page(&phys);
+			pmd_table = (pmd_t *)alloc_low_page();
 		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
 		BUG_ON(pmd_table != pmd_offset(pud, 0));
+
+		return pmd_table;
 	}
 #endif
 	pud = pud_offset(pgd, 0);
@@ -126,10 +126,8 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 			if (!page_table)
 				page_table =
 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-		} else {
-			unsigned long phys;
-			page_table = (pte_t *)alloc_low_page(&phys);
-		}
+		} else
+			page_table = (pte_t *)alloc_low_page();
 
 		paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 		set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
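The early-allocator hunks above drop the phys out parameter from alloc_low_page(): none of the callers shown here read it, since they already derive the physical address from the returned pointer with __pa() (see the paravirt_alloc_pmd()/set_pgd() lines). A hypothetical stand-alone model of that simplification, with toy_pa() and a static page pool standing in for the kernel's __pa() and early page allocator:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

static char pool[8 * PAGE_SIZE];	/* toy backing store for "low pages" */
static unsigned long table_end;		/* next free pfn within the pool */

/* models __pa(): physical address == offset into the pool in this toy setup */
#define toy_pa(v) ((unsigned long)((uintptr_t)(v) - (uintptr_t)pool))

/* new-style allocator: no out parameter, the caller derives phys when needed */
static void *alloc_low_page(void)
{
	unsigned long pfn = table_end++;

	return pool + pfn * PAGE_SIZE;
}

int main(void)
{
	void *pmd_table = alloc_low_page();

	printf("virt %p, phys %#lx\n", pmd_table, toy_pa(pmd_table));
	return 0;
}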
@@ -969,8 +967,6 @@ void __init mem_init(void)
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 
-	start_periodic_check_for_corruption();
-
 #ifdef CONFIG_FLATMEM
 	BUG_ON(!mem_map);
 #endif
@@ -1040,11 +1036,25 @@ void __init mem_init(void)
 		(unsigned long)&_text, (unsigned long)&_etext,
 		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
+	/*
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
+	 */
+#define __FIXADDR_TOP (-PAGE_SIZE)
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
+	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
+#endif
+#define high_memory (-128UL << 20)
+	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
+#undef high_memory
+#undef __FIXADDR_TOP
+
 #ifdef CONFIG_HIGHMEM
 	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
 	BUG_ON(VMALLOC_END > PKMAP_BASE);
 #endif
-	BUG_ON(VMALLOC_START > VMALLOC_END);
+	BUG_ON(VMALLOC_START >= VMALLOC_END);
 	BUG_ON((unsigned long)high_memory > VMALLOC_START);
 
 	if (boot_cpu_data.wp_works_ok < 0)
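The block added to mem_init() above duplicates the runtime BUG_ON() layout checks as compile-time BUILD_BUG_ON() checks, temporarily #defining __FIXADDR_TOP and high_memory to constants so the comparisons can be evaluated by the compiler. BUILD_BUG_ON() works (roughly) by taking sizeof a negative-sized array when its condition is true, so an inconsistent address-space layout breaks the build instead of only tripping BUG_ON() at boot. A simplified stand-alone illustration; MY_BUILD_BUG_ON and the two address constants are invented for the example:

#include <stdio.h>

/* simplified stand-in for the kernel's BUILD_BUG_ON() */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

#define VMALLOC_START 0xc0000000UL
#define VMALLOC_END   0xf7fe0000UL

int main(void)
{
	/* compiles only while the layout constants stay consistent */
	MY_BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);

	printf("layout checks passed at compile time\n");
	return 0;
}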
@@ -902,8 +902,6 @@ void __init mem_init(void)
 	long codesize, reservedpages, datasize, initsize;
 	unsigned long absent_pages;
 
-	start_periodic_check_for_corruption();
-
 	pci_iommu_alloc();
 
 	/* clear_bss() already clear the empty_zero_page */
@@ -223,7 +223,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	 * Check if the request spans more than any BAR in the iomem resource
 	 * tree.
 	 */
-	WARN_ON(iomem_map_sanity_check(phys_addr, size));
+	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
+		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
 
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
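The hunk above downgrades the BAR-span sanity check from WARN_ON() to WARN_ONCE() with an explanatory message, so a driver that legitimately maps a range spanning multiple BARs produces one informational line rather than a backtrace on every call. A hypothetical userspace model of the "warn only once" idea (simplified to a single shared flag, unlike the kernel macro, which keeps per-callsite state):

#include <stdbool.h>
#include <stdio.h>

/* stand-in for WARN_ONCE(): report the condition, but log it only once */
static bool warn_once(bool condition, const char *msg)
{
	static bool warned;

	if (condition && !warned) {
		warned = true;
		fprintf(stderr, "%s", msg);
	}
	return condition;
}

/* stand-in for iomem_map_sanity_check(): true means the range spans BARs */
static bool spans_multiple_bars(unsigned long size)
{
	return size > 4096;
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		warn_once(spans_multiple_bars(8192),
			  "Info: mapping multiple BARs. Your kernel is fine.\n");
	return 0;
}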
@@ -596,6 +596,242 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 	free_memtype(addr, addr + size);
 }
 
+/*
+ * Internal interface to reserve a range of physical memory with prot.
+ * Reserved non RAM regions only and after successful reserve_memtype,
+ * this func also keeps identity mapping (if any) in sync with this new prot.
+ */
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+{
+	int is_ram = 0;
+	int id_sz, ret;
+	unsigned long flags;
+	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+
+	is_ram = pagerange_is_ram(paddr, paddr + size);
+
+	if (is_ram != 0) {
+		/*
+		 * For mapping RAM pages, drivers need to call
+		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
+		 * setting up the PTE.
+		 */
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+
+	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+	if (ret)
+		return ret;
+
+	if (flags != want_flags) {
+		free_memtype(paddr, paddr + size);
+		printk(KERN_ERR
+		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
+			current->comm, current->pid,
+			cattr_name(want_flags),
+			(unsigned long long)paddr,
+			(unsigned long long)(paddr + size),
+			cattr_name(flags));
+		return -EINVAL;
+	}
+
+	/* Need to keep identity mapping in sync */
+	if (paddr >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < paddr + size) ?
+				__pa(high_memory) - paddr :
+				size;
+
+	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+		free_memtype(paddr, paddr + size);
+		printk(KERN_ERR
+			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
+			"for %Lx-%Lx\n",
+			current->comm, current->pid,
+			cattr_name(flags),
+			(unsigned long long)paddr,
+			(unsigned long long)(paddr + size));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Internal interface to free a range of physical memory.
+ * Frees non RAM regions only.
+ */
+static void free_pfn_range(u64 paddr, unsigned long size)
+{
+	int is_ram;
+
+	is_ram = pagerange_is_ram(paddr, paddr + size);
+	if (is_ram == 0)
+		free_memtype(paddr, paddr + size);
+}
+
+/*
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ *
+ * If the vma has a linear pfn mapping for the entire range, we get the prot
+ * from pte and reserve the entire vma range with single reserve_pfn_range call.
+ * Otherwise, we reserve the entire vma range, by going through the PTEs page
+ * by page to get physical address and protection.
+ */
+int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+	int retval = 0;
+	unsigned long i, j;
+	u64 paddr;
+	unsigned long prot;
+	unsigned long vma_start = vma->vm_start;
+	unsigned long vma_end = vma->vm_end;
+	unsigned long vma_size = vma_end - vma_start;
+
+	if (!pat_enabled)
+		return 0;
+
+	if (is_linear_pfn_mapping(vma)) {
+		/*
+		 * reserve the whole chunk covered by vma. We need the
+		 * starting address and protection from pte.
+		 */
+		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+			WARN_ON_ONCE(1);
+			return -EINVAL;
+		}
+		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+	}
+
+	/* reserve entire vma page by page, using pfn and prot from pte */
+	for (i = 0; i < vma_size; i += PAGE_SIZE) {
+		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+			continue;
+
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		if (retval)
+			goto cleanup_ret;
+	}
+	return 0;
+
+cleanup_ret:
+	/* Reserve error: Cleanup partial reservation and return error */
+	for (j = 0; j < i; j += PAGE_SIZE) {
+		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
+			continue;
+
+		free_pfn_range(paddr, PAGE_SIZE);
+	}
+
+	return retval;
+}
+
+/*
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ *
+ * prot is passed in as a parameter for the new mapping. If the vma has a
+ * linear pfn mapping for the entire range reserve the entire vma range with
+ * single reserve_pfn_range call.
+ * Otherwise, we look at the pfn and size and reserve only the specified range
+ * page by page.
+ *
+ * Note that this function can be called with caller trying to map only a
+ * subrange/page inside the vma.
+ */
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+			unsigned long pfn, unsigned long size)
+{
+	int retval = 0;
+	unsigned long i, j;
+	u64 base_paddr;
+	u64 paddr;
+	unsigned long vma_start = vma->vm_start;
+	unsigned long vma_end = vma->vm_end;
+	unsigned long vma_size = vma_end - vma_start;
+
+	if (!pat_enabled)
+		return 0;
+
+	if (is_linear_pfn_mapping(vma)) {
+		/* reserve the whole chunk starting from vm_pgoff */
+		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+		return reserve_pfn_range(paddr, vma_size, prot);
+	}
+
+	/* reserve page by page using pfn and size */
+	base_paddr = (u64)pfn << PAGE_SHIFT;
+	for (i = 0; i < size; i += PAGE_SIZE) {
+		paddr = base_paddr + i;
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		if (retval)
+			goto cleanup_ret;
+	}
+	return 0;
+
+cleanup_ret:
+	/* Reserve error: Cleanup partial reservation and return error */
+	for (j = 0; j < i; j += PAGE_SIZE) {
+		paddr = base_paddr + j;
+		free_pfn_range(paddr, PAGE_SIZE);
+	}
+
+	return retval;
+}
+
+/*
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+			unsigned long size)
+{
+	unsigned long i;
+	u64 paddr;
+	unsigned long prot;
+	unsigned long vma_start = vma->vm_start;
+	unsigned long vma_end = vma->vm_end;
+	unsigned long vma_size = vma_end - vma_start;
+
+	if (!pat_enabled)
+		return;
+
+	if (is_linear_pfn_mapping(vma)) {
+		/* free the whole chunk starting from vm_pgoff */
+		paddr = (u64)vma->vm_pgoff << PAGE_SHIFT;
+		free_pfn_range(paddr, vma_size);
+		return;
+	}
+
+	if (size != 0 && size != vma_size) {
+		/* free page by page, using pfn and size */
+		paddr = (u64)pfn << PAGE_SHIFT;
+		for (i = 0; i < size; i += PAGE_SIZE) {
+			paddr = paddr + i;
+			free_pfn_range(paddr, PAGE_SIZE);
+		}
+	} else {
+		/* free entire vma, page by page, using the pfn from pte */
+		for (i = 0; i < vma_size; i += PAGE_SIZE) {
+			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+				continue;
+
+			free_pfn_range(paddr, PAGE_SIZE);
+		}
+	}
+}
+
+pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+	if (pat_enabled)
+		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+	else
+		return pgprot_noncached(prot);
+}
+
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 /* get Nth element of the linked list */
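One pattern worth calling out in the new track_pfn_vma_new()/track_pfn_vma_copy() code above is the cleanup_ret label: reservations are made page by page, and if one fails, only the pages that were already reserved are walked back before the error is returned. A hypothetical stand-alone model of that partial-cleanup loop; reserve_range() and free_range() are invented stubs standing in for reserve_pfn_range() and free_pfn_range():

#include <stdio.h>

#define PAGE_SIZE 4096UL

static int reserve_range(unsigned long long paddr, unsigned long size)
{
	/* stub for reserve_pfn_range(); fail past an arbitrary limit */
	if (paddr >= 6 * PAGE_SIZE) {
		fprintf(stderr, "reserve failed at %#llx\n", paddr);
		return -1;
	}
	printf("reserved %#llx\n", paddr);
	return 0;
}

static void free_range(unsigned long long paddr, unsigned long size)
{
	printf("freed    %#llx\n", paddr);	/* stub for free_pfn_range() */
}

static int track_range(unsigned long long base, unsigned long size)
{
	unsigned long i, j;
	int ret = 0;

	for (i = 0; i < size; i += PAGE_SIZE) {
		ret = reserve_range(base + i, PAGE_SIZE);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	/* undo only the part that succeeded, mirroring the cleanup_ret label */
	for (j = 0; j < i; j += PAGE_SIZE)
		free_range(base + j, PAGE_SIZE);
	return ret;
}

int main(void)
{
	return track_range(0, 8 * PAGE_SIZE) ? 1 : 0;
}

The same walk-back shape appears twice in the hunk: once driven by follow_phys() lookups (the copy path) and once by a base physical address (the new-mapping path).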