Merge branch 'linus' into x86/memory-corruption-check
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
@@ -1,7 +1,7 @@
 obj-y	:=	init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 		pat.o pgtable.o gup.o

-obj-$(CONFIG_X86_32)		+= pgtable_32.o
+obj-$(CONFIG_X86_32)		+= pgtable_32.o iomap_32.o

 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_X86_PTDUMP)	+= dump_pagetables.o
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
@@ -233,7 +233,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	len = (unsigned long) nr_pages << PAGE_SHIFT;
 	end = start + len;
 	if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
-					start, len)))
+					(void __user *)start, len)))
 		goto slow_irqon;

 	/*
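The only change here is the cast to void __user *: access_ok() takes a user pointer, while get_user_pages_fast() receives the address as an unsigned long, and sparse's address-space checker wants that stated explicitly. A minimal user-space model of the annotation follows; the macro body is the one sparse itself recognizes, but check_range() and range_ok() are illustrative names, not kernel API:

	#ifdef __CHECKER__
	# define __user __attribute__((noderef, address_space(1)))
	#else
	# define __user		/* plain compilers see nothing */
	#endif

	/* stand-in for access_ok(): range check only, no dereference */
	static int check_range(const void __user *ptr, unsigned long len)
	{
		return (unsigned long)ptr + len >= (unsigned long)ptr;
	}

	int range_ok(unsigned long start, unsigned long len)
	{
		/* the cast mirrors the fix above: it tells the checker
		 * this integer really names a user-space address */
		return check_range((const void __user *)start, len);
	}

	int main(void)
	{
		return !range_ok(0x1000, 64);
	}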
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
@@ -334,7 +334,6 @@ int devmem_is_allowed(unsigned long pagenr)
 	return 0;
 }

-#ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;
 pgprot_t kmap_prot;

@@ -357,6 +356,7 @@ static void __init kmap_init(void)
 	kmap_prot = PAGE_KERNEL;
 }

+#ifdef CONFIG_HIGHMEM
 static void __init permanent_kmaps_init(pgd_t *pgd_base)
 {
 	unsigned long vaddr;
@@ -436,7 +436,6 @@ static void __init set_highmem_pages_init(void)
 #endif /* !CONFIG_NUMA */

 #else
-# define kmap_init()			do { } while (0)
 # define permanent_kmaps_init(pgd_base)	do { } while (0)
 # define set_highmem_pages_init()	do { } while (0)
 #endif /* CONFIG_HIGHMEM */
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
@@ -350,8 +350,10 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
 		 * pagetable pages as RO. So assume someone who pre-setup
 		 * these mappings are more intelligent.
 		 */
-		if (pte_val(*pte))
+		if (pte_val(*pte)) {
+			pages++;
 			continue;
+		}

 		if (0)
 			printk("   pte=%p addr=%lx pte=%016lx\n",
@@ -418,8 +420,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_2M))
+			if (page_size_mask & (1 << PG_LEVEL_2M)) {
+				pages++;
 				continue;
+			}
 			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
 		}

@@ -499,8 +503,10 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 			 * not differ with respect to page frame and
 			 * attributes.
 			 */
-			if (page_size_mask & (1 << PG_LEVEL_1G))
+			if (page_size_mask & (1 << PG_LEVEL_1G)) {
+				pages++;
 				continue;
+			}
 			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
 		}

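All three hunks above fix the same accounting bug: entries that are already populated were skipped via continue without being counted, so the pages total that feeds the direct-mapping statistics under-reported. A stand-alone model of the fixed pattern, with toy values rather than real page tables:

	#include <stdio.h>

	#define NENTRIES 8

	int main(void)
	{
		unsigned long table[NENTRIES] = { 0, 0x1000, 0, 0x2000, 0, 0, 0x3000, 0 };
		unsigned long pages = 0;

		for (int i = 0; i < NENTRIES; i++) {
			if (table[i]) {		/* already mapped: like pte_val(*pte) != 0 */
				pages++;	/* the fix: count it all the same */
				continue;
			}
			table[i] = (unsigned long)(i + 1) << 12;	/* map it */
			pages++;
		}
		printf("%lu pages mapped\n", pages);	/* prints 8, not 5 */
		return 0;
	}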
@@ -665,12 +671,13 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	unsigned long last_map_addr = 0;
 	unsigned long page_size_mask = 0;
 	unsigned long start_pfn, end_pfn;
+	unsigned long pos;

 	struct map_range mr[NR_RANGE_MR];
 	int nr_range, i;
 	int use_pse, use_gbpages;

-	printk(KERN_INFO "init_memory_mapping\n");
+	printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end);

 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -704,35 +711,50 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,

 	/* head if not big page alignment ?*/
 	start_pfn = start >> PAGE_SHIFT;
-	end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
+	pos = start_pfn << PAGE_SHIFT;
+	end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
 			<< (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* big page (2M) range*/
-	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
 			 << (PMD_SHIFT - PAGE_SHIFT);
-	end_pfn = ((start + (PUD_SIZE - 1))>>PUD_SHIFT)
+	end_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
 			 << (PUD_SHIFT - PAGE_SHIFT);
-	if (end_pfn > ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT)))
-		end_pfn = ((end>>PUD_SHIFT)<<(PUD_SHIFT - PAGE_SHIFT));
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	if (end_pfn > ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT)))
+		end_pfn = ((end>>PMD_SHIFT)<<(PMD_SHIFT - PAGE_SHIFT));
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* big page (1G) range */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+	start_pfn = ((pos + (PUD_SIZE - 1))>>PUD_SHIFT)
+			 << (PUD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PUD_SHIFT) << (PUD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask &
 				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* tail is not big page (1G) alignment */
-	start_pfn = end_pfn;
-	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
-	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
-			page_size_mask & (1<<PG_LEVEL_2M));
+	start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn) {
+		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
+				page_size_mask & (1<<PG_LEVEL_2M));
+		pos = end_pfn << PAGE_SHIFT;
+	}

 	/* tail is not big page (2M) alignment */
-	start_pfn = end_pfn;
+	start_pfn = pos>>PAGE_SHIFT;
 	end_pfn = end>>PAGE_SHIFT;
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
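The rework replaces the old start_pfn = end_pfn chaining with an explicit pos cursor and records a sub-range only when it is non-empty, clamping each candidate range against end so the mapping cannot run past the requested region. A compressed user-space model of the head/2M/tail split (the 1G/PUD stage is omitted; the constants and save_range() are illustrative, not the kernel's save_mr()):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PMD_SHIFT	21
	#define PMD_SIZE	(1UL << PMD_SHIFT)

	static void save_range(unsigned long spfn, unsigned long epfn, int big)
	{
		printf("%#012lx-%#012lx %s\n", spfn << PAGE_SHIFT,
		       epfn << PAGE_SHIFT, big ? "2M" : "4K");
	}

	int main(void)
	{
		unsigned long start = 0x0, end = 0x4340000;	/* example range */
		unsigned long pos = start, start_pfn, end_pfn;

		/* head, if not 2M aligned */
		start_pfn = pos >> PAGE_SHIFT;
		end_pfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
		if (end_pfn > (end >> PAGE_SHIFT))
			end_pfn = end >> PAGE_SHIFT;	/* never overmap past end */
		if (start_pfn < end_pfn) {
			save_range(start_pfn, end_pfn, 0);
			pos = end_pfn << PAGE_SHIFT;
		}

		/* 2M-aligned middle */
		start_pfn = ((pos + PMD_SIZE - 1) >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
		end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
		if (start_pfn < end_pfn) {
			save_range(start_pfn, end_pfn, 1);
			pos = end_pfn << PAGE_SHIFT;
		}

		/* 4K tail */
		start_pfn = pos >> PAGE_SHIFT;
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			save_range(start_pfn, end_pfn, 0);
		return 0;
	}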
@@ -831,12 +853,12 @@ int arch_add_memory(int nid, u64 start, u64 size)
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;

-	last_mapped_pfn = init_memory_mapping(start, start + size-1);
+	last_mapped_pfn = init_memory_mapping(start, start + size);
 	if (last_mapped_pfn > max_pfn_mapped)
 		max_pfn_mapped = last_mapped_pfn;

 	ret = __add_pages(zone, start_pfn, nr_pages);
-	WARN_ON(1);
+	WARN_ON_ONCE(ret);

 	return ret;
 }
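The first hunk fixes an off-by-one: init_memory_mapping() treats its second argument as an exclusive end, so passing start + size - 1 dropped the last page of a hot-added region. A toy model of the difference (map_range() is a stand-in, not the kernel function):

	#include <stdio.h>

	/* toy model: maps [start, end) and returns the first unmapped pfn,
	 * i.e. an exclusive end, like init_memory_mapping() */
	static unsigned long map_range(unsigned long start, unsigned long end)
	{
		return end >> 12;
	}

	int main(void)
	{
		unsigned long start = 0x100000, size = 0x10000;

		/* old call under-maps: start + size - 1 excludes the last page */
		printf("old: up to pfn %#lx\n", map_range(start, start + size - 1));
		/* new call covers the full hot-added region */
		printf("new: up to pfn %#lx\n", map_range(start, start + size));
		return 0;
	}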
@@ -878,6 +900,7 @@ static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel,
 void __init mem_init(void)
 {
 	long codesize, reservedpages, datasize, initsize;
+	unsigned long absent_pages;

 	pci_iommu_alloc();

@@ -891,8 +914,9 @@ void __init mem_init(void)
 #else
 	totalram_pages = free_all_bootmem();
 #endif
-	reservedpages = max_pfn - totalram_pages -
-					absent_pages_in_range(0, max_pfn);
+
+	absent_pages = absent_pages_in_range(0, max_pfn);
+	reservedpages = max_pfn - totalram_pages - absent_pages;
 	after_bootmem = 1;

 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
@@ -909,10 +933,11 @@ void __init mem_init(void)
 			 VSYSCALL_END - VSYSCALL_START);

 	printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
-			"%ldk reserved, %ldk data, %ldk init)\n",
+			"%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
 		max_pfn << (PAGE_SHIFT-10),
 		codesize >> 10,
+		absent_pages << (PAGE_SHIFT-10),
 		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
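With the new absent_pages variable, memory holes reported by the e820 map are split out of the reserved count and printed separately instead of inflating it. The arithmetic, with made-up numbers (PAGE_SHIFT - 10 = 2 converts 4K page counts to kB):

	#include <stdio.h>

	int main(void)
	{
		unsigned long max_pfn = 1UL << 20;	/* 4 GB of address space */
		unsigned long totalram_pages = 900000;	/* pages given to the allocator */
		unsigned long absent_pages = 80000;	/* e820 holes */
		unsigned long reservedpages = max_pfn - totalram_pages - absent_pages;

		printf("%luk absent, %luk reserved\n",
		       absent_pages << 2, reservedpages << 2);
		return 0;
	}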
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
new file (59 lines)
@@ -0,0 +1,59 @@
+/*
+ * Copyright © 2008 Ingo Molnar
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
+ */
+
+#include <asm/iomap.h>
+#include <linux/module.h>
+
+/* Map 'pfn' using fixed map 'type' and protections 'prot'
+ */
+void *
+iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
+{
+	enum fixed_addresses idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR*smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
+	arch_flush_lazy_mmu_mode();
+
+	return (void*) vaddr;
+}
+EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
+
+void
+iounmap_atomic(void *kvaddr, enum km_type type)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
+
+	/*
+	 * Force other mappings to Oops if they'll try to access this pte
+	 * without first remap it.  Keeping stale mappings around is a bad idea
+	 * also, in case the page changes cacheability attributes or becomes
+	 * a protected page in a hypervisor.
+	 */
+	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
+		kpte_clear_flush(kmap_pte-idx, vaddr);
+
+	arch_flush_lazy_mmu_mode();
+	pagefault_enable();
+}
+EXPORT_SYMBOL_GPL(iounmap_atomic);
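This new file also explains the Makefile and init_32.c hunks above: iomap_32.o is built for all 32-bit configs, and it uses kmap_pte/kmap_init(), which therefore had to move out of CONFIG_HIGHMEM. A hypothetical caller of the two new exports, as a sketch only (the pfn, the KM_USER0 slot, the PAGE_KERNEL_NOCACHE protection, and the helper name are all our choices, not taken from this commit):

	#include <linux/types.h>
	#include <asm/iomap.h>

	/* write one 32-bit value into a page of MMIO space, atomically */
	static void example_write32(unsigned long pfn, unsigned int byte_off, u32 val)
	{
		u32 *vaddr;

		/* pagefaults are disabled from here until the unmap */
		vaddr = iomap_atomic_prot_pfn(pfn, KM_USER0, PAGE_KERNEL_NOCACHE);
		vaddr[byte_off / sizeof(*vaddr)] = val;
		iounmap_atomic(vaddr, KM_USER0);
	}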
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
@@ -387,7 +387,7 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 					unsigned long size)
 {
 	unsigned long flags;
-	void *ret;
+	void __iomem *ret;
 	int err;

 	/*
@@ -399,11 +399,11 @@ static void __iomem *ioremap_default(resource_size_t phys_addr,
 	if (err < 0)
 		return NULL;

-	ret = (void *) __ioremap_caller(phys_addr, size, flags,
-				__builtin_return_address(0));
+	ret = __ioremap_caller(phys_addr, size, flags,
+			       __builtin_return_address(0));

 	free_memtype(phys_addr, phys_addr + size);
-	return (void __iomem *)ret;
+	return ret;
 }

 void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
@@ -622,7 +622,7 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 	__early_set_fixmap(idx, 0, __pgprot(0));
 }

-static void *prev_map[FIX_BTMAPS_SLOTS] __initdata;
+static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 static int __init check_early_ioremap_leak(void)
 {
@@ -645,7 +645,7 @@ static int __init check_early_ioremap_leak(void)
 }
 late_initcall(check_early_ioremap_leak);

-static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
+static void __init __iomem *__early_ioremap(unsigned long phys_addr, unsigned long size, pgprot_t prot)
 {
 	unsigned long offset, last_addr;
 	unsigned int nrpages;
@@ -713,23 +713,23 @@ static void __init *__early_ioremap(unsigned long phys_addr, unsigned long size,
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

-	prev_map[slot] = (void *) (offset + fix_to_virt(idx0));
+	prev_map[slot] = (void __iomem *)(offset + fix_to_virt(idx0));
 	return prev_map[slot];
 }

 /* Remap an IO device */
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_ioremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
 }

 /* Remap memory */
-void __init *early_memremap(unsigned long phys_addr, unsigned long size)
+void __init __iomem *early_memremap(unsigned long phys_addr, unsigned long size)
 {
 	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
 }

-void __init early_iounmap(void *addr, unsigned long size)
+void __init early_iounmap(void __iomem *addr, unsigned long size)
 {
 	unsigned long virt_addr;
 	unsigned long offset;
@@ -779,7 +779,7 @@ void __init early_iounmap(void *addr, unsigned long size)
 		--idx;
 		--nrpages;
 	}
-	prev_map[slot] = 0;
+	prev_map[slot] = NULL;
 }

 void __this_fixmap_does_not_exist(void)
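These hunks are all sparse-annotation cleanups: the early ioremap family now returns and accepts void __iomem * rather than bare void *, and the NULL assignment replaces a bare 0. A self-contained model of the __iomem discipline being enforced (the macro body is sparse's own; map_device() and read_reg() are illustrative stand-ins for ioremap()/readl()):

	#include <stdint.h>

	#ifdef __CHECKER__
	# define __iomem __attribute__((noderef, address_space(2)))
	#else
	# define __iomem
	#endif

	/* stand-in for ioremap(): hands back an opaque __iomem cookie */
	static void __iomem *map_device(uintptr_t phys)
	{
		return (void __iomem *)phys;
	}

	/* stand-in for readl(): the one place the annotation is cast away */
	static uint32_t read_reg(const volatile void __iomem *addr)
	{
		return *(const volatile uint32_t *)(uintptr_t)addr;
	}

	int main(void)
	{
		static uint32_t fake_reg = 0xdeadbeef;	/* pretend MMIO register */
		void __iomem *base = map_device((uintptr_t)&fake_reg);

		/* callers pass the cookie around without casts; sparse flags
		 * any direct dereference of it */
		return read_reg(base) == 0xdeadbeef ? 0 : 1;
	}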
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
@@ -222,6 +222,41 @@ static void __init remap_numa_kva(void)
 	}
 }

+#ifdef CONFIG_HIBERNATION
+/**
+ * resume_map_numa_kva - add KVA mapping to the temporary page tables created
+ *                       during resume from hibernation
+ * @pgd_base - temporary resume page directory
+ */
+void resume_map_numa_kva(pgd_t *pgd_base)
+{
+	int node;
+
+	for_each_online_node(node) {
+		unsigned long start_va, start_pfn, size, pfn;
+
+		start_va = (unsigned long)node_remap_start_vaddr[node];
+		start_pfn = node_remap_start_pfn[node];
+		size = node_remap_size[node];
+
+		printk(KERN_DEBUG "%s: node %d\n", __FUNCTION__, node);
+
+		for (pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
+			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
+			pgd_t *pgd = pgd_base + pgd_index(vaddr);
+			pud_t *pud = pud_offset(pgd, vaddr);
+			pmd_t *pmd = pmd_offset(pud, vaddr);
+
+			set_pmd(pmd, pfn_pmd(start_pfn + pfn,
+						PAGE_KERNEL_LARGE_EXEC));
+
+			printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n",
+				__FUNCTION__, vaddr, start_pfn + pfn);
+		}
+	}
+}
+#endif
+
 static unsigned long calculate_numa_remap_pages(void)
 {
 	int nid;
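resume_map_numa_kva() rebuilds the NUMA KVA remap in the temporary resume page tables using large pmds: each step of PTRS_PER_PTE pfns advances the virtual address by exactly one pmd's worth of space (4 MB on non-PAE 32-bit x86). The index arithmetic, runnable stand-alone (the start address and chunk count are example values):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PTRS_PER_PTE	1024
	#define PGDIR_SHIFT	22	/* non-PAE 32-bit x86 */

	int main(void)
	{
		unsigned long start_va = 0xff400000;	/* example remap vaddr */
		unsigned long size = 3 * PTRS_PER_PTE;	/* three 4MB chunks of pfns */

		for (unsigned long pfn = 0; pfn < size; pfn += PTRS_PER_PTE) {
			unsigned long vaddr = start_va + (pfn << PAGE_SHIFT);
			/* each iteration lands on the next pmd boundary */
			printf("vaddr %08lx -> pgd index %lu\n",
			       vaddr, vaddr >> PGDIR_SHIFT);
		}
		return 0;
	}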
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
@@ -67,18 +67,18 @@ static void split_page_count(int level)

 void arch_report_meminfo(struct seq_file *m)
 {
-	seq_printf(m, "DirectMap4k:  %8lu kB\n",
+	seq_printf(m, "DirectMap4k:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_4K] << 2);
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
-	seq_printf(m, "DirectMap2M:  %8lu kB\n",
+	seq_printf(m, "DirectMap2M:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 11);
 #else
-	seq_printf(m, "DirectMap4M:  %8lu kB\n",
+	seq_printf(m, "DirectMap4M:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_2M] << 12);
 #endif
 #ifdef CONFIG_X86_64
 	if (direct_gbpages)
-		seq_printf(m, "DirectMap1G:  %8lu kB\n",
+		seq_printf(m, "DirectMap1G:    %8lu kB\n",
 			direct_pages_count[PG_LEVEL_1G] << 20);
 #endif
 }
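The visible change in this hunk is format-string alignment only (each -/+ pair differs just in whitespace after the label). The substance worth noting is the shift arithmetic, which converts page counts to kB at each mapping size:

	#include <stdio.h>

	int main(void)
	{
		unsigned long npages = 3;	/* example count at each level */

		printf("4k pages: %8lu kB\n", npages << 2);	/* 4K  = 4 kB */
		printf("2M pages: %8lu kB\n", npages << 11);	/* 2M  = 2048 kB */
		printf("4M pages: %8lu kB\n", npages << 12);	/* 4M  = 4096 kB */
		printf("1G pages: %8lu kB\n", npages << 20);	/* 1G  = 1048576 kB */
		return 0;
	}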
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
@@ -481,12 +481,16 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+/* This check is needed to avoid cache aliasing when PAT is enabled */
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
 	u64 to = from + size;
 	u64 cursor = from;

+	if (!pat_enabled)
+		return 1;
+
 	while (cursor < to) {
 		if (!devmem_is_allowed(pfn)) {
 			printk(KERN_INFO
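The new early return skips the per-page devmem scan entirely when PAT is disabled, since without PAT there is no cache-aliasing hazard to guard against. A user-space model of the control flow (the policy in devmem_is_allowed() and all values are invented):

	#include <stdint.h>

	static int pat_enabled = 0;			/* assume PAT off */

	static int devmem_is_allowed(unsigned long pfn)
	{
		return pfn < 0x100;			/* toy policy: first 1MB only */
	}

	static int range_is_allowed(unsigned long pfn, unsigned long size)
	{
		uint64_t from = (uint64_t)pfn << 12;
		uint64_t to = from + size;
		uint64_t cursor = from;

		if (!pat_enabled)			/* the new early exit */
			return 1;

		while (cursor < to) {
			if (!devmem_is_allowed(pfn))
				return 0;
			cursor += 1 << 12;
			pfn++;
		}
		return 1;
	}

	int main(void)
	{
		/* succeeds only because PAT is off; pfn 0x200 is outside policy */
		return range_is_allowed(0x200, 4096) ? 0 : 1;
	}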