Merge branch 'x86/urgent' into x86/pat
Conflicts:
	arch/x86/mm/pageattr.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,7 +1,6 @@
 obj-y	:=  init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
-	    pat.o pgtable.o
+	    pat.o pgtable.o gup.o
 
-obj-$(CONFIG_HAVE_GET_USER_PAGES_FAST) += gup.o
 obj-$(CONFIG_X86_32)		+= pgtable_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -60,7 +60,7 @@ static unsigned long dma_reserve __initdata;
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
-int direct_gbpages __meminitdata
+int direct_gbpages
 #ifdef CONFIG_DIRECT_GBPAGES
 				= 1
 #endif
@@ -88,7 +88,11 @@ early_param("gbpages", parse_direct_gbpages_on);
 
 int after_bootmem;
 
-static __init void *spp_getpage(void)
+/*
+ * NOTE: This function is marked __ref because it calls __init function
+ * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
+ */
+static __ref void *spp_getpage(void)
 {
 	void *ptr;
 
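
The spp_getpage() hunk above resolves a section-mismatch warning rather than changing behaviour: the function may call the __init-only bootmem allocator, which is legitimate only while after_bootmem == 0, and __ref tells modpost that this cross-section call is intentional. A minimal sketch of the pattern, assuming a simplified allocator (the helper name is invented; the real spp_getpage() also checks the result):

    /*
     * Sketch only: __ref suppresses the section-mismatch warning for a
     * function that calls __init code. The runtime guard, not the
     * annotation, is what makes the call safe.
     */
    static __ref void *early_or_late_page(void)
    {
    	if (after_bootmem)
    		return (void *)get_zeroed_page(GFP_ATOMIC); /* normal allocator */
    	return alloc_bootmem_pages(PAGE_SIZE);            /* __init-only path */
    }
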
@@ -314,6 +318,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 {
 	unsigned long pages = 0;
 	unsigned long last_map_addr = end;
+	unsigned long start = address;
 
 	int i = pmd_index(address);
 
@@ -334,6 +339,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 			if (!pmd_large(*pmd))
 				last_map_addr = phys_pte_update(pmd, address,
 								end);
+			/* Count entries we're using from level2_ident_pgt */
+			if (start == 0)
+				pages++;
 			continue;
 		}
 
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-	retval = reserve_memtype(phys_addr, phys_addr + size,
+	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
 						prot_val, &new_prot_val);
 	if (retval) {
 		pr_debug("Warning: reserve_memtype returned %d\n", retval);
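
The (u64) cast in __ioremap_caller() guards against 32-bit overflow: when resource_size_t is 32 bits wide, phys_addr + size is computed modulo 2^32 before being widened to reserve_memtype()'s u64 parameter, so a mapping near the 4 GiB boundary would reserve a wrapped, bogus range. A standalone illustration of the wraparound (addresses are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t phys_addr = 0xfffff000u;	/* last page below 4 GiB */
    	uint32_t size      = 0x2000;		/* two pages */

    	uint64_t wrapped = phys_addr + size;		/* 32-bit add, then widen */
    	uint64_t correct = (uint64_t)phys_addr + size;	/* widen, then add */

    	printf("wrapped end: 0x%llx\n", (unsigned long long)wrapped); /* 0x1000 */
    	printf("correct end: 0x%llx\n", (unsigned long long)correct); /* 0x100001000 */
    	return 0;
    }
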
@@ -553,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
 {
 	if (!early_ioremap_nested)
 		return 0;
-
-	printk(KERN_WARNING
+	WARN(1, KERN_WARNING
 	       "Debug warning: early ioremap leak of %d areas detected.\n",
-	       early_ioremap_nested);
+		early_ioremap_nested);
 	printk(KERN_WARNING
-	       "please boot with early_ioremap_debug and report the dmesg.\n");
-	WARN_ON(1);
+		"please boot with early_ioremap_debug and report the dmesg.\n");
 
 	return 1;
 }
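
This hunk, like the pageattr-test.c and pageattr.c hunks further down, folds a printk() plus WARN_ON(1) pair into a single WARN(1, ...), so the message is printed inside the warning banner and travels with the backtrace instead of being a separate log line that trace collectors can lose. Schematically (a sketch, with `nested` standing in for the real counter):

    /* Before: message and stack dump are two unrelated log entries. */
    printk(KERN_WARNING "early ioremap leak of %d areas detected.\n", nested);
    WARN_ON(1);

    /* After: one call ties the message to the backtrace. */
    WARN(1, KERN_WARNING "early ioremap leak of %d areas detected.\n", nested);
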
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
 			    "may miss events.\n");
 }
 
-static void leave_uniprocessor(void)
+/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
+   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
+static void __ref leave_uniprocessor(void)
 {
 	int cpu;
 	int err;
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -224,8 +224,7 @@ static int pageattr_test(void)
 	failed += print_split(&sc);
 
 	if (failed) {
-		printk(KERN_ERR "NOT PASSED. Please report.\n");
-		WARN_ON(1);
+		WARN(1, KERN_ERR "NOT PASSED. Please report.\n");
 		return -EINVAL;
 	} else {
 		if (print)
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -59,13 +59,19 @@ static void split_page_count(int level)
 
 int arch_report_meminfo(char *page)
 {
-	int n = sprintf(page, "DirectMap4k:  %8lu\n"
-			"DirectMap2M:  %8lu\n",
-			direct_pages_count[PG_LEVEL_4K],
-			direct_pages_count[PG_LEVEL_2M]);
+	int n = sprintf(page, "DirectMap4k:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_4K] << 2);
+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
+	n += sprintf(page + n, "DirectMap2M:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_2M] << 11);
+#else
+	n += sprintf(page + n, "DirectMap4M:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_2M] << 12);
+#endif
 #ifdef CONFIG_X86_64
-	n += sprintf(page + n, "DirectMap1G:  %8lu\n",
-			direct_pages_count[PG_LEVEL_1G]);
+	if (direct_gbpages)
+		n += sprintf(page + n, "DirectMap1G:  %8lu kB\n",
+			direct_pages_count[PG_LEVEL_1G] << 20);
 #endif
 	return n;
 }
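
The new shifts in arch_report_meminfo() convert page counts into kB: a 4 KiB page is 4 kB (<< 2), a 2 MiB page 2048 kB (<< 11), a 4 MiB page 4096 kB (<< 12), and a 1 GiB page 1048576 kB (<< 20). A standalone check of the arithmetic (counts are made up):

    #include <stdio.h>

    int main(void)
    {
    	unsigned long pages_4k = 3, pages_2m = 5, pages_1g = 1;

    	printf("DirectMap4k:  %8lu kB\n", pages_4k << 2);	/* 3 * 4 kB */
    	printf("DirectMap2M:  %8lu kB\n", pages_2m << 11);	/* 5 * 2048 kB */
    	printf("DirectMap1G:  %8lu kB\n", pages_1g << 20);	/* 1 * 1048576 kB */
    	return 0;
    }
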
@@ -636,9 +642,8 @@ repeat:
 	if (!pte_val(old_pte)) {
 		if (!primary)
 			return 0;
-		printk(KERN_WARNING "CPA: called for zero pte. "
+		WARN(1, KERN_WARNING "CPA: called for zero pte. "
 		       "vaddr = %lx cpa->vaddr = %lx\n", address,
 		       *cpa->vaddr);
-		WARN_ON(1);
 		return -EINVAL;
 	}
@@ -927,7 +932,7 @@ int set_memory_uc(unsigned long addr, int numpages)
 	/*
 	 * for now UC MINUS. see comments in ioremap_nocache()
 	 */
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 			    _PAGE_CACHE_UC_MINUS, NULL))
 		return -EINVAL;
 
@@ -967,7 +972,7 @@ int set_memory_wc(unsigned long addr, int numpages)
 	if (!pat_enabled)
 		return set_memory_uc(addr, numpages);
 
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
 		_PAGE_CACHE_WC, NULL))
 		return -EINVAL;
 
@@ -983,7 +988,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	free_memtype(addr, addr + numpages * PAGE_SIZE);
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 
 	return _set_memory_wb(addr, numpages);
 }
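
The three set_memory_*() hunks above fix an address-space mix-up: these functions take kernel virtual addresses, while reserve_memtype() and free_memtype() track physical ranges, so the range has to go through __pa() before the memtype list is touched. A sketch of the corrected shape, assuming the surrounding helpers (set_memory_uc() shown; the real function carries an extra comment about UC_MINUS):

    /* addr is a kernel virtual address in the direct mapping. */
    int set_memory_uc(unsigned long addr, int numpages)
    {
    	/* The memtype tracker is keyed by physical address. */
    	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
    			    _PAGE_CACHE_UC_MINUS, NULL))
    		return -EINVAL;

    	return _set_memory_uc(addr, numpages);
    }
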
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
 	return -EBUSY;
 }
 
+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
 	spin_lock(&memtype_lock);
 
+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
 	/* Search for existing mapping that overlaps the current range */
 	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
 		if (end <= entry->start) {
 			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
 			break;
 		} else if (start <= entry->start) { /* end > entry->start */
 			err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
 				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
 			}
 			break;
 		} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 			if (!err) {
 				dprintk("Overlap at 0x%Lx-0x%Lx\n",
 					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
 			}
 			break;
 		}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 		return err;
 	}
 
+	cached_start = start;
+
 	if (where)
 		list_add(&new->nd, where);
 	else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
 	spin_lock(&memtype_lock);
 	list_for_each_entry(entry, &memtype_list, nd) {
 		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
 			list_del(&entry->nd);
 			kfree(entry);
 			err = 0;
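
Taken together, the pat.c hunks add a one-entry cache over the sorted memtype list: reserve_memtype() remembers the node near the last insertion and, when the next request starts at or after cached_start, resumes the linear scan there instead of at the list head; free_memtype() invalidates the hint when the cached range disappears. A self-contained toy model of the idea (all names invented, singly linked for brevity):

    #include <stdio.h>
    #include <stdlib.h>

    struct node {
    	unsigned long long start;
    	struct node *next;
    };

    static struct node head = { 0, NULL };	/* sentinel */
    static struct node *cached_node;	/* hint: last inserted node */
    static unsigned long long cached_key;

    static void insert_sorted(unsigned long long start)
    {
    	/* Resume mid-list when requests arrive in increasing order. */
    	struct node *pos = (cached_node && start >= cached_key)
    				? cached_node : &head;
    	struct node *n = malloc(sizeof(*n));

    	n->start = start;
    	while (pos->next && pos->next->start < start)
    		pos = pos->next;
    	n->next = pos->next;
    	pos->next = n;
    	cached_node = n;
    	cached_key = start;
    }

    int main(void)
    {
    	insert_sorted(0x1000);
    	insert_sorted(0x2000);	/* scan starts at 0x1000's node, not head */
    	insert_sorted(0x3000);
    	for (struct node *p = head.next; p; p = p->next)
    		printf("0x%llx\n", p->start);
    	return 0;
    }
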
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
 }
 
 
-/*
- * /dev/mem mmap interface. The memtype used for mapping varies:
- * - Use UC for mappings with O_SYNC flag
- * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
- *   inherit the memtype from existing mapping.
- * - Else use UC_MINUS memtype (for backward compatibility with existing
- *   X drivers.
- */
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 				unsigned long size, pgprot_t vma_prot)
 {
|
||||
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
|
||||
unsigned long size, pgprot_t *vma_prot)
|
||||
{
|
||||
u64 offset = ((u64) pfn) << PAGE_SHIFT;
|
||||
unsigned long flags = _PAGE_CACHE_UC_MINUS;
|
||||
unsigned long flags = -1;
|
||||
int retval;
|
||||
|
||||
if (!range_is_allowed(pfn, size))
|
||||
return 0;
|
||||
|
||||
if (file->f_flags & O_SYNC) {
|
||||
flags = _PAGE_CACHE_UC;
|
||||
flags = _PAGE_CACHE_UC_MINUS;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
|
||||
#endif
|
||||
|
||||
/*
|
||||
* With O_SYNC, we can only take UC mapping. Fail if we cannot.
|
||||
* With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
|
||||
*
|
||||
* Without O_SYNC, we want to get
|
||||
* - WB for WB-able memory and no other conflicting mappings
|
||||
* - UC_MINUS for non-WB-able memory with no other conflicting mappings
|
||||
* - Inherit from confliting mappings otherwise
|
||||
*/
|
||||
if (flags != _PAGE_CACHE_UC_MINUS) {
|
||||
if (flags != -1) {
|
||||
retval = reserve_memtype(offset, offset + size, flags, NULL);
|
||||
} else {
|
||||
retval = reserve_memtype(offset, offset + size, -1, &flags);
|
||||
|
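
The phys_mem_access_prot_allowed() hunks replace a double-duty default with an explicit sentinel: _PAGE_CACHE_UC_MINUS used to mean both "O_SYNC asked for this" and "nothing was requested", so the two cases were indistinguishable. With -1 as the "no request" value, O_SYNC now explicitly requests UC_MINUS, and every other caller lets reserve_memtype() resolve or inherit a type through its out-parameter. The resulting control flow, condensed (a sketch of the function's core, not its full body):

    unsigned long flags = -1;			/* -1: no explicit request */

    if (file->f_flags & O_SYNC)
    	flags = _PAGE_CACHE_UC_MINUS;		/* explicit request */

    if (flags != -1)
    	retval = reserve_memtype(offset, offset + size, flags, NULL);
    else	/* let reserve_memtype() pick or inherit, returned via &flags */
    	retval = reserve_memtype(offset, offset + size, -1, &flags);
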
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -207,6 +207,9 @@ static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
 	unsigned long addr;
 	int i;
 
+	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
+		return;
+
 	pud = pud_offset(pgd, 0);
 
 	for (addr = i = 0; i < PREALLOCATED_PMDS;
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -178,7 +178,7 @@ void acpi_numa_arch_fixup(void)
  * start of the node, and that the current "end" address is after
  * the previous one.
  */
-static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
+static __init int node_read_chunk(int nid, struct node_memory_chunk_s *memory_chunk)
 {
 	/*
 	 * Only add present memory as told by the e820.
@@ -189,10 +189,10 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
 	if (memory_chunk->start_pfn >= max_pfn) {
 		printk(KERN_INFO "Ignoring SRAT pfns: %08lx - %08lx\n",
 			memory_chunk->start_pfn, memory_chunk->end_pfn);
-		return;
+		return -1;
 	}
 	if (memory_chunk->nid != nid)
-		return;
+		return -1;
 
 	if (!node_has_online_mem(nid))
 		node_start_pfn[nid] = memory_chunk->start_pfn;
@@ -202,6 +202,8 @@ static __init void node_read_chunk(int nid, struct node_memory_chunk_s *memory_c
 
 	if (node_end_pfn[nid] < memory_chunk->end_pfn)
 		node_end_pfn[nid] = memory_chunk->end_pfn;
+
+	return 0;
 }
 
 int __init get_memcfg_from_srat(void)
@@ -259,7 +261,9 @@ int __init get_memcfg_from_srat(void)
 		printk(KERN_DEBUG
 			"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
 			j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
-		node_read_chunk(chunk->nid, chunk);
+		if (node_read_chunk(chunk->nid, chunk))
+			continue;
+
 		e820_register_active_regions(chunk->nid, chunk->start_pfn,
 					min(chunk->end_pfn, max_pfn));
 	}
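
The srat_32.c hunks turn node_read_chunk() from void into int so the caller can tell rejection from success: previously a chunk above max_pfn, or one belonging to a different node, was skipped inside node_read_chunk() yet still registered with e820_register_active_regions(). The caller-side pattern, condensed from get_memcfg_from_srat():

    if (node_read_chunk(chunk->nid, chunk))
    	continue;	/* rejected: wrong node or beyond max_pfn */

    e820_register_active_regions(chunk->nid, chunk->start_pfn,
    			     min(chunk->end_pfn, max_pfn));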