Merge branch 'x86/urgent' into x86/cleanups
@@ -241,7 +241,7 @@ static unsigned long __initdata table_start;
 static unsigned long __meminitdata table_end;
 static unsigned long __meminitdata table_top;
 
-static __meminit void *alloc_low_page(unsigned long *phys)
+static __ref void *alloc_low_page(unsigned long *phys)
 {
	unsigned long pfn = table_end++;
	void *adr;
@@ -262,7 +262,7 @@ static __meminit void *alloc_low_page(unsigned long *phys)
	return adr;
 }
 
-static __meminit void unmap_low_page(void *adr)
+static __ref void unmap_low_page(void *adr)
 {
	if (after_bootmem)
		return;
@@ -336,9 +336,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
		}
 
		if (pmd_val(*pmd)) {
-			if (!pmd_large(*pmd))
+			if (!pmd_large(*pmd)) {
+				spin_lock(&init_mm.page_table_lock);
				last_map_addr = phys_pte_update(pmd, address,
-								 end);
+								end);
+				spin_unlock(&init_mm.page_table_lock);
+			}
			/* Count entries we're using from level2_ident_pgt */
			if (start == 0)
				pages++;
@@ -347,8 +350,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 
		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
+			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
			continue;
		}
@@ -357,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
		last_map_addr = phys_pte_init(pte, address, end);
		unmap_low_page(pte);
 
+		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
@@ -370,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
	pmd_t *pmd = pmd_offset(pud, 0);
	unsigned long last_map_addr;
 
-	spin_lock(&init_mm.page_table_lock);
	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-	spin_unlock(&init_mm.page_table_lock);
	__flush_tlb_all();
	return last_map_addr;
 }
@@ -408,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
+			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
			continue;
		}
 
		pmd = alloc_low_page(&pmd_phys);
-
-		spin_lock(&init_mm.page_table_lock);
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
		unmap_low_page(pmd);
+
+		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, __va(pmd_phys));
		spin_unlock(&init_mm.page_table_lock);
-
	}
	__flush_tlb_all();
	update_page_count(PG_LEVEL_1G, pages);
@@ -513,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
			continue;
		}
 
-		if (after_bootmem)
-			pud = pud_offset(pgd, start & PGDIR_MASK);
-		else
-			pud = alloc_low_page(&pud_phys);
-
+		pud = alloc_low_page(&pud_phys);
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
						 page_size_mask);
		unmap_low_page(pud);
-		pgd_populate(&init_mm, pgd_offset_k(start),
-			     __va(pud_phys));
+
+		spin_lock(&init_mm.page_table_lock);
+		pgd_populate(&init_mm, pgd, __va(pud_phys));
+		spin_unlock(&init_mm.page_table_lock);
	}
 
	return last_map_addr;
@@ -553,13 +553,11 @@ static int __init check_early_ioremap_leak(void)
 {
	if (!early_ioremap_nested)
		return 0;
-
-	printk(KERN_WARNING
+	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
-	       early_ioremap_nested);
+			early_ioremap_nested);
	printk(KERN_WARNING
-	       "please boot with early_ioremap_debug and report the dmesg.\n");
-	WARN_ON(1);
+		"please boot with early_ioremap_debug and report the dmesg.\n");
 
	return 1;
 }
@@ -430,7 +430,9 @@ static void enter_uniprocessor(void)
			    "may miss events.\n");
 }
 
-static void leave_uniprocessor(void)
+/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
+   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
+static void __ref leave_uniprocessor(void)
 {
	int cpu;
	int err;
@@ -849,7 +849,7 @@ int set_memory_uc(unsigned long addr, int numpages)
	/*
	 * for now UC MINUS. see comments in ioremap_nocache()
	 */
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
			    _PAGE_CACHE_UC_MINUS, NULL))
		return -EINVAL;
 
@@ -868,7 +868,7 @@ int set_memory_wc(unsigned long addr, int numpages)
	if (!pat_enabled)
		return set_memory_uc(addr, numpages);
 
-	if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
+	if (reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
		_PAGE_CACHE_WC, NULL))
		return -EINVAL;
 
@@ -884,7 +884,7 @@ int _set_memory_wb(unsigned long addr, int numpages)
 
 int set_memory_wb(unsigned long addr, int numpages)
 {
-	free_memtype(addr, addr + numpages * PAGE_SIZE);
+	free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 
	return _set_memory_wb(addr, numpages);
 }
@@ -207,6 +207,9 @@ static int chk_conflict(struct memtype *new, struct memtype *entry,
	return -EBUSY;
 }
 
+static struct memtype *cached_entry;
+static u64 cached_start;
+
 /*
  * req_type typically has one of the:
  * - _PAGE_CACHE_WB
@@ -280,11 +283,17 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 
	spin_lock(&memtype_lock);
 
+	if (cached_entry && start >= cached_start)
+		entry = cached_entry;
+	else
+		entry = list_entry(&memtype_list, struct memtype, nd);
+
	/* Search for existing mapping that overlaps the current range */
	where = NULL;
-	list_for_each_entry(entry, &memtype_list, nd) {
+	list_for_each_entry_continue(entry, &memtype_list, nd) {
		if (end <= entry->start) {
			where = entry->nd.prev;
+			cached_entry = list_entry(where, struct memtype, nd);
			break;
		} else if (start <= entry->start) { /* end > entry->start */
			err = chk_conflict(new, entry, new_type);
@@ -292,6 +301,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
				where = entry->nd.prev;
+				cached_entry = list_entry(where,
+							struct memtype, nd);
			}
			break;
		} else if (start < entry->end) { /* start > entry->start */
@@ -299,7 +310,20 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
			if (!err) {
				dprintk("Overlap at 0x%Lx-0x%Lx\n",
					entry->start, entry->end);
-				where = &entry->nd;
+				cached_entry = list_entry(entry->nd.prev,
+							struct memtype, nd);
+
+				/*
+				 * Move to right position in the linked
+				 * list to add this new entry
+				 */
+				list_for_each_entry_continue(entry,
+							&memtype_list, nd) {
+					if (start <= entry->start) {
+						where = entry->nd.prev;
+						break;
+					}
+				}
			}
			break;
		}
@@ -314,6 +338,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
		return err;
	}
 
+	cached_start = start;
+
	if (where)
		list_add(&new->nd, where);
	else
@@ -343,6 +369,9 @@ int free_memtype(u64 start, u64 end)
	spin_lock(&memtype_lock);
	list_for_each_entry(entry, &memtype_list, nd) {
		if (entry->start == start && entry->end == end) {
+			if (cached_entry == entry || cached_start == start)
+				cached_entry = NULL;
+
			list_del(&entry->nd);
			kfree(entry);
			err = 0;
@@ -361,14 +390,6 @@ int free_memtype(u64 start, u64 end)
 }
 
 
-/*
- * /dev/mem mmap interface. The memtype used for mapping varies:
- * - Use UC for mappings with O_SYNC flag
- * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
- *   inherit the memtype from existing mapping.
- * - Else use UC_MINUS memtype (for backward compatibility with existing
- *   X drivers.
- */
 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
 {
@@ -406,14 +427,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
 {
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
-	unsigned long flags = _PAGE_CACHE_UC_MINUS;
+	unsigned long flags = -1;
	int retval;
 
	if (!range_is_allowed(pfn, size))
		return 0;
 
	if (file->f_flags & O_SYNC) {
-		flags = _PAGE_CACHE_UC;
+		flags = _PAGE_CACHE_UC_MINUS;
	}
 
 #ifdef CONFIG_X86_32
@@ -436,13 +457,14 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 #endif
 
	/*
-	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
+	 * With O_SYNC, we can only take UC_MINUS mapping. Fail if we cannot.
	 *
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from confliting mappings otherwise
	 */
-	if (flags != _PAGE_CACHE_UC_MINUS) {
+	if (flags != -1) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
+		retval = reserve_memtype(offset, offset + size, -1, &flags);