Merge commit 'linus' into x86/urgent, to pick up recent x86 changes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/mm/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_X86_32)	+= pgtable_32.o iomap_32.o
 
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_X86_PTDUMP_CORE)	+= dump_pagetables.o
+obj-$(CONFIG_X86_PTDUMP)	+= debug_pagetables.o
 
 obj-$(CONFIG_HIGHMEM)		+= highmem_32.o
 
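A side note on the build split above: CONFIG_X86_PTDUMP_CORE carries the page-table walker itself, while CONFIG_X86_PTDUMP now only adds the debugfs front end added below. Assuming the symbol was made tristate by the corresponding Kconfig change (not shown in this diff), a modular configuration would look like:

CONFIG_X86_PTDUMP_CORE=y
CONFIG_X86_PTDUMP=m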
arch/x86/mm/debug_pagetables.c (new file, 46 lines)
@@ -0,0 +1,46 @@
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <asm/pgtable.h>
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+	ptdump_walk_pgd_level(m, NULL);
+	return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, ptdump_show, NULL);
+}
+
+static const struct file_operations ptdump_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ptdump_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+};
+
+static struct dentry *pe;
+
+static int __init pt_dump_debug_init(void)
+{
+	pe = debugfs_create_file("kernel_page_tables", S_IRUSR, NULL, NULL,
+				 &ptdump_fops);
+	if (!pe)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __exit pt_dump_debug_exit(void)
+{
+	debugfs_remove_recursive(pe);
+}
+
+module_init(pt_dump_debug_init);
+module_exit(pt_dump_debug_exit);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
+MODULE_DESCRIPTION("Kernel debugging helper that dumps pagetables");
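Not part of the commit, but for orientation: once the module above is loaded, the dump is an ordinary debugfs read. A minimal user-space sketch, assuming the conventional debugfs mount point (and root, since the file is created S_IRUSR):

/* Hypothetical reader for the new debugfs entry; path assumes a standard mount. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/kernel_page_tables", "r");

	if (!f) {
		perror("fopen");	/* typically EACCES without root, ENOENT without the module */
		return EXIT_FAILURE;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* one line per mapped range, as emitted by the seq_file */
	fclose(f);
	return EXIT_SUCCESS;
}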
arch/x86/mm/dump_pagetables.c
@@ -426,38 +426,15 @@ void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
 {
 	ptdump_walk_pgd_level_core(m, pgd, false);
 }
+EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level);
 
 void ptdump_walk_pgd_level_checkwx(void)
 {
 	ptdump_walk_pgd_level_core(NULL, NULL, true);
 }
 
-#ifdef CONFIG_X86_PTDUMP
-static int ptdump_show(struct seq_file *m, void *v)
-{
-	ptdump_walk_pgd_level(m, NULL);
-	return 0;
-}
-
-static int ptdump_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, ptdump_show, NULL);
-}
-
-static const struct file_operations ptdump_fops = {
-	.open		= ptdump_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
-#endif
-
-static int pt_dump_init(void)
+static int __init pt_dump_init(void)
 {
-#ifdef CONFIG_X86_PTDUMP
-	struct dentry *pe;
-#endif
-
 #ifdef CONFIG_X86_32
 	/* Not a compile-time constant on x86-32 */
 	address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
@@ -468,13 +445,6 @@ static int pt_dump_init(void)
 	address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
 #endif
 
-#ifdef CONFIG_X86_PTDUMP
-	pe = debugfs_create_file("kernel_page_tables", 0600, NULL, NULL,
-				 &ptdump_fops);
-	if (!pe)
-		return -ENOMEM;
-#endif
-
 	return 0;
 }
 
arch/x86/mm/ioremap.c
@@ -194,8 +194,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	 * Check if the request spans more than any BAR in the iomem resource
 	 * tree.
 	 */
-	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
-		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
+	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
+		pr_warn("caller %pS mapping multiple BARs\n", caller);
 
 	return ret_addr;
 err_free_area:
arch/x86/mm/pageattr.c
@@ -129,14 +129,16 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  */
 void clflush_cache_range(void *vaddr, unsigned int size)
 {
-	unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
+	const unsigned long clflush_size = boot_cpu_data.x86_clflush_size;
+	void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1));
 	void *vend = vaddr + size;
-	void *p;
+
+	if (p >= vend)
+		return;
 
 	mb();
 
-	for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
-	     p < vend; p += boot_cpu_data.x86_clflush_size)
+	for (; p < vend; p += clflush_size)
 		clflushopt(p);
 
 	mb();
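A stand-alone illustration (a hypothetical demo, not kernel code) of why the rework is equivalent: the hoisted rounding computes the same first cacheline as the old loop initializer, and the new p >= vend guard bails out on an empty range before the first mb() fence.

/* Demo of the cacheline rounding in clflush_cache_range(); assumes 64-byte lines. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned long clflush_size = 64;	/* stand-in for boot_cpu_data.x86_clflush_size */
	_Alignas(64) char buf[256];
	char *vaddr = buf + 3;			/* deliberately misaligned start */
	unsigned int size = 100;
	char *vend = vaddr + size;
	char *p = (char *)((uintptr_t)vaddr & ~(clflush_size - 1));
	unsigned int flushes = 0;

	if (p >= vend)				/* catches size == 0 before any fence */
		return 0;

	for (; p < vend; p += clflush_size)
		flushes++;			/* the kernel issues clflushopt(p) here */

	printf("%u flushes\n", flushes);	/* bytes 3..102 of the buffer span 2 cachelines */
	return 0;
}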
arch/x86/mm/pat.c
@@ -586,7 +586,7 @@ int free_memtype(u64 start, u64 end)
 	entry = rbt_memtype_erase(start, end);
 	spin_unlock(&memtype_lock);
 
-	if (!entry) {
+	if (IS_ERR(entry)) {
 		pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
 			current->comm, current->pid, start, end - 1);
 		return -EINVAL;
@@ -992,6 +992,16 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 	vma->vm_flags &= ~VM_PAT;
 }
 
+/*
+ * untrack_pfn_moved is called, while mremapping a pfnmap for a new region,
+ * with the old vma after its pfnmap page table has been removed.  The new
+ * vma has a new pfnmap to the same pfn & cache type with VM_PAT set.
+ */
+void untrack_pfn_moved(struct vm_area_struct *vma)
+{
+	vma->vm_flags &= ~VM_PAT;
+}
+
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
 	return __pgprot(pgprot_val(prot) |
arch/x86/mm/pat_rbtree.c
@@ -98,8 +98,13 @@ static struct memtype *memtype_rb_lowest_match(struct rb_root *root,
 	return last_lower; /* Returns NULL if there is no overlap */
 }
 
-static struct memtype *memtype_rb_exact_match(struct rb_root *root,
-				u64 start, u64 end)
+enum {
+	MEMTYPE_EXACT_MATCH	= 0,
+	MEMTYPE_END_MATCH	= 1
+};
+
+static struct memtype *memtype_rb_match(struct rb_root *root,
+				u64 start, u64 end, int match_type)
 {
 	struct memtype *match;
 
@@ -107,7 +112,12 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,
 	while (match != NULL && match->start < end) {
 		struct rb_node *node;
 
-		if (match->start == start && match->end == end)
+		if ((match_type == MEMTYPE_EXACT_MATCH) &&
+		    (match->start == start) && (match->end == end))
+			return match;
+
+		if ((match_type == MEMTYPE_END_MATCH) &&
+		    (match->start < start) && (match->end == end))
 			return match;
 
 		node = rb_next(&match->rb);
@@ -117,7 +127,7 @@ static struct memtype *memtype_rb_exact_match(struct rb_root *root,
 		match = NULL;
 	}
 
-	return NULL; /* Returns NULL if there is no exact match */
+	return NULL; /* Returns NULL if there is no match */
 }
 
 static int memtype_rb_check_conflict(struct rb_root *root,
@@ -210,12 +220,36 @@ struct memtype *rbt_memtype_erase(u64 start, u64 end)
 {
 	struct memtype *data;
 
-	data = memtype_rb_exact_match(&memtype_rbroot, start, end);
-	if (!data)
-		goto out;
+	/*
+	 * Since the memtype_rbroot tree allows overlapping ranges,
+	 * rbt_memtype_erase() checks with EXACT_MATCH first, i.e. free
+	 * a whole node for the munmap case.  If no such entry is found,
+	 * it then checks with END_MATCH, i.e. shrink the size of a node
+	 * from the end for the mremap case.
+	 */
+	data = memtype_rb_match(&memtype_rbroot, start, end,
+				MEMTYPE_EXACT_MATCH);
+	if (!data) {
+		data = memtype_rb_match(&memtype_rbroot, start, end,
+					MEMTYPE_END_MATCH);
+		if (!data)
+			return ERR_PTR(-EINVAL);
+	}
 
-	rb_erase_augmented(&data->rb, &memtype_rbroot, &memtype_rb_augment_cb);
-out:
+	if (data->start == start) {
+		/* munmap: erase this node */
+		rb_erase_augmented(&data->rb, &memtype_rbroot,
+					&memtype_rb_augment_cb);
+	} else {
+		/* mremap: update the end value of this node */
+		rb_erase_augmented(&data->rb, &memtype_rbroot,
+					&memtype_rb_augment_cb);
+		data->end = start;
+		data->subtree_max_end = data->end;
+		memtype_rb_insert(&memtype_rbroot, data);
+		return NULL;
+	}
+
 	return data;
 }
 
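The two match modes read as simple interval predicates; a minimal sketch with hypothetical types (the kernel evaluates these while walking an rbtree augmented with subtree_max_end):

/* Illustrative predicates only, not kernel code. */
#include <stdbool.h>
#include <stdint.h>

struct range { uint64_t start, end; };

/* MEMTYPE_EXACT_MATCH -- munmap: the freed region is exactly one tracked node */
bool exact_match(const struct range *r, uint64_t start, uint64_t end)
{
	return r->start == start && r->end == end;
}

/* MEMTYPE_END_MATCH -- mremap: a larger node shares the same end; keep [r->start, start) */
bool end_match(const struct range *r, uint64_t start, uint64_t end)
{
	return r->start < start && r->end == end;
}

On an END_MATCH hit, rbt_memtype_erase() shrinks the node (data->end = start) and re-inserts it, returning NULL; only ERR_PTR(-EINVAL) means nothing matched, which is why free_memtype() above now tests IS_ERR(entry) instead of !entry.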
arch/x86/mm/pgtable.c
@@ -414,7 +414,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 
 	if (changed && dirty) {
 		*ptep = entry;
-		pte_update_defer(vma->vm_mm, address, ptep);
+		pte_update(vma->vm_mm, address, ptep);
 	}
 
 	return changed;
@@ -431,7 +431,6 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 
 	if (changed && dirty) {
 		*pmdp = entry;
-		pmd_update_defer(vma->vm_mm, address, pmdp);
 		/*
 		 * We had a write-protection fault here and changed the pmd
 		 * to be more permissive. No need to flush the TLB for that,
@@ -469,9 +468,6 @@ int pmdp_test_and_clear_young(struct vm_area_struct *vma,
 	ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
 				 (unsigned long *)pmdp);
 
-	if (ret)
-		pmd_update(vma->vm_mm, addr, pmdp);
-
 	return ret;
 }
 #endif
@@ -518,7 +514,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma,
 	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
 				(unsigned long *)pmdp);
 	if (set) {
-		pmd_update(vma->vm_mm, address, pmdp);
 		/* need tlb flush only to serialize against gup-fast */
 		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 	}
arch/x86/mm/setup_nx.c
@@ -31,7 +31,7 @@ early_param("noexec", noexec_setup);
 
 void x86_configure_nx(void)
 {
-	if (cpu_has_nx && !disable_nx)
+	if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
 		__supported_pte_mask |= _PAGE_NX;
 	else
 		__supported_pte_mask &= ~_PAGE_NX;
@@ -39,7 +39,7 @@ void x86_configure_nx(void)
 
 void __init x86_report_nx(void)
 {
-	if (!cpu_has_nx) {
+	if (!boot_cpu_has(X86_FEATURE_NX)) {
 		printk(KERN_NOTICE "Notice: NX (Execute Disable) protection "
 		       "missing in CPU!\n");
 	} else {
arch/x86/mm/srat.c
@@ -203,6 +203,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 		pr_warn("SRAT: Failed to mark hotplug range [mem %#010Lx-%#010Lx] in memblock\n",
 			(unsigned long long)start, (unsigned long long)end - 1);
 
+	max_possible_pfn = max(max_possible_pfn, PFN_UP(end - 1));
+
 	return 0;
 out_err_bad_srat:
 	bad_srat();
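For reference, PFN_UP() rounds a byte address up to a page-frame count, so the new line widens max_possible_pfn to cover hotpluggable ranges reported by the SRAT. A worked example, assuming the usual definition from include/linux/pfn.h and 4 KiB pages:

/* Worked example of the PFN_UP(end - 1) rounding; assumes 4 KiB pages. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)	/* as in include/linux/pfn.h */

int main(void)
{
	uint64_t end = 0x100000000ULL;	/* an SRAT range ending at 4 GiB */

	/* end - 1 is the last byte of the range, so this rounds up to its frame + 1. */
	printf("max_possible_pfn >= %llu\n",
	       (unsigned long long)PFN_UP(end - 1));	/* prints 1048576 (0x100000) */
	return 0;
}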