Merge branch 'x86/urgent' into x86/asm, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 mm/memory.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
@@ -1612,10 +1612,15 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
 	 * than insert_pfn).  If a zero_pfn were inserted into a VM_MIXEDMAP
 	 * without pte special, it would there be refcounted as a normal page.
 	 */
-	if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) {
+	if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
 		struct page *page;
 
-		page = pfn_t_to_page(pfn);
+		/*
+		 * At this point we are committed to insert_page()
+		 * regardless of whether the caller specified flags that
+		 * result in pfn_t_has_page() == false.
+		 */
+		page = pfn_to_page(pfn_t_to_pfn(pfn));
 		return insert_page(vma, addr, page, vma->vm_page_prot);
 	}
 	return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
@@ -2253,11 +2258,6 @@ static int wp_page_shared(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	page_cache_get(old_page);
 
-	/*
-	 * Only catch write-faults on shared writable pages,
-	 * read-only shared pages can get COWed by
-	 * get_user_pages(.write=1, .force=1).
-	 */
 	if (vma->vm_ops && vma->vm_ops->page_mkwrite) {
 		int tmp;
 
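The first hunk replaces pfn_t_to_page(pfn) with pfn_to_page(pfn_t_to_pfn(pfn)) because pfn_t_to_page() returns NULL when the pfn_t flags indicate no struct page backs the pfn, while this branch is already committed to insert_page(). A rough sketch of the relevant helpers, approximating the include/linux/pfn_t.h definitions of this era rather than quoting them verbatim:

static inline bool pfn_t_has_page(pfn_t pfn)
{
	/* PFN_DEV without PFN_MAP means no struct page backs the pfn */
	return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
}

static inline struct page *pfn_t_to_page(pfn_t pfn)
{
	/* the NULL case here is what the old vm_insert_mixed() code could trip over */
	if (pfn_t_has_page(pfn))
		return pfn_to_page(pfn_t_to_pfn(pfn));
	return NULL;
}

The added !pfn_t_devmap(pfn) check keeps device pfns on the insert_pfn() path, so in the !HAVE_PTE_SPECIAL case only non-devmap pfns are inserted via insert_page() and refcounted as normal pages.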