Merge commit 'v2.6.28-rc2' into core/locking
Conflicts: arch/um/include/asm/system.h
mm/memory.c (127 changed lines)
@@ -1129,12 +1129,17 @@ static inline int use_zero_page(struct vm_area_struct *vma)
 	return !vma->vm_ops || !vma->vm_ops->fault;
 }
 
-int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-		unsigned long start, int len, int write, int force,
+
+
+int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int flags,
 		struct page **pages, struct vm_area_struct **vmas)
 {
 	int i;
-	unsigned int vm_flags;
+	unsigned int vm_flags = 0;
+	int write = !!(flags & GUP_FLAGS_WRITE);
+	int force = !!(flags & GUP_FLAGS_FORCE);
+	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);
 
 	if (len <= 0)
 		return 0;
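The three GUP_FLAGS_* bits decoded at the top of __get_user_pages() are not part of this hunk; in this series they are plain bit defines in mm/internal.h. A minimal sketch of the assumed layout:

	/* Assumed flag layout (mm/internal.h in this series); values are
	 * illustrative, not taken from this diff. */
	#define GUP_FLAGS_WRITE                  0x1
	#define GUP_FLAGS_FORCE                  0x2
	#define GUP_FLAGS_IGNORE_VMA_PERMISSIONS 0x4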
@@ -1158,7 +1163,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			pud_t *pud;
 			pmd_t *pmd;
 			pte_t *pte;
-			if (write) /* user gate pages are read-only */
+
+			/* user gate pages are read-only */
+			if (!ignore && write)
 				return i ? : -EFAULT;
 			if (pg > TASK_SIZE)
 				pgd = pgd_offset_k(pg);
@@ -1190,8 +1197,9 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 			continue;
 		}
 
-		if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
-				|| !(vm_flags & vma->vm_flags))
+		if (!vma ||
+		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+		    (!ignore && !(vm_flags & vma->vm_flags)))
 			return i ? : -EFAULT;
 
 		if (is_vm_hugetlb_page(vma)) {
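Note that both permission tests gained an `ignore` escape hatch: the write check on the read-only gate area (previous hunk) and the vm_flags test against the VMA's protections (above) are skipped when GUP_FLAGS_IGNORE_VMA_PERMISSIONS is set, so an internal caller can fault in pages of a VMA whose protection bits would otherwise reject the access.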
@@ -1266,6 +1274,23 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 	} while (len);
 	return i;
 }
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, int len, int write, int force,
+		struct page **pages, struct vm_area_struct **vmas)
+{
+	int flags = 0;
+
+	if (write)
+		flags |= GUP_FLAGS_WRITE;
+	if (force)
+		flags |= GUP_FLAGS_FORCE;
+
+	return __get_user_pages(tsk, mm,
+				start, len, flags,
+				pages, vmas);
+}
+
 EXPORT_SYMBOL(get_user_pages);
 
 pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
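get_user_pages() itself is now a thin wrapper that translates the old write/force ints into flag bits before calling __get_user_pages(). A hedged usage sketch; gup_one_page() is a hypothetical helper, not part of this diff:

	/* Hypothetical caller, for illustration only. */
	static int gup_one_page(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long addr, struct page **page)
	{
		/* Existing callers are unchanged: write = 1, force = 0. */
		int ret = get_user_pages(tsk, mm, addr, 1, 1, 0, page, NULL);

		if (ret >= 0)
			return ret == 1 ? 0 : -EFAULT;

		/*
		 * Internal code (e.g. the mlock population path in this
		 * series) can call __get_user_pages() directly and ask it
		 * to disregard the VMA's protection bits.
		 */
		ret = __get_user_pages(tsk, mm, addr, 1,
				       GUP_FLAGS_WRITE |
				       GUP_FLAGS_IGNORE_VMA_PERMISSIONS,
				       page, NULL);
		return ret == 1 ? 0 : (ret < 0 ? ret : -EFAULT);
	}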
@@ -1296,18 +1321,14 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	retval = mem_cgroup_charge(page, mm, GFP_KERNEL);
-	if (retval)
-		goto out;
-
 	retval = -EINVAL;
 	if (PageAnon(page))
-		goto out_uncharge;
+		goto out;
 	retval = -ENOMEM;
 	flush_dcache_page(page);
 	pte = get_locked_pte(mm, addr, &ptl);
 	if (!pte)
-		goto out_uncharge;
+		goto out;
 	retval = -EBUSY;
 	if (!pte_none(*pte))
 		goto out_unlock;
@@ -1323,8 +1344,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 out_unlock:
 	pte_unmap_unlock(pte, ptl);
-out_uncharge:
-	mem_cgroup_uncharge_page(page);
 out:
 	return retval;
 }
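With the memcg charge dropped, insert_page()'s unwinding collapses to the single out: label above. insert_page() is the backend of vm_insert_page(), typically called from a driver's mmap handler; a minimal sketch of such a caller (my_dev_mmap and my_pages are assumptions, not from this diff):

	/* Hypothetical driver mmap handler built on vm_insert_page(). */
	extern struct page *my_pages[];	/* pages owned by the driver */

	static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long addr;
		int i = 0, err;

		for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) {
			err = vm_insert_page(vma, addr, my_pages[i++]);
			if (err)	/* -EINVAL/-ENOMEM/-EBUSY, as set above */
				return err;
		}
		return 0;
	}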
@@ -1858,6 +1877,15 @@ gotten:
 	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
 	if (!new_page)
 		goto oom;
+	/*
+	 * Don't let another task, with possibly unlocked vma,
+	 * keep the mlocked page.
+	 */
+	if (vma->vm_flags & VM_LOCKED) {
+		lock_page(old_page);	/* for LRU manipulation */
+		clear_page_mlock(old_page);
+		unlock_page(old_page);
+	}
 	cow_user_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
@@ -1886,11 +1914,13 @@ gotten:
 		 * thread doing COW.
 		 */
 		ptep_clear_flush_notify(vma, address, page_table);
-		set_pte_at(mm, address, page_table, entry);
-		update_mmu_cache(vma, address, entry);
-		lru_cache_add_active(new_page);
+		SetPageSwapBacked(new_page);
+		lru_cache_add_active_or_unevictable(new_page, vma);
 		page_add_new_anon_rmap(new_page, vma, address);
 
+//TODO: is this safe? do_anonymous_page() does it this way.
+		set_pte_at(mm, address, page_table, entry);
+		update_mmu_cache(vma, address, entry);
 		if (old_page) {
 			/*
 			 * Only after switching the pte to the new page may
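Two changes land here. First, the new anonymous page now goes through lru_cache_add_active_or_unevictable() after SetPageSwapBacked(), so a page that cannot be evicted (for instance in a VM_LOCKED vma) lands on the unevictable list rather than the active LRU; the helper introduced by this series behaves roughly like the sketch below (shape assumed, not shown in this diff). Second, set_pte_at()/update_mmu_cache() move after page_add_new_anon_rmap(), matching do_anonymous_page(), which is exactly what the in-line TODO is flagging.

	/* Rough shape of the helper (mm/swap.c in this series); assumed. */
	void lru_cache_add_active_or_unevictable(struct page *page,
						 struct vm_area_struct *vma)
	{
		if (page_evictable(page, vma))
			lru_cache_add_active(page);	/* normal active LRU */
		else
			add_page_to_unevictable_list(page);
	}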
@@ -2288,16 +2318,17 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		count_vm_event(PGMAJFAULT);
 	}
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-		ret = VM_FAULT_OOM;
-		goto out;
-	}
-
 	mark_page_accessed(page);
+
 	lock_page(page);
 	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
 
+	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+		ret = VM_FAULT_OOM;
+		unlock_page(page);
+		goto out;
+	}
+
 	/*
 	 * Back out if somebody else already faulted in this pte.
 	 */
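The memcg charge in do_swap_page() now happens after lock_page() rather than before it, so the failure path has to unlock_page() before bailing out with VM_FAULT_OOM.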
@@ -2324,7 +2355,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	page_add_anon_rmap(page, vma, address);
 
 	swap_free(entry);
-	if (vm_swap_full())
+	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		remove_exclusive_swap_page(page);
 	unlock_page(page);
 
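The widened test frees the exclusive swap slot eagerly for mlocked pages too: a page in a VM_LOCKED vma, or one already marked PageMlocked, is not going to be reclaimed, so keeping its swap slot allocated would only pin swap space.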
@@ -2382,7 +2413,8 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte_none(*page_table))
 		goto release;
 	inc_mm_counter(mm, anon_rss);
-	lru_cache_add_active(page);
+	SetPageSwapBacked(page);
+	lru_cache_add_active_or_unevictable(page, vma);
 	page_add_new_anon_rmap(page, vma, address);
 	set_pte_at(mm, address, page_table, entry);
 
@@ -2423,6 +2455,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page;
 	pte_t entry;
 	int anon = 0;
+	int charged = 0;
 	struct page *dirty_page = NULL;
 	struct vm_fault vmf;
 	int ret;
@@ -2463,6 +2496,18 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				ret = VM_FAULT_OOM;
 				goto out;
 			}
+			if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
+				ret = VM_FAULT_OOM;
+				page_cache_release(page);
+				goto out;
+			}
+			charged = 1;
+			/*
+			 * Don't let another task, with possibly unlocked vma,
+			 * keep the mlocked page.
+			 */
+			if (vma->vm_flags & VM_LOCKED)
+				clear_page_mlock(vmf.page);
 			copy_user_highpage(page, vmf.page, address, vma);
 			__SetPageUptodate(page);
 		} else {
@@ -2497,11 +2542,6 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	}
 
-	if (mem_cgroup_charge(page, mm, GFP_KERNEL)) {
-		ret = VM_FAULT_OOM;
-		goto out;
-	}
-
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 
 	/*
@@ -2520,11 +2560,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		entry = mk_pte(page, vma->vm_page_prot);
 		if (flags & FAULT_FLAG_WRITE)
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		set_pte_at(mm, address, page_table, entry);
 		if (anon) {
-			inc_mm_counter(mm, anon_rss);
-			lru_cache_add_active(page);
-			page_add_new_anon_rmap(page, vma, address);
+			inc_mm_counter(mm, anon_rss);
+			SetPageSwapBacked(page);
+			lru_cache_add_active_or_unevictable(page, vma);
+			page_add_new_anon_rmap(page, vma, address);
 		} else {
 			inc_mm_counter(mm, file_rss);
 			page_add_file_rmap(page);
@@ -2533,11 +2573,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 				get_page(dirty_page);
 			}
 		}
+//TODO: is this safe? do_anonymous_page() does it this way.
+		set_pte_at(mm, address, page_table, entry);
 
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, entry);
 	} else {
-		mem_cgroup_uncharge_page(page);
+		if (charged)
+			mem_cgroup_uncharge_page(page);
 		if (anon)
 			page_cache_release(page);
 		else
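The new charged flag pairs this uncharge with the conditional mem_cgroup_charge() added in the anonymous-CoW branch above: only a page that this fault actually charged gets uncharged on the failure path, where previously the uncharge ran unconditionally. The set_pte_at() hoisted out of the anon/file branches carries the same do_anonymous_page()-style ordering TODO as the write-protect path earlier.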
@@ -2772,19 +2815,9 @@ int make_pages_present(unsigned long addr, unsigned long end)
 	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
 	ret = get_user_pages(current, current->mm, addr,
 			len, write, 0, NULL, NULL);
-	if (ret < 0) {
-		/*
-		   SUS require strange return value to mlock
-		    - invalid addr generate to ENOMEM.
-		    - out of memory should generate EAGAIN.
-		 */
-		if (ret == -EFAULT)
-			ret = -ENOMEM;
-		else if (ret == -ENOMEM)
-			ret = -EAGAIN;
+	if (ret < 0)
 		return ret;
-	}
-	return ret == len ? 0 : -ENOMEM;
+	return ret == len ? 0 : -EFAULT;
 }
 
 #if !defined(__HAVE_ARCH_GATE_AREA)
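make_pages_present() no longer rewrites errnos on mlock's behalf; in this series that SUS-mandated translation (-EFAULT to -ENOMEM for an invalid address, -ENOMEM to -EAGAIN when out of memory) moves to the mlock side. A sketch of the helper assumed to carry it there (mm/mlock.c in this series; shape not shown in this diff):

	/* Assumed mlock-side translation helper. */
	static int __mlock_posix_error_return(long retval)
	{
		if (retval == -EFAULT)
			retval = -ENOMEM;	/* invalid address */
		else if (retval == -ENOMEM)
			retval = -EAGAIN;	/* out of memory */
		return retval;
	}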