mm: re-architect the VM_UNPAGED logic
This replaces the (in my opinion horrible) VM_UNPAGED logic with very
explicit support for a "remapped page range" aka VM_PFNMAP.  It allows a
VM area to contain an arbitrary range of page table entries that the VM
never touches, and never considers to be normal pages.

Any user of "remap_pfn_range()" automatically gets this new
functionality, and doesn't even have to mark the pages reserved or
indeed mark them any other way.  It just works.  As a side effect,
doing mmap() on /dev/mem works for arbitrary ranges.

Sparc update from David in the next commit.

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
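For context, the typical way a driver picks up this behaviour is simply by calling remap_pfn_range() from its mmap() handler. A minimal sketch follows; it is not part of this commit, and the names mydev_mmap and MYDEV_PHYS_ADDR are hypothetical:

/*
 * Minimal sketch, not from this commit: a character-device mmap()
 * handler that hands a physical region to user space.  mydev_mmap
 * and MYDEV_PHYS_ADDR are made-up names for illustration.
 */
#include <linux/fs.h>
#include <linux/mm.h>

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	/*
	 * With this change, remap_pfn_range() is expected to mark the
	 * vma as a pfn remapping (VM_PFNMAP) by itself, so the driver
	 * no longer has to mark the backing pages reserved or flag
	 * them in any other way.
	 */
	return remap_pfn_range(vma, vma->vm_start,
			       MYDEV_PHYS_ADDR >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}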
 mm/fremap.c | 22
@@ -27,24 +27,20 @@ static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	struct page *page = NULL;
 
 	if (pte_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		flush_cache_page(vma, addr, pfn);
+		flush_cache_page(vma, addr, pte_pfn(pte));
 		pte = ptep_clear_flush(vma, addr, ptep);
-		if (unlikely(!pfn_valid(pfn))) {
-			print_bad_pte(vma, pte, addr);
-			goto out;
+		page = vm_normal_page(vma, addr, pte);
+		if (page) {
+			if (pte_dirty(pte))
+				set_page_dirty(page);
+			page_remove_rmap(page);
+			page_cache_release(page);
 		}
-		page = pfn_to_page(pfn);
-		if (pte_dirty(pte))
-			set_page_dirty(page);
-		page_remove_rmap(page);
-		page_cache_release(page);
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear(mm, addr, ptep);
 	}
-out:
 	return !!page;
 }
 
@@ -65,8 +61,6 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_UNPAGED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
@@ -122,8 +116,6 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_t pte_val;
 	spinlock_t *ptl;
 
-	BUG_ON(vma->vm_flags & VM_UNPAGED);
-
 	pgd = pgd_offset(mm, addr);
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
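The zap_pte() hunk above leans on vm_normal_page(), which this commit introduces in mm/memory.c (not shown in this excerpt). Roughly paraphrased, and not the exact code from the commit, the idea is that a pte sitting at its expected linear offset inside a VM_PFNMAP vma is a raw remapped pfn the VM must leave alone, while anything else is a normal page:

/*
 * Paraphrased sketch of the idea behind vm_normal_page(); the real
 * helper lives in mm/memory.c and carries additional sanity checks.
 */
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
			    pte_t pte)
{
	unsigned long pfn = pte_pfn(pte);

	if (vma->vm_flags & VM_PFNMAP) {
		/*
		 * remap_pfn_range() builds a linear pfn mapping, so a
		 * pte whose pfn matches the vma's linear offset is a
		 * remapped range entry: report "no normal page here".
		 */
		unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
		if (pfn == vma->vm_pgoff + off)
			return NULL;
	}

	if (unlikely(!pfn_valid(pfn))) {
		print_bad_pte(vma, pte, addr);
		return NULL;
	}
	return pfn_to_page(pfn);
}

Because zap_pte() only touches the struct page when vm_normal_page() returns one, the old pfn_valid()/pfn_to_page() dance and the "out:" label become unnecessary, and install_page()/install_file_pte() can drop their VM_UNPAGED assertions since that flag no longer exists.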