[PATCH] mm: ptd_alloc take ptlock
Second step in pushing down the page_table_lock. Remove the temporary
bridging hack from __pud_alloc, __pmd_alloc, __pte_alloc: expect callers not
to hold page_table_lock, whether it's on init_mm or a user mm; take
page_table_lock internally to check if a racing task already allocated.

Convert their callers from common code. But avoid coming back to change them
again later: instead of moving the spin_lock(&mm->page_table_lock) down,
switch over to new macros pte_alloc_map_lock and pte_unmap_unlock, which
encapsulate the mapping+locking and unlocking+unmapping together, and in the
end may use alternatives to the mm page_table_lock itself.

These callers all hold mmap_sem (some exclusively, some not), so at no level
can a page table be whipped away from beneath them; and pte_alloc uses the
"atomic" pmd_present to test whether it needs to allocate. It appears that
on all arches we can safely descend without page_table_lock.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit c74df32c72
parent 1bb3630e89
committed by Linus Torvalds
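
The pte_alloc_map_lock/pte_unmap_unlock pair lives in include/linux/mm.h.
A sketch of its assumed shape, reconstructed from the description above
(at this stage *ptlp is still just &mm->page_table_lock, but callers see
only an opaque spinlock_t *, which is what lets a different lock be
substituted later in the series):

#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
({							\
	spinlock_t *__ptl = &(mm)->page_table_lock;	\
	pte_t *__pte = pte_offset_map(pmd, address);	\
	*(ptlp) = __ptl;	/* tell caller which lock was taken */	\
	spin_lock(__ptl);				\
	__pte;						\
})

#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
	((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address)) ? \
		NULL : pte_offset_map_lock(mm, pmd, address, ptlp))

#define pte_unmap_unlock(pte, ptl)	do {		\
	spin_unlock(ptl);				\
	pte_unmap(pte);					\
} while (0)

Note the "atomic" pmd_present test mentioned above: pte_alloc_map_lock only
calls down into the allocator when the pmd entry is absent, so the descent
itself needs no lock. The fremap.c hunks below show the resulting calling
convention: one out label for allocation failure before the lock is taken,
and pte_unmap_unlock once the pte has been mapped and locked.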
mm/fremap.c | 48

diff --git a/mm/fremap.c b/mm/fremap.c
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -63,23 +63,20 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	/*
 	 * This page may have been truncated. Tell the
@@ -89,10 +86,10 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	inode = vma->vm_file->f_mapping->host;
 	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
 	if (!page->mapping || page->index >= size)
-		goto err_unlock;
+		goto unlock;
 	err = -ENOMEM;
 	if (page_mapcount(page) > INT_MAX/2)
-		goto err_unlock;
+		goto unlock;
 
 	if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
 		inc_mm_counter(mm, file_rss);
@@ -101,17 +98,15 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	set_pte_at(mm, addr, pte, mk_pte(page, prot));
 	page_add_file_rmap(page);
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-
 	err = 0;
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
 	return err;
 }
 EXPORT_SYMBOL(install_page);
 
-
 /*
  * Install a file pte to a given virtual memory address, release any
  * previously existing mapping.
@@ -125,23 +120,20 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	pud_t *pud;
 	pgd_t *pgd;
 	pte_t pte_val;
+	spinlock_t *ptl;
 
 	BUG_ON(vma->vm_flags & VM_RESERVED);
 
 	pgd = pgd_offset(mm, addr);
-	spin_lock(&mm->page_table_lock);
-
 	pud = pud_alloc(mm, pgd, addr);
 	if (!pud)
-		goto err_unlock;
-
+		goto out;
 	pmd = pmd_alloc(mm, pud, addr);
 	if (!pmd)
-		goto err_unlock;
-
-	pte = pte_alloc_map(mm, pmd, addr);
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
 	if (!pte)
-		goto err_unlock;
+		goto out;
 
 	if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
 		update_hiwater_rss(mm);
@@ -150,17 +142,13 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
 	pte_val = *pte;
-	pte_unmap(pte);
 	update_mmu_cache(vma, addr, pte_val);
-	spin_unlock(&mm->page_table_lock);
-	return 0;
-
-err_unlock:
-	spin_unlock(&mm->page_table_lock);
+	pte_unmap_unlock(pte, ptl);
+	err = 0;
+out:
 	return err;
 }
 
-
 /***
  * sys_remap_file_pages - remap arbitrary pages of a shared backing store
  * file within an existing vma.
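
The matching allocator-side change lives in mm/memory.c, which this page
does not show: __pud_alloc, __pmd_alloc and __pte_alloc now take
page_table_lock themselves, just long enough to re-check whether a racing
task already installed the new table. A simplified sketch of the resulting
__pmd_alloc (ignoring the __ARCH_HAS_4LEVEL_HACK variant):

int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pud_present(*pud))		/* another task beat us to it */
		pmd_free(new);		/* so discard our allocation */
	else
		pud_populate(mm, pud, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}

The allocation itself happens before the lock is taken, so the lock is held
only across the check-and-populate step.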