mm, dax: dax-pmd vs thp-pmd vs hugetlbfs-pmd
A dax-huge-page mapping, while it uses some thp helpers, is ultimately not a transparent huge page. The distinction is especially important in the get_user_pages() path. pmd_devmap() is used to distinguish dax-pmds from pmd_huge() and pmd_trans_huge(), which have slightly different semantics. Explicitly mark the pmd_trans_huge() helpers that dax needs by adding pmd_devmap() checks.

[kirill.shutemov@linux.intel.com: fix regression in handling mlocked pages in __split_huge_pmd()]
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Hansen <dave@sr71.net>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
committed by Linus Torvalds
parent 5c2c2587b1
commit 5c7fb56e5e
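
Every site in the diff below that previously tested pmd_trans_huge() alone now also tests pmd_devmap(), so that dax pmds take the huge-pmd paths in copy_pmd_range(), zap_pmd_range(), and __handle_mm_fault(). As a minimal sketch of that combined predicate (the helper name here is hypothetical; the patch itself open-codes the test at each call site):

/*
 * Illustrative only: the combined check applied at each call site in
 * the diff below. pmd_trans_huge() covers transparent huge pages;
 * pmd_devmap() covers dax/device-backed huge mappings, which reuse
 * some thp helpers but are not transparent huge pages.
 * The helper name is hypothetical, not part of this patch.
 */
static inline bool pmd_huge_or_devmap(pmd_t pmd)
{
	return pmd_trans_huge(pmd) || pmd_devmap(pmd);
}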
@@ -950,7 +950,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
 	src_pmd = pmd_offset(src_pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (pmd_trans_huge(*src_pmd)) {
+		if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
 			int err;
 			VM_BUG_ON(next-addr != HPAGE_PMD_SIZE);
 			err = copy_huge_pmd(dst_mm, src_mm,
@@ -1177,7 +1177,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (pmd_trans_huge(*pmd)) {
+		if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE) {
 #ifdef CONFIG_DEBUG_VM
 				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
@@ -3375,7 +3375,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 		int ret;
 
 		barrier();
-		if (pmd_trans_huge(orig_pmd)) {
+		if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
 			unsigned int dirty = flags & FAULT_FLAG_WRITE;
 
 			if (pmd_protnone(orig_pmd))
@@ -3404,7 +3404,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	    unlikely(__pte_alloc(mm, vma, pmd, address)))
 		return VM_FAULT_OOM;
 	/* if an huge pmd materialized from under us just retry later */
-	if (unlikely(pmd_trans_huge(*pmd)))
+	if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
 		return 0;
 	/*
 	 * A regular pmd is established and it can't morph into a huge pmd