mm, x86: get_user_pages() for dax mappings
A dax mapping establishes a pte with _PAGE_DEVMAP set when the driver has established a devm_memremap_pages() mapping, i.e. when the pfn_t return from ->direct_access() has PFN_DEV and PFN_MAP set. Later, when encountering _PAGE_DEVMAP during a page table walk we lookup and pin a struct dev_pagemap instance to keep the result of pfn_to_page() valid until put_page(). Signed-off-by: Dan Williams <dan.j.williams@intel.com> Tested-by: Logan Gunthorpe <logang@deltatee.com> Cc: Dave Hansen <dave@sr71.net> Cc: Mel Gorman <mgorman@suse.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: "H. Peter Anvin" <hpa@zytor.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
@@ -9,6 +9,7 @@
|
||||
#include <linux/vmstat.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/memremap.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
@@ -63,6 +64,16 @@ retry:
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
 * Roll back page references taken since nr_start: walk the pages[] array
 * backwards from the current count, dropping each pin and clearing the
 * referenced bit, until *nr is back at nr_start.  Used when a devmap
 * lookup fails partway through a fast-gup walk.
 */
static void undo_dev_pagemap(int *nr, int nr_start, struct page **pages)
{
	while (*nr != nr_start) {
		struct page *undo;

		(*nr)--;
		undo = pages[*nr];
		ClearPageReferenced(undo);
		put_page(undo);
	}
}
|
||||
|
||||
/*
|
||||
* The performance critical leaf functions are made noinline otherwise gcc
|
||||
* inlines everything into a single function which results in too much
|
||||
@@ -71,7 +82,9 @@ retry:
|
||||
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
struct dev_pagemap *pgmap = NULL;
|
||||
unsigned long mask;
|
||||
int nr_start = *nr;
|
||||
pte_t *ptep;
|
||||
|
||||
mask = _PAGE_PRESENT|_PAGE_USER;
|
||||
@@ -89,13 +102,21 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
|
||||
page = pte_page(pte);
|
||||
if (pte_devmap(pte)) {
|
||||
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
|
||||
if (unlikely(!pgmap)) {
|
||||
undo_dev_pagemap(nr, nr_start, pages);
|
||||
pte_unmap(ptep);
|
||||
return 0;
|
||||
}
|
||||
} else if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
|
||||
pte_unmap(ptep);
|
||||
return 0;
|
||||
}
|
||||
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
|
||||
page = pte_page(pte);
|
||||
get_page(page);
|
||||
put_dev_pagemap(pgmap);
|
||||
SetPageReferenced(page);
|
||||
pages[*nr] = page;
|
||||
(*nr)++;
|
||||
@@ -114,6 +135,32 @@ static inline void get_head_page_multiple(struct page *page, int nr)
|
||||
SetPageReferenced(page);
|
||||
}
|
||||
|
||||
/*
 * Fast-gup leaf for a huge devmap pmd: pin each 4K page in [addr, end),
 * holding a reference on the backing dev_pagemap across each pfn so that
 * pfn_to_page() stays valid.  Returns 1 on success; on a failed pagemap
 * lookup it undoes every reference taken in this call and returns 0.
 */
static int __gup_device_huge_pmd(pmd_t pmd, unsigned long addr,
		unsigned long end, struct page **pages, int *nr)
{
	struct dev_pagemap *pgmap = NULL;
	int nr_start = *nr;
	unsigned long pfn;

	/* Start at the pfn covering addr, not the pmd's base pfn. */
	pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		struct page *page;

		pgmap = get_dev_pagemap(pfn, pgmap);
		if (unlikely(!pgmap)) {
			/* Pagemap has gone away: back out everything pinned so far. */
			undo_dev_pagemap(nr, nr_start, pages);
			return 0;
		}
		page = pfn_to_page(pfn);
		SetPageReferenced(page);
		pages[*nr] = page;
		get_page(page);
		put_dev_pagemap(pgmap);
		(*nr)++;
		pfn++;
		addr += PAGE_SIZE;
	} while (addr != end);
	return 1;
}
|
||||
|
||||
static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
|
||||
unsigned long end, int write, struct page **pages, int *nr)
|
||||
{
|
||||
@@ -126,9 +173,13 @@ static noinline int gup_huge_pmd(pmd_t pmd, unsigned long addr,
|
||||
mask |= _PAGE_RW;
|
||||
if ((pmd_flags(pmd) & mask) != mask)
|
||||
return 0;
|
||||
|
||||
VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
|
||||
if (pmd_devmap(pmd))
|
||||
return __gup_device_huge_pmd(pmd, addr, end, pages, nr);
|
||||
|
||||
/* hugepages are never "special" */
|
||||
VM_BUG_ON(pmd_flags(pmd) & _PAGE_SPECIAL);
|
||||
VM_BUG_ON(!pfn_valid(pmd_pfn(pmd)));
|
||||
|
||||
refs = 0;
|
||||
head = pmd_page(pmd);
|
||||
|
Reference in New Issue
Block a user