mm/gup: change GUP fast to use flags rather than a write 'bool'
To facilitate additional options to get_user_pages_fast() change the
singular write parameter to be gup_flags.

This patch does not change any functionality. New functionality will
follow in subsequent patches.

Some of the get_user_pages_fast() call sites were unchanged because they
already passed FOLL_WRITE or 0 for the write parameter.

NOTE: It was suggested to change the ordering of the get_user_pages_fast()
arguments to ensure that callers were converted. This breaks the current
GUP call site convention of having the returned pages be the final
parameter. So the suggestion was rejected.

Link: http://lkml.kernel.org/r/20190328084422.29911-4-ira.weiny@intel.com
Link: http://lkml.kernel.org/r/20190317183438.2057-4-ira.weiny@intel.com
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Mike Marshall <hubcap@omnibond.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Hogan <jhogan@kernel.org>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Committed by: Linus Torvalds
Parent: b798bec474
Commit: 73b0140bf0
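For callers, the conversion this commit performs is mechanical: a boolean
write argument becomes either FOLL_WRITE or 0 in the new gup_flags
parameter. A minimal sketch of that pattern, using a hypothetical wrapper
(the function name and its arguments are illustrative only, not part of
the patch):

#include <linux/mm.h>	/* get_user_pages_fast(), FOLL_WRITE */

/* Hypothetical caller: pin a user buffer, optionally for writing. */
static int pin_user_buffer(unsigned long uaddr, int nr_pages, bool write,
			   struct page **pages)
{
	/*
	 * Before this patch: get_user_pages_fast(uaddr, nr_pages, write, pages);
	 * After this patch:  the bool is expressed as a FOLL_* flag, which
	 * leaves room for additional flags in follow-up patches.
	 */
	return get_user_pages_fast(uaddr, nr_pages,
				   write ? FOLL_WRITE : 0, pages);
}

Call sites that already passed FOLL_WRITE or 0 rather than a bool did not
need this conversion, which is why they are absent from the diff below.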
@@ -235,7 +235,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to
+ * @gup_flags:	flags modifying pin behaviour
  * @pages:	array that receives pointers to the pages pinned.
  *		Should be at least nr_pages long.
  *
@@ -247,8 +247,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -273,7 +273,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -289,7 +290,7 @@ slow_irqon:
 	pages += nr;
 
 	ret = get_user_pages_unlocked(start, (end - start) >> PAGE_SHIFT,
-				      pages, write ? FOLL_WRITE : 0);
+				      pages, gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -600,7 +600,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
 	/* If writing != 0, then the HPTE must allow writing, if we get here */
 	write_ok = writing;
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	npages = get_user_pages_fast(hva, 1, writing, pages);
+	npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
 	if (npages < 1) {
 		/* Check if it's an I/O mapping */
 		down_read(&current->mm->mmap_sem);
@@ -1193,7 +1193,7 @@ void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
 	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
 		goto err;
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	npages = get_user_pages_fast(hva, 1, 1, pages);
+	npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
 	if (npages < 1)
 		goto err;
 	page = pages[0];
@@ -783,7 +783,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
 	if (!pages)
 		return -ENOMEM;
 
-	ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
+	ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
 	if (ret < 0)
 		goto free_pages;
 
@@ -2376,7 +2376,7 @@ static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
 		ret = -EFAULT;
 		goto out;
 	}
-	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
+	ret = get_user_pages_fast(map->addr, 1, FOLL_WRITE, &map->page);
 	if (ret < 0)
 		goto out;
 	BUG_ON(ret != 1);
@@ -204,7 +204,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * get_user_pages_fast() - pin user pages in memory
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
- * @write:	whether pages will be written to
+ * @gup_flags:	flags modifying pin behaviour
  * @pages:	array that receives pointers to the pages pinned.
  *		Should be at least nr_pages long.
  *
@@ -216,8 +216,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
  * were pinned, returns -errno.
  */
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -241,7 +241,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 	local_irq_enable();
@@ -261,7 +262,7 @@ slow_irqon:
 
 	ret = get_user_pages_unlocked(start,
 			(end - start) >> PAGE_SHIFT, pages,
-			write ? FOLL_WRITE : 0);
+			gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -245,8 +245,8 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
 	return nr;
 }
 
-int get_user_pages_fast(unsigned long start, int nr_pages, int write,
-			struct page **pages)
+int get_user_pages_fast(unsigned long start, int nr_pages,
+			unsigned int gup_flags, struct page **pages)
 {
 	struct mm_struct *mm = current->mm;
 	unsigned long addr, len, end;
@@ -303,7 +303,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
 		next = pgd_addr_end(addr, end);
 		if (pgd_none(pgd))
 			goto slow;
-		if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
+		if (!gup_pud_range(pgd, addr, next, gup_flags & FOLL_WRITE,
+				   pages, &nr))
 			goto slow;
 	} while (pgdp++, addr = next, addr != end);
 
@@ -324,7 +325,7 @@ slow:
 
 	ret = get_user_pages_unlocked(start,
 			(end - start) >> PAGE_SHIFT, pages,
-			write ? FOLL_WRITE : 0);
+			gup_flags);
 
 	/* Have to be a bit careful with return values */
 	if (nr > 0) {
@@ -140,7 +140,7 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	pt_element_t *table;
 	struct page *page;
 
-	npages = get_user_pages_fast((unsigned long)ptep_user, 1, 1, &page);
+	npages = get_user_pages_fast((unsigned long)ptep_user, 1, FOLL_WRITE, &page);
 	/* Check if the user is doing something meaningless. */
 	if (unlikely(npages != 1))
 		return -EFAULT;
@@ -1805,7 +1805,7 @@ static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
 		return NULL;
 
 	/* Pin the user virtual address. */
-	npinned = get_user_pages_fast(uaddr, npages, write ? 1 : 0, pages);
+	npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
 	if (npinned != npages) {
 		pr_err("SEV: Failure locking %lu pages.\n", npages);
 		goto err;
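Across the hunks above, the implementation-side pattern is the same
everywhere: the page-table walk only consumes the write bit
(gup_flags & FOLL_WRITE), while the slow-path fallback now receives the
whole flag word instead of reconstructing it from a bool. A condensed,
hypothetical sketch of that shape follows; walk_pgd_range() is a
placeholder for the per-arch gup_pud_range()/gup_pmd_range() walkers, and
the careful partial-result handling of the real code is omitted:

int get_user_pages_fast(unsigned long start, int nr_pages,
			unsigned int gup_flags, struct page **pages)
{
	unsigned long end = start + ((unsigned long)nr_pages << PAGE_SHIFT);
	int nr = 0;

	/* Fast path: the walker only needs to know whether write access
	 * is required, so it sees just the FOLL_WRITE bit. */
	if (walk_pgd_range(start, end, gup_flags & FOLL_WRITE, pages, &nr))
		return nr;

	/* Slow path: forward the complete flag set unchanged, instead of
	 * the old "write ? FOLL_WRITE : 0" reconstruction. */
	return get_user_pages_unlocked(start + ((unsigned long)nr << PAGE_SHIFT),
				       nr_pages - nr, pages + nr, gup_flags);
}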