mm: join struct fault_env and vm_fault
Currently we have two different structures for passing fault information
around - struct vm_fault and struct fault_env. DAX will need more
information in struct vm_fault to handle its faults, so the content of
that structure would become even closer to fault_env. Furthermore, it
would need to generate struct fault_env to be able to call some of the
generic functions. So at this point I don't think there's much use in
keeping these two structures separate. Just embed into struct vm_fault
all that is needed to use it for both purposes.

Link: http://lkml.kernel.org/r/1479460644-25076-2-git-send-email-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
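To make the payoff concrete, here is a hedged before/after sketch (not part of the patch; the myfs_ name is hypothetical). With the structures joined, a handler that already holds a struct vm_fault can hand it straight to a generic helper such as alloc_set_pte(), whose new prototype appears in the hunks below, instead of first marshalling its arguments into a struct fault_env:

#include <linux/mm.h>
#include <linux/memcontrol.h>

/*
 * Before this commit the caller had only loose vma/address/flags/pmd
 * values and had to package them:
 *
 *	struct fault_env fe = {
 *		.vma = vma, .address = address,
 *		.flags = flags, .pmd = pmd,
 *	};
 *	ret = alloc_set_pte(&fe, memcg, page);
 *
 * Afterwards the single descriptor flows through unchanged:
 */
static int myfs_install_page(struct vm_fault *vmf,
			     struct mem_cgroup *memcg,
			     struct page *page)
{
	return alloc_set_pte(vmf, memcg, page);
}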
include/linux/mm.h
@@ -292,10 +292,16 @@ extern pgprot_t protection_map[16];
  * pgoff should be used in favour of virtual_address, if possible.
  */
 struct vm_fault {
+	struct vm_area_struct *vma;	/* Target VMA */
 	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 	pgoff_t pgoff;			/* Logical page offset based on vma */
-	void __user *virtual_address;	/* Faulting virtual address */
+	unsigned long address;		/* Faulting virtual address */
+	void __user *virtual_address;	/* Faulting virtual address masked by
+					 * PAGE_MASK */
+	pmd_t *pmd;			/* Pointer to pmd entry matching
+					 * the 'address'
+					 */
 
 	struct page *cow_page;		/* Handler may choose to COW */
 	struct page *page;		/* ->fault handlers should return a
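The hunk above gives the merged structure the vma, address and pmd fields that used to live only in fault_env. As a rough illustration (a sketch, not the upstream code; the fill_vmf() helper name is invented), the core fault path can now seed one descriptor up front and use it for the entire walk:

#include <linux/mm.h>
#include <linux/pagemap.h>

static void fill_vmf(struct vm_fault *vmf, struct vm_area_struct *vma,
		     unsigned long address, unsigned int flags)
{
	vmf->vma = vma;
	vmf->address = address;
	vmf->flags = flags;
	vmf->pgoff = linear_page_index(vma, address);
	/* vmf->pmd is filled in once the page-table walk reaches it;
	 * gfp_mask is set by the caller according to the VMA. */
	vmf->pmd = NULL;
}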
@@ -309,19 +315,7 @@ struct vm_fault {
 					 * VM_FAULT_DAX_LOCKED and fill in
 					 * entry here.
 					 */
-};
-
-/*
- * Page fault context: passes though page fault handler instead of endless list
- * of function arguments.
- */
-struct fault_env {
-	struct vm_area_struct *vma;	/* Target VMA */
-	unsigned long address;		/* Faulting virtual address */
-	unsigned int flags;		/* FAULT_FLAG_xxx flags */
-	pmd_t *pmd;			/* Pointer to pmd entry matching
-					 * the 'address'
-					 */
+
 	/* These three entries are valid only while holding ptl lock */
 	pte_t *pte;			/* Pointer to pte entry matching
 					 * the 'address'. NULL if the page
 					 * table hasn't been allocated.
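The comment retained from fault_env states a locking rule worth spelling out: vmf->pte is only meaningful while the page-table lock is held. A minimal sketch of that discipline (assuming the ptl spinlock field also carries over from fault_env, which this hunk's context elides; the helper name is invented):

#include <linux/mm.h>

/* Map and lock the pte for vmf->address, test it, and invalidate the
 * cached pointer as soon as the lock is dropped. Illustrative only. */
static bool myfault_pte_present(struct vm_fault *vmf)
{
	bool present;

	vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd,
				       vmf->address, &vmf->ptl);
	present = !pte_none(*vmf->pte);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	vmf->pte = NULL;	/* stale once vmf->ptl is released */
	return present;
}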
@@ -351,7 +345,7 @@ struct vm_operations_struct {
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
 						pmd_t *, unsigned int flags);
-	void (*map_pages)(struct fault_env *fe,
+	void (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 
 	/* notification that a previously read-only page is about to become
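With ->map_pages now taking a struct vm_fault, a stackable filesystem's vm_operations_struct wires up as below (a sketch; the myfs_ names are hypothetical, and the filemap_* helpers are the ones declared in the final hunk):

#include <linux/mm.h>
#include <linux/fs.h>

static void myfs_map_pages(struct vm_fault *vmf,
			   pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	/* Nothing filesystem-specific to do here; defer to the
	 * generic page-cache implementation. */
	filemap_map_pages(vmf, start_pgoff, end_pgoff);
}

static const struct vm_operations_struct myfs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= myfs_map_pages,
	.page_mkwrite	= filemap_page_mkwrite,
};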
@@ -625,7 +619,7 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 	return pte;
 }
 
-int alloc_set_pte(struct fault_env *fe, struct mem_cgroup *memcg,
+int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 		struct page *page);
 #endif
 
@@ -2094,7 +2088,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
-extern void filemap_map_pages(struct fault_env *fe,
+extern void filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 
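For completeness, a hedged usage sketch tying the exported helpers together: an mmap implementation that installs the ops table from the earlier sketch (the myfs_ names remain hypothetical):

#include <linux/fs.h>
#include <linux/mm.h>

static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &myfs_vm_ops;	/* defined in the sketch above */
	return 0;
}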