BACKPORT: FROMGIT: mm: Cleanup faultaround and finish_fault() codepaths
alloc_set_pte() has two users with different requirements: in the
faultaround code, it is called from an atomic context and the PTE page
table has to be preallocated. finish_fault() can sleep and allocate a
page table as needed. The PTL locking rules are also strange, hard to
follow and overkill for finish_fault().

Let's untangle the mess. alloc_set_pte() has gone now. All locking is
explicit. The price is some code duplication to handle huge pages in
the faultaround path, but it should be fine, given the overall
improvement in readability.

Link: https://lore.kernel.org/r/20201229132819.najtavneutnf7ajp@box
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
[will: s/from from/from/ in comment; spotted by willy]
Signed-off-by: Will Deacon <will@kernel.org>
Change-Id: I2746b62adfe63e4f1b62e806df06b1b7a17574ad
Bug: 171278850
(cherry picked from commit f9ce0be71d1fbb038ada15ced83474b0e63f264d
 https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git/log/?h=for-next/faultaround)
[vinmenon: changes for speculative page fault]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
commit 0aa300a252
parent a83b02fc81
committed by: Will Deacon
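The new contract is easiest to see from the finish_fault() side. Below
is a minimal sketch of the flow the commit message describes: the
function may sleep, so it allocates the page table itself, and it takes
the PTL explicitly instead of hiding it inside alloc_set_pte(). The
name finish_fault_sketch is illustrative only; the real function also
handles the write/anonymous and huge-page cases and more error paths.

/*
 * Minimal sketch of the new finish_fault() flow, not the upstream
 * code: write/anon handling, the huge-page case and most error paths
 * are omitted.
 */
static vm_fault_t finish_fault_sketch(struct vm_fault *vmf)
{
        struct vm_area_struct *vma = vmf->vma;
        struct page *page = vmf->page;

        /* finish_fault() may sleep, so the page table is allocated here */
        if (pmd_none(*vmf->pmd) && pte_alloc(vma->vm_mm, vmf->pmd))
                return VM_FAULT_OOM;

        /* locking is now explicit in the caller, not hidden in a helper */
        vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address,
                                       &vmf->ptl);
        if (likely(pte_none(*vmf->pte))) {
                do_set_pte(vmf, page);          /* new helper from this patch */
                update_mmu_cache(vma, vmf->address, vmf->pte);
        }
        pte_unmap_unlock(vmf->pte, vmf->ptl);
        return 0;
}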
include/linux/mm.h
@@ -549,8 +549,8 @@ struct vm_fault {
 					 * is not NULL, otherwise pmd.
 					 */
 	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
-					 * vm_ops->map_pages() calls
-					 * alloc_set_pte() from atomic context.
+					 * vm_ops->map_pages() sets up a page
+					 * table from atomic context.
 					 * do_fault_around() pre-allocates
 					 * page table to avoid allocation from
 					 * atomic context.
@@ -583,7 +583,7 @@ struct vm_operations_struct {
 	vm_fault_t (*fault)(struct vm_fault *vmf);
 	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
 			enum page_entry_size pe_size);
-	void (*map_pages)(struct vm_fault *vmf,
+	vm_fault_t (*map_pages)(struct vm_fault *vmf,
 			pgoff_t start_pgoff, pgoff_t end_pgoff);
 	unsigned long (*pagesize)(struct vm_area_struct * area);
 
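This hunk and the filemap_map_pages() declaration further down are the
same interface change: ->map_pages() now reports whether it managed to
map the faulting address instead of returning void. The shape of a
filesystem's wiring is unchanged; a sketch (the myfs_ name is
hypothetical, the callbacks are the generic ones declared in this
header):

/*
 * Sketch: a filesystem using the generic helpers. Only myfs_file_vm_ops
 * is a made-up name. With this patch, .map_pages returns a vm_fault_t
 * (typically VM_FAULT_NOPAGE when the faulting address was mapped,
 * 0 otherwise) rather than void.
 */
static const struct vm_operations_struct myfs_file_vm_ops = {
        .fault          = filemap_fault,
        .map_pages      = filemap_map_pages,    /* now returns vm_fault_t */
        .page_mkwrite   = filemap_page_mkwrite,
};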
@@ -1002,7 +1002,9 @@ static inline pte_t maybe_mkwrite(pte_t pte, unsigned long vma_flags)
 	return pte;
 }
 
-vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct page *page);
+vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
+void do_set_pte(struct vm_fault *vmf, struct page *page);
+
 vm_fault_t finish_fault(struct vm_fault *vmf);
 vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
 #endif
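These two declarations are the split that replaces alloc_set_pte(): the
faultaround path can try to map a whole compound page with one PMD
entry via do_set_pmd(), and otherwise set individual PTEs with
do_set_pte(). A rough, heavily simplified sketch of the pattern
(map_one_page_sketch is a hypothetical name; the real logic lives in
filemap_map_pages() after this series):

/*
 * Rough sketch of the caller-side pattern enabled by the new helpers.
 * Assumes that, on the PTE path, vmf->pte points at the target slot
 * and the caller already holds the PTL.
 */
static vm_fault_t map_one_page_sketch(struct vm_fault *vmf, struct page *page)
{
        /* huge page: a single do_set_pmd() call maps the whole thing */
        if (pmd_none(*vmf->pmd) && PageTransHuge(page) &&
            do_set_pmd(vmf, page) == 0)
                return VM_FAULT_NOPAGE;

        /* otherwise fall back to a single PTE under the held PTL */
        if (pte_none(*vmf->pte))
                do_set_pte(vmf, page);
        return 0;
}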
@@ -2740,7 +2742,7 @@ extern void truncate_inode_pages_final(struct address_space *);
 
 /* generic vm_area_ops exported for stackable file systems */
 extern vm_fault_t filemap_fault(struct vm_fault *vmf);
-extern void filemap_map_pages(struct vm_fault *vmf,
+extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
 		pgoff_t start_pgoff, pgoff_t end_pgoff);
 extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 