Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
 "A few little subsystems and a start of a lot of MM patches.

  Subsystems affected by this patch series: squashfs, ocfs2, parisc,
  vfs. With mm subsystems: slab-generic, slub, debug, pagecache, gup,
  swap, memcg, pagemap, memory-failure, vmalloc, kasan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (128 commits)
  kasan: move kasan_report() into report.c
  mm/mm_init.c: report kasan-tag information stored in page->flags
  ubsan: entirely disable alignment checks under UBSAN_TRAP
  kasan: fix clang compilation warning due to stack protector
  x86/mm: remove vmalloc faulting
  mm: remove vmalloc_sync_(un)mappings()
  x86/mm/32: implement arch_sync_kernel_mappings()
  x86/mm/64: implement arch_sync_kernel_mappings()
  mm/ioremap: track which page-table levels were modified
  mm/vmalloc: track which page-table levels were modified
  mm: add functions to track page directory modifications
  s390: use __vmalloc_node in stack_alloc
  powerpc: use __vmalloc_node in alloc_vm_stack
  arm64: use __vmalloc_node in arch_alloc_vmap_stack
  mm: remove vmalloc_user_node_flags
  mm: switch the test_vmalloc module to use __vmalloc_node
  mm: remove __vmalloc_node_flags_caller
  mm: remove both instances of __vmalloc_node_flags
  mm: remove the prot argument to __vmalloc_node
  mm: remove the pgprot argument to __vmalloc
  ...
@@ -1709,6 +1709,8 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 		    unsigned int gup_flags, struct page **pages, int *locked);
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 		    struct page **pages, unsigned int gup_flags);
+long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
+		    struct page **pages, unsigned int gup_flags);
 
 int get_user_pages_fast(unsigned long start, int nr_pages,
 			unsigned int gup_flags, struct page **pages);
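For orientation, the new pin_user_pages_unlocked() mirrors get_user_pages_unlocked() but takes FOLL_PIN-style references that must be dropped with unpin_user_pages(). A minimal caller sketch; the function name, buffer size, and FOLL_WRITE choice are illustrative, not part of the patch:

/*
 * Sketch: pin up to 16 pages of a user buffer for direct access, then
 * release them.  example_pin_user_buffer() is a hypothetical name; only
 * pin_user_pages_unlocked()/unpin_user_pages() come from the kernel API.
 */
static int example_pin_user_buffer(unsigned long uaddr, unsigned long nr)
{
	struct page *pages[16];
	long pinned;

	if (nr > ARRAY_SIZE(pages))
		return -EINVAL;

	/* The caller does not hold mmap_sem; the helper locks internally. */
	pinned = pin_user_pages_unlocked(uaddr, nr, pages, FOLL_WRITE);
	if (pinned < 0)
		return pinned;		/* -errno */

	/* pinned may be less than nr; a real caller must handle short pins. */

	/* ... access the pinned pages ... */

	unpin_user_pages(pages, pinned);	/* drop the FOLL_PIN refs */
	return 0;
}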
@@ -2085,13 +2087,54 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
 	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
 		NULL : pud_offset(p4d, address);
 }
+
+static inline p4d_t *p4d_alloc_track(struct mm_struct *mm, pgd_t *pgd,
+				     unsigned long address,
+				     pgtbl_mod_mask *mod_mask)
+{
+	if (unlikely(pgd_none(*pgd))) {
+		if (__p4d_alloc(mm, pgd, address))
+			return NULL;
+		*mod_mask |= PGTBL_PGD_MODIFIED;
+	}
+
+	return p4d_offset(pgd, address);
+}
+
 #endif /* !__ARCH_HAS_5LEVEL_HACK */
 
+static inline pud_t *pud_alloc_track(struct mm_struct *mm, p4d_t *p4d,
+				     unsigned long address,
+				     pgtbl_mod_mask *mod_mask)
+{
+	if (unlikely(p4d_none(*p4d))) {
+		if (__pud_alloc(mm, p4d, address))
+			return NULL;
+		*mod_mask |= PGTBL_P4D_MODIFIED;
+	}
+
+	return pud_offset(p4d, address);
+}
+
 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
 {
 	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
 		NULL: pmd_offset(pud, address);
 }
+
+static inline pmd_t *pmd_alloc_track(struct mm_struct *mm, pud_t *pud,
+				     unsigned long address,
+				     pgtbl_mod_mask *mod_mask)
+{
+	if (unlikely(pud_none(*pud))) {
+		if (__pmd_alloc(mm, pud, address))
+			return NULL;
+		*mod_mask |= PGTBL_PUD_MODIFIED;
+	}
+
+	return pmd_offset(pud, address);
+}
 #endif /* CONFIG_MMU */
 
 #if USE_SPLIT_PTE_PTLOCKS
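These helpers exist so a page-table walker can record, in one pgtbl_mod_mask, which top levels it had to populate, and synchronize once at the end; that is how the "mm/vmalloc: track which page-table levels were modified" patches in this series use them. A condensed sketch of the pattern for a kernel mapping; the walker name example_map_kernel_page() and the final PTE write are illustrative, while the *_track helpers, pte_alloc_kernel_track(), and the PGTBL_*_MODIFIED bits come from this series:

/*
 * Sketch: thread one pgtbl_mod_mask through the allocation chain; each
 * *_track helper sets a bit in *mask only when it actually had to
 * allocate, i.e. when the level above it was modified.
 */
static int example_map_kernel_page(unsigned long addr, pgtbl_mod_mask *mask)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* kernel (init_mm) tables */
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
	if (!p4d)
		return -ENOMEM;
	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
	if (!pud)
		return -ENOMEM;
	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
	if (!pmd)
		return -ENOMEM;
	pte = pte_alloc_kernel_track(pmd, addr, mask);
	if (!pte)
		return -ENOMEM;

	/* ... set_pte_at(&init_mm, addr, pte, ...) ... */
	return 0;
}

The caller then checks mask against ARCH_PAGE_TABLE_SYNC_MASK and invokes arch_sync_kernel_mappings() once for the whole range, which is what lets this series remove vmalloc_sync_(un)mappings() and x86's vmalloc faulting.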
@@ -2207,6 +2250,11 @@ static inline void pgtable_pte_page_dtor(struct page *page)
 	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
 		NULL: pte_offset_kernel(pmd, address))
 
+#define pte_alloc_kernel_track(pmd, address, mask)			\
+	((unlikely(pmd_none(*(pmd))) &&					\
+	  (__pte_alloc_kernel(pmd) || ({*(mask)|=PGTBL_PMD_MODIFIED;0;})))?\
+		NULL: pte_offset_kernel(pmd, address))
+
 #if USE_SPLIT_PMD_PTLOCKS
 
 static struct page *pmd_to_page(pmd_t *pmd)
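The ({*(mask)|=PGTBL_PMD_MODIFIED;0;}) in the new macro is a GCC statement expression that evaluates to 0: when __pte_alloc_kernel() succeeds (returns 0), the || falls through to the side effect that records the modification, and the whole condition is still false, so the outer ?: takes the success branch; on allocation failure the nonzero return short-circuits straight to NULL, leaving the mask untouched. A userspace reduction of the idiom, illustrative only:

#include <stdio.h>

/* GCC statement expressions: ({ stmts; value; }) runs the statements and
 * evaluates to the final value.  Here a side effect (recording a
 * "modified" bit) rides on the success path of an || chain. */
int main(void)
{
	unsigned int mask = 0;
	int alloc_rc = 0;	/* stand-in for __pte_alloc_kernel(): 0 = success */

	int failed = alloc_rc || ({ mask |= 0x4; 0; });

	printf("failed=%d mask=%#x\n", failed, mask);	/* failed=0 mask=0x4 */
	return 0;
}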
@@ -2608,25 +2656,6 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 int __must_check write_one_page(struct page *page);
 void task_dirty_inc(struct task_struct *tsk);
 
-/* readahead.c */
-#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
-
-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
-
-void page_cache_sync_readahead(struct address_space *mapping,
-			       struct file_ra_state *ra,
-			       struct file *filp,
-			       pgoff_t offset,
-			       unsigned long size);
-
-void page_cache_async_readahead(struct address_space *mapping,
-				struct file_ra_state *ra,
-				struct file *filp,
-				struct page *pg,
-				pgoff_t offset,
-				unsigned long size);
-
 extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);