Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:
 "A few little subsystems and a start of a lot of MM patches.

  Subsystems affected by this patch series: squashfs, ocfs2, parisc,
  vfs. With mm subsystems: slab-generic, slub, debug, pagecache, gup,
  swap, memcg, pagemap, memory-failure, vmalloc, kasan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (128 commits)
  kasan: move kasan_report() into report.c
  mm/mm_init.c: report kasan-tag information stored in page->flags
  ubsan: entirely disable alignment checks under UBSAN_TRAP
  kasan: fix clang compilation warning due to stack protector
  x86/mm: remove vmalloc faulting
  mm: remove vmalloc_sync_(un)mappings()
  x86/mm/32: implement arch_sync_kernel_mappings()
  x86/mm/64: implement arch_sync_kernel_mappings()
  mm/ioremap: track which page-table levels were modified
  mm/vmalloc: track which page-table levels were modified
  mm: add functions to track page directory modifications
  s390: use __vmalloc_node in stack_alloc
  powerpc: use __vmalloc_node in alloc_vm_stack
  arm64: use __vmalloc_node in arch_alloc_vmap_stack
  mm: remove vmalloc_user_node_flags
  mm: switch the test_vmalloc module to use __vmalloc_node
  mm: remove __vmalloc_node_flags_caller
  mm: remove both instances of __vmalloc_node_flags
  mm: remove the prot argument to __vmalloc_node
  mm: remove the pgprot argument to __vmalloc
  ...
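The hunks below all follow from the __vmalloc() change named in the shortlog: the pgprot argument is removed and mappings are always PAGE_KERNEL. As a reading aid only (not part of the commit), a minimal sketch of what that means for a caller; the helper name is hypothetical:

    #include <linux/gfp.h>
    #include <linux/vmalloc.h>

    /* Hypothetical helper, for illustration only. */
    static void *example_buf_alloc(unsigned long size)
    {
            /* Old form, removed by this series:
             *      return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
             * New form: __vmalloc() always maps with PAGE_KERNEL, so only the
             * size and the gfp mask remain.
             */
            return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
    }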
@@ -82,7 +82,7 @@ struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flag
 	struct bpf_prog *fp;
 
 	size = round_up(size, PAGE_SIZE);
-	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+	fp = __vmalloc(size, gfp_flags);
 	if (fp == NULL)
 		return NULL;
 
@@ -232,7 +232,7 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
 	if (ret)
 		return NULL;
 
-	fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
+	fp = __vmalloc(size, gfp_flags);
 	if (fp == NULL) {
 		__bpf_prog_uncharge(fp_old->aux->user, delta);
 	} else {
@@ -1089,7 +1089,7 @@ static struct bpf_prog *bpf_prog_clone_create(struct bpf_prog *fp_other,
 	gfp_t gfp_flags = GFP_KERNEL | __GFP_ZERO | gfp_extra_flags;
 	struct bpf_prog *fp;
 
-	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags, PAGE_KERNEL);
+	fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);
 	if (fp != NULL) {
 		/* aux->prog still points to the fp_other one, so
 		 * when promoting the clone to the real program,
@@ -25,6 +25,7 @@
 #include <linux/nospec.h>
 #include <linux/audit.h>
 #include <uapi/linux/btf.h>
+#include <asm/pgtable.h>
 #include <linux/bpf_lsm.h>
 
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
@@ -281,27 +282,29 @@ static void *__bpf_map_area_alloc(u64 size, int numa_node, bool mmapable)
 	 * __GFP_RETRY_MAYFAIL to avoid such situations.
 	 */
 
-	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
+	const gfp_t gfp = __GFP_NOWARN | __GFP_ZERO;
+	unsigned int flags = 0;
+	unsigned long align = 1;
 	void *area;
 
 	if (size >= SIZE_MAX)
 		return NULL;
 
 	/* kmalloc()'ed memory can't be mmap()'ed */
-	if (!mmapable && size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
+	if (mmapable) {
+		BUG_ON(!PAGE_ALIGNED(size));
+		align = SHMLBA;
+		flags = VM_USERMAP;
+	} else if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+		area = kmalloc_node(size, gfp | GFP_USER | __GFP_NORETRY,
 				    numa_node);
 		if (area != NULL)
 			return area;
 	}
-	if (mmapable) {
-		BUG_ON(!PAGE_ALIGNED(size));
-		return vmalloc_user_node_flags(size, numa_node, GFP_KERNEL |
-					       __GFP_RETRY_MAYFAIL | flags);
-	}
-	return __vmalloc_node_flags_caller(size, numa_node,
-					   GFP_KERNEL | __GFP_RETRY_MAYFAIL |
-					   flags, __builtin_return_address(0));
+
+	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
+			gfp | GFP_KERNEL | __GFP_RETRY_MAYFAIL, PAGE_KERNEL,
+			flags, numa_node, __builtin_return_address(0));
 }
 
 void *bpf_map_area_alloc(u64 size, int numa_node)
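As a reading aid only (not part of the commit): the mmapable branch above open-codes what the removed vmalloc_user_node_flags() call used to provide. A sketch of that path in isolation, with a hypothetical helper name; the __vmalloc_node_range() parameters mirror the new code in the hunk, and the header choices are assumptions:

    #include <linux/vmalloc.h>
    #include <asm/pgtable.h>	/* PAGE_KERNEL, VMALLOC_START/VMALLOC_END */
    #include <asm/shmparam.h>	/* SHMLBA */

    /* Hypothetical helper, for illustration only: a user-mmapable
     * allocation expressed directly via __vmalloc_node_range(), matching
     * the mmapable branch of __bpf_map_area_alloc() above.
     */
    static void *example_mmapable_alloc(unsigned long size, int node, gfp_t gfp)
    {
            return __vmalloc_node_range(size, SHMLBA, VMALLOC_START, VMALLOC_END,
                            gfp, PAGE_KERNEL, VM_USERMAP, node,
                            __builtin_return_address(0));
    }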
@@ -20,23 +20,6 @@ struct page **dma_common_find_pages(void *cpu_addr)
 	return area->pages;
 }
 
-static struct vm_struct *__dma_common_pages_remap(struct page **pages,
-			size_t size, pgprot_t prot, const void *caller)
-{
-	struct vm_struct *area;
-
-	area = get_vm_area_caller(size, VM_DMA_COHERENT, caller);
-	if (!area)
-		return NULL;
-
-	if (map_vm_area(area, prot, pages)) {
-		vunmap(area->addr);
-		return NULL;
-	}
-
-	return area;
-}
-
 /*
  * Remaps an array of PAGE_SIZE pages into another vm_area.
  * Cannot be used in non-sleeping contexts
@@ -44,15 +27,12 @@ static struct vm_struct *__dma_common_pages_remap(struct page **pages,
 void *dma_common_pages_remap(struct page **pages, size_t size,
 			 pgprot_t prot, const void *caller)
 {
-	struct vm_struct *area;
+	void *vaddr;
 
-	area = __dma_common_pages_remap(pages, size, prot, caller);
-	if (!area)
-		return NULL;
-
-	area->pages = pages;
-
-	return area->addr;
+	vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
+	if (vaddr)
+		find_vm_area(vaddr)->pages = pages;
+	return vaddr;
 }
 
 /*
@@ -62,24 +42,20 @@ void *dma_common_pages_remap(struct page **pages, size_t size,
 void *dma_common_contiguous_remap(struct page *page, size_t size,
 			pgprot_t prot, const void *caller)
 {
-	int i;
+	int count = size >> PAGE_SHIFT;
 	struct page **pages;
-	struct vm_struct *area;
+	void *vaddr;
+	int i;
 
-	pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		return NULL;
-
-	for (i = 0; i < (size >> PAGE_SHIFT); i++)
+	for (i = 0; i < count; i++)
 		pages[i] = nth_page(page, i);
-
-	area = __dma_common_pages_remap(pages, size, prot, caller);
-
+	vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
 	kfree(pages);
 
-	if (!area)
-		return NULL;
-	return area->addr;
+	return vaddr;
 }
 
 /*
@@ -20,7 +20,7 @@ struct group_info *groups_alloc(int gidsetsize)
 	len = sizeof(struct group_info) + sizeof(kgid_t) * gidsetsize;
 	gi = kmalloc(len, GFP_KERNEL_ACCOUNT|__GFP_NOWARN|__GFP_NORETRY);
 	if (!gi)
-		gi = __vmalloc(len, GFP_KERNEL_ACCOUNT, PAGE_KERNEL);
+		gi = __vmalloc(len, GFP_KERNEL_ACCOUNT);
 	if (!gi)
 		return NULL;
 
@@ -2951,8 +2951,7 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
 		return err;
 
 	/* Suck in entire file: we'll want most of it. */
-	info->hdr = __vmalloc(info->len,
-			GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
+	info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
 	if (!info->hdr)
 		return -ENOMEM;
 
@@ -519,7 +519,6 @@ NOKPROBE_SYMBOL(notify_die);
 
 int register_die_notifier(struct notifier_block *nb)
 {
-	vmalloc_sync_mappings();
 	return atomic_notifier_chain_register(&die_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_die_notifier);
@@ -2262,7 +2262,7 @@ int __weak arch_prctl_spec_ctrl_set(struct task_struct *t, unsigned long which,
 	return -EINVAL;
 }
 
-#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LESS_THROTTLE)
+#define PR_IO_FLUSHER (PF_MEMALLOC_NOIO | PF_LOCAL_THROTTLE)
 
 SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
 		unsigned long, arg4, unsigned long, arg5)
@@ -8527,18 +8527,6 @@ static int allocate_trace_buffers(struct trace_array *tr, int size)
 	allocate_snapshot = false;
 #endif
 
-	/*
-	 * Because of some magic with the way alloc_percpu() works on
-	 * x86_64, we need to synchronize the pgd of all the tables,
-	 * otherwise the trace events that happen in x86_64 page fault
-	 * handlers can't cope with accessing the chance that a
-	 * alloc_percpu()'d memory might be touched in the page fault trace
-	 * event. Oh, and we need to audit all other alloc_percpu() and vmalloc()
-	 * calls in tracing, because something might get triggered within a
-	 * page fault trace event!
-	 */
-	vmalloc_sync_mappings();
-
 	return 0;
 }
 