Merge branch 'linus' into locking/core, to pick up fixes and dependencies
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -26,8 +26,10 @@ static void bpf_array_free_percpu(struct bpf_array *array)
 {
 	int i;
 
-	for (i = 0; i < array->map.max_entries; i++)
+	for (i = 0; i < array->map.max_entries; i++) {
 		free_percpu(array->pptrs[i]);
+		cond_resched();
+	}
 }
 
 static int bpf_array_alloc_percpu(struct bpf_array *array)
@@ -43,6 +45,7 @@ static int bpf_array_alloc_percpu(struct bpf_array *array)
 			return -ENOMEM;
 		}
 		array->pptrs[i] = ptr;
+		cond_resched();
 	}
 
 	return 0;
@@ -73,11 +76,11 @@ static int array_map_alloc_check(union bpf_attr *attr)
 static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 {
 	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
-	int numa_node = bpf_map_attr_numa_node(attr);
+	int ret, numa_node = bpf_map_attr_numa_node(attr);
 	u32 elem_size, index_mask, max_entries;
 	bool unpriv = !capable(CAP_SYS_ADMIN);
+	u64 cost, array_size, mask64;
 	struct bpf_array *array;
-	u64 array_size, mask64;
 
 	elem_size = round_up(attr->value_size, 8);
 
@@ -109,8 +112,19 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 	array_size += (u64) max_entries * elem_size;
 
 	/* make sure there is no u32 overflow later in round_up() */
-	if (array_size >= U32_MAX - PAGE_SIZE)
+	cost = array_size;
+	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-ENOMEM);
+	if (percpu) {
+		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
+		if (cost >= U32_MAX - PAGE_SIZE)
+			return ERR_PTR(-ENOMEM);
+	}
+	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+	ret = bpf_map_precharge_memlock(cost);
+	if (ret < 0)
+		return ERR_PTR(ret);
 
 	/* allocate all map elements and zero-initialize them */
 	array = bpf_map_area_alloc(array_size, numa_node);
@@ -121,20 +135,13 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
+	array->map.pages = cost;
 	array->elem_size = elem_size;
 
-	if (!percpu)
-		goto out;
-
-	array_size += (u64) attr->max_entries * elem_size * num_possible_cpus();
-
-	if (array_size >= U32_MAX - PAGE_SIZE ||
-	    bpf_array_alloc_percpu(array)) {
+	if (percpu && bpf_array_alloc_percpu(array)) {
 		bpf_map_area_free(array);
 		return ERR_PTR(-ENOMEM);
 	}
-out:
-	array->map.pages = round_up(array_size, PAGE_SIZE) >> PAGE_SHIFT;
 
 	return &array->map;
 }
@@ -1590,7 +1590,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 	 * so always copy 'cnt' prog_ids to the user.
 	 * In a rare race the user will see zero prog_ids
 	 */
-	ids = kcalloc(cnt, sizeof(u32), GFP_USER);
+	ids = kcalloc(cnt, sizeof(u32), GFP_USER | __GFP_NOWARN);
 	if (!ids)
 		return -ENOMEM;
 	rcu_read_lock();
@@ -334,7 +334,7 @@ static int cpu_map_kthread_run(void *data)
 static struct bpf_cpu_map_entry *__cpu_map_entry_alloc(u32 qsize, u32 cpu,
 						       int map_id)
 {
-	gfp_t gfp = GFP_ATOMIC|__GFP_NOWARN;
+	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct bpf_cpu_map_entry *rcpu;
 	int numa, err;
 
@@ -555,7 +555,10 @@ static void trie_free(struct bpf_map *map)
 	struct lpm_trie_node __rcu **slot;
 	struct lpm_trie_node *node;
 
-	raw_spin_lock(&trie->lock);
+	/* Wait for outstanding programs to complete
+	 * update/lookup/delete/get_next_key and free the trie.
+	 */
+	synchronize_rcu();
 
 	/* Always start at the root and walk down to a node that has no
 	 * children. Then free that node, nullify its reference in the parent
@@ -566,10 +569,9 @@ static void trie_free(struct bpf_map *map)
 	slot = &trie->root;
 
 	for (;;) {
-		node = rcu_dereference_protected(*slot,
-					lockdep_is_held(&trie->lock));
+		node = rcu_dereference_protected(*slot, 1);
 		if (!node)
-			goto unlock;
+			goto out;
 
 		if (rcu_access_pointer(node->child[0])) {
 			slot = &node->child[0];
@@ -587,8 +589,8 @@ static void trie_free(struct bpf_map *map)
 		}
 	}
 
-unlock:
-	raw_spin_unlock(&trie->lock);
+out:
+	kfree(trie);
 }
 
 static int trie_get_next_key(struct bpf_map *map, void *_key, void *_next_key)
@@ -521,8 +521,8 @@ static struct smap_psock *smap_init_psock(struct sock *sock,
 static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 {
 	struct bpf_stab *stab;
-	int err = -EINVAL;
 	u64 cost;
+	int err;
 
 	if (!capable(CAP_NET_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -547,6 +547,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
+	err = -EINVAL;
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_stab;
 
@@ -1356,6 +1356,13 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
 	return reg->type == PTR_TO_CTX;
 }
 
+static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
+{
+	const struct bpf_reg_state *reg = cur_regs(env) + regno;
+
+	return type_is_pkt_pointer(reg->type);
+}
+
 static int check_pkt_ptr_alignment(struct bpf_verifier_env *env,
 				   const struct bpf_reg_state *reg,
 				   int off, int size, bool strict)
@@ -1416,10 +1423,10 @@ static int check_generic_ptr_alignment(struct bpf_verifier_env *env,
 }
 
 static int check_ptr_alignment(struct bpf_verifier_env *env,
-			       const struct bpf_reg_state *reg,
-			       int off, int size)
+			       const struct bpf_reg_state *reg, int off,
+			       int size, bool strict_alignment_once)
 {
-	bool strict = env->strict_alignment;
+	bool strict = env->strict_alignment || strict_alignment_once;
 	const char *pointer_desc = "";
 
 	switch (reg->type) {
@@ -1576,9 +1583,9 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
  * if t==write && value_regno==-1, some unknown value is stored into memory
  * if t==read && value_regno==-1, don't care what we read from memory
  */
-static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno, int off,
-			    int bpf_size, enum bpf_access_type t,
-			    int value_regno)
+static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regno,
+			    int off, int bpf_size, enum bpf_access_type t,
+			    int value_regno, bool strict_alignment_once)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = regs + regno;
@@ -1590,7 +1597,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 		return size;
 
 	/* alignment checks will add in reg->off themselves */
-	err = check_ptr_alignment(env, reg, off, size);
+	err = check_ptr_alignment(env, reg, off, size, strict_alignment_once);
 	if (err)
 		return err;
 
@@ -1735,21 +1742,23 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins
 		return -EACCES;
 	}
 
-	if (is_ctx_reg(env, insn->dst_reg)) {
-		verbose(env, "BPF_XADD stores into R%d context is not allowed\n",
-			insn->dst_reg);
+	if (is_ctx_reg(env, insn->dst_reg) ||
+	    is_pkt_reg(env, insn->dst_reg)) {
+		verbose(env, "BPF_XADD stores into R%d %s is not allowed\n",
+			insn->dst_reg, is_ctx_reg(env, insn->dst_reg) ?
+			"context" : "packet");
 		return -EACCES;
 	}
 
 	/* check whether atomic_add can read the memory */
 	err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_READ, -1);
+			       BPF_SIZE(insn->code), BPF_READ, -1, true);
 	if (err)
 		return err;
 
 	/* check whether atomic_add can write into the same memory */
 	return check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
-			       BPF_SIZE(insn->code), BPF_WRITE, -1);
+			       BPF_SIZE(insn->code), BPF_WRITE, -1, true);
 }
 
 /* when register 'regno' is passed into function that will read 'access_size'
@@ -2388,7 +2397,8 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
 	 * is inferred from register state.
 	 */
 	for (i = 0; i < meta.access_size; i++) {
-		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B, BPF_WRITE, -1);
+		err = check_mem_access(env, insn_idx, meta.regno, i, BPF_B,
+				       BPF_WRITE, -1, false);
 		if (err)
 			return err;
 	}
@@ -4632,7 +4642,7 @@ static int do_check(struct bpf_verifier_env *env)
 			 */
 			err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_READ,
-					       insn->dst_reg);
+					       insn->dst_reg, false);
 			if (err)
 				return err;
 
@@ -4684,7 +4694,7 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
-					       insn->src_reg);
+					       insn->src_reg, false);
 			if (err)
 				return err;
 
@@ -4719,7 +4729,7 @@ static int do_check(struct bpf_verifier_env *env)
 			/* check that memory (dst_reg + off) is writeable */
 			err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
 					       BPF_SIZE(insn->code), BPF_WRITE,
-					       -1);
+					       -1, false);
 			if (err)
 				return err;
 
@@ -488,25 +488,6 @@ get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat)
 }
 EXPORT_SYMBOL_GPL(get_compat_sigset);
 
-int
-put_compat_sigset(compat_sigset_t __user *compat, const sigset_t *set,
-		  unsigned int size)
-{
-	/* size <= sizeof(compat_sigset_t) <= sizeof(sigset_t) */
-#ifdef __BIG_ENDIAN
-	compat_sigset_t v;
-	switch (_NSIG_WORDS) {
-	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
-	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
-	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
-	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
-	}
-	return copy_to_user(compat, &v, size) ? -EFAULT : 0;
-#else
-	return copy_to_user(compat, set, size) ? -EFAULT : 0;
-#endif
-}
-
 #ifdef CONFIG_NUMA
 COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
 		       compat_uptr_t __user *, pages32,
@@ -64,7 +64,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 	return e;
 }
 
-static inline int init_kernel_text(unsigned long addr)
+int init_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_sinittext &&
 	    addr < (unsigned long)_einittext)
@@ -592,7 +592,7 @@ static void check_mm(struct mm_struct *mm)
  * is dropped: either by a lazy thread or by
  * mmput. Free the page directory and the mm.
  */
-static void __mmdrop(struct mm_struct *mm)
+void __mmdrop(struct mm_struct *mm)
 {
 	BUG_ON(mm == &init_mm);
 	mm_free_pgd(mm);
@@ -603,18 +603,7 @@ static void __mmdrop(struct mm_struct *mm)
 	put_user_ns(mm->user_ns);
 	free_mm(mm);
 }
-
-void mmdrop(struct mm_struct *mm)
-{
-	/*
-	 * The implicit full barrier implied by atomic_dec_and_test() is
-	 * required by the membarrier system call before returning to
-	 * user-space, after storing to rq->curr.
-	 */
-	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
-		__mmdrop(mm);
-}
-EXPORT_SYMBOL_GPL(mmdrop);
+EXPORT_SYMBOL_GPL(__mmdrop);
 
 static void mmdrop_async_fn(struct work_struct *work)
 {
@@ -16,6 +16,7 @@ struct cpumap {
 	unsigned int	available;
 	unsigned int	allocated;
 	unsigned int	managed;
+	bool		initialized;
 	bool		online;
 	unsigned long	alloc_map[IRQ_MATRIX_SIZE];
 	unsigned long	managed_map[IRQ_MATRIX_SIZE];
@@ -81,9 +82,11 @@ void irq_matrix_online(struct irq_matrix *m)
 
 	BUG_ON(cm->online);
 
-	bitmap_zero(cm->alloc_map, m->matrix_bits);
-	cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
-	cm->allocated = 0;
+	if (!cm->initialized) {
+		cm->available = m->alloc_size;
+		cm->available -= cm->managed + m->systembits_inalloc;
+		cm->initialized = true;
+	}
 	m->global_available += cm->available;
 	cm->online = true;
 	m->online_maps++;
@@ -370,14 +373,16 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
 	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
 		return;
 
-	if (cm->online) {
-		clear_bit(bit, cm->alloc_map);
-		cm->allocated--;
+	clear_bit(bit, cm->alloc_map);
+	cm->allocated--;
+
+	if (cm->online)
 		m->total_allocated--;
-		if (!managed) {
-			cm->available++;
+
+	if (!managed) {
+		cm->available++;
+		if (cm->online)
 			m->global_available++;
-		}
 	}
 	trace_irq_matrix_free(bit, cpu, m, cm);
 }
@@ -366,12 +366,15 @@ static void __jump_label_update(struct static_key *key,
 {
 	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
 		/*
-		 * entry->code set to 0 invalidates module init text sections
-		 * kernel_text_address() verifies we are not in core kernel
-		 * init code, see jump_label_invalidate_module_init().
+		 * An entry->code of 0 indicates an entry which has been
+		 * disabled because it was in an init text area.
 		 */
-		if (entry->code && kernel_text_address(entry->code))
-			arch_jump_label_transform(entry, jump_label_type(entry));
+		if (entry->code) {
+			if (kernel_text_address(entry->code))
+				arch_jump_label_transform(entry, jump_label_type(entry));
+			else
+				WARN_ONCE(1, "can't patch jump_label at %pS", (void *)entry->code);
+		}
 	}
 }
 
@@ -417,6 +420,19 @@ void __init jump_label_init(void)
 	cpus_read_unlock();
 }
 
+/* Disable any jump label entries in __init code */
+void __init jump_label_invalidate_init(void)
+{
+	struct jump_entry *iter_start = __start___jump_table;
+	struct jump_entry *iter_stop = __stop___jump_table;
+	struct jump_entry *iter;
+
+	for (iter = iter_start; iter < iter_stop; iter++) {
+		if (init_kernel_text(iter->code))
+			iter->code = 0;
+	}
+}
+
 #ifdef CONFIG_MODULES
 
 static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
@@ -633,6 +649,7 @@ static void jump_label_del_module(struct module *mod)
 	}
 }
 
+/* Disable any jump label entries in module init code */
 static void jump_label_invalidate_module_init(struct module *mod)
 {
 	struct jump_entry *iter_start = mod->jump_entries;
@@ -275,8 +275,15 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap)
 	return (res->start + resource_size(res)) >> PAGE_SHIFT;
 }
 
+static unsigned long pfn_next(unsigned long pfn)
+{
+	if (pfn % 1024 == 0)
+		cond_resched();
+	return pfn + 1;
+}
+
 #define for_each_device_pfn(pfn, map) \
-	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
+	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn = pfn_next(pfn))
 
 static void devm_memremap_pages_release(void *data)
 {
@@ -337,10 +344,10 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 	resource_size_t align_start, align_size, align_end;
 	struct vmem_altmap *altmap = pgmap->altmap_valid ?
 			&pgmap->altmap : NULL;
+	struct resource *res = &pgmap->res;
 	unsigned long pfn, pgoff, order;
 	pgprot_t pgprot = PAGE_KERNEL;
-	int error, nid, is_ram, i = 0;
-	struct resource *res = &pgmap->res;
+	int error, nid, is_ram;
 
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -409,8 +416,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 		list_del(&page->lru);
 		page->pgmap = pgmap;
 		percpu_ref_get(pgmap->ref);
-		if (!(++i % 1024))
-			cond_resched();
 	}
 
 	devm_add_action(dev, devm_memremap_pages_release, pgmap);
@@ -640,7 +640,7 @@ device_initcall(register_warn_debugfs);
  */
 __visible void __stack_chk_fail(void)
 {
-	panic("stack-protector: Kernel stack is corrupted in: %p\n",
+	panic("stack-protector: Kernel stack is corrupted in: %pB\n",
 	      __builtin_return_address(0));
 }
 EXPORT_SYMBOL(__stack_chk_fail);
@@ -2397,7 +2397,7 @@ skip:
 
 		if (console_lock_spinning_disable_and_check()) {
 			printk_safe_exit_irqrestore(flags);
-			return;
+			goto out;
 		}
 
 		printk_safe_exit_irqrestore(flags);
@@ -2430,6 +2430,7 @@ skip:
 	if (retry && console_trylock())
 		goto again;
 
+out:
 	if (wake_klogd)
 		wake_up_klogd();
 }
@@ -163,7 +163,7 @@ static struct rchan_buf *relay_create_buf(struct rchan *chan)
 {
 	struct rchan_buf *buf;
 
-	if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
+	if (chan->n_subbufs > KMALLOC_MAX_SIZE / sizeof(size_t *))
 		return NULL;
 
 	buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
@@ -1076,14 +1076,16 @@ long seccomp_get_metadata(struct task_struct *task,
 
 	size = min_t(unsigned long, size, sizeof(kmd));
 
-	if (copy_from_user(&kmd, data, size))
+	if (size < sizeof(kmd.filter_off))
+		return -EINVAL;
+
+	if (copy_from_user(&kmd.filter_off, data, sizeof(kmd.filter_off)))
 		return -EFAULT;
 
 	filter = get_nth_filter(task, kmd.filter_off);
 	if (IS_ERR(filter))
 		return PTR_ERR(filter);
 
+	memset(&kmd, 0, sizeof(kmd));
 	if (filter->log)
 		kmd.flags |= SECCOMP_FILTER_FLAG_LOG;
 
@@ -1894,6 +1894,12 @@ int timers_dead_cpu(unsigned int cpu)
 		raw_spin_lock_irq(&new_base->lock);
 		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
+		/*
+		 * The current CPUs base clock might be stale. Update it
+		 * before moving the timers over.
+		 */
+		forward_timer_base(new_base);
+
 		BUG_ON(old_base->running_timer);
 
 		for (i = 0; i < WHEEL_SIZE; i++)
@@ -872,6 +872,8 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info)
 		return -EINVAL;
 	if (copy_from_user(&query, uquery, sizeof(query)))
 		return -EFAULT;
+	if (query.ids_len > BPF_TRACE_MAX_PROGS)
+		return -E2BIG;
 
 	mutex_lock(&bpf_event_mutex);
 	ret = bpf_prog_array_copy_info(event->tp_event->prog_array,
@@ -101,6 +101,7 @@ struct user_struct root_user = {
 	.sigpending	= ATOMIC_INIT(0),
 	.locked_shm	= 0,
 	.uid		= GLOBAL_ROOT_UID,
+	.ratelimit	= RATELIMIT_STATE_INIT(root_user.ratelimit, 0, 0),
 };
 
 /*
@@ -191,6 +192,8 @@ struct user_struct *alloc_uid(kuid_t uid)
 
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
+		ratelimit_state_init(&new->ratelimit, HZ, 100);
+		ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
 
 		/*
 		 * Before adding this, check whether we raced
@@ -4179,6 +4179,22 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 }
 EXPORT_SYMBOL_GPL(workqueue_set_max_active);
 
+/**
+ * current_work - retrieve %current task's work struct
+ *
+ * Determine if %current task is a workqueue worker and what it's working on.
+ * Useful to find out the context that the %current task is running in.
+ *
+ * Return: work struct if %current task is a workqueue worker, %NULL otherwise.
+ */
+struct work_struct *current_work(void)
+{
+	struct worker *worker = current_wq_worker();
+
+	return worker ? worker->current_work : NULL;
+}
+EXPORT_SYMBOL(current_work);
+
 /**
  * current_is_workqueue_rescuer - is %current workqueue rescuer?
  *