Merge branch 'linus' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -687,7 +687,8 @@ static void delete_all_elements(struct bpf_htab *htab)
 
 		hlist_for_each_entry_safe(l, n, head, hash_node) {
 			hlist_del_rcu(&l->hash_node);
-			htab_elem_free(htab, l);
+			if (l->state != HTAB_EXTRA_ELEM_USED)
+				htab_elem_free(htab, l);
 		}
 	}
 }
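
The fix above encodes an ownership rule: elements whose storage came from the map's preallocated extra reserve (state HTAB_EXTRA_ELEM_USED) must be unlinked but never handed back to the allocator, or teardown frees memory it does not own. A minimal userspace sketch of that rule, with hypothetical types (not the kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct elem {
	struct elem *next;
	bool from_reserve;	/* stands in for l->state == HTAB_EXTRA_ELEM_USED */
};

static void delete_all(struct elem **head)
{
	struct elem *l = *head, *n;

	while (l) {
		n = l->next;		/* _safe walk: grab next before freeing */
		if (!l->from_reserve)	/* only free heap-owned elements */
			free(l);
		l = n;
	}
	*head = NULL;
}

int main(void)
{
	struct elem *heap_el = calloc(1, sizeof(*heap_el));
	static struct elem reserve_el = { .from_reserve = true };

	heap_el->next = &reserve_el;	/* list: heap_el -> reserve_el */
	delete_all(&heap_el);
	puts("teardown ok");
	return 0;
}
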
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -194,7 +194,7 @@ static int map_create(union bpf_attr *attr)
 
 	err = bpf_map_charge_memlock(map);
 	if (err)
-		goto free_map;
+		goto free_map_nouncharge;
 
 	err = bpf_map_new_fd(map);
 	if (err < 0)
@@ -204,6 +204,8 @@ static int map_create(union bpf_attr *attr)
 	return err;
 
 free_map:
+	bpf_map_uncharge_memlock(map);
+free_map_nouncharge:
 	map->ops->map_free(map);
 	return err;
 }
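
The point of the second label: a failure that happens before the memlock charge must jump past the uncharge. A self-contained sketch of this two-label goto unwind, with stand-in functions (the bpf_map_* names above are the kernel's; everything here is illustrative):

#include <stdio.h>

static int charge(void)    { puts("charge");   return 0; }
static void uncharge(void) { puts("uncharge"); }
static int make_fd(void)   { puts("make_fd");  return -1; } /* force failure */
static void destroy(void)  { puts("destroy"); }

static int create(void)
{
	int err;

	err = charge();
	if (err)
		goto free_nouncharge;	/* nothing charged yet */

	err = make_fd();
	if (err < 0)
		goto free;		/* charged: must uncharge first */

	return 0;

free:
	uncharge();
free_nouncharge:
	destroy();
	return err;
}

int main(void)
{
	printf("create() = %d\n", create());
	return 0;
}
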
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -216,8 +216,8 @@ static void print_verifier_state(struct bpf_verifier_state *state)
 				reg->map_ptr->key_size,
 				reg->map_ptr->value_size);
 		if (reg->min_value != BPF_REGISTER_MIN_RANGE)
-			verbose(",min_value=%llu",
-				(unsigned long long)reg->min_value);
+			verbose(",min_value=%lld",
+				(long long)reg->min_value);
 		if (reg->max_value != BPF_REGISTER_MAX_RANGE)
 			verbose(",max_value=%llu",
 				(unsigned long long)reg->max_value);
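
The %llu to %lld change matters because min_value is now a signed quantity that can legitimately be negative; printing it through an unsigned conversion shows a huge bogus number. A standalone demo:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t min_value = -64;	/* a typical negative floor */

	printf("as %%llu: %llu\n", (unsigned long long)min_value);
	printf("as %%lld: %lld\n", (long long)min_value);
	return 0;
}

This prints 18446744073709551552 for the first line and -64 for the second.
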
@@ -758,7 +758,7 @@ static int check_mem_access(struct bpf_verifier_env *env, u32 regno, int off,
 		 * index'es we need to make sure that whatever we use
 		 * will have a set floor within our range.
 		 */
-		if ((s64)reg->min_value < 0) {
+		if (reg->min_value < 0) {
 			verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
 				regno);
 			return -EACCES;
@@ -1468,7 +1468,8 @@ static void check_reg_overflow(struct bpf_reg_state *reg)
 {
 	if (reg->max_value > BPF_REGISTER_MAX_RANGE)
 		reg->max_value = BPF_REGISTER_MAX_RANGE;
-	if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE)
+	if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
+	    reg->min_value > BPF_REGISTER_MAX_RANGE)
 		reg->min_value = BPF_REGISTER_MIN_RANGE;
 }
 
@@ -1476,7 +1477,8 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 				    struct bpf_insn *insn)
 {
 	struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
-	u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE;
+	s64 min_val = BPF_REGISTER_MIN_RANGE;
+	u64 max_val = BPF_REGISTER_MAX_RANGE;
 	bool min_set = false, max_set = false;
 	u8 opcode = BPF_OP(insn->code);
 
@@ -1512,22 +1514,43 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		return;
 	}
 
+	/* If one of our values was at the end of our ranges then we can't just
+	 * do our normal operations to the register, we need to set the values
+	 * to the min/max since they are undefined.
+	 */
+	if (min_val == BPF_REGISTER_MIN_RANGE)
+		dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+	if (max_val == BPF_REGISTER_MAX_RANGE)
+		dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+
 	switch (opcode) {
 	case BPF_ADD:
-		dst_reg->min_value += min_val;
-		dst_reg->max_value += max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value += min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value += max_val;
 		break;
 	case BPF_SUB:
-		dst_reg->min_value -= min_val;
-		dst_reg->max_value -= max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value -= min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value -= max_val;
 		break;
 	case BPF_MUL:
-		dst_reg->min_value *= min_val;
-		dst_reg->max_value *= max_val;
+		if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+			dst_reg->min_value *= min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value *= max_val;
 		break;
 	case BPF_AND:
-		/* & is special since it could end up with 0 bits set. */
-		dst_reg->min_value &= min_val;
+		/* Disallow AND'ing of negative numbers, ain't nobody got time
+		 * for that. Otherwise the minimum is 0 and the max is the max
+		 * value we could AND against.
+		 */
+		if (min_val < 0)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		else
+			dst_reg->min_value = 0;
 		dst_reg->max_value = max_val;
 		break;
 	case BPF_LSH:
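
The added guards implement saturating range tracking: once a bound sits at its sentinel (BPF_REGISTER_MIN_RANGE / BPF_REGISTER_MAX_RANGE, meaning "unknown"), arithmetic must leave it pinned there rather than wrap through it. A compact standalone model of the BPF_ADD case, under assumed INT64_MIN/INT64_MAX sentinels (not the verifier itself):

#include <stdio.h>
#include <stdint.h>

#define REG_MIN INT64_MIN	/* stands in for BPF_REGISTER_MIN_RANGE */
#define REG_MAX INT64_MAX	/* stands in for BPF_REGISTER_MAX_RANGE */

static void add_range(int64_t *min, int64_t *max, int64_t lo, int64_t hi)
{
	/* operand bounds at the sentinel poison the result outright */
	if (lo == REG_MIN)
		*min = REG_MIN;
	if (hi == REG_MAX)
		*max = REG_MAX;

	/* otherwise only update bounds that are still known */
	if (*min != REG_MIN)
		*min += lo;
	if (*max != REG_MAX)
		*max += hi;
}

int main(void)
{
	int64_t min = 0, max = 10;

	add_range(&min, &max, 4, REG_MAX);	/* upper bound unknown */
	printf("min=%lld max_unknown=%d\n", (long long)min, max == REG_MAX);
	return 0;
}
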
@@ -1537,24 +1560,25 @@ static void adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		 */
 		if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
 			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
-		else
+		else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
 			dst_reg->min_value <<= min_val;
 
 		if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
 			dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
-		else
+		else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
 			dst_reg->max_value <<= max_val;
 		break;
 	case BPF_RSH:
-		dst_reg->min_value >>= min_val;
-		dst_reg->max_value >>= max_val;
-		break;
-	case BPF_MOD:
-		/* % is special since it is an unsigned modulus, so the floor
-		 * will always be 0.
+		/* RSH by a negative number is undefined, and the BPF_RSH is an
+		 * unsigned shift, so make the appropriate casts.
 		 */
-		dst_reg->min_value = 0;
-		dst_reg->max_value = max_val - 1;
+		if (min_val < 0 || dst_reg->min_value < 0)
+			dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+		else
+			dst_reg->min_value =
+				(u64)(dst_reg->min_value) >> min_val;
+		if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+			dst_reg->max_value >>= max_val;
 		break;
 	default:
 		reset_reg_range_values(regs, insn->dst_reg);
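
The rewritten BPF_RSH case avoids shifting a negative signed value: in C that is implementation-defined (usually an arithmetic shift that preserves the sign bit), while BPF_RSH is a logical shift, hence the cast through u64. A two-line standalone comparison:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t v = -8;

	/* arithmetic shift on typical targets: -4 */
	printf("signed   >> 1: %lld\n", (long long)(v >> 1));
	/* logical shift after the cast: a large positive value */
	printf("unsigned >> 1: %llu\n", (unsigned long long)((uint64_t)v >> 1));
	return 0;
}
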
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1341,12 +1341,12 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	} else if (new->flags & IRQF_TRIGGER_MASK) {
 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
-		unsigned int omsk = irq_settings_get_trigger_mask(desc);
+		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
 
 		if (nmsk != omsk)
 			/* hope the handler works with current trigger mode */
 			pr_warn("irq %d uses trigger mode %u; requested %u\n",
-				irq, nmsk, omsk);
+				irq, omsk, nmsk);
 	}
 
 	*old_ptr = new;
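
The argument swap lines the values up with the message text: it reads "uses <current>; requested <new>", so omsk (the trigger type already programmed) must come first. A trivial demo with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int omsk = 0x1;	/* current trigger, e.g. rising edge */
	unsigned int nmsk = 0x2;	/* requested trigger, e.g. falling edge */

	/* current first, requested second, matching the wording */
	printf("irq %d uses trigger mode %u; requested %u\n", 17, omsk, nmsk);
	return 0;
}
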
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -45,6 +45,14 @@ enum {
 #define LOCKF_USED_IN_IRQ_READ \
 		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
 
+/*
+ * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * .data and .bss to fit in required 32MB limit for the kernel. With
+ * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * So, reduce the static allocations for lockdeps related structures so that
+ * everything fits in current required size limit.
+ */
+#ifdef CONFIG_PROVE_LOCKING_SMALL
 /*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
@@ -54,18 +62,24 @@ enum {
 * table (if it's not there yet), and we check it for lock order
 * conflicts and deadlocks.
 */
+#define MAX_LOCKDEP_ENTRIES	16384UL
+#define MAX_LOCKDEP_CHAINS_BITS	15
+#define MAX_STACK_TRACE_ENTRIES	262144UL
+#else
 #define MAX_LOCKDEP_ENTRIES	32768UL
 
 #define MAX_LOCKDEP_CHAINS_BITS	16
-#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
-
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
  */
 #define MAX_STACK_TRACE_ENTRIES	524288UL
+#endif
+
+#define MAX_LOCKDEP_CHAINS	(1UL << MAX_LOCKDEP_CHAINS_BITS)
+
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
 
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
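
With MAX_LOCKDEP_CHAINS and MAX_LOCKDEP_CHAIN_HLOCKS moved below the #endif, both now derive from whichever MAX_LOCKDEP_CHAINS_BITS the config selected. The resulting table sizes, worked out in a standalone snippet:

#include <stdio.h>

int main(void)
{
	unsigned long small = 1UL << 15;	/* CONFIG_PROVE_LOCKING_SMALL */
	unsigned long big   = 1UL << 16;	/* default */

	/* chain-hlocks table is always 5x the chains table */
	printf("small: chains=%lu hlocks=%lu\n", small, small * 5);
	printf("big:   chains=%lu hlocks=%lu\n", big, big * 5);
	return 0;
}

This prints 32768/163840 for the sparc-sized config versus 65536/327680 for the default.
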
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -203,8 +203,10 @@ static int __init test_suspend(void)
 
 	/* RTCs have initialized by now too ... can we use one? */
 	dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm);
-	if (dev)
+	if (dev) {
 		rtc = rtc_class_open(dev_name(dev));
+		put_device(dev);
+	}
 	if (!rtc) {
 		printk(warn_no_rtc);
 		return 0;
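
class_find_device() returns a device with an elevated reference count, so the lookup path here leaked a reference. The get/put discipline behind the fix, sketched with a hypothetical userspace refcount (not the driver-core API):

#include <stdio.h>

struct device { int refcount; };

/* models class_find_device(): a successful lookup takes a reference */
static struct device *find_device(struct device *d) { d->refcount++; return d; }
static void put_device(struct device *d) { d->refcount--; }

int main(void)
{
	struct device rtc_dev = { .refcount = 1 };
	struct device *dev = find_device(&rtc_dev);

	if (dev) {
		/* ... open the RTC by name ... */
		put_device(dev);	/* balance the lookup's reference */
	}
	printf("refcount=%d (expect 1)\n", rtc_dev.refcount);
	return 0;
}
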
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -253,17 +253,6 @@ static int preferred_console = -1;
 int console_set_on_cmdline;
 EXPORT_SYMBOL(console_set_on_cmdline);
 
-#ifdef CONFIG_OF
-static bool of_specified_console;
-
-void console_set_by_of(void)
-{
-	of_specified_console = true;
-}
-#else
-# define of_specified_console false
-#endif
-
 /* Flag: console code may call schedule() */
 static int console_may_schedule;
 
@@ -794,8 +783,6 @@ static ssize_t devkmsg_write(struct kiocb *iocb, struct iov_iter *from)
 	return ret;
 }
 
-static void cont_flush(void);
-
 static ssize_t devkmsg_read(struct file *file, char __user *buf,
 			    size_t count, loff_t *ppos)
 {
@@ -811,7 +798,6 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
 	if (ret)
 		return ret;
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	while (user->seq == log_next_seq) {
 		if (file->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
@@ -874,7 +860,6 @@ static loff_t devkmsg_llseek(struct file *file, loff_t offset, int whence)
 		return -ESPIPE;
 
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	switch (whence) {
 	case SEEK_SET:
 		/* the first record */
@@ -913,7 +898,6 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &log_wait, wait);
 
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	if (user->seq < log_next_seq) {
 		/* return error when data has vanished underneath us */
 		if (user->seq < log_first_seq)
@@ -1300,7 +1284,6 @@ static int syslog_print(char __user *buf, int size)
 	size_t skip;
 
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	if (syslog_seq < log_first_seq) {
 		/* messages are gone, move to first one */
 		syslog_seq = log_first_seq;
@@ -1360,7 +1343,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
 		return -ENOMEM;
 
 	raw_spin_lock_irq(&logbuf_lock);
-	cont_flush();
 	if (buf) {
 		u64 next_seq;
 		u64 seq;
@@ -1522,7 +1504,6 @@ int do_syslog(int type, char __user *buf, int len, int source)
 	/* Number of chars in the log buffer */
 	case SYSLOG_ACTION_SIZE_UNREAD:
 		raw_spin_lock_irq(&logbuf_lock);
-		cont_flush();
 		if (syslog_seq < log_first_seq) {
 			/* messages are gone, move to first one */
 			syslog_seq = log_first_seq;
@@ -2657,7 +2638,7 @@ void register_console(struct console *newcon)
 	 * didn't select a console we take the first one
 	 * that registers here.
 	 */
-	if (preferred_console < 0 && !of_specified_console) {
+	if (preferred_console < 0) {
 		if (newcon->index < 0)
 			newcon->index = 0;
 		if (newcon->setup == NULL ||
@@ -3039,7 +3020,6 @@ void kmsg_dump(enum kmsg_dump_reason reason)
 		dumper->active = true;
 
 		raw_spin_lock_irqsave(&logbuf_lock, flags);
-		cont_flush();
 		dumper->cur_seq = clear_seq;
 		dumper->cur_idx = clear_idx;
 		dumper->next_seq = log_next_seq;
@@ -3130,7 +3110,6 @@ bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
 	bool ret;
 
 	raw_spin_lock_irqsave(&logbuf_lock, flags);
-	cont_flush();
 	ret = kmsg_dump_get_line_nolock(dumper, syslog, line, size, len);
 	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
 
@@ -3173,7 +3152,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
 		goto out;
 
 	raw_spin_lock_irqsave(&logbuf_lock, flags);
-	cont_flush();
 	if (dumper->cur_seq < log_first_seq) {
 		/* messages are gone, move to first available one */
 		dumper->cur_seq = log_first_seq;
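
Context for the repeated cont_flush() removals: the change being backed out had every log reader flush the pending continuation line into the record buffer, under logbuf_lock, before reading. A rough userspace model of that flush-before-read pattern (hypothetical buffers, not printk's internals):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t logbuf_lock = PTHREAD_MUTEX_INITIALIZER;
static char cont_buf[64];	/* pending partial line, no newline yet */
static char log_buf[256];	/* committed records */

static void cont_flush(void)
{
	strncat(log_buf, cont_buf, sizeof(log_buf) - strlen(log_buf) - 1);
	cont_buf[0] = '\0';
}

static void read_log(void)
{
	pthread_mutex_lock(&logbuf_lock);
	cont_flush();	/* the call the revert removes from each reader */
	printf("reader sees: \"%s\"\n", log_buf);
	pthread_mutex_unlock(&logbuf_lock);
}

int main(void)
{
	strcpy(cont_buf, "partial line without newline");
	read_log();
	return 0;
}
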
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -54,7 +54,11 @@ static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1
 	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]  = { .type = NLA_STRING },
 	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK]  = { .type = NLA_STRING },};
 
-static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
+/*
+ * We have to use TASKSTATS_CMD_ATTR_MAX here, it is the maxattr in the family.
+ * Make sure they are always aligned.
+ */
+static const struct nla_policy cgroupstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
 	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
 };
 
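
The resizing fixes an out-of-bounds read: netlink validates attribute ids up to the family's maxattr, so every policy table in the family must be sized by that maxattr, not by its own command's smaller maximum. In miniature, with hypothetical constants:

#include <stdio.h>

#define CGROUPSTATS_CMD_ATTR_MAX 1	/* this command's own maximum */
#define TASKSTATS_CMD_ATTR_MAX   4	/* the family-wide maximum */

struct policy { int type; };

/* sized by the family maxattr, so any probed index stays in bounds */
static struct policy cgroupstats_policy[TASKSTATS_CMD_ATTR_MAX + 1] = {
	[0] = { .type = 32 },	/* stands in for CGROUPSTATS_CMD_ATTR_FD */
};

int main(void)
{
	/* validation may probe any index up to the family maxattr */
	for (int i = 0; i <= TASKSTATS_CMD_ATTR_MAX; i++)
		printf("attr %d -> type %d\n", i, cgroupstats_policy[i].type);
	return 0;
}
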
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1862,6 +1862,10 @@ static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
 
 	/* Update rec->flags */
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		/* We need to update only differences of filter_hash */
 		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
 		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
@@ -1884,6 +1888,10 @@ rollback:
 
 	/* Roll back what we did above */
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (rec == end)
 			goto err_out;
 
@@ -2397,6 +2405,10 @@ void __weak ftrace_replace_code(int enable)
 		return;
 
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
 			ftrace_bug(failed, rec);
@@ -2763,7 +2775,7 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		struct dyn_ftrace *rec;
 
 		do_for_each_ftrace_rec(pg, rec) {
-			if (FTRACE_WARN_ON_ONCE(rec->flags))
+			if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
 				pr_warn(" %pS flags:%lx\n",
 					(void *)rec->ip, rec->flags);
 		} while_for_each_ftrace_rec();
@@ -3598,6 +3610,10 @@ match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
 		goto out_unlock;
 
 	do_for_each_ftrace_rec(pg, rec) {
+
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
 			ret = enter_record(hash, rec, clear_filter);
 			if (ret < 0) {
@@ -3793,6 +3809,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 
 	do_for_each_ftrace_rec(pg, rec) {
 
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (!ftrace_match_record(rec, &func_g, NULL, 0))
 			continue;
 
@@ -4685,6 +4704,9 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 
 	do_for_each_ftrace_rec(pg, rec) {
 
+		if (rec->flags & FTRACE_FL_DISABLED)
+			continue;
+
 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
 			/* if it is in the array */
 			exists = false;
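
The same guard lands in every record walk because records marked FTRACE_FL_DISABLED (for example, functions in a module being torn down) must never be patched or matched. The shape of the pattern, over a hypothetical record array (not ftrace's pg/rec iterator):

#include <stdio.h>

#define FL_DISABLED (1UL << 0)	/* stands in for FTRACE_FL_DISABLED */

struct rec { unsigned long ip, flags; };

int main(void)
{
	struct rec recs[] = {
		{ 0x1000, 0 },
		{ 0x2000, FL_DISABLED },	/* e.g. in a module being freed */
		{ 0x3000, 0 },
	};

	for (unsigned int i = 0; i < sizeof(recs) / sizeof(recs[0]); i++) {
		if (recs[i].flags & FL_DISABLED)
			continue;	/* never touch a disabled record */
		printf("update rec at ip=0x%lx\n", recs[i].ip);
	}
	return 0;
}
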