Merge ra.kernel.org:/pub/scm/linux/kernel/git/netdev/net
@@ -44,14 +44,19 @@ struct tnum tnum_rshift(struct tnum a, u8 shift)
         return TNUM(a.value >> shift, a.mask >> shift);
 }

-struct tnum tnum_arshift(struct tnum a, u8 min_shift)
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness)
 {
         /* if a.value is negative, arithmetic shifting by minimum shift
          * will have larger negative offset compared to more shifting.
          * If a.value is nonnegative, arithmetic shifting by minimum shift
          * will have larger positive offset compare to more shifting.
          */
-        return TNUM((s64)a.value >> min_shift, (s64)a.mask >> min_shift);
+        if (insn_bitness == 32)
+                return TNUM((u32)(((s32)a.value) >> min_shift),
+                            (u32)(((s32)a.mask) >> min_shift));
+        else
+                return TNUM((s64)a.value >> min_shift,
+                            (s64)a.mask >> min_shift);
 }

 struct tnum tnum_add(struct tnum a, struct tnum b)
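Not part of the patch, but a quick standalone illustration of the behaviour the new insn_bitness parameter accounts for: an arithmetic right shift computed on the full 64-bit register image differs from a true 32-bit ARSH once the sign bit of the low 32 bits is set. The sketch below assumes the usual gcc/clang arithmetic-shift behaviour for signed types.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* A 64-bit register image whose low 32 bits are negative as s32 */
        uint64_t reg = 0x80000000ULL;
        unsigned int shift = 4;

        /* 64-bit arithmetic shift of the full register image */
        uint64_t arsh64 = (uint64_t)((int64_t)reg >> shift);
        /* True 32-bit ARSH: sign-extend the low 32 bits first, then shift
         * (relies on implementation-defined but ubiquitous arithmetic shift) */
        uint64_t arsh32 = (uint32_t)((int32_t)(uint32_t)reg >> shift);

        printf("64-bit arshift: 0x%llx\n", (unsigned long long)arsh64); /* 0x8000000 */
        printf("32-bit arshift: 0x%llx\n", (unsigned long long)arsh32); /* 0xf8000000 */
        return 0;
}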
@@ -5049,9 +5049,16 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                 /* Upon reaching here, src_known is true and
                  * umax_val is equal to umin_val.
                  */
-                dst_reg->smin_value >>= umin_val;
-                dst_reg->smax_value >>= umin_val;
-                dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
+                if (insn_bitness == 32) {
+                        dst_reg->smin_value = (u32)(((s32)dst_reg->smin_value) >> umin_val);
+                        dst_reg->smax_value = (u32)(((s32)dst_reg->smax_value) >> umin_val);
+                } else {
+                        dst_reg->smin_value >>= umin_val;
+                        dst_reg->smax_value >>= umin_val;
+                }
+
+                dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val,
+                                                insn_bitness);

                 /* blow away the dst_reg umin_value/umax_value and rely on
                  * dst_reg var_off to refine the result.
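The context comment notes that the verifier discards umin_value/umax_value and rederives them from var_off. As a hedged sketch (mirroring, not quoting, the kernel's tnum convention that mask bits are unknown and value bits are known), the tightest unsigned bounds a value/mask pair permits look like this:

#include <stdint.h>
#include <stdio.h>

/* Hedged sketch, not the verifier's code: a tnum tracks each bit as known
 * (mask bit 0, actual bit in .value) or unknown (mask bit 1).  The tightest
 * unsigned bounds come from assuming all unknown bits are 0 (minimum) or
 * 1 (maximum). */
struct tnum_sketch {
        uint64_t value; /* known bits */
        uint64_t mask;  /* unknown bits */
};

static uint64_t tnum_umin(struct tnum_sketch t) { return t.value; }
static uint64_t tnum_umax(struct tnum_sketch t) { return t.value | t.mask; }

int main(void)
{
        /* "low nibble unknown, remaining bits known to be 0x50" */
        struct tnum_sketch t = { .value = 0x50, .mask = 0x0f };

        printf("umin=0x%llx umax=0x%llx\n",
               (unsigned long long)tnum_umin(t),
               (unsigned long long)tnum_umax(t));      /* umin=0x50 umax=0x5f */
        return 0;
}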
kernel/cpu.c (143 changed lines)
@@ -1909,6 +1909,78 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
 }
 EXPORT_SYMBOL(__cpuhp_remove_state);

+#ifdef CONFIG_HOTPLUG_SMT
+static void cpuhp_offline_cpu_device(unsigned int cpu)
+{
+        struct device *dev = get_cpu_device(cpu);
+
+        dev->offline = true;
+        /* Tell user space about the state change */
+        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
+}
+
+static void cpuhp_online_cpu_device(unsigned int cpu)
+{
+        struct device *dev = get_cpu_device(cpu);
+
+        dev->offline = false;
+        /* Tell user space about the state change */
+        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
+}
+
+int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
+{
+        int cpu, ret = 0;
+
+        cpu_maps_update_begin();
+        for_each_online_cpu(cpu) {
+                if (topology_is_primary_thread(cpu))
+                        continue;
+                ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
+                if (ret)
+                        break;
+                /*
+                 * As this needs to hold the cpu maps lock it's impossible
+                 * to call device_offline() because that ends up calling
+                 * cpu_down() which takes cpu maps lock. cpu maps lock
+                 * needs to be held as this might race against in kernel
+                 * abusers of the hotplug machinery (thermal management).
+                 *
+                 * So nothing would update device:offline state. That would
+                 * leave the sysfs entry stale and prevent onlining after
+                 * smt control has been changed to 'off' again. This is
+                 * called under the sysfs hotplug lock, so it is properly
+                 * serialized against the regular offline usage.
+                 */
+                cpuhp_offline_cpu_device(cpu);
+        }
+        if (!ret)
+                cpu_smt_control = ctrlval;
+        cpu_maps_update_done();
+        return ret;
+}
+
+int cpuhp_smt_enable(void)
+{
+        int cpu, ret = 0;
+
+        cpu_maps_update_begin();
+        cpu_smt_control = CPU_SMT_ENABLED;
+        for_each_present_cpu(cpu) {
+                /* Skip online CPUs and CPUs on offline nodes */
+                if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
+                        continue;
+                ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
+                if (ret)
+                        break;
+                /* See comment in cpuhp_smt_disable() */
+                cpuhp_online_cpu_device(cpu);
+        }
+        cpu_maps_update_done();
+        return ret;
+}
+#endif
+
 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
 static ssize_t show_cpuhp_state(struct device *dev,
                                 struct device_attribute *attr, char *buf)
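For reference, a hedged user-space sketch of how these relocated helpers are normally reached: writing "on" or "off" to the SMT control file in sysfs lands in the __store_smt_control() handler kept in the sysfs section below, which calls cpuhp_smt_enable()/cpuhp_smt_disable(). The file path below is the conventional one (an assumption about the running system) and the program must run as root.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        const char *path = "/sys/devices/system/cpu/smt/control";
        const char *mode = (argc > 1) ? argv[1] : "off";        /* "on" or "off" */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror("open smt/control");
                return 1;
        }
        if (write(fd, mode, strlen(mode)) < 0)
                perror("write smt/control");
        close(fd);
        return 0;
}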
@@ -2063,77 +2135,6 @@ static const struct attribute_group cpuhp_cpu_root_attr_group = {

 #ifdef CONFIG_HOTPLUG_SMT

-static void cpuhp_offline_cpu_device(unsigned int cpu)
-{
-        struct device *dev = get_cpu_device(cpu);
-
-        dev->offline = true;
-        /* Tell user space about the state change */
-        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
-}
-
-static void cpuhp_online_cpu_device(unsigned int cpu)
-{
-        struct device *dev = get_cpu_device(cpu);
-
-        dev->offline = false;
-        /* Tell user space about the state change */
-        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
-}
-
-int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
-{
-        int cpu, ret = 0;
-
-        cpu_maps_update_begin();
-        for_each_online_cpu(cpu) {
-                if (topology_is_primary_thread(cpu))
-                        continue;
-                ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
-                if (ret)
-                        break;
-                /*
-                 * As this needs to hold the cpu maps lock it's impossible
-                 * to call device_offline() because that ends up calling
-                 * cpu_down() which takes cpu maps lock. cpu maps lock
-                 * needs to be held as this might race against in kernel
-                 * abusers of the hotplug machinery (thermal management).
-                 *
-                 * So nothing would update device:offline state. That would
-                 * leave the sysfs entry stale and prevent onlining after
-                 * smt control has been changed to 'off' again. This is
-                 * called under the sysfs hotplug lock, so it is properly
-                 * serialized against the regular offline usage.
-                 */
-                cpuhp_offline_cpu_device(cpu);
-        }
-        if (!ret)
-                cpu_smt_control = ctrlval;
-        cpu_maps_update_done();
-        return ret;
-}
-
-int cpuhp_smt_enable(void)
-{
-        int cpu, ret = 0;
-
-        cpu_maps_update_begin();
-        cpu_smt_control = CPU_SMT_ENABLED;
-        for_each_present_cpu(cpu) {
-                /* Skip online CPUs and CPUs on offline nodes */
-                if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
-                        continue;
-                ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
-                if (ret)
-                        break;
-                /* See comment in cpuhp_smt_disable() */
-                cpuhp_online_cpu_device(cpu);
-        }
-        cpu_maps_update_done();
-        return ret;
-}
-
-
 static ssize_t
 __store_smt_control(struct device *dev, struct device_attribute *attr,
                     const char *buf, size_t count)
@@ -175,8 +175,8 @@ void exit_creds(struct task_struct *tsk)
         put_cred(cred);

 #ifdef CONFIG_KEYS_REQUEST_CACHE
-        key_put(current->cached_requested_key);
-        current->cached_requested_key = NULL;
+        key_put(tsk->cached_requested_key);
+        tsk->cached_requested_key = NULL;
 #endif
 }

@@ -11465,8 +11465,10 @@ SYSCALL_DEFINE5(perf_event_open,
                 }
         }

-        if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader))
+        if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) {
+                err = -EINVAL;
                 goto err_locked;
+        }

         /*
          * Must be under the same ctx::mutex as perf_install_in_context(),
@@ -2578,6 +2578,16 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
 #endif

 #ifdef __ARCH_WANT_SYS_CLONE3
+
+/*
+ * copy_thread implementations handle CLONE_SETTLS by reading the TLS value from
+ * the registers containing the syscall arguments for clone. This doesn't work
+ * with clone3 since the TLS value is passed in clone_args instead.
+ */
+#ifndef CONFIG_HAVE_COPY_THREAD_TLS
+#error clone3 requires copy_thread_tls support in arch
+#endif
+
 noinline static int copy_clone_args_from_user(struct kernel_clone_args *kargs,
                                               struct clone_args __user *uargs,
                                               size_t usize)
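A hedged user-space sketch of the clone3 calling convention the comment describes: every argument, including the TLS value, travels in struct clone_args rather than in syscall-argument registers. The struct below is a local copy of the 64-byte v0 layout so the example stays self-contained (the real definition lives in linux/sched.h); SYS_clone3 is only defined by reasonably recent libc headers, and the call returns ENOSYS on kernels without clone3.

#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

/* Local mirror of the original 64-byte clone_args layout.  Note the tls
 * field: with clone3 the TLS value travels here, not in a register, which
 * is what the #error guard in the hunk above protects. */
struct my_clone_args {
        uint64_t flags;
        uint64_t pidfd;
        uint64_t child_tid;
        uint64_t parent_tid;
        uint64_t exit_signal;
        uint64_t stack;
        uint64_t stack_size;
        uint64_t tls;
};

int main(void)
{
        struct my_clone_args args = {
                .exit_signal = SIGCHLD,         /* plain fork-like child */
        };
        long pid = syscall(SYS_clone3, &args, sizeof(args));

        if (pid < 0) {
                perror("clone3");               /* ENOSYS on pre-5.3 kernels */
                return 1;
        }
        if (pid == 0) {
                printf("child %d\n", getpid());
                _exit(0);
        }
        waitpid((pid_t)pid, NULL, 0);
        return 0;
}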
@@ -1178,6 +1178,7 @@ out_error:

 /**
  * wait_for_owner_exiting - Block until the owner has exited
+ * @ret: owner's current futex lock status
  * @exiting: Pointer to the exiting task
  *
  * Caller must hold a refcount on @exiting.
@@ -482,7 +482,7 @@ static struct lock_trace *save_trace(void)
         struct lock_trace *trace, *t2;
         struct hlist_head *hash_head;
         u32 hash;
-        unsigned int max_entries;
+        int max_entries;

         BUILD_BUG_ON_NOT_POWER_OF_2(STACK_TRACE_HASH_SIZE);
         BUILD_BUG_ON(LOCK_TRACE_SIZE_IN_LONGS >= MAX_STACK_TRACE_ENTRIES);
@@ -490,10 +490,8 @@ static struct lock_trace *save_trace(void)
         trace = (struct lock_trace *)(stack_trace + nr_stack_trace_entries);
         max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries -
                 LOCK_TRACE_SIZE_IN_LONGS;
-        trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);

-        if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES -
-            LOCK_TRACE_SIZE_IN_LONGS - 1) {
+        if (max_entries <= 0) {
                 if (!debug_locks_off_graph_unlock())
                         return NULL;

@@ -502,6 +500,7 @@ static struct lock_trace *save_trace(void)

                 return NULL;
         }
+        trace->nr_entries = stack_trace_save(trace->entries, max_entries, 3);

         hash = jhash(trace->entries, trace->nr_entries *
                      sizeof(trace->entries[0]), 0);
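The two hunks above switch max_entries to a signed type and test it before calling stack_trace_save(). A standalone sketch (not the lockdep code) of why the remaining-space computation wants a signed type: when the buffer is nearly exhausted, the unsigned difference wraps around to a huge value and an "<= 0" style check can never fire.

#include <stdio.h>

#define BUF_CAPACITY    16
#define RECORD_SIZE     4

int main(void)
{
        unsigned int used = 14;         /* nearly full */

        /* unsigned arithmetic wraps: 16 - 14 - 4 becomes a huge number */
        unsigned int remaining_u = BUF_CAPACITY - used - RECORD_SIZE;
        /* signed arithmetic goes negative and is easy to test */
        int remaining_s = BUF_CAPACITY - (int)used - RECORD_SIZE;

        printf("unsigned remaining: %u  ('<= 0' never triggers)\n",
               remaining_u);            /* 4294967294 */
        printf("signed remaining:   %d  (correctly flags exhaustion)\n",
               remaining_s);            /* -2 */
        return 0;
}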
@@ -1226,8 +1226,8 @@ wait:
                  * In this case, we attempt to acquire the lock again
                  * without sleeping.
                  */
-                if ((wstate == WRITER_HANDOFF) &&
-                    (rwsem_spin_on_owner(sem, 0) == OWNER_NULL))
+                if (wstate == WRITER_HANDOFF &&
+                    rwsem_spin_on_owner(sem, RWSEM_NONSPINNABLE) == OWNER_NULL)
                         goto trylock_again;

                 /* Block until there are no active lockers. */
@@ -264,12 +264,17 @@ static int ptrace_check_attach(struct task_struct *child, bool ignore_state)
         return ret;
 }

-static int ptrace_has_cap(struct user_namespace *ns, unsigned int mode)
+static bool ptrace_has_cap(const struct cred *cred, struct user_namespace *ns,
+                           unsigned int mode)
 {
+        int ret;
+
         if (mode & PTRACE_MODE_NOAUDIT)
-                return has_ns_capability_noaudit(current, ns, CAP_SYS_PTRACE);
+                ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NOAUDIT);
         else
-                return has_ns_capability(current, ns, CAP_SYS_PTRACE);
+                ret = security_capable(cred, ns, CAP_SYS_PTRACE, CAP_OPT_NONE);
+
+        return ret == 0;
 }

 /* Returns 0 on success, -errno on denial. */
@@ -321,7 +326,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode)
             gid_eq(caller_gid, tcred->sgid) &&
             gid_eq(caller_gid, tcred->gid))
                 goto ok;
-        if (ptrace_has_cap(tcred->user_ns, mode))
+        if (ptrace_has_cap(cred, tcred->user_ns, mode))
                 goto ok;
         rcu_read_unlock();
         return -EPERM;
@@ -340,7 +345,7 @@ ok:
         mm = task->mm;
         if (mm &&
             ((get_dumpable(mm) != SUID_DUMP_USER) &&
-             !ptrace_has_cap(mm->user_ns, mode)))
+             !ptrace_has_cap(cred, mm->user_ns, mode)))
                 return -EPERM;

         return security_ptrace_access_check(task, mode);
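A hedged user-space sketch of the access control these call sites implement: once a task clears its dumpable flag, an unprivileged tracer without CAP_SYS_PTRACE in the target's user namespace is refused at the get_dumpable()/ptrace_has_cap() check above. LSMs such as Yama can add further restrictions, so the exact outcome depends on system policy.

#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
        pid_t pid = fork();

        if (pid == 0) {
                /* make ourselves non-dumpable, then idle for a while */
                prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
                sleep(5);
                _exit(0);
        }

        sleep(1);       /* crude: let the child clear its dumpable flag first */
        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
                perror("PTRACE_ATTACH");        /* expected: EPERM as non-root */
        } else {
                waitpid(pid, NULL, 0);          /* wait for the attach stop */
                ptrace(PTRACE_DETACH, pid, NULL, NULL);
        }
        waitpid(pid, NULL, 0);
        return 0;
}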
@@ -310,6 +310,8 @@ SYSCALL_DEFINE4(rseq, struct rseq __user *, rseq, u32, rseq_len,
         int ret;

         if (flags & RSEQ_FLAG_UNREGISTER) {
+                if (flags & ~RSEQ_FLAG_UNREGISTER)
+                        return -EINVAL;
                 /* Unregister rseq for current thread. */
                 if (current->rseq != rseq || !current->rseq)
                         return -EINVAL;
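A hedged user-space sketch of the register/unregister flow the new check tightens: passing any flag bits beyond RSEQ_FLAG_UNREGISTER to an unregister call now fails with EINVAL instead of being silently accepted. The struct layout and constants are mirrored locally as an assumption to keep the example self-contained; on systems where libc already registers rseq for each thread, the initial registration will simply fail and the program reports it.

#define _GNU_SOURCE
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define RSEQ_FLAG_UNREGISTER    (1 << 0)
#define RSEQ_SIG                0x53053053      /* arbitrary signature for this demo */

/* Minimal local mirror of the rseq ABI area; must be 32-byte aligned */
struct rseq_abi {
        uint32_t cpu_id_start;
        uint32_t cpu_id;
        uint64_t rseq_cs;
        uint32_t flags;
} __attribute__((aligned(32)));

static struct rseq_abi rs;

static long sys_rseq(struct rseq_abi *r, uint32_t len, int flags, uint32_t sig)
{
        return syscall(SYS_rseq, r, len, flags, sig);
}

int main(void)
{
        if (sys_rseq(&rs, sizeof(rs), 0, RSEQ_SIG) < 0) {
                /* libc may have registered its own rseq area already */
                perror("rseq register");
                return 1;
        }

        /* Stray extra flag bit: rejected with EINVAL after the patch above */
        if (sys_rseq(&rs, sizeof(rs), RSEQ_FLAG_UNREGISTER | (1 << 1), RSEQ_SIG) < 0)
                perror("rseq unregister with stray flag");

        /* Proper unregister */
        if (sys_rseq(&rs, sizeof(rs), RSEQ_FLAG_UNREGISTER, RSEQ_SIG) < 0)
                perror("rseq unregister");
        return 0;
}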
@@ -151,6 +151,9 @@ SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,

 #ifdef CONFIG_COMPAT
 COMPAT_SYS_NI(timer_create);
+#endif
+
+#if defined(CONFIG_COMPAT) || defined(CONFIG_ALPHA)
 COMPAT_SYS_NI(getitimer);
 COMPAT_SYS_NI(setitimer);
 #endif
@@ -58,8 +58,9 @@ static void tick_do_update_jiffies64(ktime_t now)

         /*
          * Do a quick check without holding jiffies_lock:
+         * The READ_ONCE() pairs with two updates done later in this function.
          */
-        delta = ktime_sub(now, last_jiffies_update);
+        delta = ktime_sub(now, READ_ONCE(last_jiffies_update));
         if (delta < tick_period)
                 return;

@@ -70,8 +71,9 @@ static void tick_do_update_jiffies64(ktime_t now)
         if (delta >= tick_period) {

                 delta = ktime_sub(delta, tick_period);
-                last_jiffies_update = ktime_add(last_jiffies_update,
-                                                tick_period);
+                /* Pairs with the lockless read in this function. */
+                WRITE_ONCE(last_jiffies_update,
+                           ktime_add(last_jiffies_update, tick_period));

                 /* Slow path for long timeouts */
                 if (unlikely(delta >= tick_period)) {
@@ -79,8 +81,10 @@ static void tick_do_update_jiffies64(ktime_t now)

                         ticks = ktime_divns(delta, incr);

-                        last_jiffies_update = ktime_add_ns(last_jiffies_update,
-                                                           incr * ticks);
+                        /* Pairs with the lockless read in this function. */
+                        WRITE_ONCE(last_jiffies_update,
+                                   ktime_add_ns(last_jiffies_update,
+                                                incr * ticks));
                 }
                 do_timer(++ticks);

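These three hunks implement a common lockless-read pattern: the quick check outside jiffies_lock reads the shared timestamp with READ_ONCE(), and both writers (which run under the lock) publish with WRITE_ONCE(), so the reader can never observe a torn 64-bit value. A hedged user-space analogue, using C11 relaxed atomics as a stand-in for the kernel macros (an assumption, since those macros are not available outside the kernel); build with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_update;            /* shared timestamp */
static pthread_mutex_t update_lock = PTHREAD_MUTEX_INITIALIZER;

static void maybe_advance(uint64_t now, uint64_t period)
{
        /* Quick check without holding the lock (READ_ONCE analogue) */
        if (now - atomic_load_explicit(&last_update, memory_order_relaxed) < period)
                return;

        pthread_mutex_lock(&update_lock);
        /* Re-check under the lock: another thread may have advanced it */
        if (now - atomic_load_explicit(&last_update, memory_order_relaxed) >= period) {
                /* WRITE_ONCE analogue: publish one consistent 64-bit value */
                atomic_store_explicit(&last_update, now, memory_order_relaxed);
                printf("advanced to %llu\n", (unsigned long long)now);
        }
        pthread_mutex_unlock(&update_lock);
}

int main(void)
{
        maybe_advance(10, 4);   /* advances: 10 - 0 >= 4 */
        maybe_advance(12, 4);   /* skipped:  12 - 10 < 4 */
        maybe_advance(15, 4);   /* advances: 15 - 10 >= 4 */
        return 0;
}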