Merge branch 'linus' into locking/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -1459,7 +1459,8 @@ static int btf_modifier_resolve(struct btf_verifier_env *env,

 	/* "typedef void new_void", "const void"...etc */
 	if (!btf_type_is_void(next_type) &&
-	    !btf_type_is_fwd(next_type)) {
+	    !btf_type_is_fwd(next_type) &&
+	    !btf_type_is_func_proto(next_type)) {
 		btf_verifier_log_type(env, v->t, "Invalid type_id");
 		return -EINVAL;
 	}
@@ -572,7 +572,7 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
 	bpf_compute_and_save_data_end(skb, &saved_data_end);

 	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
-				 bpf_prog_run_save_cb);
+				 __bpf_prog_run_save_cb);
 	bpf_restore_data_end(skb, saved_data_end);
 	__skb_pull(skb, offset);
 	skb->sk = save_sk;
@@ -686,7 +686,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
 	}

 	if (htab_is_prealloc(htab)) {
-		pcpu_freelist_push(&htab->freelist, &l->fnode);
+		__pcpu_freelist_push(&htab->freelist, &l->fnode);
 	} else {
 		atomic_dec(&htab->count);
 		l->htab = htab;
@@ -748,7 +748,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 	} else {
 		struct pcpu_freelist_node *l;

-		l = pcpu_freelist_pop(&htab->freelist);
+		l = __pcpu_freelist_pop(&htab->freelist);
 		if (!l)
 			return ERR_PTR(-E2BIG);
 		l_new = container_of(l, struct htab_elem, fnode);
@@ -471,6 +471,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
 	}

 	if (!node || node->prefixlen != key->prefixlen ||
+	    node->prefixlen != matchlen ||
 	    (node->flags & LPM_TREE_NODE_FLAG_IM)) {
 		ret = -ENOENT;
 		goto out;
@@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
 	free_percpu(s->freelist);
 }

-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
-					struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+					 struct pcpu_freelist_node *node)
 {
 	raw_spin_lock(&head->lock);
 	node->next = head->first;
@@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
 	raw_spin_unlock(&head->lock);
 }

-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
 			struct pcpu_freelist_node *node)
 {
 	struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);

-	__pcpu_freelist_push(head, node);
+	___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+			struct pcpu_freelist_node *node)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__pcpu_freelist_push(s, node);
+	local_irq_restore(flags);
 }

 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 	for_each_possible_cpu(cpu) {
 again:
 		head = per_cpu_ptr(s->freelist, cpu);
-		__pcpu_freelist_push(head, buf);
+		___pcpu_freelist_push(head, buf);
 		i++;
 		buf += elem_size;
 		if (i == nr_elems)
@@ -74,14 +84,12 @@ again:
 	local_irq_restore(flags);
 }

-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
 {
 	struct pcpu_freelist_head *head;
 	struct pcpu_freelist_node *node;
-	unsigned long flags;
 	int orig_cpu, cpu;

-	local_irq_save(flags);
 	orig_cpu = cpu = raw_smp_processor_id();
 	while (1) {
 		head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
 		node = head->first;
 		if (node) {
 			head->first = node->next;
-			raw_spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock(&head->lock);
 			return node;
 		}
 		raw_spin_unlock(&head->lock);
 		cpu = cpumask_next(cpu, cpu_possible_mask);
 		if (cpu >= nr_cpu_ids)
 			cpu = 0;
-		if (cpu == orig_cpu) {
-			local_irq_restore(flags);
+		if (cpu == orig_cpu)
 			return NULL;
-		}
 	}
 }
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+	struct pcpu_freelist_node *ret;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	ret = __pcpu_freelist_pop(s);
+	local_irq_restore(flags);
+	return ret;
+}
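The hunks above split the freelist API into an inner ___pcpu_freelist_push()/__pcpu_freelist_pop() layer that only takes the raw spinlock (per the new header comment, the caller must already have interrupts disabled) and outer pcpu_freelist_push()/pcpu_freelist_pop() wrappers that save and restore the interrupt state themselves. The following is a minimal userspace sketch of that layering only, an analogy in which a pthread spinlock and a saved signal mask stand in for raw_spin_lock() and local_irq_save(); all names are invented for the sketch and are not kernel symbols.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

struct node { struct node *next; };

struct freelist {
	pthread_spinlock_t lock;
	struct node *first;
};

/* Inner helper: lock only; caller is assumed to have blocked interruption. */
static void freelist___push(struct freelist *fl, struct node *n)
{
	pthread_spin_lock(&fl->lock);
	n->next = fl->first;
	fl->first = n;
	pthread_spin_unlock(&fl->lock);
}

/* Outer API: saves and restores the "interrupt" (signal) state itself. */
static void freelist_push(struct freelist *fl, struct node *n)
{
	sigset_t all, old;

	sigfillset(&all);
	pthread_sigmask(SIG_BLOCK, &all, &old);   /* ~ local_irq_save()    */
	freelist___push(fl, n);
	pthread_sigmask(SIG_SETMASK, &old, NULL); /* ~ local_irq_restore() */
}

int main(void)
{
	struct freelist fl = { .first = NULL };
	struct node a, b;

	pthread_spin_init(&fl.lock, PTHREAD_PROCESS_PRIVATE);
	freelist_push(&fl, &a);
	freelist_push(&fl, &b);
	printf("top of list: %p (expected %p)\n", (void *)fl.first, (void *)&b);
	pthread_spin_destroy(&fl.lock);
	return 0;
}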
@@ -22,8 +22,12 @@ struct pcpu_freelist_node {
 	struct pcpu_freelist_node *next;
 };

+/* pcpu_freelist_* do spin_lock_irqsave. */
 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
 			    u32 nr_elems);
 int pcpu_freelist_init(struct pcpu_freelist *);
@@ -44,7 +44,7 @@ static void do_up_read(struct irq_work *entry)
 	struct stack_map_irq_work *work;

 	work = container_of(entry, struct stack_map_irq_work, irq_work);
-	up_read(work->sem);
+	up_read_non_owner(work->sem);
 	work->sem = NULL;
 }

@@ -338,6 +338,12 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 	} else {
 		work->sem = &current->mm->mmap_sem;
 		irq_work_queue(&work->irq_work);
+		/*
+		 * The irq_work will release the mmap_sem with
+		 * up_read_non_owner(). The rwsem_release() is called
+		 * here to release the lock from lockdep's perspective.
+		 */
+		rwsem_release(&current->mm->mmap_sem.dep_map, 1, _RET_IP_);
 	}
 }

@@ -713,8 +713,13 @@ static int map_lookup_elem(union bpf_attr *attr)

 	if (bpf_map_is_dev_bound(map)) {
 		err = bpf_map_offload_lookup_elem(map, key, value);
-	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
-		   map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+		goto done;
+	}
+
+	preempt_disable();
+	this_cpu_inc(bpf_prog_active);
+	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
 		err = bpf_percpu_array_copy(map, key, value);
@@ -744,7 +749,10 @@ static int map_lookup_elem(union bpf_attr *attr)
 		}
 		rcu_read_unlock();
 	}
+	this_cpu_dec(bpf_prog_active);
+	preempt_enable();

+done:
 	if (err)
 		goto free_value;

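The change above brackets the map-value copy with preempt_disable() and a bpf_prog_active bump; BPF programs attached to instrumentation check that same per-CPU counter and skip execution when it is already raised, so they cannot re-enter the map mid-copy. A rough userspace sketch of that recursion-guard idea, using a thread-local counter (every name below is invented for illustration):

#include <stdio.h>

static _Thread_local int prog_active;   /* ~ the per-CPU bpf_prog_active */

/* A hook that may fire at any point (think: a probe-attached program). */
static void maybe_fired_hook(void)
{
	if (prog_active) {
		/* Map work already in flight in this context: back off. */
		return;
	}
	/* ... otherwise it would be safe to touch the map here ... */
}

static int copy_map_value(void)
{
	maybe_fired_hook();   /* pretend instrumentation fires mid-copy */
	return 0;
}

static int map_lookup(void)
{
	int err;

	prog_active++;        /* raise the guard around the copy */
	err = copy_map_value();
	prog_active--;
	return err;
}

int main(void)
{
	printf("lookup -> %d\n", map_lookup());
	return 0;
}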
@@ -1617,12 +1617,13 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off,
 	return 0;
 }

-static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
-			     int size, enum bpf_access_type t)
+static int check_sock_access(struct bpf_verifier_env *env, int insn_idx,
+			     u32 regno, int off, int size,
+			     enum bpf_access_type t)
 {
 	struct bpf_reg_state *regs = cur_regs(env);
 	struct bpf_reg_state *reg = &regs[regno];
-	struct bpf_insn_access_aux info;
+	struct bpf_insn_access_aux info = {};

 	if (reg->smin_value < 0) {
 		verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
@@ -1636,6 +1637,8 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off,
 		return -EACCES;
 	}

+	env->insn_aux_data[insn_idx].ctx_field_size = info.ctx_field_size;
+
 	return 0;
 }

@@ -2032,7 +2035,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 			verbose(env, "cannot write into socket\n");
 			return -EACCES;
 		}
-		err = check_sock_access(env, regno, off, size, t);
+		err = check_sock_access(env, insn_idx, regno, off, size, t);
 		if (!err && value_regno >= 0)
 			mark_reg_unknown(env, regs, value_regno);
 	} else {
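Two details in the hunk above work together: the aux struct is now zero-initialized with "= {}" so a field the callee does not fill in reads as 0 rather than stack garbage, and the instruction index is threaded through so the field size can be recorded per instruction. A minimal userspace sketch of the zero-initialization point only (all names invented):

#include <stdio.h>

struct access_aux {
	int ctx_field_size;   /* only set for some access kinds */
};

/* Stand-in for a helper that may or may not fill in the aux data. */
static int classify_access(int off, struct access_aux *aux)
{
	if (off == 0)
		aux->ctx_field_size = 4;  /* known fixed-size field */
	return 0;                         /* other offsets leave aux untouched */
}

static int check_access(int off)
{
	struct access_aux aux = {};       /* zero-init: no garbage if untouched */

	if (classify_access(off, &aux))
		return -1;
	printf("off %d -> ctx_field_size %d\n", off, aux.ctx_field_size);
	return 0;
}

int main(void)
{
	check_access(0);
	check_access(8);
	return 0;
}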
@@ -4963,6 +4963,11 @@ static void __perf_event_period(struct perf_event *event,
 	}
 }

+static int perf_event_check_period(struct perf_event *event, u64 value)
+{
+	return event->pmu->check_period(event, value);
+}
+
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
 	u64 value;
@@ -4979,6 +4984,9 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
 	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
 		return -EINVAL;

+	if (perf_event_check_period(event, value))
+		return -EINVAL;
+
 	event_function_call(event, __perf_event_period, &value);

 	return 0;
@@ -9391,6 +9399,11 @@ static int perf_pmu_nop_int(struct pmu *pmu)
 	return 0;
 }

+static int perf_event_nop_int(struct perf_event *event, u64 value)
+{
+	return 0;
+}
+
 static DEFINE_PER_CPU(unsigned int, nop_txn_flags);

 static void perf_pmu_start_txn(struct pmu *pmu, unsigned int flags)
@@ -9691,6 +9704,9 @@ got_cpu_context:
 		pmu->pmu_disable = perf_pmu_nop_void;
 	}

+	if (!pmu->check_period)
+		pmu->check_period = perf_event_nop_int;
+
 	if (!pmu->event_idx)
 		pmu->event_idx = perf_event_idx_default;

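The new check_period hook follows the convention visible in the hunks above for optional per-PMU callbacks: if a driver leaves the pointer NULL, a no-op default is filled in at registration time so callers can invoke it unconditionally. A small self-contained sketch of that pattern (plain C, invented names, not perf's actual structures):

#include <stdio.h>
#include <stdint.h>

struct pmu_like {
	const char *name;
	/* Optional hook: a driver may leave this NULL. */
	int (*check_period)(struct pmu_like *pmu, uint64_t value);
};

static int nop_check_period(struct pmu_like *pmu, uint64_t value)
{
	(void)pmu; (void)value;
	return 0;                       /* accept everything by default */
}

static void register_pmu(struct pmu_like *pmu)
{
	/* Fill in defaults once, so callers never need a NULL check. */
	if (!pmu->check_period)
		pmu->check_period = nop_check_period;
}

static int set_period(struct pmu_like *pmu, uint64_t value)
{
	if (pmu->check_period(pmu, value))
		return -22;             /* -EINVAL-style rejection */
	/* ... program the new period ... */
	return 0;
}

static int fussy_check(struct pmu_like *pmu, uint64_t value)
{
	(void)pmu;
	return value == 0;              /* reject a zero period */
}

int main(void)
{
	struct pmu_like plain = { .name = "plain" };
	struct pmu_like fussy = { .name = "fussy", .check_period = fussy_check };

	register_pmu(&plain);
	register_pmu(&fussy);
	printf("plain, period 0 -> %d\n", set_period(&plain, 0));
	printf("fussy, period 0 -> %d\n", set_period(&fussy, 0));
	return 0;
}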
@@ -734,6 +734,9 @@ struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
 	size = sizeof(struct ring_buffer);
 	size += nr_pages * sizeof(void *);

+	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
+		goto fail;
+
 	rb = kzalloc(size, GFP_KERNEL);
 	if (!rb)
 		goto fail;
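The added check rejects allocation sizes whose order exceeds what the page allocator can ever satisfy, before attempting the allocation. A standalone sketch of the same arithmetic (the page shift and maximum order below are assumed example values, not read from any kernel config):

#include <stdio.h>

#define PAGE_SHIFT_SKETCH 12   /* assumed 4 KiB pages        */
#define MAX_ORDER_SKETCH  11   /* assumed allocator order cap */

/* Smallest n with (1 << n) >= size; mirrors what order_base_2() computes. */
static unsigned int order_base_2_sketch(unsigned long long size)
{
	unsigned int order = 0;

	while ((1ULL << order) < size)
		order++;
	return order;
}

/* Reject ring-buffer sizes the page allocator could never satisfy. */
static int rb_size_ok(unsigned long long nr_pages)
{
	unsigned long long size = 64 + nr_pages * sizeof(void *); /* header + page array */

	return order_base_2_sketch(size) < PAGE_SHIFT_SKETCH + MAX_ORDER_SKETCH;
}

int main(void)
{
	printf("1024 pages ok?  %d\n", rb_size_ok(1024ULL));
	printf("2^40 pages ok?  %d\n", rb_size_ok(1ULL << 40));
	return 0;
}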
@@ -2217,11 +2217,11 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
 	 * decrement the counter at queue_unlock() when some error has
 	 * occurred and we don't end up adding the task to the list.
 	 */
-	hb_waiters_inc(hb);
+	hb_waiters_inc(hb); /* implies smp_mb(); (A) */

 	q->lock_ptr = &hb->lock;

-	spin_lock(&hb->lock); /* implies smp_mb(); (A) */
+	spin_lock(&hb->lock);
 	return hb;
 }

@@ -2857,35 +2857,39 @@ retry_private:
 	 * and BUG when futex_unlock_pi() interleaves with this.
 	 *
 	 * Therefore acquire wait_lock while holding hb->lock, but drop the
-	 * latter before calling rt_mutex_start_proxy_lock(). This still fully
-	 * serializes against futex_unlock_pi() as that does the exact same
-	 * lock handoff sequence.
+	 * latter before calling __rt_mutex_start_proxy_lock(). This
+	 * interleaves with futex_unlock_pi() -- which does a similar lock
+	 * handoff -- such that the latter can observe the futex_q::pi_state
+	 * before __rt_mutex_start_proxy_lock() is done.
 	 */
 	raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
 	spin_unlock(q.lock_ptr);
+	/*
+	 * __rt_mutex_start_proxy_lock() unconditionally enqueues the @rt_waiter
+	 * such that futex_unlock_pi() is guaranteed to observe the waiter when
+	 * it sees the futex_q::pi_state.
+	 */
 	ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current);
 	raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock);

 	if (ret) {
 		if (ret == 1)
 			ret = 0;
-
-		spin_lock(q.lock_ptr);
-		goto no_block;
+		goto cleanup;
 	}

-
 	if (unlikely(to))
 		hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);

 	ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter);

+cleanup:
 	spin_lock(q.lock_ptr);
 	/*
-	 * If we failed to acquire the lock (signal/timeout), we must
+	 * If we failed to acquire the lock (deadlock/signal/timeout), we must
 	 * first acquire the hb->lock before removing the lock from the
-	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex
-	 * wait lists consistent.
+	 * rt_mutex waitqueue, such that we can keep the hb and rt_mutex wait
+	 * lists consistent.
 	 *
 	 * In particular; it is important that futex_unlock_pi() can not
 	 * observe this inconsistency.
@@ -3009,6 +3013,10 @@ retry:
 	 * there is no point where we hold neither; and therefore
 	 * wake_futex_pi() must observe a state consistent with what we
 	 * observed.
+	 *
+	 * In particular; this forces __rt_mutex_start_proxy() to
+	 * complete such that we're guaranteed to observe the
+	 * rt_waiter. Also see the WARN in wake_futex_pi().
 	 */
 	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
 	spin_unlock(&hb->lock);
@@ -1726,12 +1726,33 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock,
 	rt_mutex_set_owner(lock, NULL);
 }

+/**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock:		the rt_mutex to take
+ * @waiter:		the pre-initialized rt_mutex_waiter
+ * @task:		the task to prepare
+ *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: does _NOT_ remove the @waiter on failure; must either call
+ * rt_mutex_wait_proxy_lock() or rt_mutex_cleanup_proxy_lock() after this.
+ *
+ * Returns:
+ *  0 - task blocked on lock
+ *  1 - acquired the lock for task, caller should wake it up
+ * <0 - error
+ *
+ * Special API call for PI-futex support.
+ */
 int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 				struct rt_mutex_waiter *waiter,
 				struct task_struct *task)
 {
 	int ret;

+	lockdep_assert_held(&lock->wait_lock);
+
 	if (try_to_take_rt_mutex(lock, task, NULL))
 		return 1;

@@ -1749,9 +1770,6 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 		ret = 0;
 	}

-	if (unlikely(ret))
-		remove_waiter(lock, waiter);
-
 	debug_rt_mutex_print_deadlock(waiter);

 	return ret;
@@ -1763,12 +1781,18 @@ int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
  * @waiter:		the pre-initialized rt_mutex_waiter
  * @task:		the task to prepare
  *
+ * Starts the rt_mutex acquire; it enqueues the @waiter and does deadlock
+ * detection. It does not wait, see rt_mutex_wait_proxy_lock() for that.
+ *
+ * NOTE: unlike __rt_mutex_start_proxy_lock this _DOES_ remove the @waiter
+ * on failure.
+ *
  * Returns:
  *  0 - task blocked on lock
  *  1 - acquired the lock for task, caller should wake it up
  * <0 - error
  *
- * Special API call for FUTEX_REQUEUE_PI support.
+ * Special API call for PI-futex support.
  */
 int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 			      struct rt_mutex_waiter *waiter,
@@ -1778,6 +1802,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,

 	raw_spin_lock_irq(&lock->wait_lock);
 	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
+	if (unlikely(ret))
+		remove_waiter(lock, waiter);
 	raw_spin_unlock_irq(&lock->wait_lock);

 	return ret;
@@ -1845,7 +1871,8 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
- * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
+ * Attempt to clean up after a failed __rt_mutex_start_proxy_lock() or
+ * rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock; we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
@@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
 	dentry = chan->cb->create_buf_file(tmpname, chan->parent,
 					   S_IRUSR, buf,
 					   &chan->is_global);
+	if (IS_ERR(dentry))
+		dentry = NULL;

 	kfree(tmpname);

@@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
 		dentry = chan->cb->create_buf_file(NULL, NULL,
 						   S_IRUSR, buf,
 						   &chan->is_global);
-		if (WARN_ON(dentry))
+		if (IS_ERR_OR_NULL(dentry))
 			goto free_buf;
 	}

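The relay fix hinges on the fact that create_buf_file() may return a real pointer, NULL (for example when the callback manages a single global buffer), or an ERR_PTR-encoded error, and the caller must distinguish the three. The following miniature re-implements the error-pointer encoding in plain C to show how IS_ERR()/IS_ERR_OR_NULL() distinguish those cases; the helpers are hypothetical stand-ins, not the kernel's.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO_SKETCH 4095

static void *err_ptr(long err)             { return (void *)(intptr_t)err; }
static int   is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO_SKETCH;
}
static int   is_err_or_null(const void *p) { return !p || is_err(p); }

/* Stand-in for chan->cb->create_buf_file(): may fail, opt out, or succeed. */
static void *create_buf_file_sketch(int mode)
{
	static int dummy_file;

	if (mode == 0)
		return err_ptr(-ENOMEM);  /* hard failure             */
	if (mode == 1)
		return NULL;              /* "global buffer" opt-out  */
	return &dummy_file;               /* real dentry-like object  */
}

int main(void)
{
	for (int mode = 0; mode < 3; mode++) {
		void *d = create_buf_file_sketch(mode);

		if (is_err(d))
			d = NULL;         /* normalize encoded errors to "no file" */
		printf("mode %d: usable file? %s\n", mode,
		       is_err_or_null(d) ? "no" : "yes");
	}
	return 0;
}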
@@ -322,7 +322,7 @@ static bool update_stats(struct psi_group *group)
 	expires = group->next_update;
 	if (now < expires)
 		goto out;
-	if (now - expires > psi_period)
+	if (now - expires >= psi_period)
 		missed_periods = div_u64(now - expires, psi_period);

 	/*
@@ -688,6 +688,48 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, kernel_siginfo_t *in
 }
 EXPORT_SYMBOL_GPL(dequeue_signal);

+static int dequeue_synchronous_signal(kernel_siginfo_t *info)
+{
+	struct task_struct *tsk = current;
+	struct sigpending *pending = &tsk->pending;
+	struct sigqueue *q, *sync = NULL;
+
+	/*
+	 * Might a synchronous signal be in the queue?
+	 */
+	if (!((pending->signal.sig[0] & ~tsk->blocked.sig[0]) & SYNCHRONOUS_MASK))
+		return 0;
+
+	/*
+	 * Return the first synchronous signal in the queue.
+	 */
+	list_for_each_entry(q, &pending->list, list) {
+		/* Synchronous signals have a positive si_code */
+		if ((q->info.si_code > SI_USER) &&
+		    (sigmask(q->info.si_signo) & SYNCHRONOUS_MASK)) {
+			sync = q;
+			goto next;
+		}
+	}
+	return 0;
+next:
+	/*
+	 * Check if there is another siginfo for the same signal.
+	 */
+	list_for_each_entry_continue(q, &pending->list, list) {
+		if (q->info.si_signo == sync->info.si_signo)
+			goto still_pending;
+	}
+
+	sigdelset(&pending->signal, sync->info.si_signo);
+	recalc_sigpending();
+still_pending:
+	list_del_init(&sync->list);
+	copy_siginfo(info, &sync->info);
+	__sigqueue_free(sync);
+	return info->si_signo;
+}
+
 /*
  * Tell a process that it has a new active signal..
  *
@@ -1057,10 +1099,9 @@ static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struc

 	result = TRACE_SIGNAL_DELIVERED;
 	/*
-	 * Skip useless siginfo allocation for SIGKILL SIGSTOP,
-	 * and kernel threads.
+	 * Skip useless siginfo allocation for SIGKILL and kernel threads.
 	 */
-	if (sig_kernel_only(sig) || (t->flags & PF_KTHREAD))
+	if ((sig == SIGKILL) || (t->flags & PF_KTHREAD))
 		goto out_set;

 	/*
@@ -2394,6 +2435,14 @@ relock:
 		goto relock;
 	}

+	/* Has this task already been marked for death? */
+	if (signal_group_exit(signal)) {
+		ksig->info.si_signo = signr = SIGKILL;
+		sigdelset(&current->pending.signal, SIGKILL);
+		recalc_sigpending();
+		goto fatal;
+	}
+
 	for (;;) {
 		struct k_sigaction *ka;

@@ -2407,7 +2456,15 @@ relock:
 			goto relock;
 		}

-		signr = dequeue_signal(current, &current->blocked, &ksig->info);
+		/*
+		 * Signals generated by the execution of an instruction
+		 * need to be delivered before any other pending signals
+		 * so that the instruction pointer in the signal stack
+		 * frame points to the faulting instruction.
+		 */
+		signr = dequeue_synchronous_signal(&ksig->info);
+		if (!signr)
+			signr = dequeue_signal(current, &current->blocked, &ksig->info);

 		if (!signr)
 			break; /* will return 0 */
@@ -2489,6 +2546,7 @@ relock:
 			continue;
 		}

+	fatal:
 		spin_unlock_irq(&sighand->siglock);

 		/*
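As the added comment explains, fault-style signals must jump ahead of whatever else is pending so the signal frame points at the faulting instruction; dequeue_synchronous_signal() therefore scans the pending queue for the first kernel-generated signal in a "synchronous" mask. A toy userspace sketch of that scan over a pending list (the mask bits and signal numbers below are illustrative values, not taken from any header):

#include <stdio.h>

/* Pending-queue entries, oldest first. */
struct pending_sig {
	int signo;
	int si_code;                 /* > 0 means kernel-generated here */
	struct pending_sig *next;
};

/* e.g. SIGSEGV(11), SIGFPE(8), SIGILL(4) -- illustrative bit positions */
#define SYNC_MASK_SKETCH ((1u << 11) | (1u << 8) | (1u << 4))

/* Return the first queued signal that looks synchronous, or 0. */
static int first_synchronous(const struct pending_sig *head)
{
	for (const struct pending_sig *q = head; q; q = q->next) {
		if (q->si_code > 0 && ((1u << q->signo) & SYNC_MASK_SKETCH))
			return q->signo;
	}
	return 0;
}

int main(void)
{
	struct pending_sig usr1 = { .signo = 10, .si_code = 0, .next = NULL };
	struct pending_sig segv = { .signo = 11, .si_code = 1, .next = NULL };

	usr1.next = &segv;  /* SIGUSR1 was queued first, SIGSEGV second */
	printf("deliver first: %d (the fault-style signal jumps the queue)\n",
	       first_synchronous(&usr1));
	return 0;
}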
@@ -1204,22 +1204,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *

 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-	int err;
-
-	mutex_lock(&bpf_event_mutex);
-	err = __bpf_probe_register(btp, prog);
-	mutex_unlock(&bpf_event_mutex);
-	return err;
+	return __bpf_probe_register(btp, prog);
 }

 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
 {
-	int err;
-
-	mutex_lock(&bpf_event_mutex);
-	err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
-	mutex_unlock(&bpf_event_mutex);
-	return err;
+	return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
 }

 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
@@ -3384,6 +3384,8 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
 	const char tgid_space[] = " ";
 	const char space[] = " ";

+	print_event_info(buf, m);
+
 	seq_printf(m, "# %s  _-----=> irqs-off\n",
 		   tgid ? tgid_space : space);
 	seq_printf(m, "# %s / _----=> need-resched\n",
@@ -861,22 +861,14 @@ static const struct file_operations kprobe_profile_ops = {
 static nokprobe_inline int
 fetch_store_strlen(unsigned long addr)
 {
-	mm_segment_t old_fs;
 	int ret, len = 0;
 	u8 c;

-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	pagefault_disable();
-
 	do {
-		ret = __copy_from_user_inatomic(&c, (u8 *)addr + len, 1);
+		ret = probe_mem_read(&c, (u8 *)addr + len, 1);
 		len++;
 	} while (c && ret == 0 && len < MAX_STRING_SIZE);

-	pagefault_enable();
-	set_fs(old_fs);
-
 	return (ret < 0) ? ret : len;
 }

@@ -180,10 +180,12 @@ store_trace_args(void *data, struct trace_probe *tp, struct pt_regs *regs,
 		if (unlikely(arg->dynamic))
 			*dl = make_data_loc(maxlen, dyndata - base);
 		ret = process_fetch_insn(arg->code, regs, dl, base);
-		if (unlikely(ret < 0 && arg->dynamic))
+		if (unlikely(ret < 0 && arg->dynamic)) {
 			*dl = make_data_loc(0, dyndata - base);
-		else
+		} else {
 			dyndata += ret;
+			maxlen -= ret;
+		}
 	}
 }

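The key line added above is "maxlen -= ret;": when several variable-length arguments are packed into one dynamic area, the remaining space must shrink after every store, otherwise a later argument is sized against stale space and can overrun the record. A small self-contained sketch of that bookkeeping (invented names, ordinary strings instead of fetched probe data):

#include <stdio.h>
#include <string.h>

/* Pack variable-length strings into one buffer, shrinking the remaining
 * space after every store.
 */
static size_t pack_strings(char *buf, size_t maxlen,
			   const char *const *args, size_t nargs)
{
	size_t used = 0;

	for (size_t i = 0; i < nargs; i++) {
		size_t len = strlen(args[i]) + 1;

		if (len > maxlen)
			break;            /* record truncated: out of space */
		memcpy(buf + used, args[i], len);
		used   += len;
		maxlen -= len;            /* without this, later args overflow */
	}
	return used;
}

int main(void)
{
	const char *args[] = { "alpha", "beta", "gamma" };
	char buf[12];

	printf("bytes packed: %zu\n", pack_strings(buf, sizeof(buf), args, 3));
	return 0;
}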
@@ -5,7 +5,7 @@
  * Copyright (C) IBM Corporation, 2010-2012
  * Author:	Srikar Dronamraju <srikar@linux.vnet.ibm.com>
  */
-#define pr_fmt(fmt)	"trace_kprobe: " fmt
+#define pr_fmt(fmt)	"trace_uprobe: " fmt

 #include <linux/ctype.h>
 #include <linux/module.h>
@@ -160,6 +160,13 @@ fetch_store_string(unsigned long addr, void *dest, void *base)
 	if (ret >= 0) {
 		if (ret == maxlen)
 			dst[ret - 1] = '\0';
+		else
+			/*
+			 * Include the terminating null byte. In this case it
+			 * was copied by strncpy_from_user but not accounted
+			 * for in ret.
+			 */
+			ret++;
 		*(u32 *)dest = make_data_loc(ret, (void *)dst - base);
 	}

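The second hunk above fixes a length bookkeeping detail: strncpy_from_user() reports the string length without the terminating NUL it copied, so the recorded data length must add one byte in the non-truncated case. A standalone sketch of that accounting, with a hypothetical copy helper mimicking the strncpy_from_user() return convention:

#include <stdio.h>
#include <string.h>

/* Mimic strncpy_from_user(): returns the string length *excluding* the
 * trailing NUL, or maxlen if the source did not fit.
 */
static long copy_string_sketch(char *dst, const char *src, long maxlen)
{
	long i;

	for (i = 0; i < maxlen; i++) {
		dst[i] = src[i];
		if (!src[i])
			return i;         /* length without the NUL */
	}
	return maxlen;                    /* truncated, no NUL copied */
}

int main(void)
{
	char dst[16];
	long ret = copy_string_sketch(dst, "hello", (long)sizeof(dst));

	if (ret >= 0) {
		if (ret == (long)sizeof(dst))
			dst[ret - 1] = '\0';  /* truncated: force termination */
		else
			ret++;                /* count the NUL that was copied */
	}
	printf("recorded length: %ld (includes the terminator)\n", ret);
	return 0;
}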