Merge Linus' tree to be able to apply submitted patches to newer code than
current trivial.git base
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
* ring_buffer_wait - wait for input to the ring buffer
* @buffer: buffer to wait on
* @cpu: the cpu buffer to wait on
* @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
*
* If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
* as data is added to any of the @buffer's cpu buffers. Otherwise
* it will wait for data to be added to a specific cpu buffer.
*/
int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
{
struct ring_buffer_per_cpu *cpu_buffer;
struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
DEFINE_WAIT(wait);
struct rb_irq_work *work;
int ret = 0;

/*
* Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
}


prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
while (true) {
prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);

/*
* The events can happen in critical sections where
* checking a work queue can cause deadlocks.
* After adding a task to the queue, this flag is set
* only to notify events to try to wake up the queue
* using irq_work.
*
* We don't clear it even if the buffer is no longer
* empty. The flag only causes the next event to run
* irq_work to do the work queue wake up. The worse
* that can happen if we race with !trace_empty() is that
* an event will cause an irq_work to try to wake up
* an empty queue.
*
* There's no reason to protect this flag either, as
* the work queue and irq_work logic will do the necessary
* synchronization for the wake ups. The only thing
* that is necessary is that the wake up happens after
* a task has been queued. It's OK for spurious wake ups.
*/
work->waiters_pending = true;
/*
* The events can happen in critical sections where
* checking a work queue can cause deadlocks.
* After adding a task to the queue, this flag is set
* only to notify events to try to wake up the queue
* using irq_work.
*
* We don't clear it even if the buffer is no longer
* empty. The flag only causes the next event to run
* irq_work to do the work queue wake up. The worse
* that can happen if we race with !trace_empty() is that
* an event will cause an irq_work to try to wake up
* an empty queue.
*
* There's no reason to protect this flag either, as
* the work queue and irq_work logic will do the necessary
* synchronization for the wake ups. The only thing
* that is necessary is that the wake up happens after
* a task has been queued. It's OK for spurious wake ups.
*/
work->waiters_pending = true;

if (signal_pending(current)) {
ret = -EINTR;
break;
}

if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
break;

if (cpu != RING_BUFFER_ALL_CPUS &&
!ring_buffer_empty_cpu(buffer, cpu)) {
unsigned long flags;
bool pagebusy;

if (!full)
break;

raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

if (!pagebusy)
break;
}

if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
schedule();
}

finish_wait(&work->waiters, &wait);
return 0;

return ret;
}

/**
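Editor's note: the comment block and the new while (true) loop above describe a wakeup scheme in which a waiter advertises itself through waiters_pending, re-checks the wait condition after every wakeup, and treats spurious wakeups as harmless. Below is a minimal userspace sketch of that shape using POSIX threads; every name in it (demo_buf, demo_wait, full_watermark) is invented for illustration and none of it is the kernel API.

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct demo_buf {
        pthread_mutex_t lock;
        pthread_cond_t  waiters;        /* plays the role of work->waiters */
        int             nr_events;      /* plays the role of "buffer not empty" */
        int             full_watermark; /* plays the role of "a full page" */
        bool            interrupted;    /* plays the role of signal_pending() */
    };

    /* Wait until data is available, or, when 'full' is set, until at least
     * full_watermark events are queued, loosely mirroring the @full
     * semantics documented in the first hunk above. */
    static int demo_wait(struct demo_buf *b, bool full)
    {
        int ret = 0;

        pthread_mutex_lock(&b->lock);
        while (true) {
            if (b->interrupted) {        /* like the -EINTR break */
                ret = -EINTR;
                break;
            }
            if (b->nr_events > 0 &&
                (!full || b->nr_events >= b->full_watermark))
                break;                   /* condition met, stop waiting */

            /* pthread_cond_wait() may return spuriously; that is fine,
             * because the loop simply re-checks every condition before
             * deciding whether to return, the same reasoning the comment
             * above gives for waiters_pending. */
            pthread_cond_wait(&b->waiters, &b->lock);
        }
        pthread_mutex_unlock(&b->lock);
        return ret;
    }

A producer would increment nr_events under the lock and signal the condition variable; the kernel code replaces the mutex/condvar pair with its wait queue plus irq_work machinery, but the recheck-in-a-loop structure is the same.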
@@ -626,8 +653,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
work = &cpu_buffer->irq_work;
}

work->waiters_pending = true;
poll_wait(filp, &work->waiters, poll_table);
work->waiters_pending = true;
/*
* There's a tight race between setting the waiters_pending and
* checking if the ring buffer is empty. Once the waiters_pending bit
* is set, the next event will wake the task up, but we can get stuck
* if there's only a single event in.
*
* FIXME: Ideally, we need a memory barrier on the writer side as well,
* but adding a memory barrier to all events will cause too much of a
* performance hit in the fast path. We only need a memory barrier when
* the buffer goes from empty to having content. But as this race is
* extremely small, and it's not a problem if another event comes in, we
* will fix it later.
*/
smp_mb();

if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
(cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
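Editor's note: the smp_mb() comment above is about ordering two store/load pairs: the poller stores waiters_pending and then loads "is the buffer empty?", while a writer stores new data and then checks waiters_pending to decide whether to kick irq_work. A rough userspace model of that pairing, using C11 atomics and invented names, is sketched below; the second fence is the writer-side barrier the FIXME says the fast path currently avoids paying for.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_bool waiters_pending;  /* "somebody is polling" */
    static atomic_int  nr_events;        /* stand-in for "buffer is not empty" */

    /* Poller: publish interest, full fence, then re-check for data. */
    static void *poller(void *arg)
    {
        (void)arg;
        atomic_store_explicit(&waiters_pending, true, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);   /* role of smp_mb() */
        if (atomic_load_explicit(&nr_events, memory_order_relaxed) > 0)
            printf("poller: saw the event, no need to sleep\n");
        else
            printf("poller: would sleep and rely on the writer's wakeup\n");
        return NULL;
    }

    /* Writer: publish the event, full fence, then check whether anyone
     * needs a wakeup. With fences on both sides, at least one thread is
     * guaranteed to observe the other's store, so the wakeup cannot be
     * lost; dropping the writer's fence reopens the tiny race the
     * comment describes. */
    static void *writer(void *arg)
    {
        (void)arg;
        atomic_fetch_add_explicit(&nr_events, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_seq_cst);
        if (atomic_load_explicit(&waiters_pending, memory_order_relaxed))
            printf("writer: waiter registered, send a wakeup\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t a, b;
        pthread_create(&a, NULL, poller, NULL);
        pthread_create(&b, NULL, writer, NULL);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
    }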
@@ -1968,7 +2009,7 @@ rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)

/**
* rb_update_event - update event type and data
* @event: the even to update
* @event: the event to update
* @type: the type of event
* @length: the size of the event field in the ring buffer
*
@@ -3341,21 +3382,16 @@ static void rb_iter_reset(struct ring_buffer_iter *iter)
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

/* Iterator usage is expected to have record disabled */
if (list_empty(&cpu_buffer->reader_page->list)) {
iter->head_page = rb_set_head_page(cpu_buffer);
if (unlikely(!iter->head_page))
return;
iter->head = iter->head_page->read;
} else {
iter->head_page = cpu_buffer->reader_page;
iter->head = cpu_buffer->reader_page->read;
}
iter->head_page = cpu_buffer->reader_page;
iter->head = cpu_buffer->reader_page->read;

iter->cache_reader_page = iter->head_page;
iter->cache_read = cpu_buffer->read;

if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;
else
iter->read_stamp = iter->head_page->page->time_stamp;
iter->cache_reader_page = cpu_buffer->reader_page;
iter->cache_read = cpu_buffer->read;
}

/**
@@ -3748,12 +3784,14 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
return NULL;

/*
* We repeat when a time extend is encountered.
* Since the time extend is always attached to a data event,
* we should never loop more than once.
* (We never hit the following condition more than twice).
* We repeat when a time extend is encountered or we hit
* the end of the page. Since the time extend is always attached
* to a data event, we should never loop more than three times.
* Once for going to next page, once on time extend, and
* finally once to get the event.
* (We never hit the following condition more than thrice).
*/
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
return NULL;

if (rb_per_cpu_empty(cpu_buffer))
@@ -205,7 +205,6 @@ static void ring_buffer_consumer(void)
break;

schedule();
__set_current_state(TASK_RUNNING);
}
reader_finish = 0;
complete(&read_done);
@@ -379,7 +378,6 @@ static int ring_buffer_consumer_thread(void *arg)
break;

schedule();
__set_current_state(TASK_RUNNING);
}
__set_current_state(TASK_RUNNING);

@@ -407,7 +405,6 @@ static int ring_buffer_producer_thread(void *arg)
trace_printk("Sleeping for 10 secs\n");
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(HZ * SLEEP_TIME);
__set_current_state(TASK_RUNNING);
}

if (kill_test)
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static int wait_on_pipe(struct trace_iterator *iter)
static int wait_on_pipe(struct trace_iterator *iter, bool full)
{
/* Iterators are static, they should be filled or empty */
if (trace_buffer_iter(iter, iter->cpu_file))
return 0;

return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file);
return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
full);
}

#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)

mutex_unlock(&iter->mutex);

ret = wait_on_pipe(iter);
ret = wait_on_pipe(iter, false);

mutex_lock(&iter->mutex);

if (ret)
return ret;

if (signal_pending(current))
return -EINTR;
}

return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
goto out_unlock;
}
mutex_unlock(&trace_types_lock);
ret = wait_on_pipe(iter);
ret = wait_on_pipe(iter, false);
mutex_lock(&trace_types_lock);
if (ret) {
size = ret;
goto out_unlock;
}
if (signal_pending(current)) {
size = -EINTR;
goto out_unlock;
}
goto again;
}
size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
};
struct buffer_ref *ref;
int entries, size, i;
ssize_t ret;
ssize_t ret = 0;

mutex_lock(&trace_types_lock);

@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
int r;

ref = kzalloc(sizeof(*ref), GFP_KERNEL);
if (!ref)
if (!ref) {
ret = -ENOMEM;
break;
}

ref->ref = 1;
ref->buffer = iter->trace_buffer->buffer;
ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
if (!ref->page) {
ret = -ENOMEM;
kfree(ref);
break;
}
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,

/* did we read anything? */
if (!spd.nr_pages) {
if (ret)
goto out;

if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
ret = -EAGAIN;
goto out;
}
mutex_unlock(&trace_types_lock);
ret = wait_on_pipe(iter);
ret = wait_on_pipe(iter, true);
mutex_lock(&trace_types_lock);
if (ret)
goto out;
if (signal_pending(current)) {
ret = -EINTR;
goto out;
}

goto again;
}

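Editor's note: the "did we read anything?" branch above is a common blocking-read retry shape: an already-recorded error wins, non-blocking callers get -EAGAIN, and everyone else drops the lock, waits (now for a full page, via wait_on_pipe(iter, true)), and retries the whole read. A compressed userspace sketch of that control flow follows; the names are invented and the helpers are stubbed only so the fragment stands alone.

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <sys/types.h>

    struct demo_pipe;   /* opaque; the details don't matter for the sketch */

    /* Stubs standing in for "fill splice pages from the buffer" and
     * "sleep until a full page is available". */
    static ssize_t demo_fill_pages(struct demo_pipe *p) { (void)p; return 0; }
    static int demo_wait_for_full_page(struct demo_pipe *p) { (void)p; return -EINTR; }

    static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

    static ssize_t demo_splice_read(struct demo_pipe *p, bool nonblock)
    {
        ssize_t ret;

        pthread_mutex_lock(&demo_lock);
    again:
        ret = demo_fill_pages(p);              /* >0 pages, 0 empty, <0 error */
        if (ret == 0) {
            if (nonblock) {                    /* O_NONBLOCK / SPLICE_F_NONBLOCK */
                ret = -EAGAIN;
                goto out;
            }
            pthread_mutex_unlock(&demo_lock);  /* never sleep holding the lock */
            ret = demo_wait_for_full_page(p);  /* like wait_on_pipe(iter, true) */
            pthread_mutex_lock(&demo_lock);
            if (ret)                           /* e.g. -EINTR goes to the caller */
                goto out;
            goto again;
        }
    out:
        pthread_mutex_unlock(&demo_lock);
        return ret;
    }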
@@ -2513,8 +2513,11 @@ static __init int event_test_thread(void *unused)
kfree(test_malloc);

set_current_state(TASK_INTERRUPTIBLE);
while (!kthread_should_stop())
while (!kthread_should_stop()) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);

return 0;
}
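Editor's note: pieced together from the added lines above (indentation added here for readability, this is not standalone code), the tail of event_test_thread() ends up as shown below. The task state is re-armed to TASK_INTERRUPTIBLE before kthread_should_stop() is checked again, so a kthread_stop() that lands right after the check still leaves the task runnable and schedule() returns promptly; and because the condition is re-checked in a loop, a spurious wakeup simply goes around again instead of ending the wait early.

        /* post-patch shape of the wait loop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;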
@@ -382,6 +382,8 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,

/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);

ftrace_enabled = 1;
tracing_start();

/* we should only have one item */
@@ -679,6 +681,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)

/* check the trace buffer */
ret = trace_test_buffer(&tr->trace_buffer, &count);

ftrace_enabled = 1;
trace->reset(tr);
tracing_start();

@@ -1025,6 +1029,12 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
#endif

#ifdef CONFIG_SCHED_TRACER

struct wakeup_test_data {
struct completion is_ready;
int go;
};

static int trace_wakeup_test_thread(void *data)
{
/* Make this a -deadline thread */
@@ -1034,51 +1044,56 @@ static int trace_wakeup_test_thread(void *data)
.sched_deadline = 10000000ULL,
.sched_period = 10000000ULL
};
struct completion *x = data;
struct wakeup_test_data *x = data;

sched_setattr(current, &attr);

/* Make it know we have a new prio */
complete(x);
complete(&x->is_ready);

/* now go to sleep and let the test wake us up */
set_current_state(TASK_INTERRUPTIBLE);
schedule();
while (!x->go) {
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}

complete(x);
complete(&x->is_ready);

set_current_state(TASK_INTERRUPTIBLE);

/* we are awake, now wait to disappear */
while (!kthread_should_stop()) {
/*
* This will likely be the system top priority
* task, do short sleeps to let others run.
*/
msleep(100);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
}

__set_current_state(TASK_RUNNING);

return 0;
}

int
trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
{
unsigned long save_max = tr->max_latency;
struct task_struct *p;
struct completion is_ready;
struct wakeup_test_data data;
unsigned long count;
int ret;

init_completion(&is_ready);
memset(&data, 0, sizeof(data));

init_completion(&data.is_ready);

/* create a -deadline thread */
p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test");
p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test");
if (IS_ERR(p)) {
printk(KERN_CONT "Failed to create ftrace wakeup test thread ");
return -1;
}

/* make sure the thread is running at -deadline policy */
wait_for_completion(&is_ready);
wait_for_completion(&data.is_ready);

/* start the tracing */
ret = tracer_init(trace, tr);
@@ -1099,18 +1114,20 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
msleep(100);
}

init_completion(&is_ready);
init_completion(&data.is_ready);

data.go = 1;
/* memory barrier is in the wake_up_process() */

wake_up_process(p);

/* Wait for the task to wake up */
wait_for_completion(&is_ready);
wait_for_completion(&data.is_ready);

/* stop the tracing. */
tracing_stop();
/* check both trace buffers */
ret = trace_test_buffer(&tr->trace_buffer, NULL);
printk("ret = %d\n", ret);
if (!ret)
ret = trace_test_buffer(&tr->max_buffer, &count);

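Editor's note: the change above replaces a bare completion with a small struct carrying both a completion and a go flag, so the test thread announces readiness and then sleeps in a loop until it is explicitly told to go, rather than treating its first wakeup as the signal. A userspace analogue of that handshake, with invented names (this is not the kernel completion API), could look like:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct wakeup_demo {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        bool            ready;  /* plays the role of data->is_ready */
        bool            go;     /* plays the role of data->go */
    };

    static struct wakeup_demo demo = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
    };

    static void *worker(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&demo.lock);
        demo.ready = true;                  /* like complete(&x->is_ready) */
        pthread_cond_broadcast(&demo.cond);
        while (!demo.go)                    /* like while (!x->go) { schedule(); ... } */
            pthread_cond_wait(&demo.cond, &demo.lock);
        pthread_mutex_unlock(&demo.lock);
        printf("worker: woken on purpose, not by accident\n");
        return NULL;
    }

    int main(void)
    {
        pthread_t t;
        pthread_create(&t, NULL, worker, NULL);

        pthread_mutex_lock(&demo.lock);
        while (!demo.ready)                 /* like wait_for_completion(&data.is_ready) */
            pthread_cond_wait(&demo.cond, &demo.lock);
        demo.go = true;                     /* like data.go = 1; wake_up_process(p) */
        pthread_cond_broadcast(&demo.cond);
        pthread_mutex_unlock(&demo.lock);

        pthread_join(t, NULL);
        return 0;
    }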
@@ -13,7 +13,6 @@
#include <linux/sysctl.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/magic.h>

#include <asm/setup.h>

@@ -171,8 +170,7 @@ check_stack(unsigned long ip, unsigned long *stack)
i++;
}

if ((current != &init_task &&
*(end_of_stack(current)) != STACK_END_MAGIC)) {
if (task_stack_end_corrupted(current)) {
print_max_stack();
BUG();
}
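Editor's note: task_stack_end_corrupted() wraps the open-coded STACK_END_MAGIC comparison being deleted above: a known canary word sits at the far end of the task stack and any change to it is treated as evidence of an overflow. A tiny userspace illustration of the same canary idea (invented names, an ordinary struct instead of a real task stack):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define DEMO_STACK_WORDS 256
    #define DEMO_END_MAGIC   0x57AC6E9DUL   /* the value the kernel happens to use */

    struct demo_task_stack {
        unsigned long words[DEMO_STACK_WORDS];  /* words[0] is the "end" canary */
    };

    static void demo_stack_init(struct demo_task_stack *s)
    {
        s->words[0] = DEMO_END_MAGIC;           /* plant the canary */
    }

    static bool demo_stack_end_corrupted(const struct demo_task_stack *s)
    {
        return s->words[0] != DEMO_END_MAGIC;   /* like task_stack_end_corrupted() */
    }

    int main(void)
    {
        struct demo_task_stack s;

        demo_stack_init(&s);
        printf("fresh stack corrupted? %d\n", demo_stack_end_corrupted(&s));

        /* simulate an overflow clobbering the end of the stack area */
        memset(s.words, 0, sizeof(unsigned long) * 4);
        printf("after overwrite:       %d\n", demo_stack_end_corrupted(&s));
        return 0;
    }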
@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
int size;

syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;

/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
int syscall_nr;

syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;

/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
@@ -425,7 +425,7 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
return;
mutex_lock(&syscall_trace_lock);
tr->sys_refcount_enter--;
rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
if (!tr->sys_refcount_enter)
unregister_trace_sys_enter(ftrace_syscall_enter, tr);
mutex_unlock(&syscall_trace_lock);
@@ -463,7 +463,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
return;
mutex_lock(&syscall_trace_lock);
tr->sys_refcount_exit--;
rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
if (!tr->sys_refcount_exit)
unregister_trace_sys_exit(ftrace_syscall_exit, tr);
mutex_unlock(&syscall_trace_lock);
@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
int size;

syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;
if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
return;
@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
int size;

syscall_nr = trace_get_syscall_nr(current, regs);
if (syscall_nr < 0)
if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
return;
if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
return;
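Editor's note: the four syscall hunks above all add the same guard: a syscall number coming back from trace_get_syscall_nr() is validated against both ends of the table before it is used as an index, since a negative value or one at or above NR_syscalls would otherwise index out of bounds. The pattern in isolation, with invented names:

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_NR_SYSCALLS 64   /* stand-in for NR_syscalls */

    /* Reject both negative numbers and numbers past the end of the table;
     * checking only "< 0", as the old code did, still allows an
     * out-of-bounds access for oversized values. */
    static bool demo_syscall_nr_ok(int syscall_nr)
    {
        return syscall_nr >= 0 && syscall_nr < DEMO_NR_SYSCALLS;
    }

    int main(void)
    {
        int samples[] = { -1, 3, 1000 };

        for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%4d -> %s\n", samples[i],
                   demo_syscall_nr_ok(samples[i]) ? "ok" : "rejected");
        return 0;
    }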