Merge branch 'sched/rt' into sched/core, to pick up -rt changes
Pick up the first couple of patches working towards PREEMPT_RT.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -144,7 +144,10 @@ void __put_cred(struct cred *cred)
 	BUG_ON(cred == current->cred);
 	BUG_ON(cred == current->real_cred);
 
-	call_rcu(&cred->rcu, put_cred_rcu);
+	if (cred->non_rcu)
+		put_cred_rcu(&cred->rcu);
+	else
+		call_rcu(&cred->rcu, put_cred_rcu);
 }
 EXPORT_SYMBOL(__put_cred);
 
@@ -261,6 +264,7 @@ struct cred *prepare_creds(void)
 	old = task->cred;
 	memcpy(new, old, sizeof(struct cred));
 
+	new->non_rcu = 0;
 	atomic_set(&new->usage, 1);
 	set_cred_subscribers(new, 0);
 	get_group_info(new->group_info);
@@ -544,7 +548,19 @@ const struct cred *override_creds(const struct cred *new)
 
 	validate_creds(old);
 	validate_creds(new);
-	get_cred(new);
+
+	/*
+	 * NOTE! This uses 'get_new_cred()' rather than 'get_cred()'.
+	 *
+	 * That means that we do not clear the 'non_rcu' flag, since
+	 * we are only installing the cred into the thread-synchronous
+	 * '->cred' pointer, not the '->real_cred' pointer that is
+	 * visible to other threads under RCU.
+	 *
+	 * Also note that we did validate_creds() manually, not depending
+	 * on the validation in 'get_cred()'.
+	 */
+	get_new_cred((struct cred *)new);
 	alter_cred_subscribers(new, 1);
 	rcu_assign_pointer(current->cred, new);
 	alter_cred_subscribers(old, -1);
@@ -681,6 +697,7 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
 	validate_creds(old);
 
 	*new = *old;
+	new->non_rcu = 0;
 	atomic_set(&new->usage, 1);
 	set_cred_subscribers(new, 0);
 	get_uid(new->user);
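The new 'non_rcu' flag in the cred.c hunks above lets __put_cred() free a credential immediately when, as the comment in override_creds() explains, it was only ever installed in the owning task's thread-synchronous '->cred' pointer and thus has no RCU readers to wait for. A rough, generic sketch of the same pattern (hypothetical names, not the kernel's cred code):

/* Hypothetical illustration of the fast path above; not kernel code. */
struct obj {
	struct rcu_head rcu;
	bool never_published;			/* analogous to cred->non_rcu */
};

static void obj_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct obj, rcu));
}

static void obj_put(struct obj *obj)
{
	if (obj->never_published)
		obj_free_rcu(&obj->rcu);		/* no RCU readers possible: free now */
	else
		call_rcu(&obj->rcu, obj_free_rcu);	/* readers may exist: wait a grace period */
}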
@@ -11271,7 +11271,7 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err_unlock;
 	}
 
-	perf_install_in_context(ctx, event, cpu);
+	perf_install_in_context(ctx, event, event->cpu);
 	perf_unpin_context(ctx);
 	mutex_unlock(&ctx->mutex);
 
@@ -1906,7 +1906,7 @@ int register_kretprobe(struct kretprobe *rp)
 
 	/* Pre-allocate memory for max kretprobe instances */
 	if (rp->maxactive <= 0) {
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
 #else
 		rp->maxactive = num_possible_cpus();
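This kretprobe hunk is the first of many below that switch preemption checks from CONFIG_PREEMPT to CONFIG_PREEMPTION. The new symbol is meant to cover any preemptible kernel (it is selected by CONFIG_PREEMPT and, later in the PREEMPT_RT work, by CONFIG_PREEMPT_RT), so code that must adapt to preemption keys off CONFIG_PREEMPTION while CONFIG_PREEMPT keeps naming only the traditional preemption model. A minimal sketch of the intended call-site pattern, with a made-up helper for illustration:

/* Sketch only: scale_for_preemption() is a hypothetical example, not kernel code.
 * On any preemptible kernel (CONFIG_PREEMPT or CONFIG_PREEMPT_RT, both of which
 * imply CONFIG_PREEMPTION) the first branch is taken.
 */
static unsigned int scale_for_preemption(unsigned int cpus)
{
	if (IS_ENABLED(CONFIG_PREEMPTION))
		return max_t(unsigned int, 10, 2 * cpus);	/* preemptible kernel */
	return cpus;						/* voluntary or no preemption */
}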
@@ -448,7 +448,7 @@ static void print_lockdep_off(const char *bug_msg)
 
 unsigned long nr_stack_trace_entries;
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the graph_lock.
@@ -491,7 +491,7 @@ unsigned int max_lockdep_depth;
 DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
 #endif
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Locking printouts:
  */
@@ -2969,7 +2969,7 @@ static void check_chain_key(struct task_struct *curr)
 #endif
 }
 
-#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
+#ifdef CONFIG_PROVE_LOCKING
 static int mark_lock(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit new_bit);
 
@@ -3608,7 +3608,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	return ret;
 }
 
-#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#else /* CONFIG_PROVE_LOCKING */
 
 static inline int
 mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
@@ -3627,7 +3627,7 @@ static inline int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
-#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
+#endif /* CONFIG_PROVE_LOCKING */
 
 /*
  * Initialize a lock instance's lock-class mapping info:
@@ -4321,8 +4321,7 @@ static void __lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie cookie)
  */
 static void check_flags(unsigned long flags)
 {
-#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \
-    defined(CONFIG_TRACE_IRQFLAGS)
+#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
 	if (!debug_locks)
 		return;
 
@@ -200,7 +200,6 @@ static void lockdep_stats_debug_show(struct seq_file *m)
 
 static int lockdep_stats_show(struct seq_file *m, void *v)
 {
-	struct lock_class *class;
 	unsigned long nr_unused = 0, nr_uncategorized = 0,
 		      nr_irq_safe = 0, nr_irq_unsafe = 0,
 		      nr_softirq_safe = 0, nr_softirq_unsafe = 0,
@@ -211,6 +210,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		      sum_forward_deps = 0;
 
+#ifdef CONFIG_PROVE_LOCKING
+	struct lock_class *class;
 
 	list_for_each_entry(class, &all_lock_classes, lock_entry) {
 
 		if (class->usage_mask == 0)
@@ -908,6 +908,10 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 
 	might_sleep();
 
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
+
 	ww = container_of(lock, struct ww_mutex, base);
 	if (use_ww_ctx && ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
@@ -1379,8 +1383,13 @@ __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock,
  */
 int __sched mutex_trylock(struct mutex *lock)
 {
-	bool locked = __mutex_trylock(lock);
+	bool locked;
+
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
+#endif
 
+	locked = __mutex_trylock(lock);
 	if (locked)
 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
 
@@ -666,7 +666,11 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
 	preempt_disable();
 	rcu_read_lock();
 	owner = rwsem_owner_flags(sem, &flags);
-	if ((flags & nonspinnable) || (owner && !owner_on_cpu(owner)))
+	/*
+	 * Don't check the read-owner as the entry may be stale.
+	 */
+	if ((flags & nonspinnable) ||
+	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
 	rcu_read_unlock();
 	preempt_enable();
@@ -1000,6 +1004,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, int state)
 	atomic_long_add(-RWSEM_READER_BIAS, &sem->count);
 	adjustment = 0;
 	if (rwsem_optimistic_spin(sem, false)) {
+		/* rwsem_optimistic_spin() implies ACQUIRE on success */
 		/*
 		 * Wake up other readers in the wait list if the front
 		 * waiter is a reader.
@@ -1014,6 +1019,7 @@
 		}
 		return sem;
 	} else if (rwsem_reader_phase_trylock(sem, waiter.last_rowner)) {
+		/* rwsem_reader_phase_trylock() implies ACQUIRE on success */
 		return sem;
 	}
 
@@ -1032,6 +1038,8 @@ queue:
 	 */
 	if (adjustment && !(atomic_long_read(&sem->count) &
 	     (RWSEM_WRITER_MASK | RWSEM_FLAG_HANDOFF))) {
+		/* Provide lock ACQUIRE */
+		smp_acquire__after_ctrl_dep();
 		raw_spin_unlock_irq(&sem->wait_lock);
 		rwsem_set_reader_owned(sem);
 		lockevent_inc(rwsem_rlock_fast);
@@ -1065,15 +1073,18 @@ queue:
 	wake_up_q(&wake_q);
 
 	/* wait to be given the lock */
-	while (true) {
+	for (;;) {
 		set_current_state(state);
-		if (!waiter.task)
+		if (!smp_load_acquire(&waiter.task)) {
+			/* Matches rwsem_mark_wake()'s smp_store_release(). */
 			break;
+		}
 		if (signal_pending_state(state, current)) {
 			raw_spin_lock_irq(&sem->wait_lock);
 			if (waiter.task)
 				goto out_nolock;
 			raw_spin_unlock_irq(&sem->wait_lock);
+			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
 			break;
 		}
 		schedule();
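The reworked sleep loop above relies on acquire/release ordering: as its comments note, the smp_load_acquire() on waiter.task pairs with an smp_store_release() in rwsem_mark_wake(), so everything the waker wrote before handing over the lock is visible once the waiter sees waiter.task cleared. A generic, hypothetical sketch of that handoff pattern (illustrative names, not the rwsem implementation):

/* Hypothetical release/acquire handoff sketch; not the rwsem code. */
struct handoff {
	struct task_struct *waiting_task;	/* non-NULL while still waiting */
	int payload;				/* state published by the waker */
};

static void waker_side(struct handoff *h)
{
	h->payload = 42;				/* writes before the release ... */
	smp_store_release(&h->waiting_task, NULL);	/* ... are published with the grant */
}

static void waiter_side(struct handoff *h)
{
	while (smp_load_acquire(&h->waiting_task))	/* pairs with the release above */
		cpu_relax();				/* the real code sleeps via schedule() */
	/* h->payload is guaranteed to read 42 here */
}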
@@ -1083,6 +1094,7 @@ queue:
 	__set_current_state(TASK_RUNNING);
 	lockevent_inc(rwsem_rlock);
 	return sem;
+
 out_nolock:
 	list_del(&waiter.list);
 	if (list_empty(&sem->wait_list)) {
@@ -1123,8 +1135,10 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 
 	/* do optimistic spinning and steal lock if possible */
 	if (rwsem_can_spin_on_owner(sem, RWSEM_WR_NONSPINNABLE) &&
-	    rwsem_optimistic_spin(sem, true))
+	    rwsem_optimistic_spin(sem, true)) {
+		/* rwsem_optimistic_spin() implies ACQUIRE on success */
 		return sem;
+	}
 
 	/*
	 * Disable reader optimistic spinning for this rwsem after
@@ -1184,9 +1198,11 @@
 wait:
 	/* wait until we successfully acquire the lock */
 	set_current_state(state);
-	while (true) {
-		if (rwsem_try_write_lock(sem, wstate))
+	for (;;) {
+		if (rwsem_try_write_lock(sem, wstate)) {
+			/* rwsem_try_write_lock() implies ACQUIRE on success */
 			break;
+		}
 
 		raw_spin_unlock_irq(&sem->wait_lock);
 
@@ -7,7 +7,7 @@ menu "RCU Subsystem"
 
 config TREE_RCU
 	bool
-	default y if !PREEMPT && SMP
+	default y if !PREEMPTION && SMP
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP system with hundreds or
@@ -16,7 +16,7 @@ config TREE_RCU
 
 config PREEMPT_RCU
 	bool
-	default y if PREEMPT
+	default y if PREEMPTION
 	help
 	  This option selects the RCU implementation that is
 	  designed for very large SMP systems with hundreds or
@@ -28,7 +28,7 @@ config PREEMPT_RCU
 
 config TINY_RCU
 	bool
-	default y if !PREEMPT && !SMP
+	default y if !PREEMPTION && !SMP
 	help
 	  This option selects the RCU implementation that is
 	  designed for UP systems from which real-time response
@@ -70,7 +70,7 @@ config TREE_SRCU
 	  This option selects the full-fledged version of SRCU.
 
 config TASKS_RCU
-	def_bool PREEMPT
+	def_bool PREEMPTION
 	select SRCU
 	help
 	  This option enables a task-based RCU implementation that uses
@@ -1881,7 +1881,7 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	struct rcu_node *rnp_p;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
-	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
+	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPTION)) ||
 	    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
 	    rnp->qsmask != 0) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -2205,7 +2205,7 @@ static void force_qs_rnp(int (*f)(struct rcu_data *rdp))
 		mask = 0;
 		raw_spin_lock_irqsave_rcu_node(rnp, flags);
 		if (rnp->qsmask == 0) {
-			if (!IS_ENABLED(CONFIG_PREEMPT) ||
+			if (!IS_ENABLED(CONFIG_PREEMPTION) ||
 			    rcu_preempt_blocked_readers_cgp(rnp)) {
 				/*
 				 * No point in scanning bits because they
@@ -2622,7 +2622,7 @@ static int rcu_blocking_is_gp(void)
 {
 	int ret;
 
-	if (IS_ENABLED(CONFIG_PREEMPT))
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		return rcu_scheduler_active == RCU_SCHEDULER_INACTIVE;
 	might_sleep(); /* Check for RCU read-side critical section. */
 	preempt_disable();
@@ -163,7 +163,7 @@ static void rcu_iw_handler(struct irq_work *iwp)
 //
 // Printing RCU CPU stall warnings
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 /*
  * Dump detailed information for all tasks blocking the current RCU
@@ -215,7 +215,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
 	return ndetected;
 }
 
-#else /* #ifdef CONFIG_PREEMPT */
+#else /* #ifdef CONFIG_PREEMPTION */
 
 /*
  * Because preemptible RCU does not exist, we never have to check for
@@ -233,7 +233,7 @@
 {
 	return 0;
 }
-#endif /* #else #ifdef CONFIG_PREEMPT */
+#endif /* #else #ifdef CONFIG_PREEMPTION */
 
 /*
  * Dump stacks of all tasks running on stalled CPUs. First try using
@@ -3752,7 +3752,7 @@ static inline void sched_tick_start(int cpu) { }
 static inline void sched_tick_stop(int cpu) { }
 #endif
 
-#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
+#if defined(CONFIG_PREEMPTION) && (defined(CONFIG_DEBUG_PREEMPT) || \
				defined(CONFIG_TRACE_PREEMPT_TOGGLE))
 /*
  * If the value passed in is equal to the current preempt count
@@ -3958,7 +3958,7 @@ restart:
  * task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
  * called on the nearest possible occasion:
  *
- *  - If the kernel is preemptible (CONFIG_PREEMPT=y):
+ *  - If the kernel is preemptible (CONFIG_PREEMPTION=y):
  *
  *    - in syscall or exception context, at the next outmost
  *      preempt_enable(). (this might be as soon as the wake_up()'s
@@ -3967,7 +3967,7 @@ restart:
  *    - in IRQ context, return from interrupt-handler to
  *      preemptible context
  *
- *  - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
+ *  - If the kernel is not preemptible (CONFIG_PREEMPTION is not set)
  *    then at the next:
  *
  *     - cond_resched() call
@@ -4209,7 +4209,7 @@ static void __sched notrace preempt_schedule_common(void)
 	} while (need_resched());
 }
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 /*
  * this is the entry point to schedule() from in-kernel preemption
  * off of preempt_enable. Kernel preemptions off return from interrupt
@@ -4281,7 +4281,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * this is the entry point to schedule() from kernel preemption
@@ -5610,7 +5610,7 @@ SYSCALL_DEFINE0(sched_yield)
 	return 0;
 }
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 int __sched _cond_resched(void)
 {
 	if (should_resched(0)) {
@@ -5627,7 +5627,7 @@ EXPORT_SYMBOL(_cond_resched);
  * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
  * call schedule, and on return reacquire the lock.
  *
- * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
+ * This works OK both with and without CONFIG_PREEMPTION. We do strange low-level
  * operations here to prevent schedule() from being called twice (once via
  * spin_unlock(), once by hand).
  */
@@ -7396,7 +7396,7 @@ static int detach_tasks(struct lb_env *env)
 		detached++;
 		env->imbalance -= load;
 
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 		/*
 		 * NEWIDLE balancing is a source of latency, so preemptible
 		 * kernels will stop after the first task is detached to minimize
@@ -1958,7 +1958,7 @@ unsigned long arch_scale_freq_capacity(int cpu)
 #endif
 
 #ifdef CONFIG_SMP
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
 
 static inline void double_rq_lock(struct rq *rq1, struct rq *rq2);
 
@@ -2010,7 +2010,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
 	return ret;
 }
 
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
 
 /*
  * double_lock_balance - lock the busiest runqueue, this_rq is locked already.
@@ -146,7 +146,7 @@ config FUNCTION_TRACER
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
 	select GLOB
-	select TASKS_RCU if PREEMPT
+	select TASKS_RCU if PREEMPTION
 	help
 	  Enable the kernel to trace every kernel function. This is done
 	  by using a compiler feature to insert a small, 5-byte No-Operation
@@ -179,7 +179,7 @@ config TRACE_PREEMPT_TOGGLE
 config PREEMPTIRQ_EVENTS
 	bool "Enable trace events for preempt and irq disable/enable"
 	select TRACE_IRQFLAGS
-	select TRACE_PREEMPT_TOGGLE if PREEMPT
+	select TRACE_PREEMPT_TOGGLE if PREEMPTION
 	select GENERIC_TRACER
 	default n
 	help
@@ -214,7 +214,7 @@ config PREEMPT_TRACER
 	bool "Preemption-off Latency Tracer"
 	default n
 	depends on !ARCH_USES_GETTIMEOFFSET
-	depends on PREEMPT
+	depends on PREEMPTION
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
 	select RING_BUFFER_ALLOW_SWAP
@@ -2814,7 +2814,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		 * synchornize_rcu_tasks() will wait for those tasks to
 		 * execute and either schedule voluntarily or enter user space.
 		 */
-		if (IS_ENABLED(CONFIG_PREEMPT))
+		if (IS_ENABLED(CONFIG_PREEMPTION))
 			synchronize_rcu_tasks();
 
  free_ops:
@@ -267,7 +267,7 @@ static void ring_buffer_producer(void)
 		if (consumer && !(cnt % wakeup_interval))
 			wake_up_process(consumer);
 
-#ifndef CONFIG_PREEMPT
+#ifndef CONFIG_PREEMPTION
 		/*
 		 * If we are a non preempt kernel, the 10 second run will
 		 * stop everything while it runs. Instead, we will call
@@ -255,12 +255,12 @@ void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
 	local_save_flags(fbuffer->flags);
 	fbuffer->pc = preempt_count();
 	/*
-	 * If CONFIG_PREEMPT is enabled, then the tracepoint itself disables
+	 * If CONFIG_PREEMPTION is enabled, then the tracepoint itself disables
	 * preemption (adding one to the preempt_count). Since we are
	 * interested in the preempt_count at the time the tracepoint was
	 * hit, we need to subtract one to offset the increment.
	 */
-	if (IS_ENABLED(CONFIG_PREEMPT))
+	if (IS_ENABLED(CONFIG_PREEMPTION))
 		fbuffer->pc--;
 	fbuffer->trace_file = trace_file;
 
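The comment in the last hunk carries the reasoning: on a preemptible kernel the tracepoint wrapper has already done a preempt_disable(), so preempt_count() reads one higher than it was at the instrumented call site, and the recorded value is adjusted back down. A small, hedged illustration of that adjustment (standalone sketch, not the trace code):

/* Hypothetical sketch of the adjustment described above; not the trace code. */
static unsigned int recorded_preempt_count(void)
{
	unsigned int pc = preempt_count();	/* includes the tracepoint's own disable */

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pc--;				/* report the count seen at the call site */
	return pc;
}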