Merge branch 'linus' into irq/core
Pull in upstream fixes before applying conflicting changes
@@ -1021,8 +1021,7 @@ static int audit_log_single_execve_arg(struct audit_context *context,
	 * for strings that are too long, we should not have created
	 * any.
	 */
	if (unlikely((len == 0) || len > MAX_ARG_STRLEN - 1)) {
		WARN_ON(1);
	if (WARN_ON_ONCE(len < 0 || len > MAX_ARG_STRLEN - 1)) {
		send_sig(SIGKILL, current, 0);
		return -1;
	}
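The hunk above folds the length check into the warning itself and downgrades it to warn-once. A minimal userspace sketch of that idiom, assuming nothing beyond standard C; warn_on_once() below is a stand-in for the kernel macro, not the real thing:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for WARN_ON_ONCE(): report the first failure only,
 * but always return whether the condition was true. */
static bool warn_on_once(bool cond, const char *what)
{
	static bool warned;

	if (cond && !warned) {
		warned = true;
		fprintf(stderr, "WARNING (once): %s\n", what);
	}
	return cond;
}

static int check_len(long len, long max)
{
	/* The test and the diagnostic live in one place and fire together. */
	if (warn_on_once(len < 0 || len > max - 1, "bad execve arg length"))
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n", check_len(5, 4096), check_len(-1, 4096), check_len(5000, 4096));
	return 0;
}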
kernel/cpu.c
@@ -21,6 +21,7 @@
#include <linux/suspend.h>
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <trace/events/power.h>

#include "smpboot.h"

@@ -391,14 +392,20 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone. Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

@@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

@@ -519,6 +529,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);

	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));
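The kernel/cpu.c changes bracket the CPU teardown with irq_lock_sparse()/irq_unlock_sparse(), and the error path has to drop the lock as well. A small userspace sketch of that bracketing pattern, with a pthread mutex standing in for the sparse-irq lock; take_down() and the other names are invented for the illustration:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t sparse_lock = PTHREAD_MUTEX_INITIALIZER;

static int take_down(int cpu)
{
	return cpu == 3 ? -1 : 0;	/* pretend cpu 3 refuses to die */
}

static int cpu_down_sketch(int cpu)
{
	int err;

	pthread_mutex_lock(&sparse_lock);		/* irq_lock_sparse() analogue */
	err = take_down(cpu);
	if (err) {
		pthread_mutex_unlock(&sparse_lock);	/* error path must unlock too */
		return err;
	}
	/* ... reorganize state while alloc/free stays blocked ... */
	pthread_mutex_unlock(&sparse_lock);		/* irq_unlock_sparse() analogue */
	return 0;
}

int main(void)
{
	printf("%d %d\n", cpu_down_sketch(1), cpu_down_sketch(3));
	return 0;
}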
@@ -287,6 +287,11 @@ static void set_max_threads(unsigned int max_threads_suggested)
	max_threads = clamp_t(u64, threads, MIN_THREADS, MAX_THREADS);
}

#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
/* Initialized by the architecture: */
int arch_task_struct_size __read_mostly;
#endif

void __init fork_init(void)
{
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR

@@ -295,7 +300,7 @@ void __init fork_init(void)
#endif
	/* create a slab on which task_structs can be allocated */
	task_struct_cachep =
		kmem_cache_create("task_struct", sizeof(struct task_struct),
		kmem_cache_create("task_struct", arch_task_struct_size,
			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif
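The fork.c hunk sizes the task_struct slab from arch_task_struct_size instead of sizeof(struct task_struct), so an architecture can ask for a larger allocation. A rough userspace analogue of sizing an allocation from a variable the architecture fills in; calloc stands in for the slab allocator and all names are made up:

#include <stdio.h>
#include <stdlib.h>

struct task { long state; long flags; };

/* Default matches the generic struct; an "arch" may bump it at init time. */
static size_t arch_task_size = sizeof(struct task);

static struct task *alloc_task(void)
{
	return calloc(1, arch_task_size);
}

int main(void)
{
	arch_task_size = sizeof(struct task) + 64;	/* arch tacks on extra state */
	struct task *t = alloc_task();

	printf("allocated %zu bytes at %p\n", arch_task_size, (void *)t);
	free(t);
	return 0;
}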
@@ -75,12 +75,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);

#ifdef CONFIG_SPARSE_IRQ
static inline void irq_mark_irq(unsigned int irq) { }
extern void irq_lock_sparse(void);
extern void irq_unlock_sparse(void);
#else
extern void irq_mark_irq(unsigned int irq);
static inline void irq_lock_sparse(void) { }
static inline void irq_unlock_sparse(void) { }
#endif

extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
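The internals.h hunk uses the usual kernel pattern of pairing real externs with empty static inline stubs, so callers compile unchanged whether the option is on or off. A compilable sketch of that pattern; FEATURE_SPARSE, feature_lock() and feature_unlock() are invented for the example:

#include <stdio.h>

#ifdef FEATURE_SPARSE
extern void feature_lock(void);
extern void feature_unlock(void);
#else
static inline void feature_lock(void) { }	/* no-op when compiled out */
static inline void feature_unlock(void) { }
#endif

int main(void)
{
	feature_lock();		/* callers never need their own #ifdefs */
	puts("critical section");
	feature_unlock();
	return 0;
}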
@@ -77,13 +77,21 @@ void check_irq_resend(struct irq_desc *desc)
	unsigned int irq = irq_desc_get_irq(desc);

	/*
	 * If the interrupt has a parent irq and runs
	 * in the thread context of the parent irq,
	 * retrigger the parent.
	 * If the interrupt is running in the thread
	 * context of the parent irq we need to be
	 * careful, because we cannot trigger it
	 * directly.
	 */
	if (desc->parent_irq &&
	    irq_settings_is_nested_thread(desc))
	if (irq_settings_is_nested_thread(desc)) {
		/*
		 * If the parent_irq is valid, we
		 * retrigger the parent, otherwise we
		 * do nothing.
		 */
		if (!desc->parent_irq)
			return;
		irq = desc->parent_irq;
	}
	/* Set it pending and activate the softirq: */
	set_bit(irq, irqs_resend);
	tasklet_schedule(&resend_tasklet);
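The resend hunk changes the nested-thread case so that a missing parent_irq means "do nothing" rather than falling through, and otherwise redirects the resend to the parent line. A toy sketch of that control flow; struct line and its fields are illustrative, not the kernel's irq_desc:

#include <stdbool.h>
#include <stdio.h>

struct line {
	int nr;
	int parent_nr;		/* 0 means "no parent" */
	bool nested_thread;
};

static void resend(struct line *d)
{
	int nr = d->nr;

	if (d->nested_thread) {
		if (!d->parent_nr)
			return;			/* nothing we can safely retrigger */
		nr = d->parent_nr;		/* retrigger the parent instead */
	}
	printf("resend irq %d\n", nr);
}

int main(void)
{
	struct line a = { 10, 0, false }, b = { 11, 0, true }, c = { 12, 4, true };

	resend(&a);
	resend(&b);
	resend(&c);
	return 0;
}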
@@ -3557,6 +3557,7 @@ static int load_module(struct load_info *info, const char __user *uargs,
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	mod_tree_remove(mod);
	wake_up_all(&module_wq);
	/* Wait for RCU-sched synchronizing before releasing mod->list. */
	synchronize_sched();
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
{
	struct resource *p;
	resource_size_t end = start + size - 1;
	int flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	const char *name = "System RAM";
	int ret = -1;

	read_lock(&resource_lock);
	for (p = iomem_resource.child; p ; p = p->sibling) {
		if (end < p->start)
		if (p->end < start)
			continue;

		if (p->start <= start && end <= p->end) {

@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
			ret = 1;
			break;
		}
		if (p->end < start)
		if (end < p->start)
			break; /* not found */
	}
	read_unlock(&resource_lock);
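The resource.c fix swaps the two range tests: entries that end below the region are skipped, and the walk stops only once an entry starts beyond the region's end. A standalone sketch of that corrected walk over an address-sorted table; the table and helper below are illustrative, not the kernel's resource tree:

#include <stdio.h>
#include <string.h>

struct res { unsigned long start, end; const char *name; };

static int region_is_ram_sketch(const struct res *tbl, int n,
				unsigned long start, unsigned long size)
{
	unsigned long end = start + size - 1;
	int i;

	for (i = 0; i < n; i++) {
		if (tbl[i].end < start)
			continue;	/* ends below the region: keep walking */
		if (tbl[i].start <= start && end <= tbl[i].end)
			return strcmp(tbl[i].name, "System RAM") == 0;
		if (end < tbl[i].start)
			break;		/* sorted by address: nothing further can match */
	}
	return -1;			/* partial overlap or not found */
}

int main(void)
{
	const struct res tbl[] = {
		{ 0x1000, 0x1fff, "PCI Bus"    },
		{ 0x2000, 0x7fff, "System RAM" },
	};

	printf("%d %d\n",
	       region_is_ram_sketch(tbl, 2, 0x3000, 0x1000),
	       region_is_ram_sketch(tbl, 2, 0x9000, 0x1000));
	return 0;
}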
@@ -3683,7 +3683,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
	cfs_rq->throttled = 1;
	cfs_rq->throttled_clock = rq_clock(rq);
	raw_spin_lock(&cfs_b->lock);
	empty = list_empty(&cfs_rq->throttled_list);
	empty = list_empty(&cfs_b->throttled_cfs_rq);

	/*
	 * Add to the _head_ of the list, so that an already-started
@@ -120,19 +120,25 @@ static int __clockevents_switch_state(struct clock_event_device *dev,
		/* The clockevent device is getting replaced. Shut it down. */

	case CLOCK_EVT_STATE_SHUTDOWN:
		return dev->set_state_shutdown(dev);
		if (dev->set_state_shutdown)
			return dev->set_state_shutdown(dev);
		return 0;

	case CLOCK_EVT_STATE_PERIODIC:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
			return -ENOSYS;
		return dev->set_state_periodic(dev);
		if (dev->set_state_periodic)
			return dev->set_state_periodic(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT:
		/* Core internal bug */
		if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
			return -ENOSYS;
		return dev->set_state_oneshot(dev);
		if (dev->set_state_oneshot)
			return dev->set_state_oneshot(dev);
		return 0;

	case CLOCK_EVT_STATE_ONESHOT_STOPPED:
		/* Core internal bug */

@@ -471,18 +477,6 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		return 0;

	/* New state-specific callbacks */
	if (!dev->set_state_shutdown)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
	    !dev->set_state_periodic)
		return -EINVAL;

	if ((dev->features & CLOCK_EVT_FEAT_ONESHOT) &&
	    !dev->set_state_oneshot)
		return -EINVAL;

	return 0;
}
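The clockevents hunks make the per-state callbacks optional: call them when the driver provides them, otherwise treat the transition as a successful no-op, and drop the corresponding sanity checks. A small sketch of that optional-callback dispatch; struct dev and switch_state() are invented for the example:

#include <stdio.h>

struct dev {
	int (*set_shutdown)(struct dev *);
	int (*set_periodic)(struct dev *);
};

/* Call the hook if the driver provides one, otherwise succeed as a no-op. */
static int switch_state(struct dev *d, int periodic)
{
	if (periodic) {
		if (d->set_periodic)
			return d->set_periodic(d);
		return 0;
	}
	if (d->set_shutdown)
		return d->set_shutdown(d);
	return 0;
}

static int shut(struct dev *d)
{
	(void)d;
	puts("shutdown callback ran");
	return 0;
}

int main(void)
{
	struct dev full = { shut, NULL };	/* provides shutdown only */
	struct dev bare = { NULL, NULL };	/* provides nothing */

	printf("%d %d\n", switch_state(&full, 0), switch_state(&bare, 1));
	return 0;
}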
@@ -159,7 +159,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;
	unsigned long flags;
	int ret;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

@@ -221,13 +221,14 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
		 * If we kept the cpu in the broadcast mask,
		 * tell the caller to leave the per cpu device
		 * in shutdown state. The periodic interrupt
		 * is delivered by the broadcast device.
		 * is delivered by the broadcast device, if
		 * the broadcast device exists and is not
		 * hrtimer based.
		 */
		ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
		if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER))
			ret = cpumask_test_cpu(cpu, tick_broadcast_mask);
		break;
	default:
		/* Nothing to do */
		ret = 0;
		break;
	}
	}
@@ -265,8 +266,22 @@ static bool tick_do_broadcast(struct cpumask *mask)
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		struct clock_event_device *bc = tick_broadcast_device.evtdev;

		cpumask_clear_cpu(cpu, mask);
		local = true;
		/*
		 * We only run the local handler, if the broadcast
		 * device is not hrtimer based. Otherwise we run into
		 * a hrtimer recursion.
		 *
		 * local timer_interrupt()
		 *   local_handler()
		 *     expire_hrtimers()
		 *       bc_handler()
		 *         local_handler()
		 *           expire_hrtimers()
		 */
		local = !(bc->features & CLOCK_EVT_FEAT_HRTIMER);
	}

	if (!cpumask_empty(mask)) {

@@ -301,6 +316,13 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
	bool bc_local;

	raw_spin_lock(&tick_broadcast_lock);

	/* Handle spurious interrupts gracefully */
	if (clockevent_state_shutdown(tick_broadcast_device.evtdev)) {
		raw_spin_unlock(&tick_broadcast_lock);
		return;
	}

	bc_local = tick_do_periodic_broadcast();

	if (clockevent_state_oneshot(dev)) {
@@ -359,8 +381,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
	case TICK_BROADCAST_ON:
		cpumask_set_cpu(cpu, tick_broadcast_on);
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
			/*
			 * Only shutdown the cpu local device, if:
			 *
			 * - the broadcast device exists
			 * - the broadcast device is not a hrtimer based one
			 * - the broadcast device is in periodic mode to
			 *   avoid a hickup during switch to oneshot mode
			 */
			if (bc && !(bc->features & CLOCK_EVT_FEAT_HRTIMER) &&
			    tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		break;

@@ -379,14 +409,16 @@ void tick_broadcast_control(enum tick_broadcast_mode mode)
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	if (bc) {
		if (cpumask_empty(tick_broadcast_mask)) {
			if (!bc_stopped)
				clockevents_shutdown(bc);
		} else if (bc_stopped) {
			if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
				tick_broadcast_start_periodic(bc);
			else
				tick_broadcast_setup_oneshot(bc);
		}
	}
	raw_spin_unlock(&tick_broadcast_lock);
}
@@ -662,71 +694,82 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
	clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
}

/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state: The target state (enter/exit)
 *
 * The system enters/leaves a state, where affected devices might stop
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	int cpu, ret = 0;
	ktime_t now;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 * If there is no broadcast device, tell the caller not to go
	 * into deep idle.
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return 0;
	if (!tick_broadcast_device.evtdev)
		return -EBUSY;

	/*
	 * We are called with preemtion disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	td = this_cpu_ptr(&tick_cpu_device);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;
	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;

	raw_spin_lock(&tick_broadcast_lock);
	bc = tick_broadcast_device.evtdev;
	cpu = smp_processor_id();

	if (state == TICK_BROADCAST_ENTER) {
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we do not add
		 * the CPU to the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			goto out;

		/*
		 * If the broadcast device is in periodic mode, we
		 * return.
		 */
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
			/* If it is a hrtimer based broadcast, return busy */
			if (bc->features & CLOCK_EVT_FEAT_HRTIMER)
				ret = -EBUSY;
			goto out;
		}

		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));

			/* Conditionally shut down the local timer. */
			broadcast_shutdown_local(bc, dev);

			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourself in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 * woken by the IPI right away; we return
			 * busy, so the CPU does not try to go deep
			 * idle.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
			if (cpumask_test_cpu(cpu, tick_broadcast_force_mask)) {
				ret = -EBUSY;
			} else if (dev->next_event.tv64 < bc->next_event.tv64) {
				tick_broadcast_set_event(bc, cpu, dev->next_event);
				/*
				 * In case of hrtimer broadcasts the
				 * programming might have moved the
				 * timer to this cpu. If yes, remove
				 * us from the broadcast mask and
				 * return busy.
				 */
				ret = broadcast_needs_cpu(bc, cpu);
				if (ret) {
					cpumask_clear_cpu(cpu,
						tick_broadcast_oneshot_mask);
				}
			}
		}
		/*
		 * If the current CPU owns the hrtimer broadcast
		 * mechanism, it cannot go deep idle and we remove the
		 * CPU from the broadcast mask. We don't have to go
		 * through the EXIT path as the local timer is not
		 * shutdown.
		 */
		ret = broadcast_needs_cpu(bc, cpu);
		if (ret)
			cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
@@ -796,7 +839,6 @@ out:
	raw_spin_unlock(&tick_broadcast_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

/*
 * Reset the one shot broadcast for a cpu
@@ -938,6 +980,16 @@ bool tick_broadcast_oneshot_available(void)
	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#else
int __tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	if (!bc || (bc->features & CLOCK_EVT_FEAT_HRTIMER))
		return -EBUSY;

	return 0;
}
#endif

void __init tick_broadcast_init(void)
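One piece of the reworked enter path above: a cpu going idle reports busy when it sits in the force mask (the wakeup IPI is imminent anyway), and only reprograms the broadcast device when its own next event is earlier than what is already programmed. A toy model of that decision; struct evt and enter_idle() are illustrative, and -1 stands in for -EBUSY:

#include <stdbool.h>
#include <stdio.h>

struct evt { long long next_event; };

static int enter_idle(struct evt *local, struct evt *bc, bool in_force_mask)
{
	if (in_force_mask)
		return -1;	/* wakeup is imminent: refuse deep idle */
	if (local->next_event < bc->next_event) {
		bc->next_event = local->next_event;	/* pull the broadcast event in */
		printf("broadcast reprogrammed to %lld\n", bc->next_event);
	}
	return 0;
}

int main(void)
{
	struct evt local = { 100 }, bc = { 250 };

	printf("%d\n", enter_idle(&local, &bc, false));
	printf("%d\n", enter_idle(&local, &bc, true));
	return 0;
}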
@@ -343,6 +343,28 @@ out_bc:
	tick_install_broadcast_device(newdev);
}

/**
 * tick_broadcast_oneshot_control - Enter/exit broadcast oneshot mode
 * @state: The target state (enter/exit)
 *
 * The system enters/leaves a state, where affected devices might stop
 * Returns 0 on success, -EBUSY if the cpu is used to broadcast wakeups.
 *
 * Called with interrupts disabled, so clockevents_lock is not
 * required here because the local clock event device cannot go away
 * under us.
 */
int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);

	if (!(td->evtdev->features & CLOCK_EVT_FEAT_C3STOP))
		return 0;

	return __tick_broadcast_oneshot_control(state);
}
EXPORT_SYMBOL_GPL(tick_broadcast_oneshot_control);

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Transfer the do_timer job away from a dying cpu.
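The tick-common.c side of the split keeps only the cheap local test: if the cpu-local clockevent keeps ticking in deep idle (no C3STOP-style constraint) there is nothing to do, otherwise the core routine is called. A sketch of that wrapper-plus-core split; every name below is a stand-in:

#include <stdbool.h>
#include <stdio.h>

/* stands in for __tick_broadcast_oneshot_control() */
static int core_oneshot_control(bool enter)
{
	printf("core handles %s\n", enter ? "enter" : "exit");
	return 0;
}

/* stands in for the tick_broadcast_oneshot_control() wrapper */
static int oneshot_control(bool enter, bool timer_stops_in_deep_idle)
{
	if (!timer_stops_in_deep_idle)
		return 0;			/* fast path: local timer keeps ticking */
	return core_oneshot_control(enter);	/* slow path: broadcast bookkeeping */
}

int main(void)
{
	printf("%d\n", oneshot_control(true, false));
	printf("%d\n", oneshot_control(true, true));
	return 0;
}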
@@ -71,4 +71,14 @@ extern void tick_cancel_sched_timer(int cpu);
static inline void tick_cancel_sched_timer(int cpu) { }
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
extern int __tick_broadcast_oneshot_control(enum tick_broadcast_state state);
#else
static inline int
__tick_broadcast_oneshot_control(enum tick_broadcast_state state)
{
	return -EBUSY;
}
#endif

#endif
@@ -98,6 +98,13 @@ struct ftrace_pid {
	struct pid *pid;
};

static bool ftrace_pids_enabled(void)
{
	return !list_empty(&ftrace_pids);
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.

@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
	if (!test_tsk_trace_trace(current))
		return;

	ftrace_pid_function(ip, parent_ip, op, regs);
}

static void set_ftrace_pid_function(ftrace_func_t func)
{
	/* do not set ftrace_pid_function to itself! */
	if (func != ftrace_pid_func)
		ftrace_pid_function = func;
	op->saved_func(ip, parent_ip, op, regs);
}

/**

@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
	ftrace_pid_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)

@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	bool enabled = ftrace_pids_enabled();
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = enabled ? ftrace_pid_func :
				op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
	.local_hash.filter_hash = EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags = FTRACE_OPS_FL_RECURSION_SAFE |
		 FTRACE_OPS_FL_INITIALIZED,
		 FTRACE_OPS_FL_INITIALIZED |
		 FTRACE_OPS_FL_PID,
};

/*

@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)

static struct ftrace_ops global_ops = {
	.func = ftrace_stub,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE |
		 FTRACE_OPS_FL_INITIALIZED |
		 FTRACE_OPS_FL_PID,
};

static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
		if (WARN_ON(tr->ops->func != ftrace_stub))
			printk("ftrace ops had %pS for function\n",
			       tr->ops->func);
		/* Only the top level instance does pid tracing */
		if (!list_empty(&ftrace_pids)) {
			set_ftrace_pid_function(func);
			func = ftrace_pid_func;
		}
	}
	tr->ops->func = func;
	tr->ops->private = tr;

@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&ftrace_lock);

	if (list_empty(&ftrace_pids) && (!*pos))
	if (!ftrace_pids_enabled() && (!*pos))
		return (void *) 1;

	return seq_list_start(&ftrace_pids, *pos);

@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
	.func = ftrace_stub,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE |
		 FTRACE_OPS_FL_INITIALIZED |
		 FTRACE_OPS_FL_PID |
		 FTRACE_OPS_FL_STUB,
#ifdef FTRACE_GRAPH_TRAMP_ADDR
	.trampoline = FTRACE_GRAPH_TRAMP_ADDR,
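The ftrace hunks introduce saved_func: the original handler is remembered at registration, swapped for the pid-filtering wrapper while pid filtering is enabled, and restored afterwards or at unregistration. A toy version of that save/swap/restore pattern; struct ops here is not the kernel's ftrace_ops, and the filter just pretends:

#include <stdbool.h>
#include <stdio.h>

struct ops {
	void (*func)(const char *);
	void (*saved_func)(const char *);
};

static void real_handler(const char *ip)
{
	printf("trace %s\n", ip);
}

/* Pretend pid filter: only callers whose name starts with 'a' pass. */
static void pid_filter(const char *ip)
{
	if (ip[0] == 'a')
		real_handler(ip);
}

static void set_filtering(struct ops *op, bool on)
{
	/* swap in the wrapper, or restore what was saved at registration */
	op->func = on ? pid_filter : op->saved_func;
}

int main(void)
{
	struct ops op = { real_handler, real_handler };	/* "register": save the original */

	op.func("anything");
	set_filtering(&op, true);
	op.func("allowed");
	op.func("blocked");
	set_filtering(&op, false);
	op.func("back to normal");
	return 0;
}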
@@ -444,6 +444,7 @@ enum {

	TRACE_CONTROL_BIT,

	TRACE_BRANCH_BIT,
/*
 * Abuse of the trace_recursion.
 * As we need a way to maintain state if we are tracing the function
@@ -36,9 +36,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
	struct trace_branch *entry;
	struct ring_buffer *buffer;
	unsigned long flags;
	int cpu, pc;
	int pc;
	const char *p;

	if (current->trace_recursion & TRACE_BRANCH_BIT)
		return;

	/*
	 * I would love to save just the ftrace_likely_data pointer, but
	 * this code can also be used by modules. Ugly things can happen

@@ -49,10 +52,10 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
	if (unlikely(!tr))
		return;

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	if (atomic_inc_return(&data->disabled) != 1)
	raw_local_irq_save(flags);
	current->trace_recursion |= TRACE_BRANCH_BIT;
	data = this_cpu_ptr(tr->trace_buffer.data);
	if (atomic_read(&data->disabled))
		goto out;

	pc = preempt_count();

@@ -81,8 +84,8 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
	__buffer_unlock_commit(buffer, event);

 out:
	atomic_dec(&data->disabled);
	local_irq_restore(flags);
	current->trace_recursion &= ~TRACE_BRANCH_BIT;
	raw_local_irq_restore(flags);
}

static inline
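The trace_branch.c change replaces the per-cpu disabled-counter dance with a per-task recursion bit, so the branch probe cannot re-enter itself. A userspace sketch of that guard using a thread-local flag; probe() and traced_helper() are invented for the example:

#include <stdio.h>

static __thread int in_probe;	/* per-thread recursion flag */

static void probe(const char *what);

/* A helper the probe itself uses, which is also instrumented. */
static void traced_helper(void)
{
	probe("helper");	/* would recurse forever without the guard */
}

static void probe(const char *what)
{
	if (in_probe)
		return;		/* already inside the probe on this thread */
	in_probe = 1;

	printf("probe: %s\n", what);
	traced_helper();

	in_probe = 0;
}

int main(void)
{
	probe("top level");
	return 0;
}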