Merge branch 'linus' into sched/urgent

Conflicts:
	kernel/sched_idletask.c

Merge reason: resolve the conflicts, pick up latest changes.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
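The merged diff below is dominated by the core-kernel lock-type rework picked up from Linus' tree: locks that must keep spinning even under preempt-rt (task pi_lock, hrtimer cpu_base->lock, irq_desc->lock, sparse_irq_lock, perf ctx->lock, ...) change their type from spinlock_t to raw_spinlock_t, and every call site gains a raw_ prefix. As a rough, hedged sketch of that pattern only — the structure and function names here are made up for illustration and are not from this commit:

/*
 * Hedged sketch, not code from this commit: the conversion pattern the
 * merged hunks apply.  A lock that must remain a true spinning lock is
 * declared raw_spinlock_t instead of spinlock_t, and each call site
 * switches to the matching raw_spin_*() helper.
 */
#include <linux/spinlock.h>

struct example_ctx {			/* hypothetical example structure */
	raw_spinlock_t	lock;		/* was: spinlock_t lock; */
	int		count;
};

static void example_init(struct example_ctx *ctx)
{
	raw_spin_lock_init(&ctx->lock);	/* was: spin_lock_init(&ctx->lock); */
	ctx->count = 0;
}

static void example_inc(struct example_ctx *ctx)
{
	unsigned long flags;

	/* was: spin_lock_irqsave() / spin_unlock_irqrestore() */
	raw_spin_lock_irqsave(&ctx->lock, flags);
	ctx->count++;
	raw_spin_unlock_irqrestore(&ctx->lock, flags);
}

Plain spinlock_t users such as the futex hash-bucket lock (hb->lock) are left untouched, which is why only some of the lock calls in the hunks below change.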
@@ -536,7 +536,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 	do_div(elapsed, AHZ);
 	ac.ac_btime = get_seconds() - elapsed;
 	/* we really need to bite the bullet and change layout */
-	current_uid_gid(&ac.ac_uid, &ac.ac_gid);
+	ac.ac_uid = orig_cred->uid;
+	ac.ac_gid = orig_cred->gid;
 #if ACCT_VERSION==2
 	ac.ac_ahz = AHZ;
 #endif
@@ -933,7 +933,7 @@ NORET_TYPE void do_exit(long code)
 	 * an exiting task cleaning up the robust pi futexes.
 	 */
 	smp_mb();
-	spin_unlock_wait(&tsk->pi_lock);
+	raw_spin_unlock_wait(&tsk->pi_lock);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -939,9 +939,9 @@ SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
 
 static void rt_mutex_init_task(struct task_struct *p)
 {
-	spin_lock_init(&p->pi_lock);
+	raw_spin_lock_init(&p->pi_lock);
 #ifdef CONFIG_RT_MUTEXES
-	plist_head_init(&p->pi_waiters, &p->pi_lock);
+	plist_head_init_raw(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
 #endif
 }
@@ -403,9 +403,9 @@ static void free_pi_state(struct futex_pi_state *pi_state)
 	 * and has cleaned up the pi_state already
 	 */
 	if (pi_state->owner) {
-		spin_lock_irq(&pi_state->owner->pi_lock);
+		raw_spin_lock_irq(&pi_state->owner->pi_lock);
 		list_del_init(&pi_state->list);
-		spin_unlock_irq(&pi_state->owner->pi_lock);
+		raw_spin_unlock_irq(&pi_state->owner->pi_lock);
 
 		rt_mutex_proxy_unlock(&pi_state->pi_mutex, pi_state->owner);
 	}
@@ -470,18 +470,18 @@ void exit_pi_state_list(struct task_struct *curr)
 	 * pi_state_list anymore, but we have to be careful
 	 * versus waiters unqueueing themselves:
 	 */
-	spin_lock_irq(&curr->pi_lock);
+	raw_spin_lock_irq(&curr->pi_lock);
 	while (!list_empty(head)) {
 
 		next = head->next;
 		pi_state = list_entry(next, struct futex_pi_state, list);
 		key = pi_state->key;
 		hb = hash_futex(&key);
-		spin_unlock_irq(&curr->pi_lock);
+		raw_spin_unlock_irq(&curr->pi_lock);
 
 		spin_lock(&hb->lock);
 
-		spin_lock_irq(&curr->pi_lock);
+		raw_spin_lock_irq(&curr->pi_lock);
 		/*
 		 * We dropped the pi-lock, so re-check whether this
 		 * task still owns the PI-state:
@@ -495,15 +495,15 @@ void exit_pi_state_list(struct task_struct *curr)
 		WARN_ON(list_empty(&pi_state->list));
 		list_del_init(&pi_state->list);
 		pi_state->owner = NULL;
-		spin_unlock_irq(&curr->pi_lock);
+		raw_spin_unlock_irq(&curr->pi_lock);
 
 		rt_mutex_unlock(&pi_state->pi_mutex);
 
 		spin_unlock(&hb->lock);
 
-		spin_lock_irq(&curr->pi_lock);
+		raw_spin_lock_irq(&curr->pi_lock);
 	}
-	spin_unlock_irq(&curr->pi_lock);
+	raw_spin_unlock_irq(&curr->pi_lock);
 }
 
 static int
@@ -558,7 +558,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
|
||||
* change of the task flags, we do this protected by
|
||||
* p->pi_lock:
|
||||
*/
|
||||
spin_lock_irq(&p->pi_lock);
|
||||
raw_spin_lock_irq(&p->pi_lock);
|
||||
if (unlikely(p->flags & PF_EXITING)) {
|
||||
/*
|
||||
* The task is on the way out. When PF_EXITPIDONE is
|
||||
@@ -567,7 +567,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
|
||||
*/
|
||||
int ret = (p->flags & PF_EXITPIDONE) ? -ESRCH : -EAGAIN;
|
||||
|
||||
spin_unlock_irq(&p->pi_lock);
|
||||
raw_spin_unlock_irq(&p->pi_lock);
|
||||
put_task_struct(p);
|
||||
return ret;
|
||||
}
|
||||
@@ -586,7 +586,7 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
|
||||
WARN_ON(!list_empty(&pi_state->list));
|
||||
list_add(&pi_state->list, &p->pi_state_list);
|
||||
pi_state->owner = p;
|
||||
spin_unlock_irq(&p->pi_lock);
|
||||
raw_spin_unlock_irq(&p->pi_lock);
|
||||
|
||||
put_task_struct(p);
|
||||
|
||||
@@ -760,7 +760,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
|
||||
if (!pi_state)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(&pi_state->pi_mutex.wait_lock);
|
||||
raw_spin_lock(&pi_state->pi_mutex.wait_lock);
|
||||
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
|
||||
|
||||
/*
|
||||
@@ -789,23 +789,23 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
|
||||
else if (curval != uval)
|
||||
ret = -EINVAL;
|
||||
if (ret) {
|
||||
spin_unlock(&pi_state->pi_mutex.wait_lock);
|
||||
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
spin_lock_irq(&pi_state->owner->pi_lock);
|
||||
raw_spin_lock_irq(&pi_state->owner->pi_lock);
|
||||
WARN_ON(list_empty(&pi_state->list));
|
||||
list_del_init(&pi_state->list);
|
||||
spin_unlock_irq(&pi_state->owner->pi_lock);
|
||||
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
|
||||
|
||||
spin_lock_irq(&new_owner->pi_lock);
|
||||
raw_spin_lock_irq(&new_owner->pi_lock);
|
||||
WARN_ON(!list_empty(&pi_state->list));
|
||||
list_add(&pi_state->list, &new_owner->pi_state_list);
|
||||
pi_state->owner = new_owner;
|
||||
spin_unlock_irq(&new_owner->pi_lock);
|
||||
raw_spin_unlock_irq(&new_owner->pi_lock);
|
||||
|
||||
spin_unlock(&pi_state->pi_mutex.wait_lock);
|
||||
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
|
||||
rt_mutex_unlock(&pi_state->pi_mutex);
|
||||
|
||||
return 0;
|
||||
@@ -1010,7 +1010,7 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
|
||||
plist_add(&q->list, &hb2->chain);
|
||||
q->lock_ptr = &hb2->lock;
|
||||
#ifdef CONFIG_DEBUG_PI_LIST
|
||||
q->list.plist.lock = &hb2->lock;
|
||||
q->list.plist.spinlock = &hb2->lock;
|
||||
#endif
|
||||
}
|
||||
get_futex_key_refs(key2);
|
||||
@@ -1046,7 +1046,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
|
||||
|
||||
q->lock_ptr = &hb->lock;
|
||||
#ifdef CONFIG_DEBUG_PI_LIST
|
||||
q->list.plist.lock = &hb->lock;
|
||||
q->list.plist.spinlock = &hb->lock;
|
||||
#endif
|
||||
|
||||
wake_up_state(q->task, TASK_NORMAL);
|
||||
@@ -1394,7 +1394,7 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb)
|
||||
|
||||
plist_node_init(&q->list, prio);
|
||||
#ifdef CONFIG_DEBUG_PI_LIST
|
||||
q->list.plist.lock = &hb->lock;
|
||||
q->list.plist.spinlock = &hb->lock;
|
||||
#endif
|
||||
plist_add(&q->list, &hb->chain);
|
||||
q->task = current;
|
||||
@@ -1529,18 +1529,18 @@ retry:
|
||||
* itself.
|
||||
*/
|
||||
if (pi_state->owner != NULL) {
|
||||
spin_lock_irq(&pi_state->owner->pi_lock);
|
||||
raw_spin_lock_irq(&pi_state->owner->pi_lock);
|
||||
WARN_ON(list_empty(&pi_state->list));
|
||||
list_del_init(&pi_state->list);
|
||||
spin_unlock_irq(&pi_state->owner->pi_lock);
|
||||
raw_spin_unlock_irq(&pi_state->owner->pi_lock);
|
||||
}
|
||||
|
||||
pi_state->owner = newowner;
|
||||
|
||||
spin_lock_irq(&newowner->pi_lock);
|
||||
raw_spin_lock_irq(&newowner->pi_lock);
|
||||
WARN_ON(!list_empty(&pi_state->list));
|
||||
list_add(&pi_state->list, &newowner->pi_state_list);
|
||||
spin_unlock_irq(&newowner->pi_lock);
|
||||
raw_spin_unlock_irq(&newowner->pi_lock);
|
||||
return 0;
|
||||
|
||||
/*
|
||||
|
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
|
||||
for (;;) {
|
||||
base = timer->base;
|
||||
if (likely(base != NULL)) {
|
||||
spin_lock_irqsave(&base->cpu_base->lock, *flags);
|
||||
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
|
||||
if (likely(base == timer->base))
|
||||
return base;
|
||||
/* The timer has migrated to another CPU: */
|
||||
spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
|
||||
raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
|
||||
}
|
||||
cpu_relax();
|
||||
}
|
||||
@@ -208,13 +208,13 @@ again:
|
||||
|
||||
/* See the comment in lock_timer_base() */
|
||||
timer->base = NULL;
|
||||
spin_unlock(&base->cpu_base->lock);
|
||||
spin_lock(&new_base->cpu_base->lock);
|
||||
raw_spin_unlock(&base->cpu_base->lock);
|
||||
raw_spin_lock(&new_base->cpu_base->lock);
|
||||
|
||||
if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
|
||||
cpu = this_cpu;
|
||||
spin_unlock(&new_base->cpu_base->lock);
|
||||
spin_lock(&base->cpu_base->lock);
|
||||
raw_spin_unlock(&new_base->cpu_base->lock);
|
||||
raw_spin_lock(&base->cpu_base->lock);
|
||||
timer->base = base;
|
||||
goto again;
|
||||
}
|
||||
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
|
||||
{
|
||||
struct hrtimer_clock_base *base = timer->base;
|
||||
|
||||
spin_lock_irqsave(&base->cpu_base->lock, *flags);
|
||||
raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
|
||||
|
||||
return base;
|
||||
}
|
||||
@@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg)
|
||||
base = &__get_cpu_var(hrtimer_bases);
|
||||
|
||||
/* Adjust CLOCK_REALTIME offset */
|
||||
spin_lock(&base->lock);
|
||||
raw_spin_lock(&base->lock);
|
||||
base->clock_base[CLOCK_REALTIME].offset =
|
||||
timespec_to_ktime(realtime_offset);
|
||||
|
||||
hrtimer_force_reprogram(base, 0);
|
||||
spin_unlock(&base->lock);
|
||||
raw_spin_unlock(&base->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
|
||||
{
|
||||
if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
|
||||
if (wakeup) {
|
||||
spin_unlock(&base->cpu_base->lock);
|
||||
raw_spin_unlock(&base->cpu_base->lock);
|
||||
raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
spin_lock(&base->cpu_base->lock);
|
||||
raw_spin_lock(&base->cpu_base->lock);
|
||||
} else
|
||||
__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
|
||||
|
||||
@@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
|
||||
static inline
|
||||
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
|
||||
{
|
||||
spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
|
||||
raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
spin_lock_irqsave(&cpu_base->lock, flags);
|
||||
raw_spin_lock_irqsave(&cpu_base->lock, flags);
|
||||
|
||||
if (!hrtimer_hres_active()) {
|
||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
|
||||
@@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&cpu_base->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
|
||||
|
||||
if (mindelta.tv64 < 0)
|
||||
mindelta.tv64 = 0;
|
||||
@@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
|
||||
* they get migrated to another cpu, therefore its safe to unlock
|
||||
* the timer base.
|
||||
*/
|
||||
spin_unlock(&cpu_base->lock);
|
||||
raw_spin_unlock(&cpu_base->lock);
|
||||
trace_hrtimer_expire_entry(timer, now);
|
||||
restart = fn(timer);
|
||||
trace_hrtimer_expire_exit(timer);
|
||||
spin_lock(&cpu_base->lock);
|
||||
raw_spin_lock(&cpu_base->lock);
|
||||
|
||||
/*
|
||||
* Note: We clear the CALLBACK bit after enqueue_hrtimer and
|
||||
@@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
|
||||
retry:
|
||||
expires_next.tv64 = KTIME_MAX;
|
||||
|
||||
spin_lock(&cpu_base->lock);
|
||||
raw_spin_lock(&cpu_base->lock);
|
||||
/*
|
||||
* We set expires_next to KTIME_MAX here with cpu_base->lock
|
||||
* held to prevent that a timer is enqueued in our queue via
|
||||
@@ -1317,7 +1317,7 @@ retry:
|
||||
* against it.
|
||||
*/
|
||||
cpu_base->expires_next = expires_next;
|
||||
spin_unlock(&cpu_base->lock);
|
||||
raw_spin_unlock(&cpu_base->lock);
|
||||
|
||||
/* Reprogramming necessary ? */
|
||||
if (expires_next.tv64 == KTIME_MAX ||
|
||||
@@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void)
|
||||
gettime = 0;
|
||||
}
|
||||
|
||||
spin_lock(&cpu_base->lock);
|
||||
raw_spin_lock(&cpu_base->lock);
|
||||
|
||||
while ((node = base->first)) {
|
||||
struct hrtimer *timer;
|
||||
@@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void)
|
||||
|
||||
__run_hrtimer(timer, &base->softirq_time);
|
||||
}
|
||||
spin_unlock(&cpu_base->lock);
|
||||
raw_spin_unlock(&cpu_base->lock);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
|
||||
struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
|
||||
int i;
|
||||
|
||||
spin_lock_init(&cpu_base->lock);
|
||||
raw_spin_lock_init(&cpu_base->lock);
|
||||
|
||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
|
||||
cpu_base->clock_base[i].cpu_base = cpu_base;
|
||||
@@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
|
||||
* The caller is globally serialized and nobody else
|
||||
* takes two locks at once, deadlock is not possible.
|
||||
*/
|
||||
spin_lock(&new_base->lock);
|
||||
spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
|
||||
raw_spin_lock(&new_base->lock);
|
||||
raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
|
||||
|
||||
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
|
||||
migrate_hrtimer_list(&old_base->clock_base[i],
|
||||
&new_base->clock_base[i]);
|
||||
}
|
||||
|
||||
spin_unlock(&old_base->lock);
|
||||
spin_unlock(&new_base->lock);
|
||||
raw_spin_unlock(&old_base->lock);
|
||||
raw_spin_unlock(&new_base->lock);
|
||||
|
||||
/* Check, if we got expired work to do */
|
||||
__hrtimer_peek_ahead_timers();
|
||||
|
@@ -96,7 +96,7 @@ static int task_bp_pinned(struct task_struct *tsk)
|
||||
|
||||
list = &ctx->event_list;
|
||||
|
||||
spin_lock_irqsave(&ctx->lock, flags);
|
||||
raw_spin_lock_irqsave(&ctx->lock, flags);
|
||||
|
||||
/*
|
||||
* The current breakpoint counter is not included in the list
|
||||
@@ -107,7 +107,7 @@ static int task_bp_pinned(struct task_struct *tsk)
|
||||
count++;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&ctx->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&ctx->lock, flags);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@@ -45,7 +45,7 @@ unsigned long probe_irq_on(void)
|
||||
* flush such a longstanding irq before considering it as spurious.
|
||||
*/
|
||||
for_each_irq_desc_reverse(i, desc) {
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
|
||||
/*
|
||||
* An old-style architecture might still have
|
||||
@@ -61,7 +61,7 @@ unsigned long probe_irq_on(void)
|
||||
desc->chip->set_type(i, IRQ_TYPE_PROBE);
|
||||
desc->chip->startup(i);
|
||||
}
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
||||
/* Wait for longstanding interrupts to trigger. */
|
||||
@@ -73,13 +73,13 @@ unsigned long probe_irq_on(void)
|
||||
* happened in the previous stage, it may have masked itself)
|
||||
*/
|
||||
for_each_irq_desc_reverse(i, desc) {
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
|
||||
desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
|
||||
if (desc->chip->startup(i))
|
||||
desc->status |= IRQ_PENDING;
|
||||
}
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -91,7 +91,7 @@ unsigned long probe_irq_on(void)
|
||||
* Now filter out any obviously spurious interrupts
|
||||
*/
|
||||
for_each_irq_desc(i, desc) {
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
status = desc->status;
|
||||
|
||||
if (status & IRQ_AUTODETECT) {
|
||||
@@ -103,7 +103,7 @@ unsigned long probe_irq_on(void)
|
||||
if (i < 32)
|
||||
mask |= 1 << i;
|
||||
}
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
|
||||
return mask;
|
||||
@@ -129,7 +129,7 @@ unsigned int probe_irq_mask(unsigned long val)
|
||||
int i;
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
status = desc->status;
|
||||
|
||||
if (status & IRQ_AUTODETECT) {
|
||||
@@ -139,7 +139,7 @@ unsigned int probe_irq_mask(unsigned long val)
|
||||
desc->status = status & ~IRQ_AUTODETECT;
|
||||
desc->chip->shutdown(i);
|
||||
}
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
mutex_unlock(&probing_active);
|
||||
|
||||
@@ -171,7 +171,7 @@ int probe_irq_off(unsigned long val)
|
||||
unsigned int status;
|
||||
|
||||
for_each_irq_desc(i, desc) {
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
status = desc->status;
|
||||
|
||||
if (status & IRQ_AUTODETECT) {
|
||||
@@ -183,7 +183,7 @@ int probe_irq_off(unsigned long val)
|
||||
desc->status = status & ~IRQ_AUTODETECT;
|
||||
desc->chip->shutdown(i);
|
||||
}
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
mutex_unlock(&probing_active);
|
||||
|
||||
|
@@ -34,7 +34,7 @@ void dynamic_irq_init(unsigned int irq)
|
||||
}
|
||||
|
||||
/* Ensure we don't have left over values from a previous use of this irq */
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->status = IRQ_DISABLED;
|
||||
desc->chip = &no_irq_chip;
|
||||
desc->handle_irq = handle_bad_irq;
|
||||
@@ -51,7 +51,7 @@ void dynamic_irq_init(unsigned int irq)
|
||||
cpumask_clear(desc->pending_mask);
|
||||
#endif
|
||||
#endif
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -68,9 +68,9 @@ void dynamic_irq_cleanup(unsigned int irq)
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
if (desc->action) {
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
|
||||
irq);
|
||||
return;
|
||||
@@ -82,7 +82,7 @@ void dynamic_irq_cleanup(unsigned int irq)
|
||||
desc->chip = &no_irq_chip;
|
||||
desc->name = NULL;
|
||||
clear_kstat_irqs(desc);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
|
||||
@@ -104,10 +104,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
|
||||
if (!chip)
|
||||
chip = &no_irq_chip;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
irq_chip_set_defaults(chip);
|
||||
desc->chip = chip;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -133,9 +133,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
|
||||
if (type == IRQ_TYPE_NONE)
|
||||
return 0;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
ret = __irq_set_trigger(desc, irq, type);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(set_irq_type);
|
||||
@@ -158,9 +158,9 @@ int set_irq_data(unsigned int irq, void *data)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->handler_data = data;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(set_irq_data);
|
||||
@@ -183,11 +183,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->msi_desc = entry;
|
||||
if (entry)
|
||||
entry->irq = irq;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -214,9 +214,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->chip_data = data;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -241,12 +241,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
|
||||
if (!desc)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
if (nest)
|
||||
desc->status |= IRQ_NESTED_THREAD;
|
||||
else
|
||||
desc->status &= ~IRQ_NESTED_THREAD;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(set_irq_nested_thread);
|
||||
|
||||
@@ -343,7 +343,7 @@ void handle_nested_irq(unsigned int irq)
|
||||
|
||||
might_sleep();
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
|
||||
kstat_incr_irqs_this_cpu(irq, desc);
|
||||
|
||||
@@ -352,17 +352,17 @@ void handle_nested_irq(unsigned int irq)
|
||||
goto out_unlock;
|
||||
|
||||
desc->status |= IRQ_INPROGRESS;
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
|
||||
action_ret = action->thread_fn(action->irq, action->dev_id);
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, action_ret);
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
desc->status &= ~IRQ_INPROGRESS;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_nested_irq);
|
||||
|
||||
@@ -384,7 +384,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
|
||||
struct irqaction *action;
|
||||
irqreturn_t action_ret;
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
if (unlikely(desc->status & IRQ_INPROGRESS))
|
||||
goto out_unlock;
|
||||
@@ -396,16 +396,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
|
||||
goto out_unlock;
|
||||
|
||||
desc->status |= IRQ_INPROGRESS;
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
action_ret = handle_IRQ_event(irq, action);
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, action_ret);
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
desc->status &= ~IRQ_INPROGRESS;
|
||||
out_unlock:
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -424,7 +424,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
|
||||
struct irqaction *action;
|
||||
irqreturn_t action_ret;
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
mask_ack_irq(desc, irq);
|
||||
|
||||
if (unlikely(desc->status & IRQ_INPROGRESS))
|
||||
@@ -441,13 +441,13 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
|
||||
goto out_unlock;
|
||||
|
||||
desc->status |= IRQ_INPROGRESS;
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
action_ret = handle_IRQ_event(irq, action);
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, action_ret);
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
desc->status &= ~IRQ_INPROGRESS;
|
||||
|
||||
if (unlikely(desc->status & IRQ_ONESHOT))
|
||||
@@ -455,7 +455,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
|
||||
else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
|
||||
desc->chip->unmask(irq);
|
||||
out_unlock:
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(handle_level_irq);
|
||||
|
||||
@@ -475,7 +475,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
|
||||
struct irqaction *action;
|
||||
irqreturn_t action_ret;
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
if (unlikely(desc->status & IRQ_INPROGRESS))
|
||||
goto out;
|
||||
@@ -497,18 +497,18 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
|
||||
|
||||
desc->status |= IRQ_INPROGRESS;
|
||||
desc->status &= ~IRQ_PENDING;
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
action_ret = handle_IRQ_event(irq, action);
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, action_ret);
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
desc->status &= ~IRQ_INPROGRESS;
|
||||
out:
|
||||
desc->chip->eoi(irq);
|
||||
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -530,7 +530,7 @@ out:
|
||||
void
|
||||
handle_edge_irq(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
|
||||
|
||||
@@ -576,17 +576,17 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
|
||||
}
|
||||
|
||||
desc->status &= ~IRQ_PENDING;
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
action_ret = handle_IRQ_event(irq, action);
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, action_ret);
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
|
||||
} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
|
||||
|
||||
desc->status &= ~IRQ_INPROGRESS;
|
||||
out_unlock:
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -643,7 +643,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
|
||||
}
|
||||
|
||||
chip_bus_lock(irq, desc);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
|
||||
/* Uninstall? */
|
||||
if (handle == handle_bad_irq) {
|
||||
@@ -661,7 +661,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
|
||||
desc->depth = 0;
|
||||
desc->chip->startup(irq);
|
||||
}
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
chip_bus_sync_unlock(irq, desc);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__set_irq_handler);
|
||||
@@ -692,9 +692,9 @@ void __init set_irq_noprobe(unsigned int irq)
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->status |= IRQ_NOPROBE;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
void __init set_irq_probe(unsigned int irq)
|
||||
@@ -707,7 +707,7 @@ void __init set_irq_probe(unsigned int irq)
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
desc->status &= ~IRQ_NOPROBE;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
@@ -80,7 +80,7 @@ static struct irq_desc irq_desc_init = {
|
||||
.chip = &no_irq_chip,
|
||||
.handle_irq = handle_bad_irq,
|
||||
.depth = 1,
|
||||
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
|
||||
};
|
||||
|
||||
void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
|
||||
@@ -108,7 +108,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
|
||||
{
|
||||
memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
|
||||
|
||||
spin_lock_init(&desc->lock);
|
||||
raw_spin_lock_init(&desc->lock);
|
||||
desc->irq = irq;
|
||||
#ifdef CONFIG_SMP
|
||||
desc->node = node;
|
||||
@@ -130,7 +130,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
|
||||
/*
|
||||
* Protect the sparse_irqs:
|
||||
*/
|
||||
DEFINE_SPINLOCK(sparse_irq_lock);
|
||||
DEFINE_RAW_SPINLOCK(sparse_irq_lock);
|
||||
|
||||
struct irq_desc **irq_desc_ptrs __read_mostly;
|
||||
|
||||
@@ -141,7 +141,7 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
|
||||
.chip = &no_irq_chip,
|
||||
.handle_irq = handle_bad_irq,
|
||||
.depth = 1,
|
||||
.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
|
||||
}
|
||||
};
|
||||
|
||||
@@ -212,7 +212,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
|
||||
if (desc)
|
||||
return desc;
|
||||
|
||||
spin_lock_irqsave(&sparse_irq_lock, flags);
|
||||
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
|
||||
|
||||
/* We have to check it to avoid races with another CPU */
|
||||
desc = irq_desc_ptrs[irq];
|
||||
@@ -234,7 +234,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
|
||||
irq_desc_ptrs[irq] = desc;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irqrestore(&sparse_irq_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
|
||||
|
||||
return desc;
|
||||
}
|
||||
@@ -247,7 +247,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
|
||||
.chip = &no_irq_chip,
|
||||
.handle_irq = handle_bad_irq,
|
||||
.depth = 1,
|
||||
.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
|
||||
}
|
||||
};
|
||||
|
||||
@@ -473,7 +473,7 @@ unsigned int __do_IRQ(unsigned int irq)
|
||||
return 1;
|
||||
}
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
if (desc->chip->ack)
|
||||
desc->chip->ack(irq);
|
||||
/*
|
||||
@@ -517,13 +517,13 @@ unsigned int __do_IRQ(unsigned int irq)
|
||||
for (;;) {
|
||||
irqreturn_t action_ret;
|
||||
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
action_ret = handle_IRQ_event(irq, action);
|
||||
if (!noirqdebug)
|
||||
note_interrupt(irq, desc, action_ret);
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
if (likely(!(desc->status & IRQ_PENDING)))
|
||||
break;
|
||||
desc->status &= ~IRQ_PENDING;
|
||||
@@ -536,7 +536,7 @@ out:
|
||||
* disabled while the handler was running.
|
||||
*/
|
||||
desc->chip->end(irq);
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
@@ -18,7 +18,7 @@ extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
|
||||
extern struct lock_class_key irq_desc_lock_class;
|
||||
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
|
||||
extern void clear_kstat_irqs(struct irq_desc *desc);
|
||||
extern spinlock_t sparse_irq_lock;
|
||||
extern raw_spinlock_t sparse_irq_lock;
|
||||
|
||||
#ifdef CONFIG_SPARSE_IRQ
|
||||
/* irq_desc_ptrs allocated at boot time */
|
||||
|
@@ -46,9 +46,9 @@ void synchronize_irq(unsigned int irq)
|
||||
cpu_relax();
|
||||
|
||||
/* Ok, that indicated we're done: double-check carefully. */
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
status = desc->status;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
/* Oops, that failed? */
|
||||
} while (status & IRQ_INPROGRESS);
|
||||
@@ -114,7 +114,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
||||
if (!desc->chip->set_affinity)
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
|
||||
#ifdef CONFIG_GENERIC_PENDING_IRQ
|
||||
if (desc->status & IRQ_MOVE_PCNTXT) {
|
||||
@@ -134,7 +134,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
|
||||
}
|
||||
#endif
|
||||
desc->status |= IRQ_AFFINITY_SET;
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -181,11 +181,11 @@ int irq_select_affinity_usr(unsigned int irq)
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
ret = setup_affinity(irq, desc);
|
||||
if (!ret)
|
||||
irq_set_thread_affinity(desc);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -231,9 +231,9 @@ void disable_irq_nosync(unsigned int irq)
|
||||
return;
|
||||
|
||||
chip_bus_lock(irq, desc);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
__disable_irq(desc, irq, false);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
chip_bus_sync_unlock(irq, desc);
|
||||
}
|
||||
EXPORT_SYMBOL(disable_irq_nosync);
|
||||
@@ -308,9 +308,9 @@ void enable_irq(unsigned int irq)
|
||||
return;
|
||||
|
||||
chip_bus_lock(irq, desc);
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
__enable_irq(desc, irq, false);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
chip_bus_sync_unlock(irq, desc);
|
||||
}
|
||||
EXPORT_SYMBOL(enable_irq);
|
||||
@@ -347,7 +347,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
|
||||
/* wakeup-capable irqs can be shared between drivers that
|
||||
* don't need to have the same sleep mode behaviors.
|
||||
*/
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
if (on) {
|
||||
if (desc->wake_depth++ == 0) {
|
||||
ret = set_irq_wake_real(irq, on);
|
||||
@@ -368,7 +368,7 @@ int set_irq_wake(unsigned int irq, unsigned int on)
|
||||
}
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(set_irq_wake);
|
||||
@@ -484,12 +484,12 @@ static int irq_wait_for_interrupt(struct irqaction *action)
|
||||
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
|
||||
{
|
||||
chip_bus_lock(irq, desc);
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
|
||||
desc->status &= ~IRQ_MASKED;
|
||||
desc->chip->unmask(irq);
|
||||
}
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
chip_bus_sync_unlock(irq, desc);
|
||||
}
|
||||
|
||||
@@ -514,9 +514,9 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
|
||||
return;
|
||||
}
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
cpumask_copy(mask, desc->affinity);
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
|
||||
set_cpus_allowed_ptr(current, mask);
|
||||
free_cpumask_var(mask);
|
||||
@@ -545,7 +545,7 @@ static int irq_thread(void *data)
|
||||
|
||||
atomic_inc(&desc->threads_active);
|
||||
|
||||
spin_lock_irq(&desc->lock);
|
||||
raw_spin_lock_irq(&desc->lock);
|
||||
if (unlikely(desc->status & IRQ_DISABLED)) {
|
||||
/*
|
||||
* CHECKME: We might need a dedicated
|
||||
@@ -555,9 +555,9 @@ static int irq_thread(void *data)
|
||||
* retriggers the interrupt itself --- tglx
|
||||
*/
|
||||
desc->status |= IRQ_PENDING;
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
} else {
|
||||
spin_unlock_irq(&desc->lock);
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
|
||||
action->thread_fn(action->irq, action->dev_id);
|
||||
|
||||
@@ -679,7 +679,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
|
||||
/*
|
||||
* The following block of code has to be executed atomically
|
||||
*/
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
old_ptr = &desc->action;
|
||||
old = *old_ptr;
|
||||
if (old) {
|
||||
@@ -775,7 +775,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
|
||||
__enable_irq(desc, irq, false);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
/*
|
||||
* Strictly no need to wake it up, but hung_task complains
|
||||
@@ -802,7 +802,7 @@ mismatch:
|
||||
ret = -EBUSY;
|
||||
|
||||
out_thread:
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
if (new->thread) {
|
||||
struct task_struct *t = new->thread;
|
||||
|
||||
@@ -844,7 +844,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
|
||||
if (!desc)
|
||||
return NULL;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
|
||||
/*
|
||||
* There can be multiple actions per IRQ descriptor, find the right
|
||||
@@ -856,7 +856,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
|
||||
|
||||
if (!action) {
|
||||
WARN(1, "Trying to free already-free IRQ %d\n", irq);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
return NULL;
|
||||
}
|
||||
@@ -884,7 +884,7 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
|
||||
desc->chip->disable(irq);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
|
||||
unregister_handler_proc(irq, action);
|
||||
|
||||
|
@@ -27,7 +27,7 @@ void move_masked_irq(int irq)
|
||||
if (!desc->chip->set_affinity)
|
||||
return;
|
||||
|
||||
assert_spin_locked(&desc->lock);
|
||||
assert_raw_spin_locked(&desc->lock);
|
||||
|
||||
/*
|
||||
* If there was a valid mask to work with, please
|
||||
|
@@ -42,7 +42,7 @@ static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
|
||||
"for migration.\n", irq);
|
||||
return false;
|
||||
}
|
||||
spin_lock_init(&desc->lock);
|
||||
raw_spin_lock_init(&desc->lock);
|
||||
desc->node = node;
|
||||
lockdep_set_class(&desc->lock, &irq_desc_lock_class);
|
||||
init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
|
||||
@@ -67,7 +67,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
|
||||
|
||||
irq = old_desc->irq;
|
||||
|
||||
spin_lock_irqsave(&sparse_irq_lock, flags);
|
||||
raw_spin_lock_irqsave(&sparse_irq_lock, flags);
|
||||
|
||||
/* We have to check it to avoid races with another CPU */
|
||||
desc = irq_desc_ptrs[irq];
|
||||
@@ -91,7 +91,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
|
||||
}
|
||||
|
||||
irq_desc_ptrs[irq] = desc;
|
||||
spin_unlock_irqrestore(&sparse_irq_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
|
||||
|
||||
/* free the old one */
|
||||
free_one_irq_desc(old_desc, desc);
|
||||
@@ -100,7 +100,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
|
||||
return desc;
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irqrestore(&sparse_irq_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
|
||||
|
||||
return desc;
|
||||
}
|
||||
|
@@ -28,9 +28,9 @@ void suspend_device_irqs(void)
|
||||
for_each_irq_desc(irq, desc) {
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
__disable_irq(desc, irq, true);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
|
||||
for_each_irq_desc(irq, desc)
|
||||
@@ -56,9 +56,9 @@ void resume_device_irqs(void)
|
||||
if (!(desc->status & IRQ_SUSPENDED))
|
||||
continue;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
__enable_irq(desc, irq, true);
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(resume_device_irqs);
|
||||
|
@@ -179,7 +179,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
|
||||
unsigned long flags;
|
||||
int ret = 1;
|
||||
|
||||
spin_lock_irqsave(&desc->lock, flags);
|
||||
raw_spin_lock_irqsave(&desc->lock, flags);
|
||||
for (action = desc->action ; action; action = action->next) {
|
||||
if ((action != new_action) && action->name &&
|
||||
!strcmp(new_action->name, action->name)) {
|
||||
@@ -187,7 +187,7 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&desc->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&desc->lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -28,7 +28,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
|
||||
struct irqaction *action;
|
||||
int ok = 0, work = 0;
|
||||
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
/* Already running on another processor */
|
||||
if (desc->status & IRQ_INPROGRESS) {
|
||||
/*
|
||||
@@ -37,13 +37,13 @@ static int try_one_irq(int irq, struct irq_desc *desc)
|
||||
*/
|
||||
if (desc->action && (desc->action->flags & IRQF_SHARED))
|
||||
desc->status |= IRQ_PENDING;
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
return ok;
|
||||
}
|
||||
/* Honour the normal IRQ locking */
|
||||
desc->status |= IRQ_INPROGRESS;
|
||||
action = desc->action;
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
while (action) {
|
||||
/* Only shared IRQ handlers are safe to call */
|
||||
@@ -56,7 +56,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
|
||||
}
|
||||
local_irq_disable();
|
||||
/* Now clean up the flags */
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
action = desc->action;
|
||||
|
||||
/*
|
||||
@@ -68,9 +68,9 @@ static int try_one_irq(int irq, struct irq_desc *desc)
|
||||
* Perform real IRQ processing for the IRQ we deferred
|
||||
*/
|
||||
work = 1;
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
handle_IRQ_event(irq, action);
|
||||
spin_lock(&desc->lock);
|
||||
raw_spin_lock(&desc->lock);
|
||||
desc->status &= ~IRQ_PENDING;
|
||||
}
|
||||
desc->status &= ~IRQ_INPROGRESS;
|
||||
@@ -80,7 +80,7 @@ static int try_one_irq(int irq, struct irq_desc *desc)
|
||||
*/
|
||||
if (work && desc->chip && desc->chip->end)
|
||||
desc->chip->end(irq);
|
||||
spin_unlock(&desc->lock);
|
||||
raw_spin_unlock(&desc->lock);
|
||||
|
||||
return ok;
|
||||
}
|
||||
|
@@ -73,11 +73,11 @@ module_param(lock_stat, int, 0644);
|
||||
* to use a raw spinlock - we really dont want the spinlock
|
||||
* code to recurse back into the lockdep code...
|
||||
*/
|
||||
static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
|
||||
static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
|
||||
|
||||
static int graph_lock(void)
|
||||
{
|
||||
__raw_spin_lock(&lockdep_lock);
|
||||
arch_spin_lock(&lockdep_lock);
|
||||
/*
|
||||
* Make sure that if another CPU detected a bug while
|
||||
* walking the graph we dont change it (while the other
|
||||
@@ -85,7 +85,7 @@ static int graph_lock(void)
|
||||
* dropped already)
|
||||
*/
|
||||
if (!debug_locks) {
|
||||
__raw_spin_unlock(&lockdep_lock);
|
||||
arch_spin_unlock(&lockdep_lock);
|
||||
return 0;
|
||||
}
|
||||
/* prevent any recursions within lockdep from causing deadlocks */
|
||||
@@ -95,11 +95,11 @@ static int graph_lock(void)
|
||||
|
||||
static inline int graph_unlock(void)
|
||||
{
|
||||
if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
|
||||
if (debug_locks && !arch_spin_is_locked(&lockdep_lock))
|
||||
return DEBUG_LOCKS_WARN_ON(1);
|
||||
|
||||
current->lockdep_recursion--;
|
||||
__raw_spin_unlock(&lockdep_lock);
|
||||
arch_spin_unlock(&lockdep_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -111,7 +111,7 @@ static inline int debug_locks_off_graph_unlock(void)
|
||||
{
|
||||
int ret = debug_locks_off();
|
||||
|
||||
__raw_spin_unlock(&lockdep_lock);
|
||||
arch_spin_unlock(&lockdep_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -140,7 +140,8 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_LOCK_STAT
|
||||
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
|
||||
static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS],
|
||||
cpu_lock_stats);
|
||||
|
||||
static inline u64 lockstat_clock(void)
|
||||
{
|
||||
@@ -198,7 +199,7 @@ struct lock_class_stats lock_stats(struct lock_class *class)
|
||||
memset(&stats, 0, sizeof(struct lock_class_stats));
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct lock_class_stats *pcs =
|
||||
&per_cpu(lock_stats, cpu)[class - lock_classes];
|
||||
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++)
|
||||
stats.contention_point[i] += pcs->contention_point[i];
|
||||
@@ -225,7 +226,7 @@ void clear_lock_stats(struct lock_class *class)
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
struct lock_class_stats *cpu_stats =
|
||||
&per_cpu(lock_stats, cpu)[class - lock_classes];
|
||||
&per_cpu(cpu_lock_stats, cpu)[class - lock_classes];
|
||||
|
||||
memset(cpu_stats, 0, sizeof(struct lock_class_stats));
|
||||
}
|
||||
@@ -235,12 +236,12 @@ void clear_lock_stats(struct lock_class *class)
|
||||
|
||||
static struct lock_class_stats *get_lock_stats(struct lock_class *class)
|
||||
{
|
||||
return &get_cpu_var(lock_stats)[class - lock_classes];
|
||||
return &get_cpu_var(cpu_lock_stats)[class - lock_classes];
|
||||
}
|
||||
|
||||
static void put_lock_stats(struct lock_class_stats *stats)
|
||||
{
|
||||
put_cpu_var(lock_stats);
|
||||
put_cpu_var(cpu_lock_stats);
|
||||
}
|
||||
|
||||
static void lock_release_holdtime(struct held_lock *hlock)
|
||||
@@ -1169,9 +1170,9 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
|
||||
this.class = class;
|
||||
|
||||
local_irq_save(flags);
|
||||
__raw_spin_lock(&lockdep_lock);
|
||||
arch_spin_lock(&lockdep_lock);
|
||||
ret = __lockdep_count_forward_deps(&this);
|
||||
__raw_spin_unlock(&lockdep_lock);
|
||||
arch_spin_unlock(&lockdep_lock);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return ret;
|
||||
@@ -1196,9 +1197,9 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
|
||||
this.class = class;
|
||||
|
||||
local_irq_save(flags);
|
||||
__raw_spin_lock(&lockdep_lock);
|
||||
arch_spin_lock(&lockdep_lock);
|
||||
ret = __lockdep_count_backward_deps(&this);
|
||||
__raw_spin_unlock(&lockdep_lock);
|
||||
arch_spin_unlock(&lockdep_lock);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return ret;
|
||||
|
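The lockdep hunks above apply the companion rename: the old lowest-level lock type raw_spinlock_t becomes arch_spinlock_t and its __raw_spin_*() helpers become arch_spin_*(), which frees the raw_spin_*() names for the new raw_spinlock_t used throughout the rest of this merge. A hedged sketch of that piece, with illustrative names only:

/* Hedged sketch, not code from this commit. */
#include <linux/spinlock.h>

/* was: static raw_spinlock_t example_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; */
static arch_spinlock_t example_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

static void example_critical_section(void)
{
	arch_spin_lock(&example_lock);		/* was: __raw_spin_lock() */
	/* lockdep-style, lockdep-free critical section */
	arch_spin_unlock(&example_lock);	/* was: __raw_spin_unlock() */
}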
kernel/module.c (150 lines changed)
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
|
||||
|
||||
static void *percpu_modalloc(unsigned long size, unsigned long align,
|
||||
const char *name)
|
||||
{
|
||||
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
|
||||
free_percpu(freeme);
|
||||
}
|
||||
|
||||
#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
|
||||
|
||||
/* Number of blocks used and allocated. */
|
||||
static unsigned int pcpu_num_used, pcpu_num_allocated;
|
||||
/* Size of each block. -ve means used. */
|
||||
static int *pcpu_size;
|
||||
|
||||
static int split_block(unsigned int i, unsigned short size)
|
||||
{
|
||||
/* Reallocation required? */
|
||||
if (pcpu_num_used + 1 > pcpu_num_allocated) {
|
||||
int *new;
|
||||
|
||||
new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
|
||||
GFP_KERNEL);
|
||||
if (!new)
|
||||
return 0;
|
||||
|
||||
pcpu_num_allocated *= 2;
|
||||
pcpu_size = new;
|
||||
}
|
||||
|
||||
/* Insert a new subblock */
|
||||
memmove(&pcpu_size[i+1], &pcpu_size[i],
|
||||
sizeof(pcpu_size[0]) * (pcpu_num_used - i));
|
||||
pcpu_num_used++;
|
||||
|
||||
pcpu_size[i+1] -= size;
|
||||
pcpu_size[i] = size;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline unsigned int block_size(int val)
|
||||
{
|
||||
if (val < 0)
|
||||
return -val;
|
||||
return val;
|
||||
}
|
||||
|
||||
static void *percpu_modalloc(unsigned long size, unsigned long align,
|
||||
const char *name)
|
||||
{
|
||||
unsigned long extra;
|
||||
unsigned int i;
|
||||
void *ptr;
|
||||
int cpu;
|
||||
|
||||
if (align > PAGE_SIZE) {
|
||||
printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
|
||||
name, align, PAGE_SIZE);
|
||||
align = PAGE_SIZE;
|
||||
}
|
||||
|
||||
ptr = __per_cpu_start;
|
||||
for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
|
||||
/* Extra for alignment requirement. */
|
||||
extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
|
||||
BUG_ON(i == 0 && extra != 0);
|
||||
|
||||
if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
|
||||
continue;
|
||||
|
||||
/* Transfer extra to previous block. */
|
||||
if (pcpu_size[i-1] < 0)
|
||||
pcpu_size[i-1] -= extra;
|
||||
else
|
||||
pcpu_size[i-1] += extra;
|
||||
pcpu_size[i] -= extra;
|
||||
ptr += extra;
|
||||
|
||||
/* Split block if warranted */
|
||||
if (pcpu_size[i] - size > sizeof(unsigned long))
|
||||
if (!split_block(i, size))
|
||||
return NULL;
|
||||
|
||||
/* add the per-cpu scanning areas */
|
||||
for_each_possible_cpu(cpu)
|
||||
kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
|
||||
GFP_KERNEL);
|
||||
|
||||
/* Mark allocated */
|
||||
pcpu_size[i] = -pcpu_size[i];
|
||||
return ptr;
|
||||
}
|
||||
|
||||
printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
|
||||
size);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void percpu_modfree(void *freeme)
|
||||
{
|
||||
unsigned int i;
|
||||
void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
|
||||
int cpu;
|
||||
|
||||
/* First entry is core kernel percpu data. */
|
||||
for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
|
||||
if (ptr == freeme) {
|
||||
pcpu_size[i] = -pcpu_size[i];
|
||||
goto free;
|
||||
}
|
||||
}
|
||||
BUG();
|
||||
|
||||
free:
|
||||
/* remove the per-cpu scanning areas */
|
||||
for_each_possible_cpu(cpu)
|
||||
kmemleak_free(freeme + per_cpu_offset(cpu));
|
||||
|
||||
/* Merge with previous? */
|
||||
if (pcpu_size[i-1] >= 0) {
|
||||
pcpu_size[i-1] += pcpu_size[i];
|
||||
pcpu_num_used--;
|
||||
memmove(&pcpu_size[i], &pcpu_size[i+1],
|
||||
(pcpu_num_used - i) * sizeof(pcpu_size[0]));
|
||||
i--;
|
||||
}
|
||||
/* Merge with next? */
|
||||
if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
|
||||
pcpu_size[i] += pcpu_size[i+1];
|
||||
pcpu_num_used--;
|
||||
memmove(&pcpu_size[i+1], &pcpu_size[i+2],
|
||||
(pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
|
||||
}
|
||||
}
|
||||
|
||||
static int percpu_modinit(void)
|
||||
{
|
||||
pcpu_num_used = 2;
|
||||
pcpu_num_allocated = 2;
|
||||
pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
|
||||
GFP_KERNEL);
|
||||
/* Static in-kernel percpu data (used). */
|
||||
pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
|
||||
/* Free room. */
|
||||
pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
|
||||
if (pcpu_size[1] < 0) {
|
||||
printk(KERN_ERR "No per-cpu room for modules.\n");
|
||||
pcpu_num_used = 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
__initcall(percpu_modinit);
|
||||
|
||||
#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
|
||||
|
||||
static unsigned int find_pcpusec(Elf_Ehdr *hdr,
|
||||
Elf_Shdr *sechdrs,
|
||||
const char *secstrings)
|
||||
|
@@ -43,13 +43,13 @@ static inline void mutex_clear_owner(struct mutex *lock)
|
||||
\
|
||||
DEBUG_LOCKS_WARN_ON(in_interrupt()); \
|
||||
local_irq_save(flags); \
|
||||
__raw_spin_lock(&(lock)->raw_lock); \
|
||||
arch_spin_lock(&(lock)->rlock.raw_lock);\
|
||||
DEBUG_LOCKS_WARN_ON(l->magic != l); \
|
||||
} while (0)
|
||||
|
||||
#define spin_unlock_mutex(lock, flags) \
|
||||
do { \
|
||||
__raw_spin_unlock(&(lock)->raw_lock); \
|
||||
local_irq_restore(flags); \
|
||||
preempt_check_resched(); \
|
||||
#define spin_unlock_mutex(lock, flags) \
|
||||
do { \
|
||||
arch_spin_unlock(&(lock)->rlock.raw_lock); \
|
||||
local_irq_restore(flags); \
|
||||
preempt_check_resched(); \
|
||||
} while (0)
|
||||
|
@@ -24,6 +24,7 @@
|
||||
#include <linux/err.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#if 0
|
||||
#define DEBUGP printk
|
||||
@@ -122,9 +123,7 @@ static char *next_arg(char *args, char **param, char **val)
|
||||
next = args + i;
|
||||
|
||||
/* Chew up trailing spaces. */
|
||||
while (isspace(*next))
|
||||
next++;
|
||||
return next;
|
||||
return skip_spaces(next);
|
||||
}
|
||||
|
||||
/* Args looks like "foo=bar,bar2 baz=fuz wiz". */
|
||||
@@ -139,8 +138,7 @@ int parse_args(const char *name,
|
||||
DEBUGP("Parsing ARGS: %s\n", args);
|
||||
|
||||
/* Chew leading spaces */
|
||||
while (isspace(*args))
|
||||
args++;
|
||||
args = skip_spaces(args);
|
||||
|
||||
while (*args) {
|
||||
int ret;
|
||||
|
@@ -203,14 +203,14 @@ perf_lock_task_context(struct task_struct *task, unsigned long *flags)
* if so. If we locked the right context, then it
* can't get swapped on us any more.
*/
spin_lock_irqsave(&ctx->lock, *flags);
raw_spin_lock_irqsave(&ctx->lock, *flags);
if (ctx != rcu_dereference(task->perf_event_ctxp)) {
spin_unlock_irqrestore(&ctx->lock, *flags);
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
goto retry;
}
if (!atomic_inc_not_zero(&ctx->refcount)) {
spin_unlock_irqrestore(&ctx->lock, *flags);
raw_spin_unlock_irqrestore(&ctx->lock, *flags);
ctx = NULL;
}
}

@@ -231,7 +231,7 @@ static struct perf_event_context *perf_pin_task_context(struct task_struct *task
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
++ctx->pin_count;
spin_unlock_irqrestore(&ctx->lock, flags);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}

@@ -240,9 +240,9 @@ static void perf_unpin_context(struct perf_event_context *ctx)
{
unsigned long flags;
spin_lock_irqsave(&ctx->lock, flags);
raw_spin_lock_irqsave(&ctx->lock, flags);
--ctx->pin_count;
spin_unlock_irqrestore(&ctx->lock, flags);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
put_ctx(ctx);
}

@@ -427,7 +427,7 @@ static void __perf_event_remove_from_context(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
/*
* Protect the list operation against NMI by disabling the
* events on a global level.

@@ -449,7 +449,7 @@ static void __perf_event_remove_from_context(void *info)
}
perf_enable();
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}

@@ -488,12 +488,12 @@ retry:
task_oncpu_function_call(task, __perf_event_remove_from_context,
event);
spin_lock_irq(&ctx->lock);
raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active we need to retry the smp call.
*/
if (ctx->nr_active && !list_empty(&event->group_entry)) {
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}

@@ -504,7 +504,7 @@ retry:
*/
if (!list_empty(&event->group_entry))
list_del_event(event, ctx);
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
}
/*

@@ -535,7 +535,7 @@ static void __perf_event_disable(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
/*
* If the event is on, turn it off.

@@ -551,7 +551,7 @@ static void __perf_event_disable(void *info)
event->state = PERF_EVENT_STATE_OFF;
}
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}
/*

@@ -584,12 +584,12 @@ void perf_event_disable(struct perf_event *event)
retry:
task_oncpu_function_call(task, __perf_event_disable, event);
spin_lock_irq(&ctx->lock);
raw_spin_lock_irq(&ctx->lock);
/*
* If the event is still active, we need to retry the cross-call.
*/
if (event->state == PERF_EVENT_STATE_ACTIVE) {
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}

@@ -602,7 +602,7 @@ void perf_event_disable(struct perf_event *event)
event->state = PERF_EVENT_STATE_OFF;
}
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
}
static int

@@ -770,7 +770,7 @@ static void __perf_install_in_context(void *info)
cpuctx->task_ctx = ctx;
}
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);

@@ -820,7 +820,7 @@ static void __perf_install_in_context(void *info)
unlock:
perf_enable();
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}
/*

@@ -856,12 +856,12 @@ retry:
task_oncpu_function_call(task, __perf_install_in_context,
event);
spin_lock_irq(&ctx->lock);
raw_spin_lock_irq(&ctx->lock);
/*
* we need to retry the smp call.
*/
if (ctx->is_active && list_empty(&event->group_entry)) {
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
goto retry;
}

@@ -872,7 +872,7 @@ retry:
*/
if (list_empty(&event->group_entry))
add_event_to_ctx(event, ctx);
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
}
/*

@@ -917,7 +917,7 @@ static void __perf_event_enable(void *info)
cpuctx->task_ctx = ctx;
}
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx);

@@ -959,7 +959,7 @@ static void __perf_event_enable(void *info)
}
unlock:
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}
/*

@@ -985,7 +985,7 @@ void perf_event_enable(struct perf_event *event)
return;
}
spin_lock_irq(&ctx->lock);
raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
goto out;

@@ -1000,10 +1000,10 @@ void perf_event_enable(struct perf_event *event)
event->state = PERF_EVENT_STATE_OFF;
retry:
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
task_oncpu_function_call(task, __perf_event_enable, event);
spin_lock_irq(&ctx->lock);
raw_spin_lock_irq(&ctx->lock);
/*
* If the context is active and the event is still off,

@@ -1020,7 +1020,7 @@ void perf_event_enable(struct perf_event *event)
__perf_event_mark_enabled(event, ctx);
out:
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
}
static int perf_event_refresh(struct perf_event *event, int refresh)

@@ -1042,7 +1042,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
{
struct perf_event *event;
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
ctx->is_active = 0;
if (likely(!ctx->nr_events))
goto out;

@@ -1055,7 +1055,7 @@ void __perf_event_sched_out(struct perf_event_context *ctx,
}
perf_enable();
out:
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}
/*

@@ -1193,8 +1193,8 @@ void perf_event_task_sched_out(struct task_struct *task,
* order we take the locks because no other cpu could
* be trying to lock both of these tasks.
*/
spin_lock(&ctx->lock);
spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
raw_spin_lock(&ctx->lock);
raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
if (context_equiv(ctx, next_ctx)) {
/*
* XXX do we need a memory barrier of sorts

@@ -1208,8 +1208,8 @@ void perf_event_task_sched_out(struct task_struct *task,
perf_event_sync_stat(ctx, next_ctx);
}
spin_unlock(&next_ctx->lock);
spin_unlock(&ctx->lock);
raw_spin_unlock(&next_ctx->lock);
raw_spin_unlock(&ctx->lock);
}
rcu_read_unlock();

@@ -1251,7 +1251,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
struct perf_event *event;
int can_add_hw = 1;
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
if (likely(!ctx->nr_events))
goto out;

@@ -1306,7 +1306,7 @@ __perf_event_sched_in(struct perf_event_context *ctx,
}
perf_enable();
out:
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}
/*

@@ -1370,7 +1370,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
struct hw_perf_event *hwc;
u64 interrupts, freq;
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
if (event->state != PERF_EVENT_STATE_ACTIVE)
continue;

@@ -1425,7 +1425,7 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
perf_enable();
}
}
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}
/*

@@ -1438,7 +1438,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
if (!ctx->nr_events)
return;
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
/*
* Rotate the first entry last (works just fine for group events too):
*/

@@ -1449,7 +1449,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
}
perf_enable();
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
}
void perf_event_task_tick(struct task_struct *curr, int cpu)

@@ -1498,7 +1498,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
__perf_event_task_sched_out(ctx);
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
list_for_each_entry(event, &ctx->group_list, group_entry) {
if (!event->attr.enable_on_exec)

@@ -1516,7 +1516,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
if (enabled)
unclone_ctx(ctx);
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
perf_event_task_sched_in(task, smp_processor_id());
out:

@@ -1542,10 +1542,10 @@ static void __perf_event_read(void *info)
if (ctx->task && cpuctx->task_ctx != ctx)
return;
spin_lock(&ctx->lock);
raw_spin_lock(&ctx->lock);
update_context_time(ctx);
update_event_times(event);
spin_unlock(&ctx->lock);
raw_spin_unlock(&ctx->lock);
event->pmu->read(event);
}

@@ -1563,10 +1563,10 @@ static u64 perf_event_read(struct perf_event *event)
struct perf_event_context *ctx = event->ctx;
unsigned long flags;
spin_lock_irqsave(&ctx->lock, flags);
raw_spin_lock_irqsave(&ctx->lock, flags);
update_context_time(ctx);
update_event_times(event);
spin_unlock_irqrestore(&ctx->lock, flags);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return atomic64_read(&event->count);

@@ -1579,7 +1579,7 @@ static void
__perf_event_init_context(struct perf_event_context *ctx,
struct task_struct *task)
{
spin_lock_init(&ctx->lock);
raw_spin_lock_init(&ctx->lock);
mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->group_list);
INIT_LIST_HEAD(&ctx->event_list);

@@ -1649,7 +1649,7 @@ static struct perf_event_context *find_get_context(pid_t pid, int cpu)
ctx = perf_lock_task_context(task, &flags);
if (ctx) {
unclone_ctx(ctx);
spin_unlock_irqrestore(&ctx->lock, flags);
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
if (!ctx) {

@@ -1987,7 +1987,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
if (!value)
return -EINVAL;
spin_lock_irq(&ctx->lock);
raw_spin_lock_irq(&ctx->lock);
if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;

@@ -2000,7 +2000,7 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
event->hw.sample_period = value;
}
unlock:
spin_unlock_irq(&ctx->lock);
raw_spin_unlock_irq(&ctx->lock);
return ret;
}

@@ -4992,7 +4992,7 @@ void perf_event_exit_task(struct task_struct *child)
* reading child->perf_event_ctxp, we wait until it has
* incremented the context's refcount before we do put_ctx below.
*/
spin_lock(&child_ctx->lock);
raw_spin_lock(&child_ctx->lock);
child->perf_event_ctxp = NULL;
/*
* If this context is a clone; unclone it so it can't get

@@ -5001,7 +5001,7 @@ void perf_event_exit_task(struct task_struct *child)
*/
unclone_ctx(child_ctx);
update_context_time(child_ctx);
spin_unlock_irqrestore(&child_ctx->lock, flags);
raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
/*
* Report the task dead after unscheduling the events so that we

@@ -5292,11 +5292,11 @@ perf_set_reserve_percpu(struct sysdev_class *class,
perf_reserved_percpu = val;
for_each_online_cpu(cpu) {
cpuctx = &per_cpu(perf_cpu_context, cpu);
spin_lock_irq(&cpuctx->ctx.lock);
raw_spin_lock_irq(&cpuctx->ctx.lock);
mpt = min(perf_max_events - cpuctx->ctx.nr_events,
perf_max_events - perf_reserved_percpu);
cpuctx->max_pertask = mpt;
spin_unlock_irq(&cpuctx->ctx.lock);
raw_spin_unlock_irq(&cpuctx->ctx.lock);
}
spin_unlock(&perf_resource_lock);
@@ -6,7 +6,7 @@
#include <linux/vt_kern.h>
#include <linux/kbd_kern.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/module.h>
#include "power.h"

@@ -21,8 +21,7 @@ int pm_prepare_console(void)
if (orig_fgconsole < 0)
return 1;
orig_kmsg = kmsg_redirect;
kmsg_redirect = SUSPEND_CONSOLE;
orig_kmsg = vt_kmsg_redirect(SUSPEND_CONSOLE);
return 0;
}

@@ -30,7 +29,7 @@ void pm_restore_console(void)
{
if (orig_fgconsole >= 0) {
vt_move_to_console(orig_fgconsole, 0);
kmsg_redirect = orig_kmsg;
vt_kmsg_redirect(orig_kmsg);
}
}
#endif
@@ -763,13 +763,13 @@ static void rcu_torture_timer(unsigned long unused)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_count)[pipe_count];
__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_batch)[completed];
__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
}

@@ -818,13 +818,13 @@ rcu_torture_reader(void *arg)
/* Should not happen, but... */
pipe_count = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_count)[pipe_count];
__this_cpu_inc(per_cpu_var(rcu_torture_count)[pipe_count]);
completed = cur_ops->completed() - completed;
if (completed > RCU_TORTURE_PIPE_LEN) {
/* Should not happen, but... */
completed = RCU_TORTURE_PIPE_LEN;
}
++__get_cpu_var(rcu_torture_batch)[completed];
__this_cpu_inc(per_cpu_var(rcu_torture_batch)[completed]);
preempt_enable();
cur_ops->readunlock(idx);
schedule();
@@ -37,8 +37,8 @@ do { \
if (rt_trace_on) { \
rt_trace_on = 0; \
console_verbose(); \
if (spin_is_locked(&current->pi_lock)) \
spin_unlock(&current->pi_lock); \
if (raw_spin_is_locked(&current->pi_lock)) \
raw_spin_unlock(&current->pi_lock); \
} \
} while (0)
106	kernel/rtmutex.c
@@ -138,9 +138,9 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
{
unsigned long flags;
spin_lock_irqsave(&task->pi_lock, flags);
raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
/*

@@ -195,7 +195,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/*
* Task can not go away as we did a get_task() before !
*/
spin_lock_irqsave(&task->pi_lock, flags);
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
/*

@@ -231,8 +231,8 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto out_unlock_pi;
lock = waiter->lock;
if (!spin_trylock(&lock->wait_lock)) {
spin_unlock_irqrestore(&task->pi_lock, flags);
if (!raw_spin_trylock(&lock->wait_lock)) {
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
cpu_relax();
goto retry;
}

@@ -240,7 +240,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
/* Deadlock detection */
if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
ret = deadlock_detect ? -EDEADLK : 0;
goto out_unlock_pi;
}

@@ -253,13 +253,13 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
plist_add(&waiter->list_entry, &lock->wait_list);
/* Release the task */
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
put_task_struct(task);
/* Grab the next task */
task = rt_mutex_owner(lock);
get_task_struct(task);
spin_lock_irqsave(&task->pi_lock, flags);
raw_spin_lock_irqsave(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
/* Boost the owner */

@@ -277,10 +277,10 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
__rt_mutex_adjust_prio(task);
}
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
top_waiter = rt_mutex_top_waiter(lock);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
if (!detect_deadlock && waiter != top_waiter)
goto out_put_task;

@@ -288,7 +288,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
goto again;
out_unlock_pi:
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
out_put_task:
put_task_struct(task);

@@ -313,9 +313,9 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
if (pendowner == task)
return 1;
spin_lock_irqsave(&pendowner->pi_lock, flags);
raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
if (task->prio >= pendowner->prio) {
spin_unlock_irqrestore(&pendowner->pi_lock, flags);
raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 0;
}

@@ -325,7 +325,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* priority.
*/
if (likely(!rt_mutex_has_waiters(lock))) {
spin_unlock_irqrestore(&pendowner->pi_lock, flags);
raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
return 1;
}

@@ -333,7 +333,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
next = rt_mutex_top_waiter(lock);
plist_del(&next->pi_list_entry, &pendowner->pi_waiters);
__rt_mutex_adjust_prio(pendowner);
spin_unlock_irqrestore(&pendowner->pi_lock, flags);
raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
/*
* We are going to steal the lock and a waiter was

@@ -350,10 +350,10 @@ static inline int try_to_steal_lock(struct rt_mutex *lock,
* might be task:
*/
if (likely(next->task != task)) {
spin_lock_irqsave(&task->pi_lock, flags);
raw_spin_lock_irqsave(&task->pi_lock, flags);
plist_add(&next->pi_list_entry, &task->pi_waiters);
__rt_mutex_adjust_prio(task);
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
}
return 1;
}

@@ -420,7 +420,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0, res;
spin_lock_irqsave(&task->pi_lock, flags);
raw_spin_lock_irqsave(&task->pi_lock, flags);
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;

@@ -434,17 +434,17 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
task->pi_blocked_on = waiter;
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
if (waiter == rt_mutex_top_waiter(lock)) {
spin_lock_irqsave(&owner->pi_lock, flags);
raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&top_waiter->pi_list_entry, &owner->pi_waiters);
plist_add(&waiter->pi_list_entry, &owner->pi_waiters);
__rt_mutex_adjust_prio(owner);
if (owner->pi_blocked_on)
chain_walk = 1;
spin_unlock_irqrestore(&owner->pi_lock, flags);
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
chain_walk = 1;

@@ -459,12 +459,12 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
*/
get_task_struct(owner);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
task);
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
return res;
}

@@ -483,7 +483,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
struct task_struct *pendowner;
unsigned long flags;
spin_lock_irqsave(&current->pi_lock, flags);
raw_spin_lock_irqsave(&current->pi_lock, flags);
waiter = rt_mutex_top_waiter(lock);
plist_del(&waiter->list_entry, &lock->wait_list);

@@ -500,7 +500,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
rt_mutex_set_owner(lock, pendowner, RT_MUTEX_OWNER_PENDING);
spin_unlock_irqrestore(&current->pi_lock, flags);
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
/*
* Clear the pi_blocked_on variable and enqueue a possible

@@ -509,7 +509,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
* waiter with higher priority than pending-owner->normal_prio
* is blocked on the unboosted (pending) owner.
*/
spin_lock_irqsave(&pendowner->pi_lock, flags);
raw_spin_lock_irqsave(&pendowner->pi_lock, flags);
WARN_ON(!pendowner->pi_blocked_on);
WARN_ON(pendowner->pi_blocked_on != waiter);

@@ -523,7 +523,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
next = rt_mutex_top_waiter(lock);
plist_add(&next->pi_list_entry, &pendowner->pi_waiters);
}
spin_unlock_irqrestore(&pendowner->pi_lock, flags);
raw_spin_unlock_irqrestore(&pendowner->pi_lock, flags);
wake_up_process(pendowner);
}

@@ -541,15 +541,15 @@ static void remove_waiter(struct rt_mutex *lock,
unsigned long flags;
int chain_walk = 0;
spin_lock_irqsave(&current->pi_lock, flags);
raw_spin_lock_irqsave(&current->pi_lock, flags);
plist_del(&waiter->list_entry, &lock->wait_list);
waiter->task = NULL;
current->pi_blocked_on = NULL;
spin_unlock_irqrestore(&current->pi_lock, flags);
raw_spin_unlock_irqrestore(&current->pi_lock, flags);
if (first && owner != current) {
spin_lock_irqsave(&owner->pi_lock, flags);
raw_spin_lock_irqsave(&owner->pi_lock, flags);
plist_del(&waiter->pi_list_entry, &owner->pi_waiters);

@@ -564,7 +564,7 @@ static void remove_waiter(struct rt_mutex *lock,
if (owner->pi_blocked_on)
chain_walk = 1;
spin_unlock_irqrestore(&owner->pi_lock, flags);
raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
}
WARN_ON(!plist_node_empty(&waiter->pi_list_entry));

@@ -575,11 +575,11 @@ static void remove_waiter(struct rt_mutex *lock,
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(owner);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
}
/*

@@ -592,15 +592,15 @@ void rt_mutex_adjust_pi(struct task_struct *task)
struct rt_mutex_waiter *waiter;
unsigned long flags;
spin_lock_irqsave(&task->pi_lock, flags);
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
if (!waiter || waiter->list_entry.prio == task->prio) {
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
spin_unlock_irqrestore(&task->pi_lock, flags);
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
/* gets dropped in rt_mutex_adjust_prio_chain()! */
get_task_struct(task);

@@ -672,14 +672,14 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
break;
}
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
if (waiter->task)
schedule_rt_mutex(lock);
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
set_current_state(state);
}

@@ -700,11 +700,11 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock)) {
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
return 0;
}

@@ -731,7 +731,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
*/
fixup_rt_mutex_waiters(lock);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
/* Remove pending timer: */
if (unlikely(timeout))

@@ -758,7 +758,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
{
int ret = 0;
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
if (likely(rt_mutex_owner(lock) != current)) {

@@ -770,7 +770,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
fixup_rt_mutex_waiters(lock);
}
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
return ret;
}

@@ -781,7 +781,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock)
static void __sched
rt_mutex_slowunlock(struct rt_mutex *lock)
{
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
debug_rt_mutex_unlock(lock);

@@ -789,13 +789,13 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
if (!rt_mutex_has_waiters(lock)) {
lock->owner = NULL;
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
return;
}
wakeup_next_waiter(lock);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
/* Undo pi boosting if necessary: */
rt_mutex_adjust_prio(current);

@@ -970,8 +970,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
spin_lock_init(&lock->wait_lock);
plist_head_init(&lock->wait_list, &lock->wait_lock);
raw_spin_lock_init(&lock->wait_lock);
plist_head_init_raw(&lock->wait_list, &lock->wait_lock);
debug_rt_mutex_init(lock, name);
}

@@ -1032,7 +1032,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
{
int ret;
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
mark_rt_mutex_waiters(lock);

@@ -1040,7 +1040,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
/* We got the lock for task. */
debug_rt_mutex_lock(lock);
rt_mutex_set_owner(lock, task, 0);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
rt_mutex_deadlock_account_lock(lock, task);
return 1;
}

@@ -1056,7 +1056,7 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
*/
ret = 0;
}
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);

@@ -1106,7 +1106,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
{
int ret;
spin_lock(&lock->wait_lock);
raw_spin_lock(&lock->wait_lock);
set_current_state(TASK_INTERRUPTIBLE);

@@ -1124,7 +1124,7 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
*/
fixup_rt_mutex_waiters(lock);
spin_unlock(&lock->wait_lock);
raw_spin_unlock(&lock->wait_lock);
/*
* Readjust priority, when we did not get the lock. We might have been
231	kernel/sched.c
@@ -143,7 +143,7 @@ struct rt_prio_array {
struct rt_bandwidth {
/* nests inside the rq lock: */
spinlock_t rt_runtime_lock;
raw_spinlock_t rt_runtime_lock;
ktime_t rt_period;
u64 rt_runtime;
struct hrtimer rt_period_timer;

@@ -180,7 +180,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
rt_b->rt_period = ns_to_ktime(period);
rt_b->rt_runtime = runtime;
spin_lock_init(&rt_b->rt_runtime_lock);
raw_spin_lock_init(&rt_b->rt_runtime_lock);
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);

@@ -202,7 +202,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
if (hrtimer_active(&rt_b->rt_period_timer))
return;
spin_lock(&rt_b->rt_runtime_lock);
raw_spin_lock(&rt_b->rt_runtime_lock);
for (;;) {
unsigned long delta;
ktime_t soft, hard;

@@ -219,7 +219,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
__hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta,
HRTIMER_MODE_ABS_PINNED, 0);
}
spin_unlock(&rt_b->rt_runtime_lock);
raw_spin_unlock(&rt_b->rt_runtime_lock);
}
#ifdef CONFIG_RT_GROUP_SCHED

@@ -300,7 +300,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
#ifdef CONFIG_RT_GROUP_SCHED
static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq_var);
#endif /* CONFIG_RT_GROUP_SCHED */
#else /* !CONFIG_USER_SCHED */
#define root_task_group init_task_group

@@ -472,7 +472,7 @@ struct rt_rq {
u64 rt_time;
u64 rt_runtime;
/* Nests inside the rq lock: */
spinlock_t rt_runtime_lock;
raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;

@@ -527,7 +527,7 @@ static struct root_domain def_root_domain;
*/
struct rq {
/* runqueue lock: */
spinlock_t lock;
raw_spinlock_t lock;
/*
* nr_running and cpu_load should be in the same cacheline because

@@ -687,7 +687,7 @@ inline void update_rq_clock(struct rq *rq)
*/
int runqueue_is_locked(int cpu)
{
return spin_is_locked(&cpu_rq(cpu)->lock);
return raw_spin_is_locked(&cpu_rq(cpu)->lock);
}
/*

@@ -895,7 +895,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
*/
spin_acquire(&rq->lock.dep_map, 0, 0, _THIS_IP_);
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
}
#else /* __ARCH_WANT_UNLOCKED_CTXSW */

@@ -919,9 +919,9 @@ static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next)
next->oncpu = 1;
#endif
#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
#else
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
#endif
}

@@ -951,10 +951,10 @@ static inline struct rq *__task_rq_lock(struct task_struct *p)
{
for (;;) {
struct rq *rq = task_rq(p);
spin_lock(&rq->lock);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
}
}

@@ -971,10 +971,10 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
for (;;) {
local_irq_save(*flags);
rq = task_rq(p);
spin_lock(&rq->lock);
raw_spin_lock(&rq->lock);
if (likely(rq == task_rq(p)))
return rq;
spin_unlock_irqrestore(&rq->lock, *flags);
raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
}

@@ -983,19 +983,19 @@ void task_rq_unlock_wait(struct task_struct *p)
struct rq *rq = task_rq(p);
smp_mb(); /* spin-unlock-wait is not a full memory barrier */
spin_unlock_wait(&rq->lock);
raw_spin_unlock_wait(&rq->lock);
}
static void __task_rq_unlock(struct rq *rq)
__releases(rq->lock)
{
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
}
static inline void task_rq_unlock(struct rq *rq, unsigned long *flags)
__releases(rq->lock)
{
spin_unlock_irqrestore(&rq->lock, *flags);
raw_spin_unlock_irqrestore(&rq->lock, *flags);
}
/*

@@ -1008,7 +1008,7 @@ static struct rq *this_rq_lock(void)
local_irq_disable();
rq = this_rq();
spin_lock(&rq->lock);
raw_spin_lock(&rq->lock);
return rq;
}

@@ -1055,10 +1055,10 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
spin_lock(&rq->lock);
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
rq->curr->sched_class->task_tick(rq, rq->curr, 1);
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
return HRTIMER_NORESTART;
}

@@ -1071,10 +1071,10 @@ static void __hrtick_start(void *arg)
{
struct rq *rq = arg;
spin_lock(&rq->lock);
raw_spin_lock(&rq->lock);
hrtimer_restart(&rq->hrtick_timer);
rq->hrtick_csd_pending = 0;
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
}
/*

@@ -1181,7 +1181,7 @@ static void resched_task(struct task_struct *p)
{
int cpu;
assert_spin_locked(&task_rq(p)->lock);
assert_raw_spin_locked(&task_rq(p)->lock);
if (test_tsk_need_resched(p))
return;

@@ -1203,10 +1203,10 @@ static void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
if (!spin_trylock_irqsave(&rq->lock, flags))
if (!raw_spin_trylock_irqsave(&rq->lock, flags))
return;
resched_task(cpu_curr(cpu));
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
#ifdef CONFIG_NO_HZ

@@ -1275,7 +1275,7 @@ static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
#else /* !CONFIG_SMP */
static void resched_task(struct task_struct *p)
{
assert_spin_locked(&task_rq(p)->lock);
assert_raw_spin_locked(&task_rq(p)->lock);
set_tsk_need_resched(p);
}

@@ -1602,11 +1602,11 @@ static void update_group_shares_cpu(struct task_group *tg, int cpu,
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
tg->cfs_rq[cpu]->rq_weight = boost ? 0 : rq_weight;
tg->cfs_rq[cpu]->shares = boost ? 0 : shares;
__set_se_shares(tg->se[cpu], shares);
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
}

@@ -1708,9 +1708,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
if (root_task_group_empty())
return;
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
update_shares(sd);
spin_lock(&rq->lock);
raw_spin_lock(&rq->lock);
}
static void update_h_load(long cpu)

@@ -1750,7 +1750,7 @@ static inline int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
__acquires(busiest->lock)
__acquires(this_rq->lock)
{
spin_unlock(&this_rq->lock);
raw_spin_unlock(&this_rq->lock);
double_rq_lock(this_rq, busiest);
return 1;

@@ -1771,14 +1771,16 @@ static int _double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
int ret = 0;
if (unlikely(!spin_trylock(&busiest->lock))) {
if (unlikely(!raw_spin_trylock(&busiest->lock))) {
if (busiest < this_rq) {
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock_nested(&this_rq->lock, SINGLE_DEPTH_NESTING);
raw_spin_unlock(&this_rq->lock);
raw_spin_lock(&busiest->lock);
raw_spin_lock_nested(&this_rq->lock,
SINGLE_DEPTH_NESTING);
ret = 1;
} else
spin_lock_nested(&busiest->lock, SINGLE_DEPTH_NESTING);
raw_spin_lock_nested(&busiest->lock,
SINGLE_DEPTH_NESTING);
}
return ret;
}

@@ -1792,7 +1794,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
{
if (unlikely(!irqs_disabled())) {
/* printk() doesn't work good under rq->lock */
spin_unlock(&this_rq->lock);
raw_spin_unlock(&this_rq->lock);
BUG_ON(1);
}

@@ -1802,7 +1804,7 @@ static int double_lock_balance(struct rq *this_rq, struct rq *busiest)
static inline void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
__releases(busiest->lock)
{
spin_unlock(&busiest->lock);
raw_spin_unlock(&busiest->lock);
lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
}
#endif
@@ -2025,13 +2027,13 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
return;
}
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
update_rq_clock(rq);
set_task_cpu(p, cpu);
p->cpus_allowed = cpumask_of_cpu(cpu);
p->rt.nr_cpus_allowed = 1;
p->flags |= PF_THREAD_BOUND;
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
EXPORT_SYMBOL(kthread_bind);

@@ -2783,10 +2785,10 @@ static inline void post_schedule(struct rq *rq)
if (rq->post_schedule) {
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->curr->sched_class->post_schedule)
rq->curr->sched_class->post_schedule(rq);
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
rq->post_schedule = 0;
}

@@ -3068,15 +3070,15 @@ static void double_rq_lock(struct rq *rq1, struct rq *rq2)
{
BUG_ON(!irqs_disabled());
if (rq1 == rq2) {
spin_lock(&rq1->lock);
raw_spin_lock(&rq1->lock);
__acquire(rq2->lock); /* Fake it out ;) */
} else {
if (rq1 < rq2) {
spin_lock(&rq1->lock);
spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
raw_spin_lock(&rq1->lock);
raw_spin_lock_nested(&rq2->lock, SINGLE_DEPTH_NESTING);
} else {
spin_lock(&rq2->lock);
spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
raw_spin_lock(&rq2->lock);
raw_spin_lock_nested(&rq1->lock, SINGLE_DEPTH_NESTING);
}
}
update_rq_clock(rq1);

@@ -3093,9 +3095,9 @@ static void double_rq_unlock(struct rq *rq1, struct rq *rq2)
__releases(rq1->lock)
__releases(rq2->lock)
{
spin_unlock(&rq1->lock);
raw_spin_unlock(&rq1->lock);
if (rq1 != rq2)
spin_unlock(&rq2->lock);
raw_spin_unlock(&rq2->lock);
else
__release(rq2->lock);
}

@@ -4188,14 +4190,15 @@ redo:
if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
spin_lock_irqsave(&busiest->lock, flags);
raw_spin_lock_irqsave(&busiest->lock, flags);
/* don't kick the migration_thread, if the curr
* task on busiest cpu can't be moved to this_cpu
*/
if (!cpumask_test_cpu(this_cpu,
&busiest->curr->cpus_allowed)) {
spin_unlock_irqrestore(&busiest->lock, flags);
raw_spin_unlock_irqrestore(&busiest->lock,
flags);
all_pinned = 1;
goto out_one_pinned;
}

@@ -4205,7 +4208,7 @@ redo:
busiest->push_cpu = this_cpu;
active_balance = 1;
}
spin_unlock_irqrestore(&busiest->lock, flags);
raw_spin_unlock_irqrestore(&busiest->lock, flags);
if (active_balance)
wake_up_process(busiest->migration_thread);

@@ -4387,10 +4390,10 @@ redo:
/*
* Should not call ttwu while holding a rq->lock
*/
spin_unlock(&this_rq->lock);
raw_spin_unlock(&this_rq->lock);
if (active_balance)
wake_up_process(busiest->migration_thread);
spin_lock(&this_rq->lock);
raw_spin_lock(&this_rq->lock);
} else
sd->nr_balance_failed = 0;

@@ -5259,11 +5262,11 @@ void scheduler_tick(void)
sched_clock_tick();
spin_lock(&rq->lock);
raw_spin_lock(&rq->lock);
update_rq_clock(rq);
update_cpu_load(rq);
curr->sched_class->task_tick(rq, curr, 0);
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
perf_event_task_tick(curr, cpu);

@@ -5457,7 +5460,7 @@ need_resched_nonpreemptible:
if (sched_feat(HRTICK))
hrtick_clear(rq);
spin_lock_irq(&rq->lock);
raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
clear_tsk_need_resched(prev);

@@ -5493,7 +5496,7 @@ need_resched_nonpreemptible:
cpu = smp_processor_id();
rq = cpu_rq(cpu);
} else
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
post_schedule(rq);

@@ -6324,7 +6327,7 @@ recheck:
* make sure no PI-waiters arrive (or leave) while we are
* changing the priority of the task:
*/
spin_lock_irqsave(&p->pi_lock, flags);
raw_spin_lock_irqsave(&p->pi_lock, flags);
/*
* To be able to change p->policy safely, the apropriate
* runqueue lock must be held.

@@ -6334,7 +6337,7 @@ recheck:
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
policy = oldpolicy = -1;
__task_rq_unlock(rq);
spin_unlock_irqrestore(&p->pi_lock, flags);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
update_rq_clock(rq);

@@ -6358,7 +6361,7 @@ recheck:
check_class_changed(rq, p, prev_class, oldprio, running);
}
__task_rq_unlock(rq);
spin_unlock_irqrestore(&p->pi_lock, flags);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
rt_mutex_adjust_pi(p);

@@ -6684,7 +6687,7 @@ SYSCALL_DEFINE0(sched_yield)
*/
__release(rq->lock);
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
_raw_spin_unlock(&rq->lock);
do_raw_spin_unlock(&rq->lock);
preempt_enable_no_resched();
schedule();

@@ -6978,7 +6981,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
__sched_fork(idle);
idle->se.exec_start = sched_clock();

@@ -6990,7 +6993,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
idle->oncpu = 1;
#endif
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
#if defined(CONFIG_PREEMPT)

@@ -7207,10 +7210,10 @@ static int migration_thread(void *data)
struct migration_req *req;
struct list_head *head;
spin_lock_irq(&rq->lock);
raw_spin_lock_irq(&rq->lock);
if (cpu_is_offline(cpu)) {
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
break;
}

@@ -7222,7 +7225,7 @@ static int migration_thread(void *data)
head = &rq->migration_queue;
if (list_empty(head)) {
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
schedule();
set_current_state(TASK_INTERRUPTIBLE);
continue;

@@ -7231,14 +7234,14 @@ static int migration_thread(void *data)
list_del_init(head->next);
if (req->task != NULL) {
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
__migrate_task(req->task, cpu, req->dest_cpu);
} else if (likely(cpu == (badcpu = smp_processor_id()))) {
req->dest_cpu = RCU_MIGRATION_GOT_QS;
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
} else {
req->dest_cpu = RCU_MIGRATION_MUST_SYNC;
spin_unlock(&rq->lock);
raw_spin_unlock(&rq->lock);
WARN_ONCE(1, "migration_thread() on CPU %d, expected %d\n", badcpu, cpu);
}
local_irq_enable();

@@ -7360,14 +7363,14 @@ void sched_idle_next(void)
* Strictly not necessary since rest of the CPUs are stopped by now
* and interrupts disabled on the current cpu.
*/
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
__setscheduler(rq, p, SCHED_FIFO, MAX_RT_PRIO-1);
update_rq_clock(rq);
activate_task(rq, p, 0);
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
/*

@@ -7403,9 +7406,9 @@ static void migrate_dead(unsigned int dead_cpu, struct task_struct *p)
* that's OK. No task can be added to this CPU, so iteration is
* fine.
*/
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
move_task_off_dead_cpu(dead_cpu, p);
spin_lock_irq(&rq->lock);
raw_spin_lock_irq(&rq->lock);
put_task_struct(p);
}
@@ -7671,13 +7674,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_online(rq);
}
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#ifdef CONFIG_HOTPLUG_CPU

@@ -7702,13 +7705,13 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
put_task_struct(rq->migration_thread);
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
spin_lock_irq(&rq->lock);
raw_spin_lock_irq(&rq->lock);
update_rq_clock(rq);
deactivate_task(rq, rq->idle, 0);
__setscheduler(rq, rq->idle, SCHED_NORMAL, 0);
rq->idle->sched_class = &idle_sched_class;
migrate_dead_tasks(cpu);
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
cpuset_unlock();
migrate_nr_uninterruptible(rq);
BUG_ON(rq->nr_running != 0);

@@ -7718,30 +7721,30 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
* they didn't take sched_hotcpu_mutex. Just wake up
* the requestors.
*/
spin_lock_irq(&rq->lock);
raw_spin_lock_irq(&rq->lock);
while (!list_empty(&rq->migration_queue)) {
struct migration_req *req;
req = list_entry(rq->migration_queue.next,
struct migration_req, list);
list_del_init(&req->list);
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
complete(&req->done);
spin_lock_irq(&rq->lock);
raw_spin_lock_irq(&rq->lock);
}
spin_unlock_irq(&rq->lock);
raw_spin_unlock_irq(&rq->lock);
break;
case CPU_DYING:
case CPU_DYING_FROZEN:
/* Update our root-domain */
rq = cpu_rq(cpu);
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
break;
#endif
}

@@ -7965,7 +7968,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
struct root_domain *old_rd = NULL;
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
if (rq->rd) {
old_rd = rq->rd;

@@ -7991,7 +7994,7 @@ static void rq_attach_root(struct rq *rq, struct root_domain *rd)
if (cpumask_test_cpu(rq->cpu, cpu_active_mask))
set_rq_online(rq);
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
if (old_rd)
free_rootdomain(old_rd);

@@ -8277,14 +8280,14 @@ enum s_alloc {
*/
#ifdef CONFIG_SCHED_SMT
static DEFINE_PER_CPU(struct static_sched_domain, cpu_domains);
static DEFINE_PER_CPU(struct static_sched_group, sched_group_cpus);
static DEFINE_PER_CPU(struct static_sched_group, sched_groups);
static int
cpu_to_cpu_group(int cpu, const struct cpumask *cpu_map,
struct sched_group **sg, struct cpumask *unused)
{
if (sg)
*sg = &per_cpu(sched_group_cpus, cpu).sg;
*sg = &per_cpu(sched_groups, cpu).sg;
return cpu;
}
#endif /* CONFIG_SCHED_SMT */

@@ -9347,13 +9350,13 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
#ifdef CONFIG_SMP
rt_rq->rt_nr_migratory = 0;
rt_rq->overloaded = 0;
plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
plist_head_init_raw(&rt_rq->pushable_tasks, &rq->lock);
#endif
rt_rq->rt_time = 0;
rt_rq->rt_throttled = 0;
rt_rq->rt_runtime = 0;
spin_lock_init(&rt_rq->rt_runtime_lock);
raw_spin_lock_init(&rt_rq->rt_runtime_lock);
#ifdef CONFIG_RT_GROUP_SCHED
rt_rq->rt_nr_boosted = 0;

@@ -9513,7 +9516,7 @@ void __init sched_init(void)
struct rq *rq;
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
raw_spin_lock_init(&rq->lock);
rq->nr_running = 0;
rq->calc_load_active = 0;
rq->calc_load_update = jiffies + LOAD_FREQ;

@@ -9573,7 +9576,7 @@ void __init sched_init(void)
#elif defined CONFIG_USER_SCHED
init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, 0, NULL);
init_tg_rt_entry(&init_task_group,
&per_cpu(init_rt_rq, i),
&per_cpu(init_rt_rq_var, i),
&per_cpu(init_sched_rt_entity, i), i, 1,
root_task_group.rt_se[i]);
#endif

@@ -9611,7 +9614,7 @@ void __init sched_init(void)
#endif
#ifdef CONFIG_RT_MUTEXES
plist_head_init(&init_task.pi_waiters, &init_task.pi_lock);
plist_head_init_raw(&init_task.pi_waiters, &init_task.pi_lock);
#endif
/*

@@ -9734,13 +9737,13 @@ void normalize_rt_tasks(void)
continue;
}
spin_lock(&p->pi_lock);
raw_spin_lock(&p->pi_lock);
rq = __task_rq_lock(p);
normalize_task(rq, p);
__task_rq_unlock(rq);
spin_unlock(&p->pi_lock);
raw_spin_unlock(&p->pi_lock);
} while_each_thread(g, p);
read_unlock_irqrestore(&tasklist_lock, flags);

@@ -10103,9 +10106,9 @@ static void set_se_shares(struct sched_entity *se, unsigned long shares)
struct rq *rq = cfs_rq->rq;
unsigned long flags;
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
__set_se_shares(se, shares);
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
static DEFINE_MUTEX(shares_mutex);

@@ -10290,18 +10293,18 @@ static int tg_set_bandwidth(struct task_group *tg,
if (err)
goto unlock;
spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
raw_spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
tg->rt_bandwidth.rt_runtime = rt_runtime;
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = tg->rt_rq[i];
spin_lock(&rt_rq->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = rt_runtime;
spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
raw_spin_unlock_irq(&tg->rt_bandwidth.rt_runtime_lock);
unlock:
read_unlock(&tasklist_lock);
mutex_unlock(&rt_constraints_mutex);

@@ -10406,15 +10409,15 @@ static int sched_rt_global_constraints(void)
if (sysctl_sched_rt_runtime == 0)
return -EBUSY;
spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;
spin_lock(&rt_rq->rt_runtime_lock);
raw_spin_lock(&rt_rq->rt_runtime_lock);
rt_rq->rt_runtime = global_rt_runtime();
spin_unlock(&rt_rq->rt_runtime_lock);
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);
return 0;
}

@@ -10705,9 +10708,9 @@ static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu)
/*
* Take rq->lock to make 64-bit read safe on 32-bit platforms.
*/
spin_lock_irq(&cpu_rq(cpu)->lock);
raw_spin_lock_irq(&cpu_rq(cpu)->lock);
data = *cpuusage;
spin_unlock_irq(&cpu_rq(cpu)->lock);
raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
data = *cpuusage;
#endif

@@ -10723,9 +10726,9 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
/*
* Take rq->lock to make 64-bit write safe on 32-bit platforms.
*/
spin_lock_irq(&cpu_rq(cpu)->lock);
raw_spin_lock_irq(&cpu_rq(cpu)->lock);
*cpuusage = val;
spin_unlock_irq(&cpu_rq(cpu)->lock);
raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#else
*cpuusage = val;
#endif

@@ -10959,9 +10962,9 @@ void synchronize_sched_expedited(void)
init_completion(&req->done);
req->task = NULL;
req->dest_cpu = RCU_MIGRATION_NEED_QS;
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
list_add(&req->list, &rq->migration_queue);
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
wake_up_process(rq->migration_thread);
}
for_each_online_cpu(cpu) {

@@ -10969,11 +10972,11 @@ void synchronize_sched_expedited(void)
req = &per_cpu(rcu_migration_req, cpu);
rq = cpu_rq(cpu);
wait_for_completion(&req->done);
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
if (unlikely(req->dest_cpu == RCU_MIGRATION_MUST_SYNC))
need_full_sync = 1;
req->dest_cpu = RCU_MIGRATION_IDLE;
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
rcu_expedited_state = RCU_EXPEDITED_STATE_IDLE;
synchronize_sched_expedited_count++;
@@ -135,26 +135,26 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
if (likely(newpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[newpri];
spin_lock_irqsave(&vec->lock, flags);
raw_spin_lock_irqsave(&vec->lock, flags);
cpumask_set_cpu(cpu, vec->mask);
vec->count++;
if (vec->count == 1)
set_bit(newpri, cp->pri_active);
spin_unlock_irqrestore(&vec->lock, flags);
raw_spin_unlock_irqrestore(&vec->lock, flags);
}
if (likely(oldpri != CPUPRI_INVALID)) {
struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri];
spin_lock_irqsave(&vec->lock, flags);
raw_spin_lock_irqsave(&vec->lock, flags);
vec->count--;
if (!vec->count)
clear_bit(oldpri, cp->pri_active);
cpumask_clear_cpu(cpu, vec->mask);
spin_unlock_irqrestore(&vec->lock, flags);
raw_spin_unlock_irqrestore(&vec->lock, flags);
}
*currpri = newpri;

@@ -180,7 +180,7 @@ int cpupri_init(struct cpupri *cp, bool bootmem)
for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) {
struct cpupri_vec *vec = &cp->pri_to_cpu[i];
spin_lock_init(&vec->lock);
raw_spin_lock_init(&vec->lock);
vec->count = 0;
if (!zalloc_cpumask_var(&vec->mask, gfp))
goto cleanup;
@@ -12,7 +12,7 @@
/* values 2-101 are RT priorities 0-99 */
struct cpupri_vec {
spinlock_t lock;
raw_spinlock_t lock;
int count;
cpumask_var_t mask;
};
@@ -184,7 +184,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
SPLIT_NS(cfs_rq->exec_clock));
spin_lock_irqsave(&rq->lock, flags);
raw_spin_lock_irqsave(&rq->lock, flags);
if (cfs_rq->rb_leftmost)
MIN_vruntime = (__pick_next_entity(cfs_rq))->vruntime;
last = __pick_last_entity(cfs_rq);

@@ -192,7 +192,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
max_vruntime = last->vruntime;
min_vruntime = cfs_rq->min_vruntime;
rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
spin_unlock_irqrestore(&rq->lock, flags);
raw_spin_unlock_irqrestore(&rq->lock, flags);
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
SPLIT_NS(MIN_vruntime));
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
@@ -1955,7 +1955,7 @@ static void task_fork_fair(struct task_struct *p)
|
||||
struct rq *rq = this_rq();
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rq->lock, flags);
|
||||
raw_spin_lock_irqsave(&rq->lock, flags);
|
||||
|
||||
if (unlikely(task_cpu(p) != this_cpu))
|
||||
__set_task_cpu(p, this_cpu);
|
||||
@@ -1975,7 +1975,7 @@ static void task_fork_fair(struct task_struct *p)
|
||||
resched_task(rq->curr);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&rq->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&rq->lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -34,10 +34,10 @@ static struct task_struct *pick_next_task_idle(struct rq *rq)
|
||||
static void
|
||||
dequeue_task_idle(struct rq *rq, struct task_struct *p, int sleep)
|
||||
{
|
||||
spin_unlock_irq(&rq->lock);
|
||||
raw_spin_unlock_irq(&rq->lock);
|
||||
pr_err("bad: scheduling from the idle thread!\n");
|
||||
dump_stack();
|
||||
spin_lock_irq(&rq->lock);
|
||||
raw_spin_lock_irq(&rq->lock);
|
||||
}
|
||||
|
||||
static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
|
||||
|
@@ -327,7 +327,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
|
||||
|
||||
weight = cpumask_weight(rd->span);
|
||||
|
||||
spin_lock(&rt_b->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_b->rt_runtime_lock);
|
||||
rt_period = ktime_to_ns(rt_b->rt_period);
|
||||
for_each_cpu(i, rd->span) {
|
||||
struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
|
||||
@@ -336,7 +336,7 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
|
||||
if (iter == rt_rq)
|
||||
continue;
|
||||
|
||||
spin_lock(&iter->rt_runtime_lock);
|
||||
raw_spin_lock(&iter->rt_runtime_lock);
|
||||
/*
|
||||
* Either all rqs have inf runtime and there's nothing to steal
|
||||
* or __disable_runtime() below sets a specific rq to inf to
|
||||
@@ -358,14 +358,14 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
|
||||
rt_rq->rt_runtime += diff;
|
||||
more = 1;
|
||||
if (rt_rq->rt_runtime == rt_period) {
|
||||
spin_unlock(&iter->rt_runtime_lock);
|
||||
raw_spin_unlock(&iter->rt_runtime_lock);
|
||||
break;
|
||||
}
|
||||
}
|
||||
next:
|
||||
spin_unlock(&iter->rt_runtime_lock);
|
||||
raw_spin_unlock(&iter->rt_runtime_lock);
|
||||
}
|
||||
spin_unlock(&rt_b->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_b->rt_runtime_lock);
|
||||
|
||||
return more;
|
||||
}
|
||||
@@ -386,8 +386,8 @@ static void __disable_runtime(struct rq *rq)
|
||||
s64 want;
|
||||
int i;
|
||||
|
||||
spin_lock(&rt_b->rt_runtime_lock);
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_b->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
||||
/*
|
||||
* Either we're all inf and nobody needs to borrow, or we're
|
||||
* already disabled and thus have nothing to do, or we have
|
||||
@@ -396,7 +396,7 @@ static void __disable_runtime(struct rq *rq)
|
||||
if (rt_rq->rt_runtime == RUNTIME_INF ||
|
||||
rt_rq->rt_runtime == rt_b->rt_runtime)
|
||||
goto balanced;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
|
||||
/*
|
||||
* Calculate the difference between what we started out with
|
||||
@@ -418,7 +418,7 @@ static void __disable_runtime(struct rq *rq)
|
||||
if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
|
||||
continue;
|
||||
|
||||
spin_lock(&iter->rt_runtime_lock);
|
||||
raw_spin_lock(&iter->rt_runtime_lock);
|
||||
if (want > 0) {
|
||||
diff = min_t(s64, iter->rt_runtime, want);
|
||||
iter->rt_runtime -= diff;
|
||||
@@ -427,13 +427,13 @@ static void __disable_runtime(struct rq *rq)
|
||||
iter->rt_runtime -= want;
|
||||
want -= want;
|
||||
}
|
||||
spin_unlock(&iter->rt_runtime_lock);
|
||||
raw_spin_unlock(&iter->rt_runtime_lock);
|
||||
|
||||
if (!want)
|
||||
break;
|
||||
}
|
||||
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
||||
/*
|
||||
* We cannot be left wanting - that would mean some runtime
|
||||
* leaked out of the system.
|
||||
@@ -445,8 +445,8 @@ balanced:
|
||||
* runtime - in which case borrowing doesn't make sense.
|
||||
*/
|
||||
rt_rq->rt_runtime = RUNTIME_INF;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
spin_unlock(&rt_b->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_b->rt_runtime_lock);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -454,9 +454,9 @@ static void disable_runtime(struct rq *rq)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rq->lock, flags);
|
||||
raw_spin_lock_irqsave(&rq->lock, flags);
|
||||
__disable_runtime(rq);
|
||||
spin_unlock_irqrestore(&rq->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&rq->lock, flags);
|
||||
}
|
||||
|
||||
static void __enable_runtime(struct rq *rq)
|
||||
@@ -472,13 +472,13 @@ static void __enable_runtime(struct rq *rq)
|
||||
for_each_leaf_rt_rq(rt_rq, rq) {
|
||||
struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
|
||||
|
||||
spin_lock(&rt_b->rt_runtime_lock);
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_b->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
||||
rt_rq->rt_runtime = rt_b->rt_runtime;
|
||||
rt_rq->rt_time = 0;
|
||||
rt_rq->rt_throttled = 0;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
spin_unlock(&rt_b->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_b->rt_runtime_lock);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -486,9 +486,9 @@ static void enable_runtime(struct rq *rq)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&rq->lock, flags);
|
||||
raw_spin_lock_irqsave(&rq->lock, flags);
|
||||
__enable_runtime(rq);
|
||||
spin_unlock_irqrestore(&rq->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&rq->lock, flags);
|
||||
}
|
||||
|
||||
static int balance_runtime(struct rt_rq *rt_rq)
|
||||
@@ -496,9 +496,9 @@ static int balance_runtime(struct rt_rq *rt_rq)
|
||||
int more = 0;
|
||||
|
||||
if (rt_rq->rt_time > rt_rq->rt_runtime) {
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
more = do_balance_runtime(rt_rq);
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
||||
}
|
||||
|
||||
return more;
|
||||
@@ -524,11 +524,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
|
||||
struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
|
||||
struct rq *rq = rq_of_rt_rq(rt_rq);
|
||||
|
||||
spin_lock(&rq->lock);
|
||||
raw_spin_lock(&rq->lock);
|
||||
if (rt_rq->rt_time) {
|
||||
u64 runtime;
|
||||
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
||||
if (rt_rq->rt_throttled)
|
||||
balance_runtime(rt_rq);
|
||||
runtime = rt_rq->rt_runtime;
|
||||
@@ -539,13 +539,13 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
|
||||
}
|
||||
if (rt_rq->rt_time || rt_rq->rt_nr_running)
|
||||
idle = 0;
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
} else if (rt_rq->rt_nr_running)
|
||||
idle = 0;
|
||||
|
||||
if (enqueue)
|
||||
sched_rt_rq_enqueue(rt_rq);
|
||||
spin_unlock(&rq->lock);
|
||||
raw_spin_unlock(&rq->lock);
|
||||
}
|
||||
|
||||
return idle;
|
||||
@@ -624,11 +624,11 @@ static void update_curr_rt(struct rq *rq)
|
||||
rt_rq = rt_rq_of_se(rt_se);
|
||||
|
||||
if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
|
||||
spin_lock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_lock(&rt_rq->rt_runtime_lock);
|
||||
rt_rq->rt_time += delta_exec;
|
||||
if (sched_rt_runtime_exceeded(rt_rq))
|
||||
resched_task(curr);
|
||||
spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
raw_spin_unlock(&rt_rq->rt_runtime_lock);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1246,7 +1246,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
|
||||
task_running(rq, task) ||
|
||||
!task->se.on_rq)) {
|
||||
|
||||
spin_unlock(&lowest_rq->lock);
|
||||
raw_spin_unlock(&lowest_rq->lock);
|
||||
lowest_rq = NULL;
|
||||
break;
|
||||
}
|
||||
|
35
kernel/smp.c
35
kernel/smp.c
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
|
||||
|
||||
static struct {
|
||||
struct list_head queue;
|
||||
spinlock_t lock;
|
||||
raw_spinlock_t lock;
|
||||
} call_function __cacheline_aligned_in_smp =
|
||||
{
|
||||
.queue = LIST_HEAD_INIT(call_function.queue),
|
||||
.lock = __SPIN_LOCK_UNLOCKED(call_function.lock),
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
|
||||
};
|
||||
|
||||
enum {
|
||||
@@ -35,7 +35,7 @@ struct call_function_data {
|
||||
|
||||
struct call_single_queue {
|
||||
struct list_head list;
|
||||
spinlock_t lock;
|
||||
raw_spinlock_t lock;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct call_function_data, cfd_data);
|
||||
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
|
||||
for_each_possible_cpu(i) {
|
||||
struct call_single_queue *q = &per_cpu(call_single_queue, i);
|
||||
|
||||
spin_lock_init(&q->lock);
|
||||
raw_spin_lock_init(&q->lock);
|
||||
INIT_LIST_HEAD(&q->list);
|
||||
}
|
||||
|
||||
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
|
||||
unsigned long flags;
|
||||
int ipi;
|
||||
|
||||
spin_lock_irqsave(&dst->lock, flags);
|
||||
raw_spin_lock_irqsave(&dst->lock, flags);
|
||||
ipi = list_empty(&dst->list);
|
||||
list_add_tail(&data->list, &dst->list);
|
||||
spin_unlock_irqrestore(&dst->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&dst->lock, flags);
|
||||
|
||||
/*
|
||||
* The list addition should be visible before sending the IPI
|
||||
@@ -171,7 +171,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
|
||||
void generic_smp_call_function_interrupt(void)
|
||||
{
|
||||
struct call_function_data *data;
|
||||
int cpu = get_cpu();
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
/*
|
||||
* Shouldn't receive this interrupt on a cpu that is not yet online.
|
||||
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
|
||||
refs = atomic_dec_return(&data->refs);
|
||||
WARN_ON(refs < 0);
|
||||
if (!refs) {
|
||||
spin_lock(&call_function.lock);
|
||||
raw_spin_lock(&call_function.lock);
|
||||
list_del_rcu(&data->csd.list);
|
||||
spin_unlock(&call_function.lock);
|
||||
raw_spin_unlock(&call_function.lock);
|
||||
}
|
||||
|
||||
if (refs)
|
||||
@@ -212,7 +212,6 @@ void generic_smp_call_function_interrupt(void)
|
||||
csd_unlock(&data->csd);
|
||||
}
|
||||
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -230,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
|
||||
*/
|
||||
WARN_ON_ONCE(!cpu_online(smp_processor_id()));
|
||||
|
||||
spin_lock(&q->lock);
|
||||
raw_spin_lock(&q->lock);
|
||||
list_replace_init(&q->list, &list);
|
||||
spin_unlock(&q->lock);
|
||||
raw_spin_unlock(&q->lock);
|
||||
|
||||
while (!list_empty(&list)) {
|
||||
struct call_single_data *data;
|
||||
@@ -449,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
|
||||
cpumask_clear_cpu(this_cpu, data->cpumask);
|
||||
atomic_set(&data->refs, cpumask_weight(data->cpumask));
|
||||
|
||||
spin_lock_irqsave(&call_function.lock, flags);
|
||||
raw_spin_lock_irqsave(&call_function.lock, flags);
|
||||
/*
|
||||
* Place entry at the _HEAD_ of the list, so that any cpu still
|
||||
* observing the entry in generic_smp_call_function_interrupt()
|
||||
* will not miss any other list entries:
|
||||
*/
|
||||
list_add_rcu(&data->csd.list, &call_function.queue);
|
||||
spin_unlock_irqrestore(&call_function.lock, flags);
|
||||
raw_spin_unlock_irqrestore(&call_function.lock, flags);
|
||||
|
||||
/*
|
||||
* Make the list addition visible before sending the ipi.
|
||||
@@ -501,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
|
||||
|
||||
void ipi_call_lock(void)
|
||||
{
|
||||
spin_lock(&call_function.lock);
|
||||
raw_spin_lock(&call_function.lock);
|
||||
}
|
||||
|
||||
void ipi_call_unlock(void)
|
||||
{
|
||||
spin_unlock(&call_function.lock);
|
||||
raw_spin_unlock(&call_function.lock);
|
||||
}
|
||||
|
||||
void ipi_call_lock_irq(void)
|
||||
{
|
||||
spin_lock_irq(&call_function.lock);
|
||||
raw_spin_lock_irq(&call_function.lock);
|
||||
}
|
||||
|
||||
void ipi_call_unlock_irq(void)
|
||||
{
|
||||
spin_unlock_irq(&call_function.lock);
|
||||
raw_spin_unlock_irq(&call_function.lock);
|
||||
}
|
||||
|
@@ -697,7 +697,7 @@ void __init softirq_init(void)
|
||||
open_softirq(HI_SOFTIRQ, tasklet_hi_action);
|
||||
}
|
||||
|
||||
static int ksoftirqd(void * __bind_cpu)
|
||||
static int run_ksoftirqd(void * __bind_cpu)
|
||||
{
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
@@ -810,7 +810,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
|
||||
switch (action) {
|
||||
case CPU_UP_PREPARE:
|
||||
case CPU_UP_PREPARE_FROZEN:
|
||||
p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
|
||||
p = kthread_create(run_ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
|
||||
if (IS_ERR(p)) {
|
||||
printk("ksoftirqd for %i failed\n", hotcpu);
|
||||
return NOTIFY_BAD;
|
||||
|
@@ -22,9 +22,9 @@
|
||||
|
||||
static DEFINE_SPINLOCK(print_lock);
|
||||
|
||||
static DEFINE_PER_CPU(unsigned long, touch_timestamp);
|
||||
static DEFINE_PER_CPU(unsigned long, print_timestamp);
|
||||
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);
|
||||
static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
|
||||
static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
|
||||
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
|
||||
|
||||
static int __read_mostly did_panic;
|
||||
int __read_mostly softlockup_thresh = 60;
|
||||
@@ -70,12 +70,12 @@ static void __touch_softlockup_watchdog(void)
|
||||
{
|
||||
int this_cpu = raw_smp_processor_id();
|
||||
|
||||
__raw_get_cpu_var(touch_timestamp) = get_timestamp(this_cpu);
|
||||
__raw_get_cpu_var(softlockup_touch_ts) = get_timestamp(this_cpu);
|
||||
}
|
||||
|
||||
void touch_softlockup_watchdog(void)
|
||||
{
|
||||
__raw_get_cpu_var(touch_timestamp) = 0;
|
||||
__raw_get_cpu_var(softlockup_touch_ts) = 0;
|
||||
}
|
||||
EXPORT_SYMBOL(touch_softlockup_watchdog);
|
||||
|
||||
@@ -85,7 +85,7 @@ void touch_all_softlockup_watchdogs(void)
|
||||
|
||||
/* Cause each CPU to re-update its timestamp rather than complain */
|
||||
for_each_online_cpu(cpu)
|
||||
per_cpu(touch_timestamp, cpu) = 0;
|
||||
per_cpu(softlockup_touch_ts, cpu) = 0;
|
||||
}
|
||||
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);
|
||||
|
||||
@@ -104,28 +104,28 @@ int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
|
||||
void softlockup_tick(void)
|
||||
{
|
||||
int this_cpu = smp_processor_id();
|
||||
unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
|
||||
unsigned long print_timestamp;
|
||||
unsigned long touch_ts = per_cpu(softlockup_touch_ts, this_cpu);
|
||||
unsigned long print_ts;
|
||||
struct pt_regs *regs = get_irq_regs();
|
||||
unsigned long now;
|
||||
|
||||
/* Is detection switched off? */
|
||||
if (!per_cpu(watchdog_task, this_cpu) || softlockup_thresh <= 0) {
|
||||
if (!per_cpu(softlockup_watchdog, this_cpu) || softlockup_thresh <= 0) {
|
||||
/* Be sure we don't false trigger if switched back on */
|
||||
if (touch_timestamp)
|
||||
per_cpu(touch_timestamp, this_cpu) = 0;
|
||||
if (touch_ts)
|
||||
per_cpu(softlockup_touch_ts, this_cpu) = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
if (touch_timestamp == 0) {
|
||||
if (touch_ts == 0) {
|
||||
__touch_softlockup_watchdog();
|
||||
return;
|
||||
}
|
||||
|
||||
print_timestamp = per_cpu(print_timestamp, this_cpu);
|
||||
print_ts = per_cpu(softlockup_print_ts, this_cpu);
|
||||
|
||||
/* report at most once a second */
|
||||
if (print_timestamp == touch_timestamp || did_panic)
|
||||
if (print_ts == touch_ts || did_panic)
|
||||
return;
|
||||
|
||||
/* do not print during early bootup: */
|
||||
@@ -140,18 +140,18 @@ void softlockup_tick(void)
|
||||
* Wake up the high-prio watchdog task twice per
|
||||
* threshold timespan.
|
||||
*/
|
||||
if (now > touch_timestamp + softlockup_thresh/2)
|
||||
wake_up_process(per_cpu(watchdog_task, this_cpu));
|
||||
if (now > touch_ts + softlockup_thresh/2)
|
||||
wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
|
||||
|
||||
/* Warn about unreasonable delays: */
|
||||
if (now <= (touch_timestamp + softlockup_thresh))
|
||||
if (now <= (touch_ts + softlockup_thresh))
|
||||
return;
|
||||
|
||||
per_cpu(print_timestamp, this_cpu) = touch_timestamp;
|
||||
per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
|
||||
|
||||
spin_lock(&print_lock);
|
||||
printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %lus! [%s:%d]\n",
|
||||
this_cpu, now - touch_timestamp,
|
||||
this_cpu, now - touch_ts,
|
||||
current->comm, task_pid_nr(current));
|
||||
print_modules();
|
||||
print_irqtrace_events(current);
|
||||
@@ -209,32 +209,32 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
|
||||
switch (action) {
|
||||
case CPU_UP_PREPARE:
|
||||
case CPU_UP_PREPARE_FROZEN:
|
||||
BUG_ON(per_cpu(watchdog_task, hotcpu));
|
||||
BUG_ON(per_cpu(softlockup_watchdog, hotcpu));
|
||||
p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
|
||||
if (IS_ERR(p)) {
|
||||
printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
|
||||
return NOTIFY_BAD;
|
||||
}
|
||||
per_cpu(touch_timestamp, hotcpu) = 0;
|
||||
per_cpu(watchdog_task, hotcpu) = p;
|
||||
per_cpu(softlockup_touch_ts, hotcpu) = 0;
|
||||
per_cpu(softlockup_watchdog, hotcpu) = p;
|
||||
kthread_bind(p, hotcpu);
|
||||
break;
|
||||
case CPU_ONLINE:
|
||||
case CPU_ONLINE_FROZEN:
|
||||
wake_up_process(per_cpu(watchdog_task, hotcpu));
|
||||
wake_up_process(per_cpu(softlockup_watchdog, hotcpu));
|
||||
break;
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
case CPU_UP_CANCELED:
|
||||
case CPU_UP_CANCELED_FROZEN:
|
||||
if (!per_cpu(watchdog_task, hotcpu))
|
||||
if (!per_cpu(softlockup_watchdog, hotcpu))
|
||||
break;
|
||||
/* Unbind so it can run. Fall thru. */
|
||||
kthread_bind(per_cpu(watchdog_task, hotcpu),
|
||||
kthread_bind(per_cpu(softlockup_watchdog, hotcpu),
|
||||
cpumask_any(cpu_online_mask));
|
||||
case CPU_DEAD:
|
||||
case CPU_DEAD_FROZEN:
|
||||
p = per_cpu(watchdog_task, hotcpu);
|
||||
per_cpu(watchdog_task, hotcpu) = NULL;
|
||||
p = per_cpu(softlockup_watchdog, hotcpu);
|
||||
per_cpu(softlockup_watchdog, hotcpu) = NULL;
|
||||
kthread_stop(p);
|
||||
break;
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
@@ -32,6 +32,8 @@
|
||||
* include/linux/spinlock_api_smp.h
|
||||
*/
|
||||
#else
|
||||
#define raw_read_can_lock(l) read_can_lock(l)
|
||||
#define raw_write_can_lock(l) write_can_lock(l)
|
||||
/*
|
||||
* We build the __lock_function inlines here. They are too large for
|
||||
* inlining all over the place, but here is only one user per function
|
||||
@@ -42,49 +44,49 @@
|
||||
* towards that other CPU that it should break the lock ASAP.
|
||||
*/
|
||||
#define BUILD_LOCK_OPS(op, locktype) \
|
||||
void __lockfunc __##op##_lock(locktype##_t *lock) \
|
||||
void __lockfunc __raw_##op##_lock(locktype##_t *lock) \
|
||||
{ \
|
||||
for (;;) { \
|
||||
preempt_disable(); \
|
||||
if (likely(_raw_##op##_trylock(lock))) \
|
||||
if (likely(do_raw_##op##_trylock(lock))) \
|
||||
break; \
|
||||
preempt_enable(); \
|
||||
\
|
||||
if (!(lock)->break_lock) \
|
||||
(lock)->break_lock = 1; \
|
||||
while (!op##_can_lock(lock) && (lock)->break_lock) \
|
||||
_raw_##op##_relax(&lock->raw_lock); \
|
||||
while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
|
||||
arch_##op##_relax(&lock->raw_lock); \
|
||||
} \
|
||||
(lock)->break_lock = 0; \
|
||||
} \
|
||||
\
|
||||
unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
|
||||
unsigned long __lockfunc __raw_##op##_lock_irqsave(locktype##_t *lock) \
|
||||
{ \
|
||||
unsigned long flags; \
|
||||
\
|
||||
for (;;) { \
|
||||
preempt_disable(); \
|
||||
local_irq_save(flags); \
|
||||
if (likely(_raw_##op##_trylock(lock))) \
|
||||
if (likely(do_raw_##op##_trylock(lock))) \
|
||||
break; \
|
||||
local_irq_restore(flags); \
|
||||
preempt_enable(); \
|
||||
\
|
||||
if (!(lock)->break_lock) \
|
||||
(lock)->break_lock = 1; \
|
||||
while (!op##_can_lock(lock) && (lock)->break_lock) \
|
||||
_raw_##op##_relax(&lock->raw_lock); \
|
||||
while (!raw_##op##_can_lock(lock) && (lock)->break_lock)\
|
||||
arch_##op##_relax(&lock->raw_lock); \
|
||||
} \
|
||||
(lock)->break_lock = 0; \
|
||||
return flags; \
|
||||
} \
|
||||
\
|
||||
void __lockfunc __##op##_lock_irq(locktype##_t *lock) \
|
||||
void __lockfunc __raw_##op##_lock_irq(locktype##_t *lock) \
|
||||
{ \
|
||||
_##op##_lock_irqsave(lock); \
|
||||
_raw_##op##_lock_irqsave(lock); \
|
||||
} \
|
||||
\
|
||||
void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
|
||||
void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \
|
||||
{ \
|
||||
unsigned long flags; \
|
||||
\
|
||||
@@ -93,7 +95,7 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
|
||||
/* irq-disabling. We use the generic preemption-aware */ \
|
||||
/* function: */ \
|
||||
/**/ \
|
||||
flags = _##op##_lock_irqsave(lock); \
|
||||
flags = _raw_##op##_lock_irqsave(lock); \
|
||||
local_bh_disable(); \
|
||||
local_irq_restore(flags); \
|
||||
} \
|
||||
@@ -107,23 +109,247 @@ void __lockfunc __##op##_lock_bh(locktype##_t *lock) \
|
||||
* __[spin|read|write]_lock_irqsave()
|
||||
* __[spin|read|write]_lock_bh()
|
||||
*/
|
||||
BUILD_LOCK_OPS(spin, spinlock);
|
||||
BUILD_LOCK_OPS(spin, raw_spinlock);
|
||||
BUILD_LOCK_OPS(read, rwlock);
|
||||
BUILD_LOCK_OPS(write, rwlock);
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
|
||||
int __lockfunc _raw_spin_trylock(raw_spinlock_t *lock)
|
||||
{
|
||||
return __raw_spin_trylock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_trylock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
|
||||
int __lockfunc _raw_spin_trylock_bh(raw_spinlock_t *lock)
|
||||
{
|
||||
return __raw_spin_trylock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_trylock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK
|
||||
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
|
||||
{
|
||||
__raw_spin_lock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_lock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
|
||||
unsigned long __lockfunc _raw_spin_lock_irqsave(raw_spinlock_t *lock)
|
||||
{
|
||||
return __raw_spin_lock_irqsave(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_lock_irqsave);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
|
||||
void __lockfunc _raw_spin_lock_irq(raw_spinlock_t *lock)
|
||||
{
|
||||
__raw_spin_lock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_lock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK_BH
|
||||
void __lockfunc _raw_spin_lock_bh(raw_spinlock_t *lock)
|
||||
{
|
||||
__raw_spin_lock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_lock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK
|
||||
void __lockfunc _raw_spin_unlock(raw_spinlock_t *lock)
|
||||
{
|
||||
__raw_spin_unlock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_unlock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
|
||||
void __lockfunc _raw_spin_unlock_irqrestore(raw_spinlock_t *lock, unsigned long flags)
|
||||
{
|
||||
__raw_spin_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_unlock_irqrestore);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
|
||||
void __lockfunc _raw_spin_unlock_irq(raw_spinlock_t *lock)
|
||||
{
|
||||
__raw_spin_unlock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_unlock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
|
||||
void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock)
|
||||
{
|
||||
__raw_spin_unlock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_spin_unlock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_TRYLOCK
|
||||
int __lockfunc _raw_read_trylock(rwlock_t *lock)
|
||||
{
|
||||
return __raw_read_trylock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_trylock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK
|
||||
void __lockfunc _raw_read_lock(rwlock_t *lock)
|
||||
{
|
||||
__raw_read_lock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_lock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
|
||||
unsigned long __lockfunc _raw_read_lock_irqsave(rwlock_t *lock)
|
||||
{
|
||||
return __raw_read_lock_irqsave(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_lock_irqsave);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK_IRQ
|
||||
void __lockfunc _raw_read_lock_irq(rwlock_t *lock)
|
||||
{
|
||||
__raw_read_lock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_lock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK_BH
|
||||
void __lockfunc _raw_read_lock_bh(rwlock_t *lock)
|
||||
{
|
||||
__raw_read_lock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_lock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK
|
||||
void __lockfunc _raw_read_unlock(rwlock_t *lock)
|
||||
{
|
||||
__raw_read_unlock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_unlock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
|
||||
void __lockfunc _raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
|
||||
{
|
||||
__raw_read_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_unlock_irqrestore);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
|
||||
void __lockfunc _raw_read_unlock_irq(rwlock_t *lock)
|
||||
{
|
||||
__raw_read_unlock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_unlock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK_BH
|
||||
void __lockfunc _raw_read_unlock_bh(rwlock_t *lock)
|
||||
{
|
||||
__raw_read_unlock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_read_unlock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
|
||||
int __lockfunc _raw_write_trylock(rwlock_t *lock)
|
||||
{
|
||||
return __raw_write_trylock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_trylock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK
|
||||
void __lockfunc _raw_write_lock(rwlock_t *lock)
|
||||
{
|
||||
__raw_write_lock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_lock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
|
||||
unsigned long __lockfunc _raw_write_lock_irqsave(rwlock_t *lock)
|
||||
{
|
||||
return __raw_write_lock_irqsave(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_lock_irqsave);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
|
||||
void __lockfunc _raw_write_lock_irq(rwlock_t *lock)
|
||||
{
|
||||
__raw_write_lock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_lock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK_BH
|
||||
void __lockfunc _raw_write_lock_bh(rwlock_t *lock)
|
||||
{
|
||||
__raw_write_lock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_lock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK
|
||||
void __lockfunc _raw_write_unlock(rwlock_t *lock)
|
||||
{
|
||||
__raw_write_unlock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_unlock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
|
||||
void __lockfunc _raw_write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
|
||||
{
|
||||
__raw_write_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_unlock_irqrestore);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
|
||||
void __lockfunc _raw_write_unlock_irq(rwlock_t *lock)
|
||||
{
|
||||
__raw_write_unlock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_unlock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
|
||||
void __lockfunc _raw_write_unlock_bh(rwlock_t *lock)
|
||||
{
|
||||
__raw_write_unlock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_raw_write_unlock_bh);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_DEBUG_LOCK_ALLOC
|
||||
|
||||
void __lockfunc _spin_lock_nested(spinlock_t *lock, int subclass)
|
||||
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
|
||||
{
|
||||
preempt_disable();
|
||||
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
|
||||
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
|
||||
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_lock_nested);
|
||||
EXPORT_SYMBOL(_raw_spin_lock_nested);
|
||||
|
||||
unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
|
||||
unsigned long __lockfunc _raw_spin_lock_irqsave_nested(raw_spinlock_t *lock,
|
||||
int subclass)
|
||||
{
|
||||
unsigned long flags;
|
||||
@@ -131,247 +357,23 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock,
|
||||
local_irq_save(flags);
|
||||
preempt_disable();
|
||||
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
|
||||
LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
|
||||
_raw_spin_lock_flags, &flags);
|
||||
LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
|
||||
do_raw_spin_lock_flags, &flags);
|
||||
return flags;
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_lock_irqsave_nested);
|
||||
EXPORT_SYMBOL(_raw_spin_lock_irqsave_nested);
|
||||
|
||||
void __lockfunc _spin_lock_nest_lock(spinlock_t *lock,
|
||||
void __lockfunc _raw_spin_lock_nest_lock(raw_spinlock_t *lock,
|
||||
struct lockdep_map *nest_lock)
|
||||
{
|
||||
preempt_disable();
|
||||
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
|
||||
LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
|
||||
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_lock_nest_lock);
|
||||
EXPORT_SYMBOL(_raw_spin_lock_nest_lock);
|
||||
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_TRYLOCK
|
||||
int __lockfunc _spin_trylock(spinlock_t *lock)
|
||||
{
|
||||
return __spin_trylock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_trylock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_TRYLOCK
|
||||
int __lockfunc _read_trylock(rwlock_t *lock)
|
||||
{
|
||||
return __read_trylock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_trylock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_TRYLOCK
|
||||
int __lockfunc _write_trylock(rwlock_t *lock)
|
||||
{
|
||||
return __write_trylock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_trylock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK
|
||||
void __lockfunc _read_lock(rwlock_t *lock)
|
||||
{
|
||||
__read_lock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_lock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQSAVE
|
||||
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
|
||||
{
|
||||
return __spin_lock_irqsave(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_lock_irqsave);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK_IRQ
|
||||
void __lockfunc _spin_lock_irq(spinlock_t *lock)
|
||||
{
|
||||
__spin_lock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_lock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK_BH
|
||||
void __lockfunc _spin_lock_bh(spinlock_t *lock)
|
||||
{
|
||||
__spin_lock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_lock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK_IRQSAVE
|
||||
unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
|
||||
{
|
||||
return __read_lock_irqsave(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_lock_irqsave);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK_IRQ
|
||||
void __lockfunc _read_lock_irq(rwlock_t *lock)
|
||||
{
|
||||
__read_lock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_lock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_LOCK_BH
|
||||
void __lockfunc _read_lock_bh(rwlock_t *lock)
|
||||
{
|
||||
__read_lock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_lock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQSAVE
|
||||
unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
|
||||
{
|
||||
return __write_lock_irqsave(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_lock_irqsave);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK_IRQ
|
||||
void __lockfunc _write_lock_irq(rwlock_t *lock)
|
||||
{
|
||||
__write_lock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_lock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK_BH
|
||||
void __lockfunc _write_lock_bh(rwlock_t *lock)
|
||||
{
|
||||
__write_lock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_lock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_LOCK
|
||||
void __lockfunc _spin_lock(spinlock_t *lock)
|
||||
{
|
||||
__spin_lock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_lock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_LOCK
|
||||
void __lockfunc _write_lock(rwlock_t *lock)
|
||||
{
|
||||
__write_lock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_lock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK
|
||||
void __lockfunc _spin_unlock(spinlock_t *lock)
|
||||
{
|
||||
__spin_unlock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_unlock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK
|
||||
void __lockfunc _write_unlock(rwlock_t *lock)
|
||||
{
|
||||
__write_unlock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_unlock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK
|
||||
void __lockfunc _read_unlock(rwlock_t *lock)
|
||||
{
|
||||
__read_unlock(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_unlock);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQRESTORE
|
||||
void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
|
||||
{
|
||||
__spin_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_unlock_irqrestore);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK_IRQ
|
||||
void __lockfunc _spin_unlock_irq(spinlock_t *lock)
|
||||
{
|
||||
__spin_unlock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_unlock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_UNLOCK_BH
|
||||
void __lockfunc _spin_unlock_bh(spinlock_t *lock)
|
||||
{
|
||||
__spin_unlock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_unlock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQRESTORE
|
||||
void __lockfunc _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
|
||||
{
|
||||
__read_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_unlock_irqrestore);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK_IRQ
|
||||
void __lockfunc _read_unlock_irq(rwlock_t *lock)
|
||||
{
|
||||
__read_unlock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_unlock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_READ_UNLOCK_BH
|
||||
void __lockfunc _read_unlock_bh(rwlock_t *lock)
|
||||
{
|
||||
__read_unlock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_read_unlock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQRESTORE
|
||||
void __lockfunc _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
|
||||
{
|
||||
__write_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_unlock_irqrestore);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK_IRQ
|
||||
void __lockfunc _write_unlock_irq(rwlock_t *lock)
|
||||
{
|
||||
__write_unlock_irq(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_unlock_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_WRITE_UNLOCK_BH
|
||||
void __lockfunc _write_unlock_bh(rwlock_t *lock)
|
||||
{
|
||||
__write_unlock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_write_unlock_bh);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_INLINE_SPIN_TRYLOCK_BH
|
||||
int __lockfunc _spin_trylock_bh(spinlock_t *lock)
|
||||
{
|
||||
return __spin_trylock_bh(lock);
|
||||
}
|
||||
EXPORT_SYMBOL(_spin_trylock_bh);
|
||||
#endif
|
||||
|
||||
notrace int in_lock_functions(unsigned long addr)
|
||||
{
|
||||
/* Linker adds these: start and end of __lockfunc functions */
|
||||
|
@@ -189,10 +189,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
|
||||
!(user = find_user(who)))
|
||||
goto out_unlock; /* No processes for this user */
|
||||
|
||||
do_each_thread(g, p)
|
||||
do_each_thread(g, p) {
|
||||
if (__task_cred(p)->uid == who)
|
||||
error = set_one_prio(p, niceval, error);
|
||||
while_each_thread(g, p);
|
||||
} while_each_thread(g, p);
|
||||
if (who != cred->uid)
|
||||
free_uid(user); /* For find_user() */
|
||||
break;
|
||||
@@ -252,13 +252,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
|
||||
!(user = find_user(who)))
|
||||
goto out_unlock; /* No processes for this user */
|
||||
|
||||
do_each_thread(g, p)
|
||||
do_each_thread(g, p) {
|
||||
if (__task_cred(p)->uid == who) {
|
||||
niceval = 20 - task_nice(p);
|
||||
if (niceval > retval)
|
||||
retval = niceval;
|
||||
}
|
||||
while_each_thread(g, p);
|
||||
} while_each_thread(g, p);
|
||||
if (who != cred->uid)
|
||||
free_uid(user); /* for find_user() */
|
||||
break;
|
||||
|
@@ -1051,7 +1051,7 @@ static struct ctl_table vm_table[] = {
|
||||
.extra2 = &one_hundred,
|
||||
},
|
||||
#ifdef CONFIG_HUGETLB_PAGE
|
||||
{
|
||||
{
|
||||
.procname = "nr_hugepages",
|
||||
.data = NULL,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
@@ -1059,7 +1059,18 @@ static struct ctl_table vm_table[] = {
|
||||
.proc_handler = hugetlb_sysctl_handler,
|
||||
.extra1 = (void *)&hugetlb_zero,
|
||||
.extra2 = (void *)&hugetlb_infinity,
|
||||
},
|
||||
},
|
||||
#ifdef CONFIG_NUMA
|
||||
{
|
||||
.procname = "nr_hugepages_mempolicy",
|
||||
.data = NULL,
|
||||
.maxlen = sizeof(unsigned long),
|
||||
.mode = 0644,
|
||||
.proc_handler = &hugetlb_mempolicy_sysctl_handler,
|
||||
.extra1 = (void *)&hugetlb_zero,
|
||||
.extra2 = (void *)&hugetlb_infinity,
|
||||
},
|
||||
#endif
|
||||
{
|
||||
.procname = "hugetlb_shm_group",
|
||||
.data = &sysctl_hugetlb_shm_group,
|
||||
@@ -1120,7 +1131,8 @@ static struct ctl_table vm_table[] = {
|
||||
.data = &sysctl_max_map_count,
|
||||
.maxlen = sizeof(sysctl_max_map_count),
|
||||
.mode = 0644,
|
||||
.proc_handler = proc_dointvec
|
||||
.proc_handler = proc_dointvec,
|
||||
.extra1 = &zero,
|
||||
},
|
||||
#else
|
||||
{
|
||||
|
@@ -30,7 +30,7 @@ static LIST_HEAD(clockevents_released);
|
||||
static RAW_NOTIFIER_HEAD(clockevents_chain);
|
||||
|
||||
/* Protection for the above */
|
||||
static DEFINE_SPINLOCK(clockevents_lock);
|
||||
static DEFINE_RAW_SPINLOCK(clockevents_lock);
|
||||
|
||||
/**
|
||||
* clockevents_delta2ns - Convert a latch value (device ticks) to nanoseconds
|
||||
@@ -141,9 +141,9 @@ int clockevents_register_notifier(struct notifier_block *nb)
|
||||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
spin_lock_irqsave(&clockevents_lock, flags);
|
||||
raw_spin_lock_irqsave(&clockevents_lock, flags);
|
||||
ret = raw_notifier_chain_register(&clockevents_chain, nb);
|
||||
spin_unlock_irqrestore(&clockevents_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&clockevents_lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -185,13 +185,13 @@ void clockevents_register_device(struct clock_event_device *dev)
|
||||
BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
|
||||
BUG_ON(!dev->cpumask);
|
||||
|
||||
spin_lock_irqsave(&clockevents_lock, flags);
|
||||
raw_spin_lock_irqsave(&clockevents_lock, flags);
|
||||
|
||||
list_add(&dev->list, &clockevent_devices);
|
||||
clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
|
||||
clockevents_notify_released();
|
||||
|
||||
spin_unlock_irqrestore(&clockevents_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&clockevents_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(clockevents_register_device);
|
||||
|
||||
@@ -241,7 +241,7 @@ void clockevents_notify(unsigned long reason, void *arg)
|
||||
struct list_head *node, *tmp;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&clockevents_lock, flags);
|
||||
raw_spin_lock_irqsave(&clockevents_lock, flags);
|
||||
clockevents_do_notify(reason, arg);
|
||||
|
||||
switch (reason) {
|
||||
@@ -256,7 +256,7 @@ void clockevents_notify(unsigned long reason, void *arg)
|
||||
default:
|
||||
break;
|
||||
}
|
||||
spin_unlock_irqrestore(&clockevents_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&clockevents_lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(clockevents_notify);
|
||||
#endif
|
||||
|
@@ -31,7 +31,7 @@ static struct tick_device tick_broadcast_device;
|
||||
/* FIXME: Use cpumask_var_t. */
|
||||
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
|
||||
static DECLARE_BITMAP(tmpmask, NR_CPUS);
|
||||
static DEFINE_SPINLOCK(tick_broadcast_lock);
|
||||
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
|
||||
static int tick_broadcast_force;
|
||||
|
||||
#ifdef CONFIG_TICK_ONESHOT
|
||||
@@ -96,7 +96,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
|
||||
unsigned long flags;
|
||||
int ret = 0;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
/*
|
||||
* Devices might be registered with both periodic and oneshot
|
||||
@@ -122,7 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
|
||||
tick_broadcast_clear_oneshot(cpu);
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@@ -161,13 +161,13 @@ static void tick_do_broadcast(struct cpumask *mask)
|
||||
*/
|
||||
static void tick_do_periodic_broadcast(void)
|
||||
{
|
||||
spin_lock(&tick_broadcast_lock);
|
||||
raw_spin_lock(&tick_broadcast_lock);
|
||||
|
||||
cpumask_and(to_cpumask(tmpmask),
|
||||
cpu_online_mask, tick_get_broadcast_mask());
|
||||
tick_do_broadcast(to_cpumask(tmpmask));
|
||||
|
||||
spin_unlock(&tick_broadcast_lock);
|
||||
raw_spin_unlock(&tick_broadcast_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -212,7 +212,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
|
||||
unsigned long flags;
|
||||
int cpu, bc_stopped;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
cpu = smp_processor_id();
|
||||
td = &per_cpu(tick_cpu_device, cpu);
|
||||
@@ -263,7 +263,7 @@ static void tick_do_broadcast_on_off(unsigned long *reason)
|
||||
tick_broadcast_setup_oneshot(bc);
|
||||
}
|
||||
out:
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -299,7 +299,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
|
||||
unsigned long flags;
|
||||
unsigned int cpu = *cpup;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
bc = tick_broadcast_device.evtdev;
|
||||
cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
|
||||
@@ -309,7 +309,7 @@ void tick_shutdown_broadcast(unsigned int *cpup)
|
||||
clockevents_shutdown(bc);
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
}
|
||||
|
||||
void tick_suspend_broadcast(void)
|
||||
@@ -317,13 +317,13 @@ void tick_suspend_broadcast(void)
|
||||
struct clock_event_device *bc;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
bc = tick_broadcast_device.evtdev;
|
||||
if (bc)
|
||||
clockevents_shutdown(bc);
|
||||
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
}
|
||||
|
||||
int tick_resume_broadcast(void)
|
||||
@@ -332,7 +332,7 @@ int tick_resume_broadcast(void)
|
||||
unsigned long flags;
|
||||
int broadcast = 0;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
bc = tick_broadcast_device.evtdev;
|
||||
|
||||
@@ -351,7 +351,7 @@ int tick_resume_broadcast(void)
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
|
||||
return broadcast;
|
||||
}
|
||||
@@ -405,7 +405,7 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
|
||||
ktime_t now, next_event;
|
||||
int cpu;
|
||||
|
||||
spin_lock(&tick_broadcast_lock);
|
||||
raw_spin_lock(&tick_broadcast_lock);
|
||||
again:
|
||||
dev->next_event.tv64 = KTIME_MAX;
|
||||
next_event.tv64 = KTIME_MAX;
|
||||
@@ -443,7 +443,7 @@ again:
|
||||
if (tick_broadcast_set_event(next_event, 0))
|
||||
goto again;
|
||||
}
|
||||
spin_unlock(&tick_broadcast_lock);
|
||||
raw_spin_unlock(&tick_broadcast_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -457,7 +457,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
|
||||
unsigned long flags;
|
||||
int cpu;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
/*
|
||||
* Periodic mode does not care about the enter/exit of power
|
||||
@@ -492,7 +492,7 @@ void tick_broadcast_oneshot_control(unsigned long reason)
|
||||
}
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
@@ -563,13 +563,13 @@ void tick_broadcast_switch_to_oneshot(void)
|
||||
struct clock_event_device *bc;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
|
||||
bc = tick_broadcast_device.evtdev;
|
||||
if (bc)
|
||||
tick_broadcast_setup_oneshot(bc);
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
}
|
||||
|
||||
|
||||
@@ -581,7 +581,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
|
||||
unsigned long flags;
|
||||
unsigned int cpu = *cpup;
|
||||
|
||||
spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
|
||||
|
||||
/*
|
||||
* Clear the broadcast mask flag for the dead cpu, but do not
|
||||
@@ -589,7 +589,7 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
|
||||
*/
|
||||
cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
|
||||
|
||||
spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -34,7 +34,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
|
||||
ktime_t tick_next_period;
|
||||
ktime_t tick_period;
|
||||
int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
|
||||
DEFINE_SPINLOCK(tick_device_lock);
|
||||
static DEFINE_RAW_SPINLOCK(tick_device_lock);
|
||||
|
||||
/*
|
||||
* Debugging: see timer_list.c
|
||||
@@ -209,7 +209,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
|
||||
int cpu, ret = NOTIFY_OK;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tick_device_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_device_lock, flags);
|
||||
|
||||
cpu = smp_processor_id();
|
||||
if (!cpumask_test_cpu(cpu, newdev->cpumask))
|
||||
@@ -268,7 +268,7 @@ static int tick_check_new_device(struct clock_event_device *newdev)
|
||||
if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
|
||||
tick_oneshot_notify();
|
||||
|
||||
spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
return NOTIFY_STOP;
|
||||
|
||||
out_bc:
|
||||
@@ -278,7 +278,7 @@ out_bc:
|
||||
if (tick_check_broadcast_device(newdev))
|
||||
ret = NOTIFY_STOP;
|
||||
|
||||
spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@@ -311,7 +311,7 @@ static void tick_shutdown(unsigned int *cpup)
|
||||
struct clock_event_device *dev = td->evtdev;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tick_device_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_device_lock, flags);
|
||||
td->mode = TICKDEV_MODE_PERIODIC;
|
||||
if (dev) {
|
||||
/*
|
||||
@@ -322,7 +322,7 @@ static void tick_shutdown(unsigned int *cpup)
|
||||
clockevents_exchange_device(dev, NULL);
|
||||
td->evtdev = NULL;
|
||||
}
|
||||
spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
}
|
||||
|
||||
static void tick_suspend(void)
|
||||
@@ -330,9 +330,9 @@ static void tick_suspend(void)
|
||||
struct tick_device *td = &__get_cpu_var(tick_cpu_device);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&tick_device_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_device_lock, flags);
|
||||
clockevents_shutdown(td->evtdev);
|
||||
spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
}
|
||||
|
||||
static void tick_resume(void)
|
||||
@@ -341,7 +341,7 @@ static void tick_resume(void)
|
||||
unsigned long flags;
|
||||
int broadcast = tick_resume_broadcast();
|
||||
|
||||
spin_lock_irqsave(&tick_device_lock, flags);
|
||||
raw_spin_lock_irqsave(&tick_device_lock, flags);
|
||||
clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_RESUME);
|
||||
|
||||
if (!broadcast) {
|
||||
@@ -350,7 +350,7 @@ static void tick_resume(void)
|
||||
else
|
||||
tick_resume_oneshot();
|
||||
}
|
||||
spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
raw_spin_unlock_irqrestore(&tick_device_lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -6,7 +6,6 @@
|
||||
#define TICK_DO_TIMER_BOOT -2
|
||||
|
||||
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
|
||||
extern spinlock_t tick_device_lock;
|
||||
extern ktime_t tick_next_period;
|
||||
extern ktime_t tick_period;
|
||||
extern int tick_do_timer_cpu __read_mostly;
|
||||
|
@@ -84,7 +84,7 @@ print_active_timers(struct seq_file *m, struct hrtimer_clock_base *base,
|
||||
|
||||
next_one:
|
||||
i = 0;
|
||||
spin_lock_irqsave(&base->cpu_base->lock, flags);
|
||||
raw_spin_lock_irqsave(&base->cpu_base->lock, flags);
|
||||
|
||||
curr = base->first;
|
||||
/*
|
||||
@@ -100,13 +100,13 @@ next_one:
|
||||
|
||||
timer = rb_entry(curr, struct hrtimer, node);
|
||||
tmp = *timer;
|
||||
spin_unlock_irqrestore(&base->cpu_base->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
|
||||
|
||||
print_timer(m, timer, &tmp, i, now);
|
||||
next++;
|
||||
goto next_one;
|
||||
}
|
||||
spin_unlock_irqrestore(&base->cpu_base->lock, flags);
|
||||
raw_spin_unlock_irqrestore(&base->cpu_base->lock, flags);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -86,7 +86,7 @@ static DEFINE_SPINLOCK(table_lock);
|
||||
/*
|
||||
* Per-CPU lookup locks for fast hash lookup:
|
||||
*/
|
||||
static DEFINE_PER_CPU(spinlock_t, lookup_lock);
|
||||
static DEFINE_PER_CPU(raw_spinlock_t, tstats_lookup_lock);
|
||||
|
||||
/*
|
||||
* Mutex to serialize state changes with show-stats activities:
|
||||
@@ -238,14 +238,14 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
|
||||
/*
|
||||
* It doesnt matter which lock we take:
|
||||
*/
|
||||
spinlock_t *lock;
|
||||
raw_spinlock_t *lock;
|
||||
struct entry *entry, input;
|
||||
unsigned long flags;
|
||||
|
||||
if (likely(!timer_stats_active))
|
||||
return;
|
||||
|
||||
lock = &per_cpu(lookup_lock, raw_smp_processor_id());
|
||||
lock = &per_cpu(tstats_lookup_lock, raw_smp_processor_id());
|
||||
|
||||
input.timer = timer;
|
||||
input.start_func = startf;
|
||||
@@ -253,7 +253,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
|
||||
input.pid = pid;
|
||||
input.timer_flag = timer_flag;
|
||||
|
||||
spin_lock_irqsave(lock, flags);
|
||||
raw_spin_lock_irqsave(lock, flags);
|
||||
if (!timer_stats_active)
|
||||
goto out_unlock;
|
||||
|
||||
@@ -264,7 +264,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
|
||||
atomic_inc(&overflow_count);
|
||||
|
||||
out_unlock:
|
||||
spin_unlock_irqrestore(lock, flags);
|
||||
raw_spin_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
|
||||
static void print_name_offset(struct seq_file *m, unsigned long addr)
|
||||
@@ -348,9 +348,11 @@ static void sync_access(void)
|
||||
int cpu;
|
||||
|
||||
for_each_online_cpu(cpu) {
|
||||
spin_lock_irqsave(&per_cpu(lookup_lock, cpu), flags);
|
||||
raw_spinlock_t *lock = &per_cpu(tstats_lookup_lock, cpu);
|
||||
|
||||
raw_spin_lock_irqsave(lock, flags);
|
||||
/* nothing */
|
||||
spin_unlock_irqrestore(&per_cpu(lookup_lock, cpu), flags);
|
||||
raw_spin_unlock_irqrestore(lock, flags);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -408,7 +410,7 @@ void __init init_timer_stats(void)
|
||||
int cpu;
|
||||
|
||||
for_each_possible_cpu(cpu)
|
||||
spin_lock_init(&per_cpu(lookup_lock, cpu));
|
||||
raw_spin_lock_init(&per_cpu(tstats_lookup_lock, cpu));
|
||||
}
|
||||
|
||||
static int __init init_tstats_procfs(void)
|
||||
|
@@ -423,7 +423,7 @@ struct ring_buffer_per_cpu {
|
||||
int cpu;
|
||||
struct ring_buffer *buffer;
|
||||
spinlock_t reader_lock; /* serialize readers */
|
||||
raw_spinlock_t lock;
|
||||
arch_spinlock_t lock;
|
||||
struct lock_class_key lock_key;
|
||||
struct list_head *pages;
|
||||
struct buffer_page *head_page; /* read from head */
|
||||
@@ -998,7 +998,7 @@ rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
|
||||
cpu_buffer->buffer = buffer;
|
||||
spin_lock_init(&cpu_buffer->reader_lock);
|
||||
lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
|
||||
cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
|
||||
cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
|
||||
|
||||
bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
|
||||
GFP_KERNEL, cpu_to_node(cpu));
|
||||
@@ -2834,7 +2834,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
|
||||
int ret;
|
||||
|
||||
local_irq_save(flags);
|
||||
__raw_spin_lock(&cpu_buffer->lock);
|
||||
arch_spin_lock(&cpu_buffer->lock);
|
||||
|
||||
again:
|
||||
/*
|
||||
@@ -2923,7 +2923,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
|
||||
goto again;
|
||||
|
||||
out:
|
||||
__raw_spin_unlock(&cpu_buffer->lock);
|
||||
arch_spin_unlock(&cpu_buffer->lock);
|
||||
local_irq_restore(flags);
|
||||
|
||||
return reader;
|
||||
@@ -3286,9 +3286,9 @@ ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
|
||||
synchronize_sched();
|
||||
|
||||
spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
|
||||
__raw_spin_lock(&cpu_buffer->lock);
|
||||
arch_spin_lock(&cpu_buffer->lock);
|
||||
rb_iter_reset(iter);
|
||||
__raw_spin_unlock(&cpu_buffer->lock);
|
||||
arch_spin_unlock(&cpu_buffer->lock);
|
||||
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
|
||||
|
||||
return iter;
|
||||
@@ -3408,11 +3408,11 @@ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
|
||||
if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
|
||||
goto out;
|
||||
|
||||
__raw_spin_lock(&cpu_buffer->lock);
|
||||
arch_spin_lock(&cpu_buffer->lock);
|
||||
|
||||
rb_reset_cpu(cpu_buffer);
|
||||
|
||||
__raw_spin_unlock(&cpu_buffer->lock);
|
||||
arch_spin_unlock(&cpu_buffer->lock);
|
||||
|
||||
out:
|
||||
spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
|
||||
|
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
 */
 static int tracing_disabled = 1;

-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);

 static inline void ftrace_disable_cpu(void)
 {
 preempt_disable();
-local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }

 static inline void ftrace_enable_cpu(void)
 {
-local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 preempt_enable();
 }
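Here the per-CPU ftrace_cpu_disabled counter changes from a local_t updated with local_inc()/local_dec() to a plain int updated with __this_cpu_inc()/__this_cpu_dec(). Both helpers run with preemption disabled, so the cheaper non-atomic this_cpu operations are sufficient. The per_cpu_var() wrapper in the diff belongs to that kernel's per-CPU symbol prefixing; the sketch below, with invented names, uses the bare variable name as later kernels do:

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(int, demo_disabled);

    static inline void demo_disable_cpu(void)
    {
            preempt_disable();              /* stay on this CPU */
            __this_cpu_inc(demo_disabled);  /* non-atomic is fine while pinned */
    }

    static inline void demo_enable_cpu(void)
    {
            __this_cpu_dec(demo_disabled);
            preempt_enable();
    }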
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
 */
 static struct trace_array max_tr;

-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

 /* tracer_enabled is used to toggle activation of a tracer */
 static int tracer_enabled = 1;
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
-* This is defined as a raw_spinlock_t in order to help
+* This is defined as a arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside the update_max_tr
 * so it needs to be defined outside of the
 * CONFIG_TRACER_MAX_TRACE.
 */
-static raw_spinlock_t ftrace_max_lock =
-(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly tracing_max_latency;
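ftrace_max_lock, like several other file-scope locks later in this diff, is initialized statically, so the type change also swaps the initializer macro: __RAW_SPIN_LOCK_UNLOCKED becomes __ARCH_SPIN_LOCK_UNLOCKED and no runtime *_lock_init() call is needed. A small sketch of that definition style with an invented lock name, assuming (as update_max_tr() does) that callers already run with interrupts disabled:

    #include <linux/irqflags.h>
    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    /* Compile-time initialization of an arch spinlock. */
    static arch_spinlock_t demo_max_lock =
            (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    static void demo_swap_buffers(void)
    {
            WARN_ON_ONCE(!irqs_disabled()); /* caller's responsibility */
            arch_spin_lock(&demo_max_lock);
            /* ... swap the live and max-latency buffers ... */
            arch_spin_unlock(&demo_max_lock);
    }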
@@ -555,13 +555,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 return;

 WARN_ON_ONCE(!irqs_disabled());
-__raw_spin_lock(&ftrace_max_lock);
+arch_spin_lock(&ftrace_max_lock);

 tr->buffer = max_tr.buffer;
 max_tr.buffer = buf;

 __update_max_tr(tr, tsk, cpu);
-__raw_spin_unlock(&ftrace_max_lock);
+arch_spin_unlock(&ftrace_max_lock);
 }

 /**
@@ -581,7 +581,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 return;

 WARN_ON_ONCE(!irqs_disabled());
-__raw_spin_lock(&ftrace_max_lock);
+arch_spin_lock(&ftrace_max_lock);

 ftrace_disable_cpu();

@@ -603,7 +603,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

 __update_max_tr(tr, tsk, cpu);
-__raw_spin_unlock(&ftrace_max_lock);
+arch_spin_unlock(&ftrace_max_lock);
 }
 #endif /* CONFIG_TRACER_MAX_TRACE */

@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -915,7 +915,7 @@ static void trace_save_cmdline(struct task_struct *tsk)
 * nor do we want to disable interrupts,
 * so if we miss here, then better luck next time.
 */
-if (!__raw_spin_trylock(&trace_cmdline_lock))
+if (!arch_spin_trylock(&trace_cmdline_lock))
 return;

 idx = map_pid_to_cmdline[tsk->pid];
@@ -940,7 +940,7 @@ static void trace_save_cmdline(struct task_struct *tsk)

 memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

-__raw_spin_unlock(&trace_cmdline_lock);
+arch_spin_unlock(&trace_cmdline_lock);
 }

 void trace_find_cmdline(int pid, char comm[])
@@ -958,14 +958,14 @@ void trace_find_cmdline(int pid, char comm[])
 }

 preempt_disable();
-__raw_spin_lock(&trace_cmdline_lock);
+arch_spin_lock(&trace_cmdline_lock);
 map = map_pid_to_cmdline[pid];
 if (map != NO_CMDLINE_MAP)
 strcpy(comm, saved_cmdlines[map]);
 else
 strcpy(comm, "<...>");

-__raw_spin_unlock(&trace_cmdline_lock);
+arch_spin_unlock(&trace_cmdline_lock);
 preempt_enable();
 }
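trace_save_cmdline() keeps its best-effort behaviour through the conversion: __raw_spin_trylock() simply becomes arch_spin_trylock(), and the function still gives up silently rather than spin or disable interrupts on a hot path. A sketch of that trylock pattern with invented names:

    #include <linux/spinlock.h>

    static arch_spinlock_t demo_stat_lock =
            (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    static unsigned long demo_updates, demo_misses;

    /* Best effort: if someone else holds the lock, drop the update. */
    static void demo_record(void)
    {
            if (!arch_spin_trylock(&demo_stat_lock)) {
                    demo_misses++;          /* racy by design, informational only */
                    return;
            }
            demo_updates++;
            arch_spin_unlock(&demo_stat_lock);
    }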
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 struct ftrace_entry *entry;

 /* If we are reading the ring buffer, don't trace */
-if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 return;

 event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-static raw_spinlock_t trace_buf_lock =
-(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_buf_lock =
+(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 static u32 trace_buf[TRACE_BUF_SIZE];

 struct ftrace_event_call *call = &event_bprint;
@@ -1283,7 +1283,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)

 /* Lockdep uses trace_printk for lock tracing */
 local_irq_save(flags);
-__raw_spin_lock(&trace_buf_lock);
+arch_spin_lock(&trace_buf_lock);
 len = vbin_printf(trace_buf, TRACE_BUF_SIZE, fmt, args);

 if (len > TRACE_BUF_SIZE || len < 0)
@@ -1304,7 +1304,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 ring_buffer_unlock_commit(buffer, event);

 out_unlock:
-__raw_spin_unlock(&trace_buf_lock);
+arch_spin_unlock(&trace_buf_lock);
 local_irq_restore(flags);

 out:
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 unsigned long ip, const char *fmt, va_list args)
 {
-static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 static char trace_buf[TRACE_BUF_SIZE];

 struct ftrace_event_call *call = &event_print;
@@ -1360,7 +1360,7 @@ int trace_array_vprintk(struct trace_array *tr,

 pause_graph_tracing();
 raw_local_irq_save(irq_flags);
-__raw_spin_lock(&trace_buf_lock);
+arch_spin_lock(&trace_buf_lock);
 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);

 size = sizeof(*entry) + len + 1;
@@ -1378,7 +1378,7 @@ int trace_array_vprintk(struct trace_array *tr,
 ring_buffer_unlock_commit(buffer, event);

 out_unlock:
-__raw_spin_unlock(&trace_buf_lock);
+arch_spin_unlock(&trace_buf_lock);
 raw_local_irq_restore(irq_flags);
 unpause_graph_tracing();
 out:
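The printk-style paths above guard a static formatting buffer with an arch spinlock while interrupts are off. Note the two flavours of interrupt disabling in the diff: trace_vbprintk() uses local_irq_save() (with a comment that lockdep itself calls trace_printk()), while trace_array_vprintk() uses raw_local_irq_save(), the variant that skips the irq-flags tracing hooks and therefore cannot recurse back into the tracer. A sketch of the raw variant; the function and lock names are invented:

    #include <linux/irqflags.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    static arch_spinlock_t demo_buf_lock =
            (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
    static char demo_buf[1024];

    static void demo_emit(const char *msg)
    {
            unsigned long flags;

            /* raw_local_irq_save() disables interrupts without invoking the
             * irq-tracing hooks, so this path does not re-enter the tracer. */
            raw_local_irq_save(flags);
            arch_spin_lock(&demo_buf_lock);
            strncpy(demo_buf, msg, sizeof(demo_buf) - 1);   /* format stage */
            arch_spin_unlock(&demo_buf_lock);
            raw_local_irq_restore(flags);
    }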
@@ -2279,7 +2279,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 mutex_lock(&tracing_cpumask_update_lock);

 local_irq_disable();
-__raw_spin_lock(&ftrace_max_lock);
+arch_spin_lock(&ftrace_max_lock);
 for_each_tracing_cpu(cpu) {
 /*
 * Increase/decrease the disabled counter if we are
@@ -2294,7 +2294,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf,
 atomic_dec(&global_trace.data[cpu]->disabled);
 }
 }
-__raw_spin_unlock(&ftrace_max_lock);
+arch_spin_unlock(&ftrace_max_lock);
 local_irq_enable();

 cpumask_copy(tracing_cpumask, tracing_cpumask_new);
@@ -4307,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)

 static void __ftrace_dump(bool disable_tracing)
 {
-static raw_spinlock_t ftrace_dump_lock =
-(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_dump_lock =
+(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 /* use static because iter can be a bit big for the stack */
 static struct trace_iterator iter;
 unsigned int old_userobj;
@@ -4318,7 +4318,7 @@ static void __ftrace_dump(bool disable_tracing)

 /* only one dump */
 local_irq_save(flags);
-__raw_spin_lock(&ftrace_dump_lock);
+arch_spin_lock(&ftrace_dump_lock);
 if (dump_ran)
 goto out;

@@ -4393,7 +4393,7 @@ static void __ftrace_dump(bool disable_tracing)
 }

 out:
-__raw_spin_unlock(&ftrace_dump_lock);
+arch_spin_unlock(&ftrace_dump_lock);
 local_irq_restore(flags);
 }

@@ -4454,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
 /* Allocate the first page for all buffers */
 for_each_tracing_cpu(i) {
 global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-max_tr.data[i] = &per_cpu(max_data, i);
+max_tr.data[i] = &per_cpu(max_tr_data, i);
 }

 trace_init_cmdlines();

@@ -443,7 +443,7 @@ extern int DYN_FTRACE_TEST_NAME(void);

 extern int ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
-DECLARE_PER_CPU(local_t, ftrace_cpu_disabled);
+DECLARE_PER_CPU(int, ftrace_cpu_disabled);

 #ifdef CONFIG_FTRACE_STARTUP_TEST
 extern int trace_selftest_startup_function(struct tracer *trace,
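The header hunk updates DECLARE_PER_CPU() to match the new type of ftrace_cpu_disabled. DEFINE_PER_CPU() in exactly one .c file allocates the per-CPU storage; DECLARE_PER_CPU() in a header only makes it visible to other translation units, and the two must agree on the type, which is why both sites change together. A sketch with invented names:

    #include <linux/percpu.h>
    #include <linux/types.h>

    /* In a shared header: declaration only, no storage. */
    DECLARE_PER_CPU(int, demo_cpu_disabled);

    /* In exactly one .c file: the definition that allocates the storage. */
    DEFINE_PER_CPU(int, demo_cpu_disabled);

    /* Any user of the header can then read the current CPU's copy. */
    static inline bool demo_this_cpu_disabled(void)
    {
            return __this_cpu_read(demo_cpu_disabled) != 0;
    }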
@@ -71,10 +71,10 @@ u64 notrace trace_clock(void)
 /* keep prev_time and lock in the same cacheline. */
 static struct {
 u64 prev_time;
-raw_spinlock_t lock;
+arch_spinlock_t lock;
 } trace_clock_struct ____cacheline_aligned_in_smp =
 {
-.lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED,
+.lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
 };

 u64 notrace trace_clock_global(void)
@@ -94,7 +94,7 @@ u64 notrace trace_clock_global(void)
 if (unlikely(in_nmi()))
 goto out;

-__raw_spin_lock(&trace_clock_struct.lock);
+arch_spin_lock(&trace_clock_struct.lock);

 /*
 * TODO: if this happens often then maybe we should reset
@@ -106,7 +106,7 @@ u64 notrace trace_clock_global(void)

 trace_clock_struct.prev_time = now;

-__raw_spin_unlock(&trace_clock_struct.lock);
+arch_spin_unlock(&trace_clock_struct.lock);

 out:
 raw_local_irq_restore(flags);
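trace_clock_global() serializes through a lock that is deliberately packed into the same ____cacheline_aligned_in_smp struct as the prev_time it protects, so acquiring the lock pulls the data into cache in the same transfer; the hunks only change the lock's type and initializer. A sketch of that layout, roughly mirroring the intent of trace_clock_global() (monotonic global timestamps) with invented names, and assuming the caller has already disabled interrupts as the real code does:

    #include <linux/cache.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Keep the timestamp and the lock that guards it on one cacheline. */
    static struct {
            u64             prev_time;
            arch_spinlock_t lock;
    } demo_clock ____cacheline_aligned_in_smp = {
            .lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED,
    };

    static u64 demo_clock_monotonic(u64 now)
    {
            arch_spin_lock(&demo_clock.lock);
            if (now < demo_clock.prev_time)
                    now = demo_clock.prev_time;     /* never step backwards */
            demo_clock.prev_time = now;
            arch_spin_unlock(&demo_clock.lock);
            return now;
    }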
@@ -187,7 +187,7 @@ static int __trace_graph_entry(struct trace_array *tr,
 struct ring_buffer *buffer = tr->buffer;
 struct ftrace_graph_ent_entry *entry;

-if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 return 0;

 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
@@ -251,7 +251,7 @@ static void __trace_graph_return(struct trace_array *tr,
 struct ring_buffer *buffer = tr->buffer;
 struct ftrace_graph_ret_entry *entry;

-if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 return;

 event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
@@ -20,10 +20,10 @@

 #define BTS_BUFFER_SIZE (1 << 13)

-static DEFINE_PER_CPU(struct bts_tracer *, tracer);
-static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], buffer);
+static DEFINE_PER_CPU(struct bts_tracer *, hwb_tracer);
+static DEFINE_PER_CPU(unsigned char[BTS_BUFFER_SIZE], hwb_buffer);

-#define this_tracer per_cpu(tracer, smp_processor_id())
+#define this_tracer per_cpu(hwb_tracer, smp_processor_id())

 static int trace_hw_branches_enabled __read_mostly;
 static int trace_hw_branches_suspended __read_mostly;
@@ -32,12 +32,13 @@ static struct trace_array *hw_branch_trace __read_mostly;

 static void bts_trace_init_cpu(int cpu)
 {
-per_cpu(tracer, cpu) =
-ds_request_bts_cpu(cpu, per_cpu(buffer, cpu), BTS_BUFFER_SIZE,
-NULL, (size_t)-1, BTS_KERNEL);
+per_cpu(hwb_tracer, cpu) =
+ds_request_bts_cpu(cpu, per_cpu(hwb_buffer, cpu),
+BTS_BUFFER_SIZE, NULL, (size_t)-1,
+BTS_KERNEL);

-if (IS_ERR(per_cpu(tracer, cpu)))
-per_cpu(tracer, cpu) = NULL;
+if (IS_ERR(per_cpu(hwb_tracer, cpu)))
+per_cpu(hwb_tracer, cpu) = NULL;
 }

 static int bts_trace_init(struct trace_array *tr)
@@ -51,7 +52,7 @@ static int bts_trace_init(struct trace_array *tr)
 for_each_online_cpu(cpu) {
 bts_trace_init_cpu(cpu);

-if (likely(per_cpu(tracer, cpu)))
+if (likely(per_cpu(hwb_tracer, cpu)))
 trace_hw_branches_enabled = 1;
 }
 trace_hw_branches_suspended = 0;
@@ -67,9 +68,9 @@ static void bts_trace_reset(struct trace_array *tr)

 get_online_cpus();
 for_each_online_cpu(cpu) {
-if (likely(per_cpu(tracer, cpu))) {
-ds_release_bts(per_cpu(tracer, cpu));
-per_cpu(tracer, cpu) = NULL;
+if (likely(per_cpu(hwb_tracer, cpu))) {
+ds_release_bts(per_cpu(hwb_tracer, cpu));
+per_cpu(hwb_tracer, cpu) = NULL;
 }
 }
 trace_hw_branches_enabled = 0;
@@ -83,8 +84,8 @@ static void bts_trace_start(struct trace_array *tr)

 get_online_cpus();
 for_each_online_cpu(cpu)
-if (likely(per_cpu(tracer, cpu)))
-ds_resume_bts(per_cpu(tracer, cpu));
+if (likely(per_cpu(hwb_tracer, cpu)))
+ds_resume_bts(per_cpu(hwb_tracer, cpu));
 trace_hw_branches_suspended = 0;
 put_online_cpus();
 }
@@ -95,8 +96,8 @@ static void bts_trace_stop(struct trace_array *tr)

 get_online_cpus();
 for_each_online_cpu(cpu)
-if (likely(per_cpu(tracer, cpu)))
-ds_suspend_bts(per_cpu(tracer, cpu));
+if (likely(per_cpu(hwb_tracer, cpu)))
+ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 trace_hw_branches_suspended = 1;
 put_online_cpus();
 }
@@ -114,16 +115,16 @@ static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb,
 bts_trace_init_cpu(cpu);

 if (trace_hw_branches_suspended &&
-likely(per_cpu(tracer, cpu)))
-ds_suspend_bts(per_cpu(tracer, cpu));
+likely(per_cpu(hwb_tracer, cpu)))
+ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 }
 break;

 case CPU_DOWN_PREPARE:
 /* The notification is sent with interrupts enabled. */
-if (likely(per_cpu(tracer, cpu))) {
-ds_release_bts(per_cpu(tracer, cpu));
-per_cpu(tracer, cpu) = NULL;
+if (likely(per_cpu(hwb_tracer, cpu))) {
+ds_release_bts(per_cpu(hwb_tracer, cpu));
+per_cpu(hwb_tracer, cpu) = NULL;
 }
 }

@@ -258,8 +259,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)

 get_online_cpus();
 for_each_online_cpu(cpu)
-if (likely(per_cpu(tracer, cpu)))
-ds_suspend_bts(per_cpu(tracer, cpu));
+if (likely(per_cpu(hwb_tracer, cpu)))
+ds_suspend_bts(per_cpu(hwb_tracer, cpu));
 /*
 * We need to collect the trace on the respective cpu since ftrace
 * implicitly adds the record for the current cpu.
@@ -268,8 +269,8 @@ static void trace_bts_prepare(struct trace_iterator *iter)
 on_each_cpu(trace_bts_cpu, iter->tr, 1);

 for_each_online_cpu(cpu)
-if (likely(per_cpu(tracer, cpu)))
-ds_resume_bts(per_cpu(tracer, cpu));
+if (likely(per_cpu(hwb_tracer, cpu)))
+ds_resume_bts(per_cpu(hwb_tracer, cpu));
 put_online_cpus();
 }
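The hw-branch tracer hunks are pure renames: the per-CPU variables tracer and buffer become hwb_tracer and hwb_buffer (and the ds_request_bts_cpu() call is re-wrapped to fit the longer names). Per-CPU symbols effectively live in one shared namespace, so very generic names risk colliding with other symbols, and giving them a subsystem prefix avoids that; this appears to be preparation for the flattened per-CPU symbol naming used elsewhere in this merge. A sketch of the naming convention with invented identifiers:

    #include <linux/percpu.h>
    #include <linux/string.h>

    struct demo_bts;                        /* opaque, for illustration only */

    /* Prefix per-CPU symbols with their subsystem, as hwb_tracer/hwb_buffer do. */
    static DEFINE_PER_CPU(struct demo_bts *, hwb_demo_tracer);
    static DEFINE_PER_CPU(unsigned char[1 << 13], hwb_demo_buffer);

    static void demo_forget_cpu(int cpu)
    {
            /* per_cpu() selects a particular CPU's instance by name + index. */
            per_cpu(hwb_demo_tracer, cpu) = NULL;
            memset(per_cpu(hwb_demo_buffer, cpu), 0,
                   sizeof(per_cpu(hwb_demo_buffer, cpu)));
    }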
@@ -28,8 +28,8 @@ static int wakeup_current_cpu;
 static unsigned wakeup_prio = -1;
 static int wakeup_rt;

-static raw_spinlock_t wakeup_lock =
-(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t wakeup_lock =
+(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

 static void __wakeup_reset(struct trace_array *tr);

@@ -143,7 +143,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
 goto out;

 local_irq_save(flags);
-__raw_spin_lock(&wakeup_lock);
+arch_spin_lock(&wakeup_lock);

 /* We could race with grabbing wakeup_lock */
 if (unlikely(!tracer_enabled || next != wakeup_task))
@@ -169,7 +169,7 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,

 out_unlock:
 __wakeup_reset(wakeup_trace);
-__raw_spin_unlock(&wakeup_lock);
+arch_spin_unlock(&wakeup_lock);
 local_irq_restore(flags);
 out:
 atomic_dec(&wakeup_trace->data[cpu]->disabled);
@@ -193,9 +193,9 @@ static void wakeup_reset(struct trace_array *tr)
 tracing_reset_online_cpus(tr);

 local_irq_save(flags);
-__raw_spin_lock(&wakeup_lock);
+arch_spin_lock(&wakeup_lock);
 __wakeup_reset(tr);
-__raw_spin_unlock(&wakeup_lock);
+arch_spin_unlock(&wakeup_lock);
 local_irq_restore(flags);
 }

@@ -225,7 +225,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 goto out;

 /* interrupts should be off from try_to_wake_up */
-__raw_spin_lock(&wakeup_lock);
+arch_spin_lock(&wakeup_lock);

 /* check for races. */
 if (!tracer_enabled || p->prio >= wakeup_prio)
@@ -255,7 +255,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success)
 trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc);

 out_locked:
-__raw_spin_unlock(&wakeup_lock);
+arch_spin_unlock(&wakeup_lock);
 out:
 atomic_dec(&wakeup_trace->data[cpu]->disabled);
 }
@@ -67,7 +67,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)

 /* Don't allow flipping of max traces now */
 local_irq_save(flags);
-__raw_spin_lock(&ftrace_max_lock);
+arch_spin_lock(&ftrace_max_lock);

 cnt = ring_buffer_entries(tr->buffer);

@@ -85,7 +85,7 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 break;
 }
 tracing_on();
-__raw_spin_unlock(&ftrace_max_lock);
+arch_spin_unlock(&ftrace_max_lock);
 local_irq_restore(flags);

 if (count)
@@ -27,8 +27,8 @@ static struct stack_trace max_stack_trace = {
 };

 static unsigned long max_stack_size;
-static raw_spinlock_t max_stack_lock =
-(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t max_stack_lock =
+(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

 static int stack_trace_disabled __read_mostly;
 static DEFINE_PER_CPU(int, trace_active);
@@ -54,7 +54,7 @@ static inline void check_stack(void)
 return;

 local_irq_save(flags);
-__raw_spin_lock(&max_stack_lock);
+arch_spin_lock(&max_stack_lock);

 /* a race could have already updated it */
 if (this_size <= max_stack_size)
@@ -103,7 +103,7 @@ static inline void check_stack(void)
 }

 out:
-__raw_spin_unlock(&max_stack_lock);
+arch_spin_unlock(&max_stack_lock);
 local_irq_restore(flags);
 }

@@ -171,9 +171,9 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 return ret;

 local_irq_save(flags);
-__raw_spin_lock(&max_stack_lock);
+arch_spin_lock(&max_stack_lock);
 *ptr = val;
-__raw_spin_unlock(&max_stack_lock);
+arch_spin_unlock(&max_stack_lock);
 local_irq_restore(flags);

 return count;
@@ -207,7 +207,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
 local_irq_disable();
-__raw_spin_lock(&max_stack_lock);
+arch_spin_lock(&max_stack_lock);

 if (*pos == 0)
 return SEQ_START_TOKEN;
@@ -217,7 +217,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)

 static void t_stop(struct seq_file *m, void *p)
 {
-__raw_spin_unlock(&max_stack_lock);
+arch_spin_unlock(&max_stack_lock);
 local_irq_enable();
 }
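The stack-tracer hunks end at the seq_file iterator: t_start() disables interrupts and takes max_stack_lock, and the matching t_stop() releases both, so the whole ->start/->next/->show/->stop walk over the recorded stack sees one consistent snapshot. A simplified sketch of that pairing (names invented); it relies on the seq_file core's guarantee that every ->start is eventually followed by ->stop:

    #include <linux/irqflags.h>
    #include <linux/seq_file.h>
    #include <linux/spinlock.h>

    static arch_spinlock_t demo_snapshot_lock =
            (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    /* Lock in ->start ... */
    static void *demo_seq_start(struct seq_file *m, loff_t *pos)
    {
            local_irq_disable();
            arch_spin_lock(&demo_snapshot_lock);
            return *pos == 0 ? SEQ_START_TOKEN : NULL;
    }

    /* ... unlock in ->stop, which seq_file always calls after ->start. */
    static void demo_seq_stop(struct seq_file *m, void *p)
    {
            arch_spin_unlock(&demo_snapshot_lock);
            local_irq_enable();
    }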