Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
  clockevents: Convert to raw_spinlock
  clockevents: Make tick_device_lock static
  debugobjects: Convert to raw_spinlocks
  perf_event: Convert to raw_spinlock
  hrtimers: Convert to raw_spinlocks
  genirq: Convert irq_desc.lock to raw_spinlock
  smp: Convert smplocks to raw_spinlocks
  rtmutes: Convert rtmutex.lock to raw_spinlock
  sched: Convert pi_lock to raw_spinlock
  sched: Convert cpupri lock to raw_spinlock
  sched: Convert rt_runtime_lock to raw_spinlock
  sched: Convert rq->lock to raw_spinlock
  plist: Make plist debugging raw_spinlock aware
  bkl: Fixup core_lock fallout
  locking: Cleanup the name space completely
  locking: Further name space cleanups
  alpha: Fix fallout from locking changes
  locking: Implement new raw_spinlock
  locking: Convert raw_rwlock functions to arch_rwlock
  locking: Convert raw_rwlock to arch_rwlock
  ...
 kernel/smp.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
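The theme of this series is mechanical but important: on PREEMPT_RT, a plain spinlock_t can be turned into a sleeping lock, while raw_spinlock_t always keeps true spinning semantics. Core infrastructure such as the smp_call_function() machinery must never sleep, so its locks move to the raw type, as the diff below shows. A minimal sketch of the converted pattern, using an illustrative name (example_q is not from kernel/smp.c):

static struct {
	struct list_head	queue;
	raw_spinlock_t		lock;	/* keeps spinning semantics even on PREEMPT_RT */
} example_q = {
	.queue	= LIST_HEAD_INIT(example_q.queue),
	.lock	= __RAW_SPIN_LOCK_UNLOCKED(example_q.lock),
};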
@@ -16,11 +16,11 @@ static DEFINE_PER_CPU(struct call_single_queue, call_single_queue);
 
 static struct {
 	struct list_head	queue;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 } call_function __cacheline_aligned_in_smp =
 	{
 		.queue		= LIST_HEAD_INIT(call_function.queue),
-		.lock		= __SPIN_LOCK_UNLOCKED(call_function.lock),
+		.lock		= __RAW_SPIN_LOCK_UNLOCKED(call_function.lock),
 	};
 
 enum {
@@ -35,7 +35,7 @@ struct call_function_data {
 
 struct call_single_queue {
 	struct list_head	list;
-	spinlock_t		lock;
+	raw_spinlock_t		lock;
 };
 
 static DEFINE_PER_CPU(struct call_function_data, cfd_data);
@@ -80,7 +80,7 @@ static int __cpuinit init_call_single_data(void)
 	for_each_possible_cpu(i) {
 		struct call_single_queue *q = &per_cpu(call_single_queue, i);
 
-		spin_lock_init(&q->lock);
+		raw_spin_lock_init(&q->lock);
 		INIT_LIST_HEAD(&q->list);
 	}
 
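For locks initialized at runtime rather than statically, the conversion is a one-to-one swap of spin_lock_init() for raw_spin_lock_init(), as in the per-CPU loop above. The same idea as a standalone sketch (example_init_queue is hypothetical):

static void example_init_queue(struct call_single_queue *q)
{
	raw_spin_lock_init(&q->lock);	/* runtime counterpart of __RAW_SPIN_LOCK_UNLOCKED */
	INIT_LIST_HEAD(&q->list);
}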
@@ -141,10 +141,10 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
 	unsigned long flags;
 	int ipi;
 
-	spin_lock_irqsave(&dst->lock, flags);
+	raw_spin_lock_irqsave(&dst->lock, flags);
 	ipi = list_empty(&dst->list);
 	list_add_tail(&data->list, &dst->list);
-	spin_unlock_irqrestore(&dst->lock, flags);
+	raw_spin_unlock_irqrestore(&dst->lock, flags);
 
 	/*
 	 * The list addition should be visible before sending the IPI
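generic_exec_single() can be reached with interrupts either enabled or disabled, hence the _irqsave/_irqrestore variants: they stash the caller's interrupt state in flags and restore it on unlock. A sketch of that pattern (example_push is illustrative):

static void example_push(struct call_single_queue *q, struct call_single_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&q->lock, flags);	/* safe whatever the caller's IRQ state */
	list_add_tail(&data->list, &q->list);
	raw_spin_unlock_irqrestore(&q->lock, flags);
}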
@@ -201,9 +201,9 @@ void generic_smp_call_function_interrupt(void)
 		refs = atomic_dec_return(&data->refs);
 		WARN_ON(refs < 0);
 		if (!refs) {
-			spin_lock(&call_function.lock);
+			raw_spin_lock(&call_function.lock);
 			list_del_rcu(&data->csd.list);
-			spin_unlock(&call_function.lock);
+			raw_spin_unlock(&call_function.lock);
 		}
 
 		if (refs)
@@ -229,9 +229,9 @@ void generic_smp_call_function_single_interrupt(void)
 	 */
 	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
 
-	spin_lock(&q->lock);
+	raw_spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
-	spin_unlock(&q->lock);
+	raw_spin_unlock(&q->lock);
 
 	while (!list_empty(&list)) {
 		struct call_single_data *data;
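The IPI handlers run in hardirq context with interrupts already disabled, so the plain raw_spin_lock()/raw_spin_unlock() variants suffice there. Note the drain idiom above: the queue is detached under the lock with list_replace_init() and then walked lock-free. A sketch (example_drain is hypothetical):

static void example_drain(struct call_single_queue *q)
{
	LIST_HEAD(list);

	raw_spin_lock(&q->lock);		/* hardirq context: IRQs already off */
	list_replace_init(&q->list, &list);
	raw_spin_unlock(&q->lock);

	/* entries on 'list' are now private and can be processed unlocked */
}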
@@ -448,14 +448,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	cpumask_clear_cpu(this_cpu, data->cpumask);
 	atomic_set(&data->refs, cpumask_weight(data->cpumask));
 
-	spin_lock_irqsave(&call_function.lock, flags);
+	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
 	 * observing the entry in generic_smp_call_function_interrupt()
 	 * will not miss any other list entries:
 	 */
 	list_add_rcu(&data->csd.list, &call_function.queue);
-	spin_unlock_irqrestore(&call_function.lock, flags);
+	raw_spin_unlock_irqrestore(&call_function.lock, flags);
 
 	/*
 	 * Make the list addition visible before sending the ipi.
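call_function.queue is a writer-locked, RCU-read list: updaters serialize on the raw spinlock while generic_smp_call_function_interrupt() walks the queue under RCU, which is why list_add_rcu()/list_del_rcu() are paired with the lock. A sketch of the writer side (example_publish is illustrative; call_function is the structure defined above):

static void example_publish(struct call_function_data *data)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&call_function.lock, flags);
	/* insert at the head so concurrent RCU readers miss no later entries */
	list_add_rcu(&data->csd.list, &call_function.queue);
	raw_spin_unlock_irqrestore(&call_function.lock, flags);
}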
@@ -500,20 +500,20 @@ EXPORT_SYMBOL(smp_call_function);
 
 void ipi_call_lock(void)
 {
-	spin_lock(&call_function.lock);
+	raw_spin_lock(&call_function.lock);
 }
 
 void ipi_call_unlock(void)
 {
-	spin_unlock(&call_function.lock);
+	raw_spin_unlock(&call_function.lock);
 }
 
 void ipi_call_lock_irq(void)
 {
-	spin_lock_irq(&call_function.lock);
+	raw_spin_lock_irq(&call_function.lock);
 }
 
 void ipi_call_unlock_irq(void)
 {
-	spin_unlock_irq(&call_function.lock);
+	raw_spin_unlock_irq(&call_function.lock);
 }