Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
  rtmutex: Add missing rcu_read_unlock() in debug_rt_mutex_print_deadlock()
  lockdep: Comment all warnings
  lib: atomic64: Change the type of local lock to raw_spinlock_t
  locking, lib/atomic64: Annotate atomic64_lock::lock as raw
  locking, x86, iommu: Annotate qi->q_lock as raw
  locking, x86, iommu: Annotate irq_2_ir_lock as raw
  locking, x86, iommu: Annotate iommu->register_lock as raw
  locking, dma, ipu: Annotate bank_lock as raw
  locking, ARM: Annotate low level hw locks as raw
  locking, drivers/dca: Annotate dca_lock as raw
  locking, powerpc: Annotate uic->lock as raw
  locking, x86: mce: Annotate cmci_discover_lock as raw
  locking, ACPI: Annotate c3_lock as raw
  locking, oprofile: Annotate oprofilefs lock as raw
  locking, video: Annotate vga console lock as raw
  locking, latencytop: Annotate latency_lock as raw
  locking, timer_stats: Annotate table_lock as raw
  locking, rwsem: Annotate inner lock as raw
  locking, semaphores: Annotate inner lock as raw
  locking, sched: Annotate thread_group_cputimer as raw
  ...

Fix up conflicts in kernel/posix-cpu-timers.c manually: making
cputimer->cputime a raw lock conflicted with the ABBA fix in commit
bcd5cff721 ("cputimer: Cure lock inversion").
Linus Torvalds
2011-10-26 16:17:32 +02:00
61 changed files with 670 additions and 573 deletions
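Most of the commits in this series follow the same mechanical pattern that the drivers/acpi hunks below illustrate: a lock that must keep spinning even on PREEMPT_RT (where ordinary spinlocks become sleeping locks) is converted from spinlock_t to raw_spinlock_t, and its lock/unlock calls gain the raw_ prefix. As a rough sketch of that pattern only, using a made-up example_lock, example_count and example_critical_section that are not part of any of the merged commits:

#include <linux/spinlock.h>

/* Before: static DEFINE_SPINLOCK(example_lock);  -- becomes a sleeping lock on RT */
/* After: a raw spinlock, which always busy-waits with preemption disabled         */
static DEFINE_RAW_SPINLOCK(example_lock);

static int example_count;

static void example_critical_section(void)
{
        raw_spin_lock(&example_lock);           /* was: spin_lock(&example_lock)   */
        example_count++;
        raw_spin_unlock(&example_lock);         /* was: spin_unlock(&example_lock) */
}

On a non-RT kernel the two variants behave identically; the distinction only matters once spinlock_t turns into a sleeping rtmutex under PREEMPT_RT, which is why locks taken from the idle path, low-level interrupt handling, or inside other raw locks need the explicit annotation.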

drivers/acpi/processor_idle.c

@@ -852,7 +852,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 }
 
 static int c3_cpu_count;
-static DEFINE_SPINLOCK(c3_lock);
+static DEFINE_RAW_SPINLOCK(c3_lock);
 
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
@@ -930,12 +930,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
          * without doing anything.
          */
         if (pr->flags.bm_check && pr->flags.bm_control) {
-                spin_lock(&c3_lock);
+                raw_spin_lock(&c3_lock);
                 c3_cpu_count++;
                 /* Disable bus master arbitration when all CPUs are in C3 */
                 if (c3_cpu_count == num_online_cpus())
                         acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
-                spin_unlock(&c3_lock);
+                raw_spin_unlock(&c3_lock);
         } else if (!pr->flags.bm_check) {
                 ACPI_FLUSH_CPU_CACHE();
         }
@@ -944,10 +944,10 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
         /* Re-enable bus master arbitration */
         if (pr->flags.bm_check && pr->flags.bm_control) {
-                spin_lock(&c3_lock);
+                raw_spin_lock(&c3_lock);
                 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                 c3_cpu_count--;
-                spin_unlock(&c3_lock);
+                raw_spin_unlock(&c3_lock);
         }
         kt2 = ktime_get_real();
         idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));