locking, ARM: Annotate low level hw locks as raw
Annotate the low level hardware locks which must not be preempted.

In mainline this change documents the low level nature of the lock -
otherwise there's no functional difference. Lockdep and Sparse
checking will work as usual.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit bd31b85960
parent a1741e7fcb
committed by Ingo Molnar
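
On a mainline kernel raw_spinlock_t and spinlock_t behave identically; the raw annotation matters on PREEMPT_RT, where an ordinary spinlock_t is substituted by a sleeping lock while a raw_spinlock_t keeps the classic busy-wait, preemption-off semantics that low level hardware paths require. The conversion applied below is purely mechanical; a minimal before/after sketch (the my_lock/my_hw_op names are illustrative, not from this patch):

	#include <linux/spinlock.h>

	/* Before: on PREEMPT_RT a spinlock_t becomes a sleeping lock. */
	static DEFINE_SPINLOCK(my_lock);

	/* After: a raw_spinlock_t always busy-waits with interrupts and
	 * preemption off, which hardware maintenance paths require. */
	static DEFINE_RAW_SPINLOCK(my_raw_lock);

	static void my_hw_op(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&my_raw_lock, flags);
		/* touch hardware registers; must not be preempted here */
		raw_spin_unlock_irqrestore(&my_raw_lock, flags);
	}
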
arch/arm/mm/cache-l2x0.c
@@ -26,7 +26,7 @@
 #define CACHE_LINE_SIZE		32
 
 static void __iomem *l2x0_base;
-static DEFINE_SPINLOCK(l2x0_lock);
+static DEFINE_RAW_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
 static uint32_t l2x0_size;
 
@@ -115,9 +115,9 @@ static void l2x0_cache_sync(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __l2x0_flush_all(void)
@@ -134,9 +134,9 @@ static void l2x0_flush_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_all(void)
@@ -144,11 +144,11 @@ static void l2x0_clean_all(void)
 	unsigned long flags;
 
 	/* clean all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_all(void)
@@ -156,13 +156,13 @@ static void l2x0_inv_all(void)
 	unsigned long flags;
 
 	/* invalidate all ways */
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	/* Invalidating when L2 is enabled is a nono */
 	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_inv_range(unsigned long start, unsigned long end)
@@ -170,7 +170,7 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	if (start & (CACHE_LINE_SIZE - 1)) {
 		start &= ~(CACHE_LINE_SIZE - 1);
 		debug_writel(0x03);
@@ -195,13 +195,13 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_clean_range(unsigned long start, unsigned long end)
@@ -214,7 +214,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -225,13 +225,13 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 		}
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
 		}
 	}
 	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_flush_range(unsigned long start, unsigned long end)
@@ -244,7 +244,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		return;
 	}
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
 		unsigned long blk_end = start + min(end - start, 4096UL);
@@ -257,24 +257,24 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 		debug_writel(0x00);
 
 		if (blk_end < end) {
-			spin_unlock_irqrestore(&l2x0_lock, flags);
-			spin_lock_irqsave(&l2x0_lock, flags);
+			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
+			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
 	}
 	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
 	cache_sync();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void l2x0_disable(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&l2x0_lock, flags);
+	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
 	dsb();
-	spin_unlock_irqrestore(&l2x0_lock, flags);
+	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
 static void __init l2x0_unlock(__u32 cache_id)
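
A detail worth noting in the range operations above: l2x0_inv_range(), l2x0_clean_range() and l2x0_flush_range() drop and immediately re-take the lock after each 4 KiB block, so interrupts are only held off for a bounded number of cache lines at a time. A condensed sketch of that latency-bounding pattern, under the assumption that process_line() stands in for the per-line maintenance write:

	static void l2x0_range_op(unsigned long start, unsigned long end)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&l2x0_lock, flags);
		while (start < end) {
			unsigned long blk_end = start + min(end - start, 4096UL);

			while (start < blk_end) {
				process_line(start);	/* illustrative helper */
				start += CACHE_LINE_SIZE;
			}

			if (blk_end < end) {
				/* Open a window for pending interrupts
				 * between 4 KiB blocks. */
				raw_spin_unlock_irqrestore(&l2x0_lock, flags);
				raw_spin_lock_irqsave(&l2x0_lock, flags);
			}
		}
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}
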
arch/arm/mm/context.c
@@ -16,7 +16,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
-static DEFINE_SPINLOCK(cpu_asid_lock);
+static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 unsigned int cpu_last_asid = ASID_FIRST_VERSION;
 #ifdef CONFIG_SMP
 DEFINE_PER_CPU(struct mm_struct *, current_mm);
@@ -31,7 +31,7 @@ DEFINE_PER_CPU(struct mm_struct *, current_mm);
 void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 {
 	mm->context.id = 0;
-	spin_lock_init(&mm->context.id_lock);
+	raw_spin_lock_init(&mm->context.id_lock);
 }
 
 static void flush_context(void)
@@ -58,7 +58,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 	 * the broadcast. This function is also called via IPI so the
 	 * mm->context.id_lock has to be IRQ-safe.
 	 */
-	spin_lock_irqsave(&mm->context.id_lock, flags);
+	raw_spin_lock_irqsave(&mm->context.id_lock, flags);
 	if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
 		/*
 		 * Old version of ASID found. Set the new one and
@@ -67,7 +67,7 @@ static void set_mm_context(struct mm_struct *mm, unsigned int asid)
 		mm->context.id = asid;
 		cpumask_clear(mm_cpumask(mm));
 	}
-	spin_unlock_irqrestore(&mm->context.id_lock, flags);
+	raw_spin_unlock_irqrestore(&mm->context.id_lock, flags);
 
 	/*
 	 * Set the mm_cpumask(mm) bit for the current CPU.
@@ -117,7 +117,7 @@ void __new_context(struct mm_struct *mm)
 {
 	unsigned int asid;
 
-	spin_lock(&cpu_asid_lock);
+	raw_spin_lock(&cpu_asid_lock);
 #ifdef CONFIG_SMP
 	/*
 	 * Check the ASID again, in case the change was broadcast from
@@ -125,7 +125,7 @@ void __new_context(struct mm_struct *mm)
 	 */
 	if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
 		cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-		spin_unlock(&cpu_asid_lock);
+		raw_spin_unlock(&cpu_asid_lock);
 		return;
 	}
 #endif
@@ -153,5 +153,5 @@ void __new_context(struct mm_struct *mm)
 	}
 
 	set_mm_context(mm, asid);
-	spin_unlock(&cpu_asid_lock);
+	raw_spin_unlock(&cpu_asid_lock);
 }
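
The _irqsave/_irqrestore variants are required here because, as the comment in set_mm_context() notes, the function is also called via IPI, so mm->context.id_lock must be IRQ-safe as well as raw. A minimal sketch of that constraint (my_asid_lock and my_update_asid() are illustrative names, not kernel APIs):

	static DEFINE_RAW_SPINLOCK(my_asid_lock);

	static void my_update_asid(void)
	{
		unsigned long flags;

		/* Reachable from both task context and a hard-IRQ IPI
		 * handler, so interrupts must be disabled while held. */
		raw_spin_lock_irqsave(&my_asid_lock, flags);
		/* update the ASID / generation counter here */
		raw_spin_unlock_irqrestore(&my_asid_lock, flags);
	}
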
arch/arm/mm/copypage-v4mc.c
@@ -30,7 +30,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * ARMv4 mini-dcache optimised copy_user_highpage
@@ -76,14 +76,14 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
-	spin_lock(&minicache_lock);
+	raw_spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
-	spin_unlock(&minicache_lock);
+	raw_spin_unlock(&minicache_lock);
 
 	kunmap_atomic(kto, KM_USER1);
 }
arch/arm/mm/copypage-v6.c
@@ -27,7 +27,7 @@
 #define from_address	(0xffff8000)
 #define to_address	(0xffffc000)
 
-static DEFINE_SPINLOCK(v6_lock);
+static DEFINE_RAW_SPINLOCK(v6_lock);
 
 /*
  * Copy the user page.  No aliasing to deal with so we can just
@@ -88,7 +88,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 	 * Now copy the page using the same cache colour as the
 	 * pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
@@ -101,7 +101,7 @@ static void v6_copy_user_highpage_aliasing(struct page *to,
 
 	copy_page((void *)kto, (void *)kfrom);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 /*
@@ -121,13 +121,13 @@ static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vad
 	 * Now clear the page using the same cache colour as
 	 * the pages ultimate destination.
 	 */
-	spin_lock(&v6_lock);
+	raw_spin_lock(&v6_lock);
 
 	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
-	spin_unlock(&v6_lock);
+	raw_spin_unlock(&v6_lock);
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
arch/arm/mm/copypage-xscale.c
@@ -32,7 +32,7 @@
 #define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
 				  L_PTE_MT_MINICACHE)
 
-static DEFINE_SPINLOCK(minicache_lock);
+static DEFINE_RAW_SPINLOCK(minicache_lock);
 
 /*
  * XScale mini-dcache optimised copy_user_highpage
@@ -98,14 +98,14 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
 
-	spin_lock(&minicache_lock);
+	raw_spin_lock(&minicache_lock);
 
 	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
-	spin_unlock(&minicache_lock);
+	raw_spin_unlock(&minicache_lock);
 
 	kunmap_atomic(kto, KM_USER1);
 }