Merge branch 'for-rmk' of git://git.marvell.com/orion into devel
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
 	.align	3;			\
 	.long	9999b,9001f;		\
 	.previous
+
+/*
+ * SMP data memory barrier
+ */
+	.macro	smp_dmb
+#ifdef CONFIG_SMP
+#if __LINUX_ARM_ARCH__ >= 7
+	dmb
+#elif __LINUX_ARM_ARCH__ == 6
+	mcr	p15, 0, r0, c7, c10, 5	@ dmb
+#endif
+#endif
+	.endm
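For context on what the new smp_dmb macro guards against: on SMP, plain stores and loads can be observed out of order by another CPU. Below is a minimal sketch in C11 atomics (illustrative only, not part of this diff) of the message-passing pattern such a barrier makes safe; on ARMv6 the release/acquire pair compiles down to the same CP15 operation shown above, on ARMv7+ to a dmb instruction.

/* Illustrative sketch, not from this commit. */
#include <stdatomic.h>

static int payload;            /* plain data */
static atomic_int ready;       /* flag guarding it */

void producer(void)
{
	payload = 42;                                           /* 1: write data    */
	atomic_store_explicit(&ready, 1, memory_order_release); /* 2: then publish  */
}

int consumer(void)
{
	if (atomic_load_explicit(&ready, memory_order_acquire))
		return payload;  /* guaranteed to see 42, not a stale value */
	return -1;               /* not published yet */
}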
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
 	: "cc");
 }
 
+static inline void atomic_add(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_add\n"
+"1:	ldrex	%0, [%2]\n"
+"	add	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;
 
+	smp_mb();
+
 	__asm__ __volatile__("@ atomic_add_return\n"
 "1:	ldrex	%0, [%2]\n"
 "	add	%0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
+	smp_mb();
+
 	return result;
 }
 
+static inline void atomic_sub(int i, atomic_t *v)
+{
+	unsigned long tmp;
+	int result;
+
+	__asm__ __volatile__("@ atomic_sub\n"
+"1:	ldrex	%0, [%2]\n"
+"	sub	%0, %0, %3\n"
+"	strex	%1, %0, [%2]\n"
+"	teq	%1, #0\n"
+"	bne	1b"
+	: "=&r" (result), "=&r" (tmp)
+	: "r" (&v->counter), "Ir" (i)
+	: "cc");
+}
+
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
 	unsigned long tmp;
 	int result;
 
+	smp_mb();
+
 	__asm__ __volatile__("@ atomic_sub_return\n"
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, %3\n"
@@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	: "r" (&v->counter), "Ir" (i)
 	: "cc");
 
+	smp_mb();
+
 	return result;
 }
 
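The pattern in the three hunks above: the void operations (atomic_add, atomic_sub) take no barriers, while the value-returning variants issue smp_mb() both before and after the ldrex/strex loop, making them fully ordered. A usage sketch of why callers need that distinction; the function names here are hypothetical, kernel-style illustrations:

void stats_bump(atomic_t *counter)
{
	atomic_add(1, counter);  /* pure counter: no ordering needed or provided */
}

int put_ref(atomic_t *refcount, void (*release)(void))
{
	/* atomic_sub_return() is fully ordered: this CPU's earlier stores to
	 * the object are visible before the count drops, and release() cannot
	 * be speculated ahead of the decrement. */
	if (atomic_sub_return(1, refcount) == 0) {
		release();
		return 1;
	}
	return 0;
}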
@@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
 	unsigned long oldval, res;
 
+	smp_mb();
+
 	do {
 		__asm__ __volatile__("@ atomic_cmpxchg\n"
 		"ldrex	%1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 		: "cc");
 	} while (res);
 
+	smp_mb();
+
 	return oldval;
 }
 
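A sketch of the compare-and-swap retry loop that atomic_cmpxchg() enables, essentially how atomic_add_unless() further down this header is built on it. The function name is hypothetical:

static inline int my_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;   /* lost a race: retry against the fresh value */
	return c != u;
}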
@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 
 	return val;
 }
+#define atomic_add(i, v)	(void) atomic_add_return(i, v)
 
 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 	return val;
 }
+#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
 
 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 }
 #define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)
 
-#define atomic_add(i, v)	(void) atomic_add_return(i, v)
-#define atomic_inc(v)		(void) atomic_add_return(1, v)
-#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
-#define atomic_dec(v)		(void) atomic_sub_return(1, v)
+#define atomic_inc(v)		atomic_add(1, v)
+#define atomic_dec(v)		atomic_sub(1, v)
 
 #define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
 #define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
 
-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec()	barrier()
-#define smp_mb__after_atomic_dec()	barrier()
-#define smp_mb__before_atomic_inc()	barrier()
-#define smp_mb__after_atomic_inc()	barrier()
+#define smp_mb__before_atomic_dec()	smp_mb()
+#define smp_mb__after_atomic_dec()	smp_mb()
+#define smp_mb__before_atomic_inc()	smp_mb()
+#define smp_mb__after_atomic_inc()	smp_mb()
 
 #include <asm-generic/atomic.h>
 #endif
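These macros exist so callers can order plain memory accesses against an adjacent atomic_inc()/atomic_dec(); with LDREX/STREX-based SMP atomics a compiler-only barrier() is no longer enough, hence the change to smp_mb(). A hypothetical usage sketch:

static int obj_unpublish(atomic_t *pending, int *published)
{
	*published = 0;              /* plain store that must precede the dec */
	smp_mb__before_atomic_dec();
	atomic_dec(pending);         /* other CPUs see published == 0 no later
	                                than the new count */
	return 0;
}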
--- a/arch/arm/include/asm/flat.h
+++ b/arch/arm/include/asm/flat.h
@@ -5,9 +5,6 @@
 #ifndef __ARM_FLAT_H__
 #define __ARM_FLAT_H__
 
-/* An odd number of words will be pushed after this alignment, so
-   deliberately misalign the value. */
-#define	flat_stack_align(sp)	sp = (void *)(((unsigned long)(sp) - 4) | 4)
 #define	flat_argvp_envp_on_stack()		1
 #define	flat_old_ram_flag(flags)		(flags)
 #define	flat_reloc_valid(reloc, size)		((reloc) <= (size))
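For reference, a worked example (illustrative, standalone) of what the removed flat_stack_align() computed: it forces the stack pointer to 4 mod 8, so that the odd number of words pushed afterwards leaves the final stack 8-byte aligned.

#include <stdio.h>

int main(void)
{
	unsigned long sps[] = { 0x1000, 0x1004, 0x1008, 0x100c };

	for (int i = 0; i < 4; i++) {
		unsigned long sp = (sps[i] - 4) | 4;  /* the removed macro's math */
		printf("%#lx -> %#lx (mod 8 = %lu)\n", sps[i], sp, sp % 8);
	}
	return 0;  /* every result ends in ...4 or ...c: always 4 mod 8 */
}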
--- a/arch/arm/include/asm/sizes.h
+++ b/arch/arm/include/asm/sizes.h
@@ -29,6 +29,7 @@
 #define SZ_512				0x00000200
 
 #define SZ_1K				0x00000400
+#define SZ_2K				0x00000800
 #define SZ_4K				0x00001000
 #define SZ_8K				0x00002000
 #define SZ_16K				0x00004000
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 	unsigned int tmp;
 #endif
 
+	smp_mb();
+
 	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
 	case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 		__bad_xchg(ptr, size), ret = 0;
 		break;
 	}
+	smp_mb();
 
 	return ret;
 }
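The barriers added to __xchg() make xchg() a full fence on both sides. A sketch (hypothetical names, kernel style) of the classic consumer, a test-and-set spinlock, which needs exactly that: the critical section must not float above the acquiring swap nor sink below the releasing one.

static inline void my_lock(unsigned long *l)
{
	while (xchg(l, 1) != 0)
		cpu_relax();   /* spin until we are the one swapping in the 1 */
}

static inline void my_unlock(unsigned long *l)
{
	xchg(l, 0);            /* fully ordered release */
}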
@@ -316,6 +319,12 @@ extern void enable_hlt(void);
 
 #include <asm-generic/cmpxchg-local.h>
 
+#if __LINUX_ARM_ARCH__ < 6
+
+#ifdef CONFIG_SMP
+#error "SMP is not supported on this platform"
+#endif
+
 /*
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
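What "atomic wrt current CPU" buys: data raced against only by the same CPU (for example, by local interrupt context) can use the cheaper, barrier-free local form. An illustrative sketch with a hypothetical helper:

static inline void local_event_inc(unsigned long *cnt)
{
	unsigned long old;

	do {
		old = *cnt;
	} while (cmpxchg_local(cnt, old, old + 1) != old);
}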
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
 #include <asm-generic/cmpxchg.h>
 #endif
 
+#else	/* __LINUX_ARM_ARCH__ >= 6 */
+
+extern void __bad_cmpxchg(volatile void *ptr, int size);
+
+/*
+ * cmpxchg only support 32-bits operands on ARMv6.
+ */
+
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+				      unsigned long new, int size)
+{
+	unsigned long oldval, res;
+
+	switch (size) {
+#ifdef CONFIG_CPU_32v6K
+	case 1:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexb	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexbeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	case 2:
+		do {
+			asm volatile("@ __cmpxchg1\n"
+			"	ldrexh	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexheq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+#endif /* CONFIG_CPU_32v6K */
+	case 4:
+		do {
+			asm volatile("@ __cmpxchg4\n"
+			"	ldrex	%1, [%2]\n"
+			"	mov	%0, #0\n"
+			"	teq	%1, %3\n"
+			"	strexeq %0, %4, [%2]\n"
+				: "=&r" (res), "=&r" (oldval)
+				: "r" (ptr), "Ir" (old), "r" (new)
+				: "memory", "cc");
+		} while (res);
+		break;
+	default:
+		__bad_cmpxchg(ptr, size);
+		oldval = 0;
+	}
+
+	return oldval;
+}
+
+static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
+					 unsigned long new, int size)
+{
+	unsigned long ret;
+
+	smp_mb();
+	ret = __cmpxchg(ptr, old, new, size);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),			\
+					  (unsigned long)(o),		\
+					  (unsigned long)(n),		\
+					  sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+					    unsigned long old,
+					    unsigned long new, int size)
+{
+	unsigned long ret;
+
+	switch (size) {
+#ifndef CONFIG_CPU_32v6K
+	case 1:
+	case 2:
+		ret = __cmpxchg_local_generic(ptr, old, new, size);
+		break;
+#endif	/* !CONFIG_CPU_32v6K */
+	default:
+		ret = __cmpxchg(ptr, old, new, size);
+	}
+
+	return ret;
+}
+
+#define cmpxchg_local(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
+					     (unsigned long)(o),	\
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
+
+#ifdef CONFIG_CPU_32v6K
+
+/*
+ * Note : ARMv7-M (currently unsupported by Linux) does not support
+ * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
+ * not be allowed to use __cmpxchg64.
+ */
+static inline unsigned long long __cmpxchg64(volatile void *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	register unsigned long long oldval asm("r0");
+	register unsigned long long __old asm("r2") = old;
+	register unsigned long long __new asm("r4") = new;
+	unsigned long res;
+
+	do {
+		asm volatile(
+		"	@ __cmpxchg8\n"
+		"	ldrexd	%1, %H1, [%2]\n"
+		"	mov	%0, #0\n"
+		"	teq	%1, %3\n"
+		"	teqeq	%H1, %H3\n"
+		"	strexdeq %0, %4, %H4, [%2]\n"
+			: "=&r" (res), "=&r" (oldval)
+			: "r" (ptr), "Ir" (__old), "r" (__new)
+			: "memory", "cc");
+	} while (res);
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
+#define cmpxchg64(ptr,o,n)						\
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					    (unsigned long long)(o),	\
+					    (unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr,o,n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					 (unsigned long long)(o),	\
+					 (unsigned long long)(n)))
+
+#else /* !CONFIG_CPU_32v6K */
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+#endif /* CONFIG_CPU_32v6K */
+
+#endif	/* __LINUX_ARM_ARCH__ >= 6 */
+
 #endif /* __ASSEMBLY__ */
 
 #define arch_align_stack(x) (x)
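A usage sketch for the primitives added above (the store_max helpers are hypothetical): the canonical cmpxchg() retry loop, in 32-bit form and, where CONFIG_CPU_32v6K provides ldrexd/strexd, in 64-bit form via cmpxchg64(). Both rely on the _mb wrappers above for full ordering on success.

static inline void store_max(unsigned long *p, unsigned long val)
{
	unsigned long cur = *p;

	while (val > cur) {
		unsigned long old = cmpxchg(p, cur, val);
		if (old == cur)
			break;          /* we won the race */
		cur = old;              /* lost: retry against the new value */
	}
}

static inline void store_max64(unsigned long long *p, unsigned long long val)
{
	unsigned long long cur = *p;

	while (val > cur) {
		unsigned long long old = cmpxchg64(p, cur, val);
		if (old == cur)
			break;
		cur = old;
	}
}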