Merge tag 'arc-5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:

 - unaligned access support for HS cores

 - removed extra memory barrier around spinlock code

 - HSDK platform updates: enable dmac, reset

 - some more boot logging updates

 - misc minor fixes

* tag 'arc-5.1-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  arch: arc: Kconfig: pedantic formatting
  ARCv2: spinlock: remove the extra smp_mb before lock, after unlock
  ARC: unaligned: relax the check for gcc supporting -mno-unaligned-access
  ARC: boot log: cut down on verbosity
  ARCv2: boot log: refurbish HS core/release identification
  arc: hsdk_defconfig: Enable CONFIG_BLK_DEV_RAM
  ARC: u-boot args: check that magic number is correct
  ARC: perf: bpok condition only exists for ARCompact
  ARCv2: Add explcit unaligned access support (and ability to disable too)
  ARCv2: lib: introduce memcpy optimized for unaligned access
  ARC: [plat-hsdk]: Enable AXI DW DMAC support
  ARC: [plat-hsdk]: Add reset controller handle to manage USB reset
  ARC: DTB: [scripted] fix node name and address spelling
arch/arc/include/asm/arcregs.h:

@@ -82,6 +82,7 @@
 #define ECR_V_DTLB_MISS         0x05
 #define ECR_V_PROTV             0x06
 #define ECR_V_TRAP              0x09
+#define ECR_V_MISALIGN          0x0d
 #endif

 /* DTLB Miss and Protection Violation Cause Codes */
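
Side note (not part of the commit): ECR_V_MISALIGN adds an exception cause vector for the new unaligned-access handling. A minimal sketch of decoding the vector field, assuming the usual ARC ECR layout (parameter in bits 7:0, cause code in 15:8, vector in 23:16 — an assumption, check arcregs.h/ptrace.h for the real layout):

#define ECR_V_MISALIGN  0x0d

/* assumed layout: ECR[23:16] holds the exception vector number */
static inline unsigned int ecr_vector(unsigned int ecr)
{
        return (ecr >> 16) & 0xff;
}

/* e.g. in an exception path: if (ecr_vector(ecr) == ECR_V_MISALIGN) ... */
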
@@ -167,14 +168,6 @@ struct bcr_mpy {
 #endif
 };

-struct bcr_extn_xymem {
-#ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int ram_org:2, num_banks:4, bank_sz:4, ver:8;
-#else
-	unsigned int ver:8, bank_sz:4, num_banks:4, ram_org:2;
-#endif
-};
-
 struct bcr_iccm_arcompact {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	unsigned int base:16, pad:5, sz:3, ver:8;
@@ -312,7 +305,7 @@ struct cpuinfo_arc {
 	struct cpuinfo_arc_bpu bpu;
 	struct bcr_identity core;
 	struct bcr_isa_arcv2 isa;
-	const char *details, *name;
+	const char *release, *name;
 	unsigned int vec_base;
 	struct cpuinfo_arc_ccm iccm, dccm;
 	struct {
@@ -322,7 +315,6 @@ struct cpuinfo_arc {
 		timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
 	} extn;
 	struct bcr_mpy extn_mpy;
-	struct bcr_extn_xymem extn_xymem;
 };

 extern struct cpuinfo_arc cpuinfo_arc700[];

arch/arc/include/asm/irqflags-arcv2.h:

@@ -44,7 +44,13 @@
 #define ARCV2_IRQ_DEF_PRIO	1

 /* seed value for status register */
-#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB	STATUS_AD_MASK
+#else
+#define __AD_ENB	0
+#endif
+
+#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | __AD_ENB | \
 					(ARCV2_IRQ_DEF_PRIO << 1))

 #ifndef __ASSEMBLY__
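
Side note (not part of the commit): the hunk above gates the AD bit in the seed STATUS32 value on the new Kconfig knob, so kernels built without CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS no longer start with alignment checking disabled. A standalone sketch of how the seed value composes; the bit positions below are illustrative assumptions, not the real STATUS32 layout:

#include <stdio.h>

#define STATUS_IE_MASK		(1u << 31)	/* assumed: interrupt enable */
#define STATUS_AD_MASK		(1u << 19)	/* assumed: unaligned access allowed */
#define ARCV2_IRQ_DEF_PRIO	1

#define CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS	/* comment out to compare */

#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
#define __AD_ENB	STATUS_AD_MASK
#else
#define __AD_ENB	0
#endif

#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | __AD_ENB | \
				 (ARCV2_IRQ_DEF_PRIO << 1))

int main(void)
{
	printf("seed STATUS32 = 0x%08x\n", ISA_INIT_STATUS_BITS);
	return 0;
}
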

arch/arc/include/asm/perf_event.h:

@@ -105,10 +105,10 @@ static const char * const arc_pmu_ev_hw_map[] = {
 	[PERF_COUNT_HW_INSTRUCTIONS] = "iall",
 	/* All jump instructions that are taken */
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = "ijmptak",
-	[PERF_COUNT_ARC_BPOK] = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 #ifdef CONFIG_ISA_ARCV2
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpmp",
 #else
+	[PERF_COUNT_ARC_BPOK] = "bpok",	  /* NP-NT, PT-T, PNT-NT */
 	[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
 #endif
 	[PERF_COUNT_ARC_LDC] = "imemrdc", /* Instr: mem read cached */
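
Side note (not part of the commit): arc_pmu_ev_hw_map translates perf event ids into hardware condition names, and since ARCv2 has no "bpok" condition the entry moves under the ARCompact-only branch. A simplified standalone sketch of the lookup (indices and names here are illustrative, not the kernel's):

#include <stdio.h>

static const char * const ev_map[] = {
	"iall",		/* PERF_COUNT_HW_INSTRUCTIONS */
	"ijmptak",	/* PERF_COUNT_HW_BRANCH_INSTRUCTIONS */
	"bpmp",		/* PERF_COUNT_HW_BRANCH_MISSES (ARCv2) */
};

/* return the hardware condition name, or NULL if not countable */
static const char *ev_name(unsigned int id)
{
	if (id >= sizeof(ev_map) / sizeof(ev_map[0]))
		return NULL;
	return ev_map[id];
}

int main(void)
{
	printf("%s\n", ev_name(2));	/* -> bpmp */
	return 0;
}
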

arch/arc/include/asm/spinlock.h:

@@ -21,8 +21,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	unsigned int val;

-	smp_mb();
-
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[slock]]	\n"
 	"	breq	%[val], %[LOCKED], 1b	\n"	/* spin while LOCKED */
@@ -34,6 +32,14 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	  [LOCKED]	"r"	(__ARCH_SPIN_LOCK_LOCKED__)
 	: "memory", "cc");

+	/*
+	 * ACQUIRE barrier to ensure load/store after taking the lock
+	 * don't "bleed-up" out of the critical section (leak-in is allowed)
+	 * http://www.spinics.net/lists/kernel/msg2010409.html
+	 *
+	 * ARCv2 only has load-load, store-store and all-all barrier
+	 * thus need the full all-all barrier
+	 */
 	smp_mb();
 }

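Side note (not part of the commit): the new comment documents why a single smp_mb() after taking the lock is enough for ACQUIRE, and why the one before it could go. A portable C11 analogue of the intended lock/unlock ordering (the kernel code itself uses llock/scond plus smp_mb(); names below are hypothetical):

#include <stdatomic.h>

typedef struct { atomic_int slock; } sketch_spinlock_t;

static void sketch_spin_lock(sketch_spinlock_t *l)
{
	int unlocked = 0;

	/* spin until UNLOCKED(0) is swapped for LOCKED(1) */
	while (!atomic_compare_exchange_weak_explicit(&l->slock, &unlocked, 1,
						      memory_order_acquire,
						      memory_order_relaxed))
		unlocked = 0;	/* CAS failure overwrote expected value */
	/* acquire: critical-section accesses cannot "bleed up" past here */
}

static void sketch_spin_unlock(sketch_spinlock_t *l)
{
	/* release: critical-section accesses cannot "bleed down" past here */
	atomic_store_explicit(&l->slock, 0, memory_order_release);
}
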
@@ -42,8 +48,6 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int val, got_it = 0;

-	smp_mb();
-
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[slock]]	\n"
 	"	breq	%[val], %[LOCKED], 4f	\n"	/* already LOCKED, just bail */
@@ -67,9 +71,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	smp_mb();

-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
-
-	smp_mb();
+	WRITE_ONCE(lock->slock, __ARCH_SPIN_LOCK_UNLOCKED__);
 }

 /*
@@ -81,8 +83,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
 {
 	unsigned int val;

-	smp_mb();
-
 	/*
 	 * zero means writer holds the lock exclusively, deny Reader.
 	 * Otherwise grant lock to first/subseq reader
@@ -113,8 +113,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val, got_it = 0;

-	smp_mb();
-
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[rwlock]]	\n"
 	"	brls	%[val], %[WR_LOCKED], 4f\n"	/* <= 0: already write locked, bail */
@@ -140,8 +138,6 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 {
 	unsigned int val;

-	smp_mb();
-
 	/*
 	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
 	 * deny writer. Otherwise if unlocked grant to writer
@@ -175,8 +171,6 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
 	unsigned int val, got_it = 0;

-	smp_mb();
-
 	__asm__ __volatile__(
 	"1:	llock	%[val], [%[rwlock]]	\n"
 	"	brne	%[val], %[UNLOCKED], 4f	\n"	/* !UNLOCKED, bail */
@@ -217,17 +211,13 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 	: [val]		"=&r"	(val)
 	: [rwlock]	"r"	(&(rw->counter))
 	: "memory", "cc");
-
-	smp_mb();
 }

 static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	smp_mb();

-	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
-
-	smp_mb();
+	WRITE_ONCE(rw->counter, __ARCH_RW_LOCK_UNLOCKED__);
 }

 #else	/* !CONFIG_ARC_HAS_LLSC */
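
Side note (not part of the commit): the rwlock here is counter based — the counter starts at __ARCH_RW_LOCK_UNLOCKED__ (the maximum reader count), each reader decrements it, and a writer takes it all the way to zero. A C11 sketch of that protocol (the UNLOCKED value and names are assumptions):

#include <stdatomic.h>
#include <stdbool.h>

#define RW_UNLOCKED	0x01000000	/* assumed __ARCH_RW_LOCK_UNLOCKED__ */

typedef struct { atomic_int counter; } sketch_rwlock_t;

static bool sketch_read_trylock(sketch_rwlock_t *rw)
{
	int v = atomic_load_explicit(&rw->counter, memory_order_relaxed);

	/* counter <= 0 means write locked: deny reader, else take a slot */
	return v > 0 &&
	       atomic_compare_exchange_strong_explicit(&rw->counter, &v, v - 1,
						       memory_order_acquire,
						       memory_order_relaxed);
}

static bool sketch_write_trylock(sketch_rwlock_t *rw)
{
	int v = RW_UNLOCKED;

	/* only a fully unlocked counter (no readers) may drop to 0 */
	return atomic_compare_exchange_strong_explicit(&rw->counter, &v, 0,
						       memory_order_acquire,
						       memory_order_relaxed);
}
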
@@ -237,10 +227,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 	unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;

 	/*
-	 * This smp_mb() is technically superfluous, we only need the one
-	 * after the lock for providing the ACQUIRE semantics.
-	 * However doing the "right" thing was regressing hackbench
-	 * so keeping this, pending further investigation
+	 * Per lkmm, smp_mb() is only required after _lock (and before_unlock)
+	 * for ACQ and REL semantics respectively. However EX based spinlocks
+	 * need the extra smp_mb to workaround a hardware quirk.
 	 */
 	smp_mb();

@@ -257,14 +246,6 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 #endif
 	: "memory");

-	/*
-	 * ACQUIRE barrier to ensure load/store after taking the lock
-	 * don't "bleed-up" out of the critical section (leak-in is allowed)
-	 * http://www.spinics.net/lists/kernel/msg2010409.html
-	 *
-	 * ARCv2 only has load-load, store-store and all-all barrier
-	 * thus need the full all-all barrier
-	 */
 	smp_mb();
 }

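Side note (not part of the commit): on cores without LLSC the lock falls back to an atomic exchange (EX) instruction, and per the rewritten comment the smp_mb() before the exchange stays only to work around a hardware quirk. A C11 analogue of the exchange-based acquire path (names are hypothetical):

#include <stdatomic.h>

typedef struct { atomic_int slock; } sketch_ex_spinlock_t;

static void sketch_ex_spin_lock(sketch_ex_spinlock_t *l)
{
	/* the "extra" full barrier kept to work around the hardware quirk */
	atomic_thread_fence(memory_order_seq_cst);

	/* keep swapping in LOCKED(1) until the old value reads UNLOCKED(0) */
	while (atomic_exchange_explicit(&l->slock, 1, memory_order_relaxed))
		;

	/* ACQUIRE: a full barrier, as ARCv2 has no lighter acquire-only fence */
	atomic_thread_fence(memory_order_seq_cst);
}
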
@@ -309,8 +290,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	: "memory");

 	/*
-	 * superfluous, but keeping for now - see pairing version in
-	 * arch_spin_lock above
+	 * see pairing version/comment in arch_spin_lock above
 	 */
 	smp_mb();
 }
@@ -344,7 +324,6 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
 	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);

-	smp_mb();
 	return ret;
 }
