Merge tag 'arc-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC architecture updates from Vineet Gupta:

 - support for HS38 cores based on the ARCv2 ISA

   ARCv2 is the next-generation ISA from Synopsys and the basis for the
   HS3{4,6,8} families of processors, which retain the traditional ARC
   mantra of low power and configurability, and are now more performant
   and feature rich.

   HS38x is a 10-stage pipeline core which supports MMU (with huge
   pages) and SMP (up to 4 cores) among other features.

   + www.synopsys.com/dw/ipdir.php?ds=arc-hs38-processor
   + http://news.synopsys.com/2014-10-14-New-DesignWare-ARC-HS38-Processor-Doubles-Performance-for-Embedded-Linux-Applications
   + http://www.embedded.com/electronics-news/4435975/Synopsys-ARC-HS38-core-gives-2X-boost-to-Linux-based-apps

 - support for ARC SDP (Software Development Platform): Main Board + CPU cards

   = AXS101: CPU card with ARC700 in silicon @ 700 MHz
   = AXS103: CPU card with HS38x in FPGA

 - refactoring of the ARCompact port to accommodate the new ARCv2 ISA

 - misc updates/cleanups

* tag 'arc-4.2-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc: (72 commits)
  ARC: Fix build failures for ARCompact in linux-next after ARCv2 support
  ARCv2: Allow older gcc to cope with new regime of ARCv2/ARCompact support
  ARCv2: [vdk] dts files and defconfig for HS38 VDK
  ARCv2: [axs103] Support ARC SDP FPGA platform for HS38x cores
  ARC: [axs101] Prepare for AXS103
  ARCv2: [nsim*hs*] Support simulation platforms for HS38x cores
  ARCv2: All bits in place, allow ARCv2 builds
  ARCv2: SLC: Handle explicit flush for DMA ops (w/o IO-coherency)
  ARCv2: STAR 9000837815 workaround hardware exclusive transactions livelock
  ARC: Reduce bitops lines of code using macros
  ARCv2: barriers
  arch: conditionally define smp_{mb,rmb,wmb}
  ARC: add smp barriers around atomics per Documentation/atomic_ops.txt
  ARC: add compiler barrier to LLSC based cmpxchg
  ARCv2: SMP: intc: IDU 2nd level intc for dynamic IRQ distribution
  ARCv2: SMP: clocksource: Enable Global Real Time counter
  ARCv2: SMP: ARConnect debug/robustness
  ARCv2: SMP: Support ARConnect (MCIP) for Inter-Core-Interrupts et al
  ARC: make plat_smp_ops weak to allow over-rides
  ARCv2: clocksource: Introduce 64bit local RTC counter
  ...
arch/arc/include/asm/Kbuild
@@ -1,5 +1,4 @@
 generic-y += auxvec.h
-generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += clkdev.h
arch/arc/include/asm/arcregs.h
@@ -16,6 +16,8 @@
 #define ARC_REG_PERIBASE_BCR	0x69
 #define ARC_REG_FP_BCR		0x6B	/* ARCompact: Single-Precision FPU */
 #define ARC_REG_DPFP_BCR	0x6C	/* ARCompact: Dbl Precision FPU */
+#define ARC_REG_FP_V2_BCR	0xc8	/* ARCv2 FPU */
+#define ARC_REG_SLC_BCR		0xce
 #define ARC_REG_DCCM_BCR	0x74	/* DCCM Present + SZ */
 #define ARC_REG_TIMERS_BCR	0x75
 #define ARC_REG_AP_BCR		0x76
@@ -31,6 +33,7 @@
 #define ARC_REG_BPU_BCR		0xc0
+#define ARC_REG_ISA_CFG_BCR	0xc1
 #define ARC_REG_RTT_BCR		0xF2
 #define ARC_REG_IRQ_BCR		0xF3
 #define ARC_REG_SMART_BCR	0xFF
 
 /* status32 Bits Positions */
@@ -51,6 +54,7 @@
  * [15: 8] = Exception Cause Code
  * [ 7: 0] = Exception Parameters (for certain types only)
  */
+#ifdef CONFIG_ISA_ARCOMPACT
 #define ECR_V_MEM_ERR		0x01
 #define ECR_V_INSN_ERR		0x02
 #define ECR_V_MACH_CHK		0x20
@@ -58,6 +62,15 @@
 #define ECR_V_DTLB_MISS		0x22
 #define ECR_V_PROTV		0x23
 #define ECR_V_TRAP		0x25
+#else
+#define ECR_V_MEM_ERR		0x01
+#define ECR_V_INSN_ERR		0x02
+#define ECR_V_MACH_CHK		0x03
+#define ECR_V_ITLB_MISS		0x04
+#define ECR_V_DTLB_MISS		0x05
+#define ECR_V_PROTV		0x06
+#define ECR_V_TRAP		0x09
+#endif
 
 /* DTLB Miss and Protection Violation Cause Codes */
 
@@ -76,9 +89,6 @@
 #define ECR_C_BIT_DTLB_LD_MISS	8
 #define ECR_C_BIT_DTLB_ST_MISS	9
 
-/* Dummy ECR values for Interrupts */
-#define event_IRQ1		0x0031abcd
-#define event_IRQ2		0x0032abcd
 
 /* Auxiliary registers */
 #define AUX_IDENTITY		4
@@ -204,9 +214,11 @@ struct bcr_identity {
 
 struct bcr_isa {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int pad1:23, atomic1:1, ver:8;
+	unsigned int div_rem:4, pad2:4, ldd:1, unalign:1, atomic:1, be:1,
+		     pad1:11, atomic1:1, ver:8;
 #else
-	unsigned int ver:8, atomic1:1, pad1:23;
+	unsigned int ver:8, atomic1:1, pad1:11, be:1, atomic:1, unalign:1,
+		     ldd:1, pad2:4, div_rem:4;
 #endif
 };
 
@@ -269,11 +281,19 @@ struct bcr_fp_arcompact {
 #endif
 };
 
+struct bcr_fp_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad2:15, dp:1, pad1:7, sp:1, ver:8;
+#else
+	unsigned int ver:8, sp:1, pad1:7, dp:1, pad2:15;
+#endif
+};
+
 struct bcr_timer {
 #ifdef CONFIG_CPU_BIG_ENDIAN
-	unsigned int pad2:15, rtsc:1, pad1:6, t1:1, t0:1, ver:8;
+	unsigned int pad2:15, rtsc:1, pad1:5, rtc:1, t1:1, t0:1, ver:8;
 #else
-	unsigned int ver:8, t0:1, t1:1, pad1:6, rtsc:1, pad2:15;
+	unsigned int ver:8, t0:1, t1:1, rtc:1, pad1:5, rtsc:1, pad2:15;
 #endif
 };
 
@@ -285,6 +305,14 @@ struct bcr_bpu_arcompact {
 #endif
 };
 
+struct bcr_bpu_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	unsigned int pad:6, fbe:2, tqe:2, ts:4, ft:1, rse:2, pte:3, bce:3, ver:8;
+#else
+	unsigned int ver:8, bce:3, pte:3, rse:2, ft:1, ts:4, tqe:2, fbe:2, pad:6;
+#endif
+};
+
 struct bcr_generic {
 #ifdef CONFIG_CPU_BIG_ENDIAN
 	unsigned int pad:24, ver:8;
@@ -299,11 +327,12 @@ struct bcr_generic {
  */
 
 struct cpuinfo_arc_mmu {
-	unsigned int ver, pg_sz, sets, ways, u_dtlb, u_itlb, num_tlb;
+	unsigned int ver:4, pg_sz_k:8, s_pg_sz_m:8, u_dtlb:6, u_itlb:6;
+	unsigned int num_tlb:16, sets:12, ways:4;
 };
 
 struct cpuinfo_arc_cache {
-	unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
+	unsigned int sz_k:14, line_len:8, assoc:4, ver:4, alias:1, vipt:1;
 };
 
 struct cpuinfo_arc_bpu {
@@ -315,14 +344,13 @@ struct cpuinfo_arc_ccm {
 };
 
 struct cpuinfo_arc {
-	struct cpuinfo_arc_cache icache, dcache;
+	struct cpuinfo_arc_cache icache, dcache, slc;
 	struct cpuinfo_arc_mmu mmu;
 	struct cpuinfo_arc_bpu bpu;
 	struct bcr_identity core;
 	struct bcr_isa isa;
 	struct bcr_timer timers;
 	unsigned int vec_base;
 	unsigned int uncached_base;
 	struct cpuinfo_arc_ccm iccm, dccm;
 	struct {
		unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
@@ -336,6 +364,22 @@ struct cpuinfo_arc {
 
 extern struct cpuinfo_arc cpuinfo_arc700[];
 
+static inline int is_isa_arcv2(void)
+{
+	return IS_ENABLED(CONFIG_ISA_ARCV2);
+}
+
+static inline int is_isa_arcompact(void)
+{
+	return IS_ENABLED(CONFIG_ISA_ARCOMPACT);
+}
+
+#if defined(CONFIG_ISA_ARCOMPACT) && !defined(_CPU_DEFAULT_A7)
+#error "Toolchain not configured for ARCompact builds"
+#elif defined(CONFIG_ISA_ARCV2) && !defined(_CPU_DEFAULT_HS)
+#error "Toolchain not configured for ARCv2 builds"
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_ARC_ARCREGS_H */
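The BCR (Build Configuration Register) structs above are decoded by overlaying the bitfield layout on the raw auxiliary register value. A minimal sketch of how the new bcr_isa fields get used, assuming the READ_BCR() helper that arcregs.h already provides (the wrapper function name here is hypothetical):

static int arc_cpu_has_llsc(void)
{
	struct bcr_isa isa;

	READ_BCR(ARC_REG_ISA_CFG_BCR, isa);	/* overlay bcr_isa on the BCR value */
	return isa.atomic;			/* LLOCK/SCOND configured into the core */
}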
arch/arc/include/asm/atomic.h
@@ -23,13 +23,21 @@
 
 #define atomic_set(v, i) (((v)->counter) = (i))
 
+#ifdef CONFIG_ISA_ARCV2
+#define PREFETCHW	"	prefetchw   [%1]	\n"
+#else
+#define PREFETCHW
+#endif
+
 #define ATOMIC_OP(op, c_op, asm_op)					\
 static inline void atomic_##op(int i, atomic_t *v)			\
 {									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
-	"1:	llock   %0, [%1]	\n"				\
+	"1:				\n"				\
+	PREFETCHW							\
+	"	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
@@ -43,8 +51,16 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
 {									\
	unsigned int temp;						\
									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
	__asm__ __volatile__(						\
-	"1:	llock   %0, [%1]	\n"				\
+	"1:				\n"				\
+	PREFETCHW							\
+	"	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
@@ -52,6 +68,8 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
+	smp_mb();							\
+									\
	return temp;							\
 }
 
@@ -105,6 +123,9 @@ static inline int atomic_##op##_return(int i, atomic_t *v)	\
	unsigned long flags;						\
	unsigned long temp;						\
									\
+	/*								\
+	 * spin lock/unlock provides the needed smp_mb() before/after	\
+	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
@@ -142,9 +163,19 @@ ATOMIC_OP(and, &=, and)
 #define __atomic_add_unless(v, a, u)					\
 ({									\
	int c, old;							\
									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
+	smp_mb();							\
+									\
	c;								\
 })
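Each ATOMIC_OP() instantiation stamps out one LLOCK/SCOND retry loop. Ignoring the barrier additions above, ATOMIC_OP(add, +=, add) expands to roughly:

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned int temp;

	__asm__ __volatile__(
	"1:				\n"
	PREFETCHW			/* ARCv2 only: pull the line in for write */
	"	llock   %0, [%1]	\n"	/* load-locked v->counter */
	"	add     %0, %0, %2	\n"
	"	scond   %0, [%1]	\n"	/* store fails if the line was touched */
	"	bnz     1b		\n"	/* retry until scond succeeds */
	: "=&r"(temp)
	: "r"(&v->counter), "ir"(i)
	: "cc");
}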
arch/arc/include/asm/barrier.h (new file, 48 lines)
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifdef CONFIG_ISA_ARCV2
+
+/*
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
+ *
+ * Explicit barrier provided by DMB instruction
+ *  - Operand supports fine grained load/store/load+store semantics
+ *  - Ensures that selected memory operation issued before it will complete
+ *    before any subsequent memory operation of same type
+ *  - DMB guarantees SMP as well as local barrier semantics
+ *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ *    UP: barrier(), SMP: smp_*mb == *mb)
+ *  - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed
+ *    in the general case. Plus it only provides full barrier.
+ */
+
+#define mb()	asm volatile("dmb 3\n" : : : "memory")
+#define rmb()	asm volatile("dmb 1\n" : : : "memory")
+#define wmb()	asm volatile("dmb 2\n" : : : "memory")
+
+#endif
+
+#ifdef CONFIG_ISA_ARCOMPACT
+
+/*
+ * ARCompact based cores (ARC700) only have SYNC instruction which is super
+ * heavy weight as it flushes the pipeline as well.
+ * There are no real SMP implementations of such cores.
+ */
+
+#define mb()	asm volatile("sync\n" : : : "memory")
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif
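A sketch of the ordering these definitions provide on an SMP ARCv2 build, where smp_wmb()/smp_rmb() resolve to "dmb 2"/"dmb 1" via asm-generic/barrier.h (the flag/data variables are hypothetical):

static int data, ready;

void producer(void)
{
	data = 42;
	smp_wmb();		/* "dmb 2": the data store completes before the flag store */
	WRITE_ONCE(ready, 1);
}

void consumer(void)
{
	while (!READ_ONCE(ready))
		;
	smp_rmb();		/* "dmb 1": the flag load completes before the data load */
	BUG_ON(data != 42);
}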
arch/arc/include/asm/bitops.h
@@ -18,83 +18,50 @@
 #include <linux/types.h>
 #include <linux/compiler.h>
 #include <asm/barrier.h>
+#ifndef CONFIG_ARC_HAS_LLSC
+#include <asm/smp.h>
+#endif
 
-/*
- * Hardware assisted read-modify-write using ARC700 LLOCK/SCOND insns.
- * The Kconfig glue ensures that in SMP, this is only set if the container
- * SoC/platform has cross-core coherent LLOCK/SCOND
- */
 #if defined(CONFIG_ARC_HAS_LLSC)
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int temp;
+/*
+ * Hardware assisted Atomic-R-M-W
+ */
 
-	m += nr >> 5;
-
-	/*
-	 * ARC ISA micro-optimization:
-	 *
-	 * Instructions dealing with bitpos only consider lower 5 bits (0-31)
-	 * e.g (x << 33) is handled like (x << 1) by ASL instruction
-	 *  (mem pointer still needs adjustment to point to next word)
-	 *
-	 * Hence the masking to clamp @nr arg can be elided in general.
-	 *
-	 * However if @nr is a constant (above assumed it in a register),
-	 * and greater than 31, gcc can optimize away (x << 33) to 0,
-	 * as overflow, given the 32-bit ISA. Thus masking needs to be done
-	 * for constant @nr, but no code is generated due to const prop.
-	 */
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	bset    %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b		\n"
-	: "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-}
-
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	bclr    %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b		\n"
-	: "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%1]	\n"
-	"	bxor    %0, %0, %2	\n"
-	"	scond   %0, [%1]	\n"
-	"	bnz     1b		\n"
-	: "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
+#define BIT_OP(op, c_op, asm_op)					\
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned int temp;						\
+									\
+	m += nr >> 5;							\
+									\
+	/*								\
+	 * ARC ISA micro-optimization:					\
+	 *								\
+	 * Instructions dealing with bitpos only consider lower 5 bits	\
+	 * e.g (x << 33) is handled like (x << 1) by ASL instruction	\
+	 *  (mem pointer still needs adjustment to point to next word)	\
+	 *								\
+	 * Hence the masking to clamp @nr arg can be elided in general.	\
+	 *								\
+	 * However if @nr is a constant (above assumed in a register),	\
+	 * and greater than 31, gcc can optimize away (x << 33) to 0,	\
+	 * as overflow, given the 32-bit ISA. Thus masking needs to be	\
+	 * done for const @nr, but no code is generated due to gcc	\
+	 * const prop.							\
+	 */								\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock       %0, [%1]	\n"				\
+	"	" #asm_op " %0, %0, %2	\n"				\
+	"	scond       %0, [%1]	\n"				\
+	"	bnz         1b		\n"				\
+	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
+	: "r"(m),	/* Not "m": llock only supports reg direct addr mode */\
+	  "ir"(nr)							\
+	: "cc");							\
 }
 
 /*
@@ -108,75 +75,38 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *m)
  * Since ARC lacks an equivalent h/w primitive, the bit is set unconditionally
  * and the old value of bit is returned
  */
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%2]	\n"
-	"	bset    %1, %0, %3	\n"
-	"	scond   %1, [%2]	\n"
-	"	bnz     1b		\n"
-	: "=&r"(old), "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int old, temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%2]	\n"
-	"	bclr    %1, %0, %3	\n"
-	"	scond   %1, [%2]	\n"
-	"	bnz     1b		\n"
-	: "=&r"(old), "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned int old, temp;
-
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	__asm__ __volatile__(
-	"1:	llock   %0, [%2]	\n"
-	"	bxor    %1, %0, %3	\n"
-	"	scond   %1, [%2]	\n"
-	"	bnz     1b		\n"
-	: "=&r"(old), "=&r"(temp)
-	: "r"(m), "ir"(nr)
-	: "cc");
-
-	return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)					\
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old, temp;					\
+									\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	/*								\
+	 * Explicit full memory barrier needed before/after as		\
+	 * LLOCK/SCOND themselves don't provide any such semantics	\
+	 */								\
+	smp_mb();							\
+									\
+	__asm__ __volatile__(						\
+	"1:	llock       %0, [%2]	\n"				\
+	"	" #asm_op " %1, %0, %3	\n"				\
+	"	scond       %1, [%2]	\n"				\
+	"	bnz         1b		\n"				\
+	: "=&r"(old), "=&r"(temp)					\
+	: "r"(m), "ir"(nr)						\
+	: "cc");							\
+									\
+	smp_mb();							\
+									\
+	return (old & (1 << nr)) != 0;					\
 }
 
 #else	/* !CONFIG_ARC_HAS_LLSC */
 
-#include <asm/smp.h>
-
 /*
  * Non hardware assisted Atomic-R-M-W
  * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
@@ -193,108 +123,43 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * at compile time)
  */
 
-static inline void set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	temp = *m;
-	*m = temp | (1UL << nr);
-
-	bitops_unlock(flags);
+#define BIT_OP(op, c_op, asm_op)					\
+static inline void op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long temp, flags;					\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	/*								\
+	 * spin lock/unlock provide the needed smp_mb() before/after	\
+	 */								\
+	bitops_lock(flags);						\
+									\
+	temp = *m;							\
+	*m = temp c_op (1UL << nr);					\
+									\
+	bitops_unlock(flags);						\
 }
 
-static inline void clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	temp = *m;
-	*m = temp & ~(1UL << nr);
-
-	bitops_unlock(flags);
-}
-
-static inline void change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	temp = *m;
-	*m = temp ^ (1UL << nr);
-
-	bitops_unlock(flags);
-}
-
-static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	old = *m;
-	*m = old | (1 << nr);
-
-	bitops_unlock(flags);
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	old = *m;
-	*m = old & ~(1 << nr);
-
-	bitops_unlock(flags);
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old, flags;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	bitops_lock(flags);
-
-	old = *m;
-	*m = old ^ (1 << nr);
-
-	bitops_unlock(flags);
-
-	return (old & (1 << nr)) != 0;
+#define TEST_N_BIT_OP(op, c_op, asm_op)					\
+static inline int test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old, flags;					\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	bitops_lock(flags);						\
+									\
+	old = *m;							\
+	*m = old c_op (1 << nr);					\
+									\
+	bitops_unlock(flags);						\
+									\
+	return (old & (1 << nr)) != 0;					\
 }
 
 #endif /* CONFIG_ARC_HAS_LLSC */
@@ -303,86 +168,51 @@ test_and_change_bit(unsigned long nr, volatile unsigned long *m)
  * Non atomic variants
  **************************************/
 
-static inline void __set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	temp = *m;
-	*m = temp | (1UL << nr);
+#define __BIT_OP(op, c_op, asm_op)					\
+static inline void __##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long temp;						\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	temp = *m;							\
+	*m = temp c_op (1UL << nr);					\
 }
 
-static inline void __clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	temp = *m;
-	*m = temp & ~(1UL << nr);
+#define __TEST_N_BIT_OP(op, c_op, asm_op)				\
+static inline int __test_and_##op##_bit(unsigned long nr, volatile unsigned long *m)\
+{									\
+	unsigned long old;						\
+	m += nr >> 5;							\
+									\
+	if (__builtin_constant_p(nr))					\
+		nr &= 0x1f;						\
+									\
+	old = *m;							\
+	*m = old c_op (1 << nr);					\
+									\
+	return (old & (1 << nr)) != 0;					\
 }
 
-static inline void __change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long temp;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	temp = *m;
-	*m = temp ^ (1UL << nr);
-}
-
-static inline int
-__test_and_set_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	old = *m;
-	*m = old | (1 << nr);
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_clear_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	old = *m;
-	*m = old & ~(1 << nr);
-
-	return (old & (1 << nr)) != 0;
-}
-
-static inline int
-__test_and_change_bit(unsigned long nr, volatile unsigned long *m)
-{
-	unsigned long old;
-	m += nr >> 5;
-
-	if (__builtin_constant_p(nr))
-		nr &= 0x1f;
-
-	old = *m;
-	*m = old ^ (1 << nr);
-
-	return (old & (1 << nr)) != 0;
-}
+#define BIT_OPS(op, c_op, asm_op)					\
+									\
+	/* set_bit(), clear_bit(), change_bit() */			\
+	BIT_OP(op, c_op, asm_op)					\
+									\
+	/* test_and_set_bit(), test_and_clear_bit(), test_and_change_bit() */\
+	TEST_N_BIT_OP(op, c_op, asm_op)					\
+									\
+	/* __set_bit(), __clear_bit(), __change_bit() */		\
+	__BIT_OP(op, c_op, asm_op)					\
+									\
+	/* __test_and_set_bit(), __test_and_clear_bit(), __test_and_change_bit() */\
+	__TEST_N_BIT_OP(op, c_op, asm_op)
+
+BIT_OPS(set, |, bset)
+BIT_OPS(clear, & ~, bclr)
+BIT_OPS(change, ^, bxor)
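Each BIT_OPS() line above stamps out four entry points from the generator macros, with c_op used in the plain-C bodies and asm_op in the LLOCK/SCOND loops. BIT_OPS(set, |, bset), for instance, yields:

static inline void set_bit(unsigned long nr, volatile unsigned long *m);	/* llock; bset; scond loop */
static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *m);
static inline void __set_bit(unsigned long nr, volatile unsigned long *m);	/* *m |= 1UL << nr */
static inline int __test_and_set_bit(unsigned long nr, volatile unsigned long *m);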
 /*
  * This routine doesn't need to be atomic.
@@ -402,6 +232,8 @@ test_bit(unsigned int nr, const volatile unsigned long *addr)
	return ((mask & *addr) != 0);
 }
 
+#ifdef CONFIG_ISA_ARCOMPACT
+
 /*
  * Count the number of zeros, starting from MSB
  * Helper for fls( ) friends
@@ -494,6 +326,75 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word)
	return ffs(word) - 1;
 }
 
+#else	/* CONFIG_ISA_ARCV2 */
+
+/*
+ * fls = Find Last Set in word
+ * @result: [1-32]
+ * fls(1) = 1, fls(0x80000000) = 32, fls(0) = 0
+ */
+static inline __attribute__ ((const)) int fls(unsigned long x)
+{
+	int n;
+
+	asm volatile(
+	"	fls.f	%0, %1		\n"	/* 0:31; 0(Z) if src 0 */
+	"	add.nz	%0, %0, 1	\n"	/* 0:31 -> 1:32 */
+	: "=r"(n)	/* Early clobber not needed */
+	: "r"(x)
+	: "cc");
+
+	return n;
+}
+
+/*
+ * __fls: Similar to fls, but zero based (0-31). Also 0 if no bit set
+ */
+static inline __attribute__ ((const)) int __fls(unsigned long x)
+{
+	/* FLS insn has exactly same semantics as the API */
+	return __builtin_arc_fls(x);
+}
+
+/*
+ * ffs = Find First Set in word (LSB to MSB)
+ * @result: [1-32], 0 if all 0's
+ */
+static inline __attribute__ ((const)) int ffs(unsigned long x)
+{
+	int n;
+
+	asm volatile(
+	"	ffs.f	%0, %1		\n"	/* 0:31; 31(Z) if src 0 */
+	"	add.nz	%0, %0, 1	\n"	/* 0:31 -> 1:32 */
+	"	mov.z	%0, 0		\n"	/* 31(Z) -> 0 */
+	: "=r"(n)	/* Early clobber not needed */
+	: "r"(x)
+	: "cc");
+
+	return n;
+}
+
+/*
+ * __ffs: Similar to ffs, but zero based (0-31)
+ */
+static inline __attribute__ ((const)) int __ffs(unsigned long x)
+{
+	int n;
+
+	asm volatile(
+	"	ffs.f	%0, %1		\n"	/* 0:31; 31(Z) if src 0 */
+	"	mov.z	%0, 0		\n"	/* 31(Z) -> 0 */
+	: "=r"(n)
+	: "r"(x)
+	: "cc");
+
+	return n;
+}
+
+#endif /* CONFIG_ISA_ARCOMPACT */
+
 /*
  * ffz = Find First Zero in word.
  * @return:[0-31], 32 if all 1's
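A quick sanity check of the conventions spelled out in the comments above (1-based fls()/ffs() with 0 meaning "no bit set", 0-based __fls()/__ffs()); the function is just an illustrative harness:

static void bitsearch_sanity(void)
{
	BUG_ON(fls(0) != 0 || fls(1) != 1 || fls(0x80000000) != 32);
	BUG_ON(__fls(0x80000000) != 31);
	BUG_ON(ffs(0) != 0 || ffs(1) != 1 || ffs(0x80000000) != 32);
	BUG_ON(__ffs(8) != 3);
}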
arch/arc/include/asm/cache.h
@@ -60,7 +60,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_IC_IVIC		0x10
 #define ARC_REG_IC_CTRL		0x11
 #define ARC_REG_IC_IVIL		0x19
-#if defined(CONFIG_ARC_MMU_V3)
+#if defined(CONFIG_ARC_MMU_V3) || defined(CONFIG_ARC_MMU_V4)
 #define ARC_REG_IC_PTAG		0x1E
 #endif
 
@@ -74,12 +74,24 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_DC_IVDL		0x4A
 #define ARC_REG_DC_FLSH		0x4B
 #define ARC_REG_DC_FLDL		0x4C
 #if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_DC_PTAG		0x5C
 #endif
 
 /* Bit val in DC_CTRL */
 #define DC_CTRL_INV_MODE_FLUSH	0x40
 #define DC_CTRL_FLUSH_STATUS	0x100
 
+/* System-level cache (L2 cache) related Auxiliary registers */
+#define ARC_REG_SLC_CFG		0x901
+#define ARC_REG_SLC_CTRL	0x903
+#define ARC_REG_SLC_FLUSH	0x904
+#define ARC_REG_SLC_INVALIDATE	0x905
+#define ARC_REG_SLC_RGN_START	0x914
+#define ARC_REG_SLC_RGN_END	0x916
+
+/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_IM		0x040
+#define SLC_CTRL_DISABLE	0x001
+#define SLC_CTRL_BUSY		0x100
+#define SLC_CTRL_RGN_OP_INV	0x200
+
 #endif /* _ASM_CACHE_H */
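A hedged sketch of how the new SLC region registers are meant to be driven for the DMA flush path (mm/cache.c owns the real sequence; the control-bit handling and register write order here are assumptions):

static void slc_region_op(unsigned long paddr, unsigned long sz, int inv)
{
	unsigned int ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	if (inv)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* region op invalidates */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;	/* region op flushes */
	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/* END first, then START: assume the START write kicks off the op */
	write_aux_reg(ARC_REG_SLC_RGN_END, paddr + sz - 1);
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;				/* spin until the region op retires */
}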
arch/arc/include/asm/cacheflush.h
@@ -34,9 +34,7 @@ void flush_cache_all(void);
 void flush_icache_range(unsigned long start, unsigned long end);
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len);
 void __inv_icache_page(unsigned long paddr, unsigned long vaddr);
-void ___flush_dcache_page(unsigned long paddr, unsigned long vaddr);
-#define __flush_dcache_page(p, v)	\
-		___flush_dcache_page((unsigned long)p, (unsigned long)v)
+void __flush_dcache_page(unsigned long paddr, unsigned long vaddr);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 
arch/arc/include/asm/cmpxchg.h
@@ -10,6 +10,8 @@
 #define __ASM_ARC_CMPXCHG_H
 
 #include <linux/types.h>
+
+#include <asm/barrier.h>
 #include <asm/smp.h>
 
 #ifdef CONFIG_ARC_HAS_LLSC
@@ -19,16 +21,25 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
 {
	unsigned long prev;
 
+	/*
+	 * Explicit full memory barrier needed before/after as
+	 * LLOCK/SCOND themselves don't provide any such semantics
+	 */
+	smp_mb();
+
	__asm__ __volatile__(
	"1:	llock   %0, [%1]	\n"
	"	brne    %0, %2, 2f	\n"
	"	scond   %3, [%1]	\n"
	"	bnz     1b		\n"
	"2:				\n"
-	: "=&r"(prev)
-	: "r"(ptr), "ir"(expected),
-	  "r"(new) /* can't be "ir". scond can't take limm for "b" */
-	: "cc");
+	: "=&r"(prev)	/* Early clobber, to prevent reg reuse */
+	: "r"(ptr),	/* Not "m": llock only supports reg direct addr mode */
+	  "ir"(expected),
+	  "r"(new)	/* can't be "ir". scond can't take LIMM for "b" */
+	: "cc", "memory"); /* so that gcc knows memory is being written here */
+
+	smp_mb();
 
	return prev;
 }
@@ -42,6 +53,9 @@ __cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
	int prev;
	volatile unsigned long *p = ptr;
 
+	/*
+	 * spin lock/unlock provide the needed smp_mb() before/after
+	 */
	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
@@ -77,12 +91,16 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
 
	switch (size) {
	case 4:
+		smp_mb();
+
		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");
 
+		smp_mb();
+
		return val;
	}
	return __xchg_bad_pointer();
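The brne exit at label 2: is what lets __cmpxchg() return the old value whether or not the store happened; callers loop on that return value. The __atomic_add_unless() pattern from atomic.h, restated as a plain function (the name is hypothetical):

static int add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v), old;

	/* retry until the cmpxchg was based on the value actually in memory */
	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
		c = old;

	return c;
}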
arch/arc/include/asm/delay.h
@@ -22,11 +22,10 @@
 static inline void __delay(unsigned long loops)
 {
	__asm__ __volatile__(
-	"1:	sub.f	%0, %0, 1	\n"
-	"	jpnz	1b		\n"
-	: "+r"(loops)
-	:
-	: "cc");
+	"	lp	1f		\n"
+	"	nop			\n"
+	"1:				\n"
+	: "+l"(loops));
 }
 
 extern void __bad_udelay(void);
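The rewrite moves __delay() from a sub.f/jpnz pair to the ZOL (zero overhead loop) LP instruction; the "+l" constraint places loops in lp_count so the hardware decrements and loops without a branch. Functionally it is still just this sketch:

static inline void __delay_equivalent(unsigned long loops)
{
	while (loops--)		/* the LP loop does this with no per-iteration branch cost */
		barrier();
}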
arch/arc/include/asm/dma-mapping.h
@@ -14,23 +14,6 @@
 #include <asm-generic/dma-coherent.h>
 #include <asm/cacheflush.h>
 
-#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
-/*
- * dma_map_* API take cpu addresses, which is kernel logical address in the
- * untranslated address space (0x8000_0000) based. The dma address (bus addr)
- * ideally needs to be 0x0000_0000 based hence these glue routines.
- * However given that intermediate bus bridges can ignore the high bit, we can
- * do with these routines being no-ops.
- * If a platform/device comes up which strictly requires 0 based bus addr
- * (e.g. AHB-PCI bridge on Angel4 board), then it can provide its own versions
- */
-#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
-#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))
-
-#else
-#include <plat/dma_addr.h>
-#endif
-
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp);
 
@@ -94,7 +77,7 @@ dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
 {
	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
-	return plat_kernel_addr_to_dma(dev, cpu_addr);
+	return (dma_addr_t)cpu_addr;
 }
 
 static inline void
@@ -147,16 +130,14 @@ static inline void
 dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
-			DMA_FROM_DEVICE);
+	_dma_cache_sync(dma_handle, size, DMA_FROM_DEVICE);
 }
 
 static inline void
 dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
 {
-	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
-			DMA_TO_DEVICE);
+	_dma_cache_sync(dma_handle, size, DMA_TO_DEVICE);
 }
 
 static inline void
@@ -164,8 +145,7 @@ dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
 {
-	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
-			size, DMA_FROM_DEVICE);
+	_dma_cache_sync(dma_handle + offset, size, DMA_FROM_DEVICE);
 }
 
 static inline void
@@ -173,8 +153,7 @@ dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
 {
-	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
-			size, DMA_TO_DEVICE);
+	_dma_cache_sync(dma_handle + offset, size, DMA_TO_DEVICE);
 }
 
 static inline void
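With the plat_* glue gone, the bus address is the CPU logical address, so mapping reduces to a cache sync plus a cast. A usage sketch (the device and buffer are hypothetical):

static dma_addr_t map_for_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	/* identity mapped on ARC now: handle == (dma_addr_t)buf */
	return handle;
}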
arch/arc/include/asm/elf.h
@@ -15,6 +15,11 @@
 /* These ELF defines belong to uapi but libc elf.h already defines them */
 #define EM_ARCOMPACT	93
 
+#define EM_ARCV2	195	/* ARCv2 Cores */
+
+#define EM_ARC_INUSE	(IS_ENABLED(CONFIG_ISA_ARCOMPACT) ? \
+			 EM_ARCOMPACT : EM_ARCV2)
+
 /* ARC Relocations (kernel Modules only) */
 #define R_ARC_32	0x4
 #define R_ARC_32_ME	0x1B
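EM_ARC_INUSE collapses to the right machine id at build time, so loader checks stay ISA-agnostic. The typical use is the elf_check_arch() gate; a sketch (the kernel's real check also validates e_flags):

#define elf_check_arch(x) ((x)->e_machine == EM_ARC_INUSE)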
arch/arc/include/asm/entry-arcv2.h (new file, 190 lines)
@@ -0,0 +1,190 @@
+#ifndef __ASM_ARC_ENTRY_ARCV2_H
+#define __ASM_ARC_ENTRY_ARCV2_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-arcv2.h>
+#include <asm/thread_info.h>	/* For THREAD_SIZE */
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_PROLOGUE	called_from
+
+	; Before jumping to Interrupt Vector, hardware micro-ops did following:
+	;   1. SP auto-switched to kernel mode stack
+	;   2. STATUS32.Z flag set to U mode at time of interrupt (U:1, K:0)
+	;   3. Auto saved: r0-r11, blink, LPE,LPS,LPC, JLI,LDI,EI, PC, STAT32
+	;
+	; Now manually save: r12, sp, fp, gp, r25
+
+	PUSH	r12
+
+	; Saving pt_regs->sp correctly requires some extra work due to the way
+	; Auto stack switch works
+	;  - U mode: retrieve it from AUX_USER_SP
+	;  - K mode: add the offset from current SP where H/w starts auto push
+	;
+	; Utilize the fact that Z bit is set if Intr taken in U mode
+	mov.nz	r9, sp
+	add.nz	r9, r9, SZ_PT_REGS - PT_sp - 4
+	bnz	1f
+
+	lr	r9, [AUX_USER_SP]
+1:
+	PUSH	r9	; SP
+
+	PUSH	fp
+	PUSH	gp
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	PUSH	r25			; user_r25
+	GET_CURR_TASK_ON_CPU	r25
+#else
+	sub	sp, sp, 4
+#endif
+
+.ifnc \called_from, exception
+	sub	sp, sp, 12	; BTA/ECR/orig_r0 placeholder per pt_regs
+.endif
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE	called_from
+
+.ifnc \called_from, exception
+	add	sp, sp, 12	; skip BTA/ECR/orig_r0 placeholders
+.endif
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	POP	r25
+#else
+	add	sp, sp, 4
+#endif
+
+	POP	gp
+	POP	fp
+
+	; Don't touch AUX_USER_SP if returning to K mode (Z bit set)
+	; (Z bit set on K mode is inverse of INTERRUPT_PROLOGUE)
+	add.z	sp, sp, 4
+	bz	1f
+
+	POPAX	AUX_USER_SP
+1:
+	POP	r12
+
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+	; Before jumping to Exception Vector, hardware micro-ops did following:
+	;   1. SP auto-switched to kernel mode stack
+	;   2. STATUS32.Z flag set to U mode at time of interrupt (U:1,K:0)
+	;
+	; Now manually save the complete reg file
+
+	PUSH	r9		; free up a register: slot of erstatus
+
+	PUSHAX	eret
+	sub	sp, sp, 12	; skip JLI, LDI, EI
+	PUSH	lp_count
+	PUSHAX	lp_start
+	PUSHAX	lp_end
+	PUSH	blink
+
+	PUSH	r11
+	PUSH	r10
+
+	ld.as	r9, [sp, 10]	; load stashed r9 (status32 stack slot)
+	lr	r10, [erstatus]
+	st.as	r10, [sp, 10]	; save status32 at its right stack slot
+
+	PUSH	r9
+	PUSH	r8
+	PUSH	r7
+	PUSH	r6
+	PUSH	r5
+	PUSH	r4
+	PUSH	r3
+	PUSH	r2
+	PUSH	r1
+	PUSH	r0
+
+	; -- for interrupts, regs above are auto-saved by h/w in that order --
+	; Now do what ISR prologue does (manually save r12, sp, fp, gp, r25)
+	;
+	; Set Z flag if this was from U mode (expected by INTERRUPT_PROLOGUE)
+	; Although H/w exception micro-ops do set Z flag for U mode (just like
+	; for interrupts), it could get clobbered in case we soft land here from
+	; a TLB Miss exception handler (tlbex.S)
+
+	and	r10, r10, STATUS_U_MASK
+	xor.f	0, r10, STATUS_U_MASK
+
+	INTERRUPT_PROLOGUE  exception
+
+	PUSHAX	erbta
+	PUSHAX	ecr		; r9 contains ECR, expected by EV_Trap
+
+	PUSH	r0		; orig_r0
+.endm
+
+/*------------------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+
+	; Assumes r0 has PT_status32
+	btst	r0, STATUS_U_BIT	; Z flag set if K, used in INTERRUPT_EPILOGUE
+
+	add	sp, sp, 8	; orig_r0/ECR don't need restoring
+	POPAX	erbta
+
+	INTERRUPT_EPILOGUE  exception
+
+	POP	r0
+	POP	r1
+	POP	r2
+	POP	r3
+	POP	r4
+	POP	r5
+	POP	r6
+	POP	r7
+	POP	r8
+	POP	r9
+	POP	r10
+	POP	r11
+
+	POP	blink
+	POPAX	lp_end
+	POPAX	lp_start
+
+	POP	r9
+	mov	lp_count, r9
+
+	add	sp, sp, 12	; skip JLI, LDI, EI
+	POPAX	eret
+	POPAX	erstatus
+
+	ld.as	r9, [sp, -12]	; reload r9 which got clobbered
+.endm
+
+.macro FAKE_RET_FROM_EXCPN
+	lr	r9, [status32]
+	bic	r9, r9, (STATUS_U_MASK|STATUS_DE_MASK|STATUS_AE_MASK)
+	or	r9, r9, (STATUS_L_MASK|STATUS_IE_MASK)
+	kflag	r9
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+	bmskn	\reg, sp, THREAD_SHIFT - 1
+.endm
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+	lr	\reg, [identity]
+	xbfu	\reg, \reg, 0xE8	/* 00111    01000 */
+					/* M = 8-1  N = 8 */
+.endm
+
+#endif
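GET_CPU_ID uses XBFU (extract bitfield, unsigned) with immediate 0xE8, which per the macro's own comment encodes N = 8 (start bit) and M = 8 - 1 (field width minus one), i.e. IDENTITY[15:8]. The C equivalent:

static inline unsigned int arcv2_cpu_id(unsigned int identity)
{
	return (identity >> 8) & 0xff;	/* what "xbfu reg, reg, 0xE8" computes */
}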
arch/arc/include/asm/entry-compact.h (new file, 307 lines)
@@ -0,0 +1,307 @@
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
+ *  Stack switching code can no longer reliably rely on the fact that
+ *  if we are NOT in user mode, stack is switched to kernel mode.
+ *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
+ *  its prologue including stack switching from user mode
+ *
+ * Vineetg: Aug 28th 2008: Bug #94984
+ *  -Zero Overhead Loop Context shd be cleared when entering IRQ/Excp/Trap
+ *   Normally CPU does this automatically, however when doing FAKE rtie,
+ *   we also need to explicitly do this. The problem in macros
+ *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
+ *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
+ *
+ * Vineetg: May 5th 2008
+ *  -Modified CALLEE_REG save/restore macros to handle the fact that
+ *   r25 contains the kernel current task ptr
+ *  -Defined Stack Switching Macro to be reused in all intr/excp hdlrs
+ *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
+ *   address Write back load ld.ab instead of separate ld/add instn
+ *
+ * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
+ */
+
+#ifndef __ASM_ARC_ENTRY_COMPACT_H
+#define __ASM_ARC_ENTRY_COMPACT_H
+
+#include <asm/asm-offsets.h>
+#include <asm/irqflags-compact.h>
+#include <asm/thread_info.h>	/* For THREAD_SIZE */
+
+/*--------------------------------------------------------------
+ * Switch to Kernel Mode stack if SP points to User Mode stack
+ *
+ * Entry : r9 contains pre-IRQ/exception/trap status32
+ * Exit  : SP set to K mode stack
+ *         SP at the time of entry (K/U) saved @ pt_regs->sp
+ * Clobbers: r9
+ *-------------------------------------------------------------*/
+
+.macro SWITCH_TO_KERNEL_STK
+
+	/* User Mode when this happened ? Yes: Proceed to switch stack */
+	bbit1	r9, STATUS_U_BIT, 88f
+
+	/* OK we were already in kernel mode when this event happened, thus can
+	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
+	 */
+
+#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
+	/* However....
+	 * If Level 2 Interrupts enabled, we may end up with a corner case:
+	 * 1. User Task executing
+	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
+	 * 3. But before it could switch SP from USER to KERNEL stack
+	 *    a L2 IRQ "Interrupts" L1
+	 * That way although L2 IRQ happened in Kernel mode, stack is still
+	 * not switched.
+	 * To handle this, we may need to switch stack even if in kernel mode
+	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
+	 */
+	brlo sp, VMALLOC_START, 88f
+
+	/* TODO: vineetg:
+	 * We need to be a bit more cautious here. What if a kernel bug in
+	 * L1 ISR, caused SP to go whaco (some small value which looks like
+	 * USER stk) and then we take L2 ISR.
+	 * Above brlo alone would treat it as a valid L1-L2 scenario
+	 * instead of shouting aloud
+	 * The only feasible way is to make sure this L2 happened in
+	 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
+	 * L1 ISR before it switches stack
+	 */
+
+#endif
+
+	/*------Intr/Excp happened in kernel mode, SP already setup ------ */
+	/* save it nevertheless @ pt_regs->sp for uniformity */
+
+	b.d	66f
+	st	sp, [sp, PT_sp - SZ_PT_REGS]
+
+88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
+
+	GET_CURR_TASK_ON_CPU   r9
+
+	/* With current tsk in r9, get its kernel mode stack base */
+	GET_TSK_STACK_BASE  r9, r9
+
+	/* save U mode SP @ pt_regs->sp */
+	st	sp, [r9, PT_sp - SZ_PT_REGS]
+
+	/* final SP switch */
+	mov	sp, r9
+66:
+.endm
+
+/*------------------------------------------------------------
+ * "FAKE" a rtie to return from CPU Exception context
+ * This is to re-enable Exceptions within exception
+ * Look at EV_ProtV to see how this is actually used
+ *-------------------------------------------------------------*/
+
+.macro FAKE_RET_FROM_EXCPN
+
+	ld	r9, [sp, PT_status32]
+	bic	r9, r9, (STATUS_U_MASK|STATUS_DE_MASK)
+	bset	r9, r9, STATUS_L_BIT
+	sr	r9, [erstatus]
+	mov	r9, 55f
+	sr	r9, [eret]
+
+	rtie
+55:
+.endm
+
+/*--------------------------------------------------------------
+ * For early Exception/ISR Prologue, a core reg is temporarily needed to
+ * code the rest of prolog (stack switching). This is done by stashing
+ * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
+ *
+ * Before saving the full regfile - this reg is restored back, only
+ * to be saved again on kernel mode stack, as part of pt_regs.
+ *-------------------------------------------------------------*/
+.macro PROLOG_FREEUP_REG	reg, mem
+#ifdef CONFIG_SMP
+	sr	\reg, [ARC_REG_SCRATCH_DATA0]
+#else
+	st	\reg, [\mem]
+#endif
+.endm
+
+.macro PROLOG_RESTORE_REG	reg, mem
+#ifdef CONFIG_SMP
+	lr	\reg, [ARC_REG_SCRATCH_DATA0]
+#else
+	ld	\reg, [\mem]
+#endif
+.endm
+
+/*--------------------------------------------------------------
+ * Exception Entry prologue
+ * -Switches stack to K mode (if not already)
+ * -Saves the register file
+ *
+ * After this it is safe to call the "C" handlers
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_PROLOGUE
+
+	/* Need at least 1 reg to code the early exception prologue */
+	PROLOG_FREEUP_REG r9, @ex_saved_reg1
+
+	/* U/K mode at time of exception (stack not switched if already K) */
+	lr	r9, [erstatus]
+
+	/* ARC700 doesn't provide auto-stack switching */
+	SWITCH_TO_KERNEL_STK
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/* Treat r25 as scratch reg (save on stack) and load with "current" */
+	PUSH	r25
+	GET_CURR_TASK_ON_CPU   r25
+#else
+	sub	sp, sp, 4
+#endif
+
+	st.a	r0, [sp, -8]	/* orig_r0 needed for syscall (skip ECR slot) */
+	sub	sp, sp, 4	/* skip pt_regs->sp, already saved above */
+
+	/* Restore r9 used to code the early prologue */
+	PROLOG_RESTORE_REG  r9, @ex_saved_reg1
+
+	/* now we are ready to save the regfile */
+	SAVE_R0_TO_R12
+	PUSH	gp
+	PUSH	fp
+	PUSH	blink
+	PUSHAX	eret
+	PUSHAX	erstatus
+	PUSH	lp_count
+	PUSHAX	lp_end
+	PUSHAX	lp_start
+	PUSHAX	erbta
+
+	lr	r9, [ecr]
+	st	r9, [sp, PT_event]	/* EV_Trap expects r9 to have ECR */
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by system call or Exceptions
+ * SP should always be pointing to the next free stack element
+ * when entering this macro.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro EXCEPTION_EPILOGUE
+	POPAX	erbta
+	POPAX	lp_start
+	POPAX	lp_end
+
+	POP	r9
+	mov	lp_count, r9	;LD to lp_count is not allowed
+
+	POPAX	erstatus
+	POPAX	eret
+	POP	blink
+	POP	fp
+	POP	gp
+	RESTORE_R12_TO_R0
+
+	ld	sp, [sp] /* restore original sp */
+	/* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+/* Dummy ECR values for Interrupts */
+#define event_IRQ1		0x0031abcd
+#define event_IRQ2		0x0032abcd
+
+.macro INTERRUPT_PROLOGUE  LVL
+
+	/* free up r9 as scratchpad */
+	PROLOG_FREEUP_REG r9, @int\LVL\()_saved_reg
+
+	/* Which mode (user/kernel) was the system in when intr occurred */
+	lr	r9, [status32_l\LVL\()]
+
+	SWITCH_TO_KERNEL_STK
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+	/* Treat r25 as scratch reg (save on stack) and load with "current" */
+	PUSH	r25
+	GET_CURR_TASK_ON_CPU   r25
+#else
+	sub	sp, sp, 4
+#endif
+
+	PUSH	0x003\LVL\()abcd	/* Dummy ECR */
+	sub	sp, sp, 8		/* skip orig_r0 (not needed)
+					   skip pt_regs->sp, already saved above */
+
+	/* Restore r9 used to code the early prologue */
+	PROLOG_RESTORE_REG  r9, @int\LVL\()_saved_reg
+
+	SAVE_R0_TO_R12
+	PUSH	gp
+	PUSH	fp
+	PUSH	blink
+	PUSH	ilink\LVL\()
+	PUSHAX	status32_l\LVL\()
+	PUSH	lp_count
+	PUSHAX	lp_end
+	PUSHAX	lp_start
+	PUSHAX	bta_l\LVL\()
+.endm
+
+/*--------------------------------------------------------------
+ * Restore all registers used by interrupt handlers.
+ *
+ * NOTE:
+ *
+ * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
+ * for memory load operations. If used in that way interrupts are deferred
+ * by hardware and that is not good.
+ *-------------------------------------------------------------*/
+.macro INTERRUPT_EPILOGUE  LVL
+	POPAX	bta_l\LVL\()
+	POPAX	lp_start
+	POPAX	lp_end
+
+	POP	r9
+	mov	lp_count, r9	;LD to lp_count is not allowed
+
+	POPAX	status32_l\LVL\()
+	POP	ilink\LVL\()
+	POP	blink
+	POP	fp
+	POP	gp
+	RESTORE_R12_TO_R0
+
+	ld	sp, [sp] /* restore original sp */
+	/* orig_r0, ECR, user_r25 skipped automatically */
+.endm
+
+/* Get thread_info of "current" tsk */
+.macro GET_CURR_THR_INFO_FROM_SP  reg
+	bic	\reg, sp, (THREAD_SIZE - 1)
+.endm
+
+/* Get CPU-ID of this core */
+.macro  GET_CPU_ID  reg
+	lr	\reg, [identity]
+	lsr	\reg, \reg, 8
+	bmsk	\reg, \reg, 7
+.endm
+
+#endif  /* __ASM_ARC_ENTRY_COMPACT_H */
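The branch structure of SWITCH_TO_KERNEL_STK, restated in C for clarity (a sketch; task_stack_base() is a hypothetical stand-in for GET_TSK_STACK_BASE):

static unsigned long switch_to_kernel_stk(unsigned long sp, unsigned int status32)
{
	if (!(status32 & STATUS_U_MASK))
		return sp;	/* event hit in kernel mode: SP is already the kernel stack */

	/* user mode: SP at entry is saved to pt_regs->sp, then replaced by
	 * this task's kernel mode stack base
	 */
	return task_stack_base(current);
}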
arch/arc/include/asm/entry.h
@@ -1,45 +1,27 @@
 /*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
  * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
- *  Stack switching code can no longer reliably rely on the fact that
- *  if we are NOT in user mode, stack is switched to kernel mode.
- *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
- *  its prologue including stack switching from user mode
- *
- * Vineetg: Aug 28th 2008: Bug #94984
- *  -Zero Overhead Loop Context shd be cleared when entering IRQ/Excp/Trap
- *   Normally CPU does this automatically, however when doing FAKE rtie,
- *   we also need to explicitly do this. The problem in macros
- *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
- *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
- *
- * Vineetg: May 5th 2008
- *  -Modified CALLEE_REG save/restore macros to handle the fact that
- *   r25 contains the kernel current task ptr
- *  -Defined Stack Switching Macro to be reused in all intr/excp hdlrs
- *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
- *   address Write back load ld.ab instead of separate ld/add instn
- *
- * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
  */
 
 #ifndef __ASM_ARC_ENTRY_H
 #define __ASM_ARC_ENTRY_H
 
 #ifdef __ASSEMBLY__
 #include <asm/unistd.h>		/* For NR_syscalls definition */
 #include <asm/asm-offsets.h>
 #include <asm/arcregs.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>	/* For VMALLOC_START */
-#include <asm/thread_info.h>	/* For THREAD_SIZE */
 #include <asm/mmu.h>
 
+#ifdef CONFIG_ISA_ARCOMPACT
+#include <asm/entry-compact.h>	/* ISA specific bits */
+#else
+#include <asm/entry-arcv2.h>
+#endif
+
 /* Note on the LD/ST addr modes with addr reg wback
  *
  * LD.a same as LD.aw
@@ -143,8 +125,6 @@
	POP	r13
 .endm
 
-#define OFF_USER_R25_FROM_R24	(SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
-
 /*--------------------------------------------------------------
  * Collect User Mode callee regs as struct callee_regs - needed by
  * fork/do_signal/unaligned-access-emulation.
@@ -157,12 +137,13 @@
  *-------------------------------------------------------------*/
 .macro SAVE_CALLEE_SAVED_USER
 
+	mov	r12, sp		; save SP as ref to pt_regs
	SAVE_R13_TO_R24
 
 #ifdef CONFIG_ARC_CURR_IN_REG
-	; Retrieve orig r25 and save it on stack
-	ld.as	r12, [sp, OFF_USER_R25_FROM_R24]
-	st.a	r12, [sp, -4]
+	; Retrieve orig r25 and save it with rest of callee_regs
+	ld.as	r12, [r12, PT_user_r25]
+	PUSH	r12
 #else
	PUSH	r25
 #endif
@@ -209,12 +190,16 @@
 .macro RESTORE_CALLEE_SAVED_USER
 
 #ifdef CONFIG_ARC_CURR_IN_REG
-	ld.ab	r12, [sp, 4]
-	st.as	r12, [sp, OFF_USER_R25_FROM_R24]
+	POP	r12
 #else
	POP	r25
 #endif
	RESTORE_R24_TO_R13
 
+	; SP is back to start of pt_regs
+#ifdef CONFIG_ARC_CURR_IN_REG
+	st.as	r12, [sp, PT_user_r25]
+#endif
 .endm
 
 /*--------------------------------------------------------------
@@ -240,117 +225,6 @@
 
 .endm
 
-/*--------------------------------------------------------------
- * Switch to Kernel Mode stack if SP points to User Mode stack
- *
- * Entry : r9 contains pre-IRQ/exception/trap status32
- * Exit  : SP is set to kernel mode stack pointer
- *         If CURR_IN_REG, r25 set to "current" task pointer
- * Clobbers: r9
- *-------------------------------------------------------------*/
-
-.macro SWITCH_TO_KERNEL_STK
-
-	/* User Mode when this happened ? Yes: Proceed to switch stack */
-	bbit1	r9, STATUS_U_BIT, 88f
-
-	/* OK we were already in kernel mode when this event happened, thus can
-	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
-	 */
-
-#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
-	/* However....
-	 * If Level 2 Interrupts enabled, we may end up with a corner case:
-	 * 1. User Task executing
-	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
-	 * 3. But before it could switch SP from USER to KERNEL stack
-	 *    a L2 IRQ "Interrupts" L1
-	 * That way although L2 IRQ happened in Kernel mode, stack is still
-	 * not switched.
-	 * To handle this, we may need to switch stack even if in kernel mode
-	 * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
-	 */
-	brlo sp, VMALLOC_START, 88f
-
-	/* TODO: vineetg:
-	 * We need to be a bit more cautious here. What if a kernel bug in
-	 * L1 ISR, caused SP to go whaco (some small value which looks like
-	 * USER stk) and then we take L2 ISR.
-	 * Above brlo alone would treat it as a valid L1-L2 scenario
-	 * instead of shouting aloud
-	 * The only feasible way is to make sure this L2 happened in
-	 * L1 prologue ONLY i.e. ilink2 is less than a pre-set marker in
-	 * L1 ISR before it switches stack
-	 */
-
-#endif
-
-	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
-	 * safe-keeping not really needed, but it keeps the epilogue code
-	 * (SP restore) simpler/uniform.
-	 */
-	b.d	66f
-	mov	r9, sp
-
-88: /*------Intr/Excp happened in user mode, "switch" stack ------ */
-
-	GET_CURR_TASK_ON_CPU   r9
-
-	/* With current tsk in r9, get its kernel mode stack base */
-	GET_TSK_STACK_BASE  r9, r9
-
-66:
-#ifdef CONFIG_ARC_CURR_IN_REG
-	/*
-	 * Treat r25 as scratch reg, save it on stack first
-	 * Load it with current task pointer
-	 */
-	st	r25, [r9, -4]
-	GET_CURR_TASK_ON_CPU   r25
-#endif
-
-	/* Save Pre Intr/Exception User SP on kernel stack */
-	st.a	sp, [r9, -16]	; Make room for orig_r0, ECR, user_r25
-
-	/* CAUTION:
-	 * SP should be set at the very end when we are done with everything
-	 * In case of 2 levels of interrupt we depend on value of SP to assume
-	 * that everything else is done (loading r25 etc)
-	 */
-
-	/* set SP to point to kernel mode stack */
-	mov	sp, r9
-
-	/* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
-
-.endm
-
-/*------------------------------------------------------------
- * "FAKE" a rtie to return from CPU Exception context
- * This is to re-enable Exceptions within exception
- * Look at EV_ProtV to see how this is actually used
- *-------------------------------------------------------------*/
-
-.macro FAKE_RET_FROM_EXCPN  reg
-
-	ld	\reg, [sp, PT_status32]
-	bic	\reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
-	bset	\reg, \reg, STATUS_L_BIT
-	sr	\reg, [erstatus]
-	mov	\reg, 55f
-	sr	\reg, [eret]
-
-	rtie
-55:
-.endm
-
-/*
- * @reg [OUT] &thread_info of "current"
- */
-.macro GET_CURR_THR_INFO_FROM_SP  reg
-	bic \reg, sp, (THREAD_SIZE - 1)
-.endm
-
 /*
  * @reg [OUT] thread_info->flags of "current"
  */
@@ -359,222 +233,6 @@
	ld  \reg, [\reg, THREAD_INFO_FLAGS]
 .endm
 
-/*--------------------------------------------------------------
- * For early Exception Prologue, a core reg is temporarily needed to
- * code the rest of prolog (stack switching). This is done by stashing
- * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
- *
- * Before saving the full regfile - this reg is restored back, only
- * to be saved again on kernel mode stack, as part of pt_regs.
- *-------------------------------------------------------------*/
-.macro EXCPN_PROLOG_FREEUP_REG	reg
-#ifdef CONFIG_SMP
-	sr	\reg, [ARC_REG_SCRATCH_DATA0]
-#else
-	st	\reg, [@ex_saved_reg1]
-#endif
-.endm
-
-.macro EXCPN_PROLOG_RESTORE_REG	reg
-#ifdef CONFIG_SMP
-	lr	\reg, [ARC_REG_SCRATCH_DATA0]
-#else
-	ld	\reg, [@ex_saved_reg1]
-#endif
-.endm
-
-/*--------------------------------------------------------------
- * Exception Entry prologue
- * -Switches stack to K mode (if not already)
- * -Saves the register file
- *
- * After this it is safe to call the "C" handlers
- *-------------------------------------------------------------*/
-.macro EXCEPTION_PROLOGUE
-
-	/* Need at least 1 reg to code the early exception prologue */
-	EXCPN_PROLOG_FREEUP_REG r9
-
-	/* U/K mode at time of exception (stack not switched if already K) */
-	lr	r9, [erstatus]
-
-	/* ARC700 doesn't provide auto-stack switching */
-	SWITCH_TO_KERNEL_STK
-
-	/* save the regfile */
-	SAVE_ALL_SYS
-.endm
-
-/*--------------------------------------------------------------
|
||||
* Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
|
||||
* Requires SP to be already switched to kernel mode Stack
|
||||
* sp points to the next free element on the stack at exit of this macro.
|
||||
* Registers are pushed / popped in the order defined in struct ptregs
|
||||
* in asm/ptrace.h
|
||||
* Note that syscalls are implemented via TRAP which is also a exception
|
||||
* from CPU's point of view
|
||||
*-------------------------------------------------------------*/
|
||||
.macro SAVE_ALL_SYS
|
||||
|
||||
lr r9, [ecr]
|
||||
st r9, [sp, 8] /* ECR */
|
||||
st r0, [sp, 4] /* orig_r0, needed only for sys calls */
|
||||
|
||||
/* Restore r9 used to code the early prologue */
|
||||
EXCPN_PROLOG_RESTORE_REG r9
|
||||
|
||||
SAVE_R0_TO_R12
|
||||
PUSH gp
|
||||
PUSH fp
|
||||
PUSH blink
|
||||
PUSHAX eret
|
||||
PUSHAX erstatus
|
||||
PUSH lp_count
|
||||
PUSHAX lp_end
|
||||
PUSHAX lp_start
|
||||
PUSHAX erbta
|
||||
.endm
|
||||
|
||||
/*--------------------------------------------------------------
|
||||
* Restore all registers used by system call or Exceptions
|
||||
* SP should always be pointing to the next free stack element
|
||||
* when entering this macro.
|
||||
*
|
||||
* NOTE:
|
||||
*
|
||||
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
|
||||
* for memory load operations. If used in that way interrupts are deffered
|
||||
* by hardware and that is not good.
|
||||
*-------------------------------------------------------------*/
|
||||
.macro RESTORE_ALL_SYS
|
||||
POPAX erbta
|
||||
POPAX lp_start
|
||||
POPAX lp_end
|
||||
|
||||
POP r9
|
||||
mov lp_count, r9 ;LD to lp_count is not allowed
|
||||
|
||||
POPAX erstatus
|
||||
POPAX eret
|
||||
POP blink
|
||||
POP fp
|
||||
POP gp
|
||||
RESTORE_R12_TO_R0
|
||||
|
||||
ld sp, [sp] /* restore original sp */
|
||||
/* orig_r0, ECR, user_r25 skipped automatically */
|
||||
.endm
|
||||
|
||||
|
||||
/*--------------------------------------------------------------
|
||||
* Save all registers used by interrupt handlers.
|
||||
*-------------------------------------------------------------*/
|
||||
.macro SAVE_ALL_INT1
|
||||
|
||||
/* restore original r9 to be saved as part of reg-file */
|
||||
#ifdef CONFIG_SMP
|
||||
lr r9, [ARC_REG_SCRATCH_DATA0]
|
||||
#else
|
||||
ld r9, [@int1_saved_reg]
|
||||
#endif
|
||||
|
||||
/* now we are ready to save the remaining context :) */
|
||||
st event_IRQ1, [sp, 8] /* Dummy ECR */
|
||||
st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
|
||||
|
||||
SAVE_R0_TO_R12
|
||||
PUSH gp
|
||||
PUSH fp
|
||||
PUSH blink
|
||||
PUSH ilink1
|
||||
PUSHAX status32_l1
|
||||
PUSH lp_count
|
||||
PUSHAX lp_end
|
||||
PUSHAX lp_start
|
||||
PUSHAX bta_l1
|
||||
.endm
|
||||
|
||||
.macro SAVE_ALL_INT2
|
||||
|
||||
/* TODO-vineetg: SMP we can't use global nor can we use
|
||||
* SCRATCH0 as we do for int1 because while int1 is using
|
||||
* it, int2 can come
|
||||
*/
|
||||
/* retsore original r9 , saved in sys_saved_r9 */
|
||||
ld r9, [@int2_saved_reg]
|
||||
|
||||
/* now we are ready to save the remaining context :) */
|
||||
st event_IRQ2, [sp, 8] /* Dummy ECR */
|
||||
st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
|
||||
|
||||
SAVE_R0_TO_R12
|
||||
PUSH gp
|
||||
PUSH fp
|
||||
PUSH blink
|
||||
PUSH ilink2
|
||||
PUSHAX status32_l2
|
||||
PUSH lp_count
|
||||
PUSHAX lp_end
|
||||
PUSHAX lp_start
|
||||
PUSHAX bta_l2
|
||||
.endm
|
||||
|
||||
/*--------------------------------------------------------------
|
||||
* Restore all registers used by interrupt handlers.
|
||||
*
|
||||
* NOTE:
|
||||
*
|
||||
* It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
|
||||
* for memory load operations. If used in that way interrupts are deffered
|
||||
* by hardware and that is not good.
|
||||
*-------------------------------------------------------------*/
|
||||
|
||||
.macro RESTORE_ALL_INT1
|
||||
POPAX bta_l1
|
||||
POPAX lp_start
|
||||
POPAX lp_end
|
||||
|
||||
POP r9
|
||||
mov lp_count, r9 ;LD to lp_count is not allowed
|
||||
|
||||
POPAX status32_l1
|
||||
POP ilink1
|
||||
POP blink
|
||||
POP fp
|
||||
POP gp
|
||||
RESTORE_R12_TO_R0
|
||||
|
||||
ld sp, [sp] /* restore original sp */
|
||||
/* orig_r0, ECR, user_r25 skipped automatically */
|
||||
.endm
|
||||
|
||||
.macro RESTORE_ALL_INT2
|
||||
POPAX bta_l2
|
||||
POPAX lp_start
|
||||
POPAX lp_end
|
||||
|
||||
POP r9
|
||||
mov lp_count, r9 ;LD to lp_count is not allowed
|
||||
|
||||
POPAX status32_l2
|
||||
POP ilink2
|
||||
POP blink
|
||||
POP fp
|
||||
POP gp
|
||||
RESTORE_R12_TO_R0
|
||||
|
||||
ld sp, [sp] /* restore original sp */
|
||||
/* orig_r0, ECR, user_r25 skipped automatically */
|
||||
.endm
|
||||
|
||||
|
||||
/* Get CPU-ID of this core */
|
||||
.macro GET_CPU_ID reg
|
||||
lr \reg, [identity]
|
||||
lsr \reg, \reg, 8
|
||||
bmsk \reg, \reg, 7
|
||||
.endm
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
/*-------------------------------------------------
|
||||
@@ -643,6 +301,4 @@
|
||||
|
||||
#endif /* CONFIG_ARC_CURR_IN_REG */
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
#endif /* __ASM_ARC_ENTRY_H */
|
||||
|
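The stack-switch decision in SWITCH_TO_KERNEL_STK above is the subtlest part of this header. A rough C model of what the macro computes (illustration only; the real thing must run in assembly, before there is any usable stack to call into):

/*
 * C model of the SWITCH_TO_KERNEL_STK decision, for illustration only.
 * status32 is the pre-exception STATUS32 snapshot the macro receives in r9;
 * kstack_base is what GET_TSK_STACK_BASE yields for the current task.
 */
static unsigned long pick_stack_model(unsigned long status32, unsigned long sp,
				      unsigned long kstack_base)
{
	if (status32 & STATUS_U_MASK)	/* bbit1 r9, STATUS_U_BIT, 88f */
		return kstack_base;

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	if (sp < VMALLOC_START)		/* brlo sp, VMALLOC_START, 88f */
		return kstack_base;	/* L2 IRQ hit before L1 switched SP */
#endif

	return sp;			/* already on a kernel stack */
}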
@@ -99,9 +99,45 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)

}

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif

/*
 * MMIO can also get buffered/optimized in micro-arch, so barriers needed
 * Based on ARM model for the typical use case
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 *  or:
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
#define readb(c)	({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)	({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)	({ u32 __v = readl_relaxed(c); __iormb(); __v; })

#define writeb(v,c)	({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)	({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)	({ __iowmb(); writel_relaxed(v,c); })

/*
 * Relaxed API for drivers which can handle any ordering themselves
 */
#define readb_relaxed(c)	__raw_readb(c)
#define readw_relaxed(c)	__raw_readw(c)
#define readl_relaxed(c)	__raw_readl(c)

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew(v,c)
#define writel_relaxed(v,c)	__raw_writel(v,c)

#include <asm-generic/io.h>
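The ST-then-writel pattern in the comment above is exactly what drivers should follow. A minimal sketch (the device and its register offsets are invented for illustration):

/* Hypothetical DMA engine: DESC/GO offsets are made up for this sketch */
#define XDMA_DESC	0x0
#define XDMA_GO		0x4

static void xdma_kick(void __iomem *regs, u32 desc_phys)
{
	/* stores filling the descriptor in memory precede this point */

	writel(desc_phys, regs + XDMA_DESC);	/* __iowmb() orders ST vs MMIO */
	writel(1, regs + XDMA_GO);		/* device may now fetch the desc */
}

static u32 xdma_status(void __iomem *regs)
{
	/* readl's __iormb() keeps later LDs of the DMA buffer after this */
	return readl(regs + XDMA_GO);
}

A register-only sequence with no data dependency could use the _relaxed variants and skip the barriers.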
@@ -13,8 +13,14 @@
#define NR_IRQS	128	/* allow some CPU external IRQ handling */

/* Platform Independent IRQs */
#ifdef CONFIG_ISA_ARCOMPACT
#define TIMER0_IRQ	3
#define TIMER1_IRQ	4
#else
#define TIMER0_IRQ	16
#define TIMER1_IRQ	17
#define IPI_IRQ		19
#endif

#include <linux/interrupt.h>
#include <asm-generic/irq.h>
arch/arc/include/asm/irqflags-arcv2.h (new file, 124 lines)
@@ -0,0 +1,124 @@
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_IRQFLAGS_ARCV2_H
#define __ASM_IRQFLAGS_ARCV2_H

#include <asm/arcregs.h>

/* status32 Bits */
#define STATUS_AD_BIT	19	/* Disable Align chk: core supports non-aligned */
#define STATUS_IE_BIT	31

#define STATUS_AD_MASK		(1<<STATUS_AD_BIT)
#define STATUS_IE_MASK		(1<<STATUS_IE_BIT)

#define AUX_USER_SP		0x00D
#define AUX_IRQ_CTRL		0x00E
#define AUX_IRQ_ACT		0x043	/* Active Intr across all levels */
#define AUX_IRQ_LVL_PEND	0x200	/* Pending Intr across all levels */
#define AUX_IRQ_PRIORITY	0x206
#define ICAUSE			0x40a
#define AUX_IRQ_SELECT		0x40b
#define AUX_IRQ_ENABLE		0x40c

/* Was Intr taken in User Mode */
#define AUX_IRQ_ACT_BIT_U	31

/* 0 is highest level, but taken by FIRQs, if present in design */
#define ARCV2_IRQ_DEF_PRIO	0

/* seed value for status register */
#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
					(ARCV2_IRQ_DEF_PRIO << 1))

#ifndef __ASSEMBLY__

/*
 * Save IRQ state and disable IRQs
 */
static inline long arch_local_irq_save(void)
{
	unsigned long flags;

	__asm__ __volatile__("	clri %0	\n" : "=r" (flags) : : "memory");

	return flags;
}

/*
 * restore saved IRQ state
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__("	seti %0	\n" : : "r" (flags) : "memory");
}

/*
 * Unconditionally Enable IRQs
 */
static inline void arch_local_irq_enable(void)
{
	unsigned int irqact = read_aux_reg(AUX_IRQ_ACT);

	if (irqact & 0xffff)
		write_aux_reg(AUX_IRQ_ACT, irqact & ~0xffff);

	__asm__ __volatile__("	seti	\n" : : : "memory");
}

/*
 * Unconditionally Disable IRQs
 */
static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__("	clri	\n" : : : "memory");
}

/*
 * save IRQ state
 */
static inline long arch_local_save_flags(void)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"
	: "=&r"(temp)
	:
	: "memory");

	return temp;
}

/*
 * Query IRQ state
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & (STATUS_IE_MASK));
}

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#else

.macro IRQ_DISABLE  scratch
	clri
.endm

.macro IRQ_ENABLE  scratch
	seti
.endm

#endif	/* __ASSEMBLY__ */

#endif
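For reference, this is how the pair above surfaces to the rest of the kernel through the generic wrappers (the state being protected here is hypothetical):

#include <linux/irqflags.h>

static unsigned int pending_events;	/* illustrative per-CPU state */

static void note_event(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* -> arch_local_irq_save() -> clri */
	pending_events++;		/* no local IRQ can interleave here */
	local_irq_restore(flags);	/* -> seti <flags>, prior state back */
}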
arch/arc/include/asm/irqflags-compact.h (new file, 183 lines)
@@ -0,0 +1,183 @@
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_IRQFLAGS_ARCOMPACT_H
#define __ASM_IRQFLAGS_ARCOMPACT_H

/* vineetg: March 2010 : local_irq_save( ) optimisation
 * -Remove explicit mov of current status32 into reg, that is not needed
 * -Use BIC insn instead of INVERTED + AND
 * -Conditionally disable interrupts (if they are not enabled, don't disable)
 */

#include <asm/arcregs.h>

/* status32 Reg bits related to Interrupt Handling */
#define STATUS_E1_BIT		1	/* Int 1 enable */
#define STATUS_E2_BIT		2	/* Int 2 enable */
#define STATUS_A1_BIT		3	/* Int 1 active */
#define STATUS_A2_BIT		4	/* Int 2 active */

#define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
#define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
#define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
#define STATUS_A2_MASK		(1<<STATUS_A2_BIT)
#define STATUS_IE_MASK		(STATUS_E1_MASK | STATUS_E2_MASK)

/* Other Interrupt Handling related Aux regs */
#define AUX_IRQ_LEV		0x200	/* IRQ Priority: L1 or L2 */
#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
#define AUX_IRQ_LV12		0x43	/* interrupt level register */

#define AUX_IENABLE		0x40c
#define AUX_ITRIGGER		0x40d
#define AUX_IPULSE		0x415

#define ISA_INIT_STATUS_BITS	STATUS_IE_MASK

#ifndef __ASSEMBLY__

/******************************************************************
 * IRQ Control Macros
 *
 * All of them have "memory" clobber (compiler barrier) which is needed to
 * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
 * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
 *
 * Noted at the time of Abilis Timer List corruption
 *	Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67
 *	Reasoning                    : https://lkml.org/lkml/2013/4/8/15
 *
 ******************************************************************/

/*
 * Save IRQ state and disable IRQs
 */
static inline long arch_local_irq_save(void)
{
	unsigned long temp, flags;

	__asm__ __volatile__(
	"	lr  %1, [status32]	\n"
	"	bic %0, %1, %2		\n"
	"	and.f 0, %1, %2		\n"
	"	flag.nz %0		\n"
	: "=r"(temp), "=r"(flags)
	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
	: "memory", "cc");

	return flags;
}

/*
 * restore saved IRQ state
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__(
	"	flag %0			\n"
	:
	: "r"(flags)
	: "memory");
}

/*
 * Unconditionally Enable IRQs
 */
extern void arch_local_irq_enable(void);

/*
 * Unconditionally Disable IRQs
 */
static inline void arch_local_irq_disable(void)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"
	"	and %0, %0, %1		\n"
	"	flag %0			\n"
	: "=&r"(temp)
	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
	: "memory");
}

/*
 * save IRQ state
 */
static inline long arch_local_save_flags(void)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"
	: "=&r"(temp)
	:
	: "memory");

	return temp;
}

/*
 * Query IRQ state
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & (STATUS_E1_MASK
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
			| STATUS_E2_MASK
#endif
		));
}

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#else

#ifdef CONFIG_TRACE_IRQFLAGS

.macro TRACE_ASM_IRQ_DISABLE
	bl	trace_hardirqs_off
.endm

.macro TRACE_ASM_IRQ_ENABLE
	bl	trace_hardirqs_on
.endm

#else

.macro TRACE_ASM_IRQ_DISABLE
.endm

.macro TRACE_ASM_IRQ_ENABLE
.endm

#endif

.macro IRQ_DISABLE  scratch
	lr	\scratch, [status32]
	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
	flag	\scratch
	TRACE_ASM_IRQ_DISABLE
.endm

.macro IRQ_ENABLE  scratch
	lr	\scratch, [status32]
	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
	flag	\scratch
	TRACE_ASM_IRQ_ENABLE
.endm

#endif	/* __ASSEMBLY__ */

#endif
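The bic/and.f/flag.nz sequence in arch_local_irq_save() above is the "conditional disable" the header comment advertises. Modeled in C (write_status32() is a stand-in for the flag insn, not a real API):

/* C model of ARCompact arch_local_irq_save(): only write status32 (a
 * serializing operation) when E1/E2 were actually enabled.
 */
static unsigned long irq_save_model(unsigned long status32)
{
	unsigned long ie = STATUS_E1_MASK | STATUS_E2_MASK;

	if (status32 & ie)			/* and.f 0, %1, %2 */
		write_status32(status32 & ~ie);	/* flag.nz %0 */

	return status32;	/* restore later does: flag <saved flags> */
}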
@@ -1,4 +1,5 @@
/*
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
@@ -9,171 +10,10 @@
#ifndef __ASM_ARC_IRQFLAGS_H
#define __ASM_ARC_IRQFLAGS_H

/* vineetg: March 2010 : local_irq_save( ) optimisation
 * -Remove explicit mov of current status32 into reg, that is not needed
 * -Use BIC insn instead of INVERTED + AND
 * -Conditionally disable interrupts (if they are not enabled, don't disable)
 */

#include <asm/arcregs.h>

/* status32 Reg bits related to Interrupt Handling */
#define STATUS_E1_BIT		1	/* Int 1 enable */
#define STATUS_E2_BIT		2	/* Int 2 enable */
#define STATUS_A1_BIT		3	/* Int 1 active */
#define STATUS_A2_BIT		4	/* Int 2 active */

#define STATUS_E1_MASK		(1<<STATUS_E1_BIT)
#define STATUS_E2_MASK		(1<<STATUS_E2_BIT)
#define STATUS_A1_MASK		(1<<STATUS_A1_BIT)
#define STATUS_A2_MASK		(1<<STATUS_A2_BIT)

/* Other Interrupt Handling related Aux regs */
#define AUX_IRQ_LEV		0x200	/* IRQ Priority: L1 or L2 */
#define AUX_IRQ_HINT		0x201	/* For generating Soft Interrupts */
#define AUX_IRQ_LV12		0x43	/* interrupt level register */

#define AUX_IENABLE		0x40c
#define AUX_ITRIGGER		0x40d
#define AUX_IPULSE		0x415

#ifndef __ASSEMBLY__

/******************************************************************
 * IRQ Control Macros
 *
 * All of them have "memory" clobber (compiler barrier) which is needed to
 * ensure that LD/ST requiring irq safety (R-M-W when LLSC is not available)
 * are redone after IRQs are re-enabled (and gcc doesn't reuse stale register)
 *
 * Noted at the time of Abilis Timer List corruption
 *	Orig Bug + Rejected solution : https://lkml.org/lkml/2013/3/29/67
 *	Reasoning                    : https://lkml.org/lkml/2013/4/8/15
 *
 ******************************************************************/

/*
 * Save IRQ state and disable IRQs
 */
static inline long arch_local_irq_save(void)
{
	unsigned long temp, flags;

	__asm__ __volatile__(
	"	lr  %1, [status32]	\n"
	"	bic %0, %1, %2		\n"
	"	and.f 0, %1, %2		\n"
	"	flag.nz %0		\n"
	: "=r"(temp), "=r"(flags)
	: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
	: "memory", "cc");

	return flags;
}

/*
 * restore saved IRQ state
 */
static inline void arch_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__(
	"	flag %0			\n"
	:
	: "r"(flags)
	: "memory");
}

/*
 * Unconditionally Enable IRQs
 */
extern void arch_local_irq_enable(void);

/*
 * Unconditionally Disable IRQs
 */
static inline void arch_local_irq_disable(void)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"
	"	and %0, %0, %1		\n"
	"	flag %0			\n"
	: "=&r"(temp)
	: "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
	: "memory");
}

/*
 * save IRQ state
 */
static inline long arch_local_save_flags(void)
{
	unsigned long temp;

	__asm__ __volatile__(
	"	lr  %0, [status32]	\n"
	: "=&r"(temp)
	:
	: "memory");

	return temp;
}

/*
 * Query IRQ state
 */
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
	return !(flags & (STATUS_E1_MASK
#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
			| STATUS_E2_MASK
#endif
		));
}

static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}

#ifdef CONFIG_ISA_ARCOMPACT
#include <asm/irqflags-compact.h>
#else

#ifdef CONFIG_TRACE_IRQFLAGS

.macro TRACE_ASM_IRQ_DISABLE
	bl	trace_hardirqs_off
.endm

.macro TRACE_ASM_IRQ_ENABLE
	bl	trace_hardirqs_on
.endm

#else

.macro TRACE_ASM_IRQ_DISABLE
.endm

.macro TRACE_ASM_IRQ_ENABLE
.endm

#include <asm/irqflags-arcv2.h>
#endif

.macro IRQ_DISABLE  scratch
	lr	\scratch, [status32]
	bic	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
	flag	\scratch
	TRACE_ASM_IRQ_DISABLE
.endm

.macro IRQ_ENABLE  scratch
	lr	\scratch, [status32]
	or	\scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
	flag	\scratch
	TRACE_ASM_IRQ_ENABLE
.endm

#endif	/* __ASSEMBLY__ */

#endif
arch/arc/include/asm/mcip.h (new file, 94 lines)
@@ -0,0 +1,94 @@
/*
 * ARConnect IP Support (Multi core enabler: Cross core IPI, RTC ...)
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_MCIP_H
#define __ASM_MCIP_H

#ifdef CONFIG_ISA_ARCV2

#include <asm/arcregs.h>

#define ARC_REG_MCIP_BCR	0x0d0
#define ARC_REG_MCIP_CMD	0x600
#define ARC_REG_MCIP_WDATA	0x601
#define ARC_REG_MCIP_READBACK	0x602

struct mcip_cmd {
#ifdef CONFIG_CPU_BIG_ENDIAN
	unsigned int pad:8, param:16, cmd:8;
#else
	unsigned int cmd:8, param:16, pad:8;
#endif

#define CMD_INTRPT_GENERATE_IRQ		0x01
#define CMD_INTRPT_GENERATE_ACK		0x02
#define CMD_INTRPT_READ_STATUS		0x03
#define CMD_INTRPT_CHECK_SOURCE		0x04

/* Semaphore Commands */
#define CMD_SEMA_CLAIM_AND_READ		0x11
#define CMD_SEMA_RELEASE		0x12

#define CMD_DEBUG_SET_MASK		0x34
#define CMD_DEBUG_SET_SELECT		0x36

#define CMD_GRTC_READ_LO		0x42
#define CMD_GRTC_READ_HI		0x43

#define CMD_IDU_ENABLE			0x71
#define CMD_IDU_DISABLE			0x72
#define CMD_IDU_SET_MODE		0x74
#define CMD_IDU_SET_DEST		0x76
#define CMD_IDU_SET_MASK		0x7C

#define IDU_M_TRIG_LEVEL		0x0
#define IDU_M_TRIG_EDGE			0x1

#define IDU_M_DISTRI_RR			0x0
#define IDU_M_DISTRI_DEST		0x2
};

/*
 * MCIP programming model
 *
 * - Simple commands write {cmd:8,param:16} to MCIP_CMD aux reg
 *   (param could be irq, common_irq, core_id ...)
 * - More involved commands setup MCIP_WDATA with cmd specific data
 *   before invoking the simple command
 */
static inline void __mcip_cmd(unsigned int cmd, unsigned int param)
{
	struct mcip_cmd buf;

	buf.pad = 0;
	buf.cmd = cmd;
	buf.param = param;

	WRITE_AUX(ARC_REG_MCIP_CMD, buf);
}

/*
 * Setup additional data for a cmd
 * Callers need to lock to ensure atomicity
 */
static inline void __mcip_cmd_data(unsigned int cmd, unsigned int param,
				   unsigned int data)
{
	write_aux_reg(ARC_REG_MCIP_WDATA, data);

	__mcip_cmd(cmd, param);
}

extern void mcip_init_early_smp(void);
extern void mcip_init_smp(unsigned int cpu);

#endif

#endif
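Composing the two helpers, e.g. to raise a cross-core interrupt (the lock here is illustrative; the header only states that callers must serialize CMD/WDATA access):

#include <linux/spinlock.h>
#include <asm/mcip.h>

static DEFINE_RAW_SPINLOCK(mcip_lock);	/* illustrative serialization */

static void ipi_send_sketch(int cpu)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&mcip_lock, flags);
	__mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);	/* param = target core */
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}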
@@ -15,24 +15,41 @@
#define CONFIG_ARC_MMU_VER 2
#elif defined(CONFIG_ARC_MMU_V3)
#define CONFIG_ARC_MMU_VER 3
#elif defined(CONFIG_ARC_MMU_V4)
#define CONFIG_ARC_MMU_VER 4
#endif

/* MMU Management regs */
#define ARC_REG_MMU_BCR		0x06f
#if (CONFIG_ARC_MMU_VER < 4)
#define ARC_REG_TLBPD0		0x405
#define ARC_REG_TLBPD1		0x406
#define ARC_REG_TLBINDEX	0x407
#define ARC_REG_TLBCOMMAND	0x408
#define ARC_REG_PID		0x409
#define ARC_REG_SCRATCH_DATA0	0x418
#else
#define ARC_REG_TLBPD0		0x460
#define ARC_REG_TLBPD1		0x461
#define ARC_REG_TLBINDEX	0x464
#define ARC_REG_TLBCOMMAND	0x465
#define ARC_REG_PID		0x468
#define ARC_REG_SCRATCH_DATA0	0x46c
#endif

/* Bits in MMU PID register */
#define MMU_ENABLE		(1 << 31)	/* Enable MMU for process */
#define __TLB_ENABLE		(1 << 31)
#define __PROG_ENABLE		(1 << 30)
#define MMU_ENABLE		(__TLB_ENABLE | __PROG_ENABLE)

/* Error code if probe fails */
#define TLB_LKUP_ERR		0x80000000

#if (CONFIG_ARC_MMU_VER < 4)
#define TLB_DUP_ERR	(TLB_LKUP_ERR | 0x00000001)
#else
#define TLB_DUP_ERR	(TLB_LKUP_ERR | 0x40000000)
#endif

/* TLB Commands */
#define TLBWrite    0x1
@@ -45,6 +62,11 @@
#define TLBIVUTLB   0x6	/* explicitly inv uTLBs */
#endif

#if (CONFIG_ARC_MMU_VER >= 4)
#define TLBInsertEntry	0x7
#define TLBDeleteEntry	0x8
#endif

#ifndef __ASSEMBLY__

typedef struct {
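On MMU v4 the new commands let hardware pick the victim slot itself; a grossly simplified sketch of what a TLB insert then reduces to (helper name hypothetical, error handling omitted):

/* Hypothetical sketch, MMU v4 only: program PD0/PD1, let hw pick the slot */
static void tlb_insert_sketch(unsigned int pd0, unsigned int pd1)
{
	write_aux_reg(ARC_REG_TLBPD0, pd0);	/* vaddr | ASID | ctrl bits */
	write_aux_reg(ARC_REG_TLBPD1, pd1);	/* paddr | permissions */
	write_aux_reg(ARC_REG_TLBCOMMAND, TLBInsertEntry);
}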
@@ -72,8 +72,18 @@
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_MODIFIED      (1<<5)	/* Page modified (dirty) (S) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_SZ            (1<<10)	/* Page Size indicator (H) */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */
#endif
@@ -77,7 +77,7 @@ struct task_struct;
 */
#define TSK_K_ESP(tsk)		(tsk->thread.ksp)

#define TSK_K_REG(tsk, off)	(*((unsigned int *)(TSK_K_ESP(tsk) + \
#define TSK_K_REG(tsk, off)	(*((unsigned long *)(TSK_K_ESP(tsk) + \
					sizeof(struct callee_regs) + off)))

#define TSK_K_BLINK(tsk)	TSK_K_REG(tsk, 4)
@@ -100,29 +100,26 @@ extern unsigned int get_wchan(struct task_struct *p);

#endif /* !__ASSEMBLY__ */

/* Kernels Virtual memory area.
 * Unlike other architectures(MIPS, sh, cris ) ARC 700 does not have a
 * "kernel translated" region (like KSEG2 in MIPS). So we use a upper part
 * of the translated bottom 2GB for kernel virtual memory and protect
 * these pages from user accesses by disabling Ru, Eu and Wu.
/*
 * System Memory Map on ARC
 *
 * ---------------------------- (lower 2G, Translated) -------------------------
 *  0x0000_0000		0x5FFF_FFFF	(user vaddr: TASK_SIZE)
 *  0x6000_0000		0x6FFF_FFFF	(reserved gutter between U/K)
 *  0x7000_0000		0x7FFF_FFFF	(kvaddr: vmalloc/modules/pkmap..)
 *
 * PAGE_OFFSET ---------------- (Upper 2G, Untranslated) -----------------------
 *  0x8000_0000		0xBFFF_FFFF	(kernel direct mapped)
 *  0xC000_0000		0xFFFF_FFFF	(peripheral uncached space)
 * -----------------------------------------------------------------------------
 */
#define VMALLOC_SIZE	(0x10000000)	/* 256M */
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)
#define VMALLOC_END	(PAGE_OFFSET)
#define VMALLOC_START	0x70000000
#define VMALLOC_SIZE	(PAGE_OFFSET - VMALLOC_START)
#define VMALLOC_END	(VMALLOC_START + VMALLOC_SIZE)

/* Most of the architectures seem to be keeping some kind of padding between
 * userspace TASK_SIZE and PAGE_OFFSET. i.e TASK_SIZE != PAGE_OFFSET.
 */
#define USER_KERNEL_GUTTER	0x10000000

/* User address space:
 * On ARC700, CPU allows the entire lower half of 32 bit address space to be
 * translated. Thus potentially 2G (0:0x7FFF_FFFF) could be User vaddr space.
 * However we steal 256M for kernel addr (0x7000_0000:0x7FFF_FFFF) and another
 * 256M (0x6000_0000:0x6FFF_FFFF) is gutter between user/kernel spaces
 * Thus total User vaddr space is (0:0x5FFF_FFFF)
 */
#define TASK_SIZE	(PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)
#define TASK_SIZE	(VMALLOC_START - USER_KERNEL_GUTTER)

#define STACK_TOP       TASK_SIZE
#define STACK_TOP_MAX   STACK_TOP
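The comment's arithmetic is easy to break when tweaking these constants; a compile-time sanity check one could drop in (sketch only, not part of the patch):

#include <linux/bug.h>

static inline void arc_layout_check_sketch(void)
{
	/* 0x6000_0000 + 0x1000_0000 == 0x7000_0000 == VMALLOC_START */
	BUILD_BUG_ON(TASK_SIZE + USER_KERNEL_GUTTER != VMALLOC_START);

	/* vmalloc region must butt up exactly against PAGE_OFFSET */
	BUILD_BUG_ON(VMALLOC_START + VMALLOC_SIZE != PAGE_OFFSET);
}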
@@ -16,6 +16,7 @@

/* THE pt_regs: Defines how regs are saved during entry into kernel */

#ifdef CONFIG_ISA_ARCOMPACT
struct pt_regs {

	/* Real registers */
@@ -56,6 +57,48 @@ struct pt_regs {

	long user_r25;
};
#else

struct pt_regs {

	long orig_r0;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned long state:8, ecr_vec:8,
				      ecr_cause:8, ecr_param:8;
#else
			unsigned long ecr_param:8, ecr_cause:8,
				      ecr_vec:8, state:8;
#endif
		};
		unsigned long event;
	};

	long bta;	/* bta_l1, bta_l2, erbta */

	long user_r25;

	long r26;	/* gp */
	long fp;
	long sp;	/* user/kernel sp depending on where we came from */

	long r12;

	/*------- Below list auto saved by h/w -----------*/
	long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;

	long blink;
	long lp_end, lp_start, lp_count;

	long ei, ldi, jli;

	long ret;
	long status32;
};

#endif

/* Callee saved registers - need to be saved only when you are scheduled out */
@@ -22,24 +22,46 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	/*
	 * This smp_mb() is technically superfluous, we only need the one
	 * after the lock for providing the ACQUIRE semantics.
	 * However doing the "right" thing was regressing hackbench
	 * so keeping this, pending further investigation
	 */
	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	"	breq  %0, %2, 1b	\n"
	: "+&r" (tmp)
	: "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
	: "memory");

	/*
	 * ACQUIRE barrier to ensure load/store after taking the lock
	 * don't "bleed-up" out of the critical section (leak-in is allowed)
	 * http://www.spinics.net/lists/kernel/msg2010409.html
	 *
	 * ARCv2 only has load-load, store-store and all-all barrier
	 * thus need the full all-all barrier
	 */
	smp_mb();
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__;

	smp_mb();

	__asm__ __volatile__(
	"1:	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	smp_mb();

	return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__);
}

@@ -47,12 +69,22 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;

	/*
	 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
	 * is the only option
	 */
	smp_mb();

	__asm__ __volatile__(
	"	ex  %0, [%1]		\n"
	: "+r" (tmp)
	: "r"(&(lock->slock))
	: "memory");

	/*
	 * superfluous, but keeping for now - see pairing version in
	 * arch_spin_lock above
	 */
	smp_mb();
}
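EX is an atomic register-memory swap, so the whole lock fits a three-line model in portable C (a model of the algorithm, not the kernel implementation):

#include <stdatomic.h>

#define SLOCK_LOCKED	1	/* stands in for __ARCH_SPIN_LOCK_LOCKED__ */

/* Model: swap LOCKED in; if we got LOCKED back, someone else holds it */
static void arch_spin_lock_model(atomic_uint *slock)
{
	while (atomic_exchange_explicit(slock, SLOCK_LOCKED,
					memory_order_relaxed) == SLOCK_LOCKED)
		;	/* the 'breq %0, %2, 1b' retry loop */

	/* the trailing smp_mb(): ARCv2 offers no acquire-only barrier */
	atomic_thread_fence(memory_order_seq_cst);
}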
@@ -25,6 +25,7 @@
#endif

#define THREAD_SIZE     (PAGE_SIZE << THREAD_SIZE_ORDER)
#define THREAD_SHIFT	(PAGE_SHIFT << THREAD_SIZE_ORDER)

#ifndef __ASSEMBLY__
@@ -659,31 +659,30 @@ static inline unsigned long __arc_clear_user(void __user *to, unsigned long n)
static inline long
__arc_strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res = count;
	long res = 0;
	char val;
	unsigned int hw_count;

	if (count == 0)
		return 0;

	__asm__ __volatile__(
	"	lp	2f		\n"
	"	lp	3f		\n"
	"1:	ldb.ab  %3, [%2, 1]	\n"
	"	breq.d	%3, 0, 2f	\n"
	"	breq.d	%3, 0, 3f	\n"
	"	stb.ab  %3, [%1, 1]	\n"
	"2:	sub	%0, %6, %4	\n"
	"3:	;nop			\n"
	"	add	%0, %0, 1	# Num of NON NULL bytes copied	\n"
	"3:								\n"
	"	.section .fixup, \"ax\"	\n"
	"	.align 4		\n"
	"4:	mov %0, %5		\n"
	"4:	mov %0, %4		# sets @res as -EFAULT	\n"
	"	j   3b			\n"
	"	.previous		\n"
	"	.section __ex_table, \"a\"	\n"
	"	.align 4		\n"
	"	.word   1b, 4b		\n"
	"	.previous		\n"
	: "=r"(res), "+r"(dst), "+r"(src), "=&r"(val), "=l"(hw_count)
	: "g"(-EFAULT), "ir"(count), "4"(count)	/* this "4" seeds lp_count */
	: "+r"(res), "+r"(dst), "+r"(src), "=r"(val)
	: "g"(-EFAULT), "l"(count)
	: "memory");

	return res;
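Restating the rewritten loop's contract in plain C (reference model only): the terminating NUL is copied but not counted, and a faulting user-space load yields -EFAULT via the fixup section.

/* C model of the corrected __arc_strncpy_from_user() semantics */
static long strncpy_from_user_model(char *dst, const char *src, long count)
{
	long res = 0;

	while (res < count) {
		char c = src[res];	/* fault here => fixup returns -EFAULT */

		dst[res] = c;		/* stb.ab in the delay slot: NUL too */
		if (c == 0)
			break;		/* NUL stored but not counted */
		res++;			/* 'add %0, %0, 1' per non-NUL byte */
	}
	return res;
}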