Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
 "Boston platform support:
   - Document DT bindings
   - Add CLK driver for board clocks

  CM:
   - Avoid per-core locking with CM3 & higher
   - WARN on attempt to lock invalid VP, not BUG

  CPS:
   - Select CONFIG_SYS_SUPPORTS_SCHED_SMT for MIPSr6
   - Prevent multi-core with dcache aliasing
   - Handle cores not powering down more gracefully
   - Handle spurious VP starts more gracefully

  DSP:
   - Add lwx & lhx misaligned access support

  eBPF:
   - Add MIPS support along with many supporting changes to add the
     required infrastructure

  Generic arch code:
   - Misc sysmips MIPS_ATOMIC_SET fixes
   - Drop duplicate HAVE_SYSCALL_TRACEPOINTS
   - Negate error syscall return in trace
   - Correct forced syscall errors
   - Traced negative syscalls should return -ENOSYS
   - Allow samples/bpf/tracex5 to access syscall arguments for sane
     traces
   - Clean up stale Kconfig options in defconfigs
   - Fix PREF instruction usage by memcpy for MIPS R6
   - Fix various special cases in the FPU emulation
   - Fix some special cases in MIPS16e2 support
   - Fix MIPS I ISA /proc/cpuinfo reporting
   - Sort MIPS Kconfig alphabetically
   - Fix minimum alignment requirement of IRQ stack as required by
     ABI / GCC
   - Fix special cases in the module loader
   - Perform post-DMA cache flushes on systems with MAARs
   - Probe the I6500 CPU
   - Clean up cmpxchg and add support for 1- and 2-byte operations
   - Use queued read/write locks (qrwlock)
   - Use queued spinlocks (qspinlock)
   - Add CPU shared FTLB feature detection
   - Handle tlbex-tlbp race condition
   - Allow storing pgd in C0_CONTEXT for MIPSr6
   - Use current_cpu_type() in m4kc_tlbp_war()
   - Support Boston in the generic kernel

  Generic platform:
   - yamon-dt: Pull YAMON DT shim code out of SEAD-3 board
   - yamon-dt: Support > 256MB of RAM
   - yamon-dt: Use serial* rather than uart* aliases
   - Abstract FDT fixup application
   - Set RTC_ALWAYS_BCD to 0
   - Add a MAINTAINERS entry

  Core kernel:
   - qspinlock.c: include linux/prefetch.h

  Loongson 3:
   - Add support for newer CPU revisions (e.g. the Loongson 3A R3, per
     the PRID updates below)

  Perf:
   - Add I6500 support

  SEAD-3:
   - Remove GIC timer from DT
   - Set interrupt-parent per-device, not at root node
   - Fix GIC interrupt specifiers

  SMP:
   - Skip IPI setup if we only have a single CPU

  VDSO:
   - Make comment match reality
   - Improvements to time code in VDSO"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (86 commits)
  locking/qspinlock: Include linux/prefetch.h
  MIPS: Fix MIPS I ISA /proc/cpuinfo reporting
  MIPS: Fix minimum alignment requirement of IRQ stack
  MIPS: generic: Support MIPS Boston development boards
  MIPS: DTS: img: Don't attempt to build-in all .dtb files
  clk: boston: Add a driver for MIPS Boston board clocks
  dt-bindings: Document img,boston-clock binding
  MIPS: Traced negative syscalls should return -ENOSYS
  MIPS: Correct forced syscall errors
  MIPS: Negate error syscall return in trace
  MIPS: Drop duplicate HAVE_SYSCALL_TRACEPOINTS select
  MIPS16e2: Provide feature overrides for non-MIPS16 systems
  MIPS: MIPS16e2: Report ASE presence in /proc/cpuinfo
  MIPS: MIPS16e2: Subdecode extended LWSP/SWSP instructions
  MIPS: MIPS16e2: Identify ASE presence
  MIPS: VDSO: Fix a mismatch between comment and preprocessor constant
  MIPS: VDSO: Add implementation of gettimeofday() fallback
  MIPS: VDSO: Add implementation of clock_gettime() fallback
  MIPS: VDSO: Fix conversions in do_monotonic()/do_monotonic_coarse()
  MIPS: Use current_cpu_type() in m4kc_tlbp_war()
  ...
Linus Torvalds, 2017-07-15 10:59:54 -07:00
136 changed files with 2462 additions and 1845 deletions


@@ -12,6 +12,8 @@ generic-y += mm-arch-hooks.h
generic-y += parport.h
generic-y += percpu.h
generic-y += preempt.h
+generic-y += qrwlock.h
+generic-y += qspinlock.h
generic-y += sections.h
generic-y += segment.h
generic-y += serial.h


@@ -74,10 +74,7 @@ static inline int compute_return_epc(struct pt_regs *regs)
return __microMIPS_compute_return_epc(regs);
if (cpu_has_mips16)
return __MIPS16e_compute_return_epc(regs);
-return regs->cp0_epc;
-}
-if (!delay_slot(regs)) {
+} else if (!delay_slot(regs)) {
regs->cp0_epc += 4;
return 0;
}


@@ -13,153 +13,107 @@
#include <asm/compiler.h>
#include <asm/war.h>
static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
{
__u32 retval;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long dummy;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set arch=r4000 \n"
" sc %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
do {
__asm__ __volatile__(
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" ll %0, %3 # xchg_u32 \n"
" .set mips0 \n"
" move %2, %z4 \n"
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" sc %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
"=&r" (dummy)
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
unsigned long flags;
raw_local_irq_save(flags);
retval = *m;
*m = val;
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_llsc_mb();
return retval;
}
#ifdef CONFIG_64BIT
static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
{
__u64 retval;
smp_mb__before_llsc();
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long dummy;
__asm__ __volatile__(
" .set arch=r4000 \n"
"1: lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
" beqzl %2, 1b \n"
" .set mips0 \n"
: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} else if (kernel_uses_llsc) {
unsigned long dummy;
do {
__asm__ __volatile__(
" .set "MIPS_ISA_ARCH_LEVEL" \n"
" lld %0, %3 # xchg_u64 \n"
" move %2, %z4 \n"
" scd %2, %1 \n"
" .set mips0 \n"
: "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
"=&r" (dummy)
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
: "memory");
} while (unlikely(!dummy));
} else {
unsigned long flags;
raw_local_irq_save(flags);
retval = *m;
*m = val;
raw_local_irq_restore(flags); /* implies memory barrier */
}
smp_llsc_mb();
return retval;
}
/*
* Using a branch-likely instruction to check the result of an sc instruction
* works around a bug present in R10000 CPUs prior to revision 3.0 that could
* cause ll-sc sequences to execute non-atomically.
*/
#if R10000_LLSC_WAR
# define __scbeqz "beqzl"
#else
-extern __u64 __xchg_u64_unsupported_on_32bit_kernels(volatile __u64 * m, __u64 val);
-#define __xchg_u64 __xchg_u64_unsupported_on_32bit_kernels
# define __scbeqz "beqz"
#endif
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
/*
* These functions don't exist, so if they are called you'll either:
*
* - Get an error at compile-time due to __compiletime_error, if supported by
* your compiler.
*
* or:
*
* - Get an error at link-time due to the call to the missing function.
*/
extern unsigned long __cmpxchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for cmpxchg");
extern unsigned long __xchg_called_with_bad_pointer(void)
__compiletime_error("Bad argument size for xchg");
#define __xchg_asm(ld, st, m, val) \
({ \
__typeof(*(m)) __ret; \
\
if (kernel_uses_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
"1: " ld " %0, %2 # __xchg_asm \n" \
" .set mips0 \n" \
" move $1, %z3 \n" \
" .set " MIPS_ISA_ARCH_LEVEL " \n" \
" " st " $1, %1 \n" \
"\t" __scbeqz " $1, 1b \n" \
" .set pop \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
: GCC_OFF_SMALL_ASM() (*m), "Jr" (val) \
: "memory"); \
} else { \
unsigned long __flags; \
\
raw_local_irq_save(__flags); \
__ret = *m; \
*m = val; \
raw_local_irq_restore(__flags); \
} \
\
__ret; \
})
extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
unsigned int size);
static inline unsigned long __xchg(volatile void *ptr, unsigned long x,
int size)
{
switch (size) {
-case 4:
-return __xchg_u32(ptr, x);
-case 8:
-return __xchg_u64(ptr, x);
-}
-return x;
+case 1:
+case 2:
+return __xchg_small(ptr, x, size);
+case 4:
+return __xchg_asm("ll", "sc", (volatile u32 *)ptr, x);
+case 8:
+if (!IS_ENABLED(CONFIG_64BIT))
+return __xchg_called_with_bad_pointer();
+return __xchg_asm("lld", "scd", (volatile u64 *)ptr, x);
+default:
+return __xchg_called_with_bad_pointer();
+}
}
#define xchg(ptr, x) \
({ \
-BUILD_BUG_ON(sizeof(*(ptr)) & ~0xc); \
-((__typeof__(*(ptr))) \
-__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)))); \
+__typeof__(*(ptr)) __res; \
+\
+smp_mb__before_llsc(); \
+\
+__res = (__typeof__(*(ptr))) \
+__xchg((ptr), (unsigned long)(x), sizeof(*(ptr))); \
+\
+smp_llsc_mb(); \
+\
+__res; \
})
#define __cmpxchg_asm(ld, st, m, old, new) \
({ \
__typeof(*(m)) __ret; \
\
-if (kernel_uses_llsc && R10000_LLSC_WAR) { \
-__asm__ __volatile__( \
-" .set push \n" \
-" .set noat \n" \
-" .set arch=r4000 \n" \
-"1: " ld " %0, %2 # __cmpxchg_asm \n" \
-" bne %0, %z3, 2f \n" \
-" .set mips0 \n" \
-" move $1, %z4 \n" \
-" .set arch=r4000 \n" \
-" " st " $1, %1 \n" \
-" beqzl $1, 1b \n" \
-"2: \n" \
-" .set pop \n" \
-: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
-: GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \
-: "memory"); \
-} else if (kernel_uses_llsc) { \
+if (kernel_uses_llsc) { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
@@ -170,7 +124,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
" move $1, %z4 \n" \
" .set "MIPS_ISA_ARCH_LEVEL" \n" \
" " st " $1, %1 \n" \
" beqz $1, 1b \n" \
"\t" __scbeqz " $1, 1b \n" \
" .set pop \n" \
"2: \n" \
: "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \
@@ -189,44 +143,50 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
__ret; \
})
/*
* This function doesn't exist, so you'll get a linker error
* if something tries to do an invalid cmpxchg().
*/
extern void __cmpxchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
unsigned long new, unsigned int size);
-#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier) \
+static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
+unsigned long new, unsigned int size)
{
switch (size) {
case 1:
case 2:
return __cmpxchg_small(ptr, old, new, size);
case 4:
return __cmpxchg_asm("ll", "sc", (volatile u32 *)ptr, old, new);
case 8:
/* lld/scd are only available for MIPS64 */
if (!IS_ENABLED(CONFIG_64BIT))
return __cmpxchg_called_with_bad_pointer();
return __cmpxchg_asm("lld", "scd", (volatile u64 *)ptr, old, new);
default:
return __cmpxchg_called_with_bad_pointer();
}
}
#define cmpxchg_local(ptr, old, new) \
((__typeof__(*(ptr))) \
__cmpxchg((ptr), \
(unsigned long)(__typeof__(*(ptr)))(old), \
(unsigned long)(__typeof__(*(ptr)))(new), \
sizeof(*(ptr))))
#define cmpxchg(ptr, old, new) \
({ \
-__typeof__(ptr) __ptr = (ptr); \
-__typeof__(*(ptr)) __old = (old); \
-__typeof__(*(ptr)) __new = (new); \
-__typeof__(*(ptr)) __res = 0; \
-pre_barrier; \
-switch (sizeof(*(__ptr))) { \
-case 4: \
-__res = __cmpxchg_asm("ll", "sc", __ptr, __old, __new); \
-break; \
-case 8: \
-if (sizeof(long) == 8) { \
-__res = __cmpxchg_asm("lld", "scd", __ptr, \
-__old, __new); \
-break; \
-} \
-default: \
-__cmpxchg_called_with_bad_pointer(); \
-break; \
-} \
-post_barrier; \
+__typeof__(*(ptr)) __res; \
+\
+smp_mb__before_llsc(); \
+__res = cmpxchg_local((ptr), (old), (new)); \
+smp_llsc_mb(); \
\
__res; \
})
-#define cmpxchg(ptr, old, new) __cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
-#define cmpxchg_local(ptr, old, new) __cmpxchg(ptr, old, new, , )
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \
({ \
@@ -245,4 +205,6 @@ extern void __cmpxchg_called_with_bad_pointer(void);
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif
#undef __scbeqz
#endif /* __ASM_CMPXCHG_H */
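
The 1- and 2-byte cases above are punted to __xchg_small(), whose implementation is out of line and not shown in this diff. As a rough sketch of the usual technique (assuming kernel helpers such as BITS_PER_BYTE and cmpxchg(), and ignoring big-endian sub-word placement), a small exchange can be emulated with a 32-bit compare-and-exchange on the naturally aligned word containing the value:

/*
 * Illustrative sketch only -- not the kernel's __xchg_small().
 * Emulate a 1- or 2-byte exchange using a 32-bit cmpxchg() on the
 * naturally aligned word that contains *ptr (little-endian layout).
 */
static unsigned long xchg_small_sketch(volatile void *ptr,
				       unsigned long val, unsigned int size)
{
	volatile u32 *word = (volatile u32 *)((unsigned long)ptr & ~0x3UL);
	unsigned int shift = ((unsigned long)ptr & 0x3) * BITS_PER_BYTE;
	u32 mask = ((1U << (size * BITS_PER_BYTE)) - 1) << shift;
	u32 old, new;

	do {
		old = *word;
		/* Splice the new sub-word value into the loaded word. */
		new = (old & ~mask) | (((u32)val << shift) & mask);
	} while (cmpxchg(word, old, new) != old);

	/* Return the sub-word value that was previously stored. */
	return (old & mask) >> shift;
}

A big-endian kernel would additionally need the shift mirrored within the word, which is one reason the real helper lives in common code rather than being open-coded at each call site.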


@@ -138,6 +138,9 @@
#ifndef cpu_has_mips16
#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
#endif
#ifndef cpu_has_mips16e2
#define cpu_has_mips16e2 (cpu_data[0].ases & MIPS_ASE_MIPS16E2)
#endif
#ifndef cpu_has_mdmx
#define cpu_has_mdmx (cpu_data[0].ases & MIPS_ASE_MDMX)
#endif
@@ -487,6 +490,47 @@
# define cpu_has_perf (cpu_data[0].options & MIPS_CPU_PERF)
#endif
#if defined(CONFIG_SMP) && defined(__mips_isa_rev) && (__mips_isa_rev >= 6)
/*
* Some systems share FTLB RAMs between threads within a core (siblings in
* kernel parlance). This means that FTLB entries may become invalid at almost
* any point when an entry is evicted due to a sibling thread writing an entry
* to the shared FTLB RAM.
*
* This is only relevant to SMP systems, and the only systems that exhibit this
* property implement MIPSr6 or higher so we constrain support for this to
* kernels that will run on such systems.
*/
# ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram \
(current_cpu_data.options & MIPS_CPU_SHARED_FTLB_RAM)
# endif
/*
* Some systems take this a step further & share FTLB entries between siblings.
* This is implemented as TLB writes happening as usual, but if an entry
* written by a sibling exists in the shared FTLB for a translation which would
* otherwise cause a TLB refill exception then the CPU will use the entry
* written by its sibling rather than triggering a refill & writing a matching
* TLB entry for itself.
*
* This is naturally only valid if a TLB entry is known to be suitable for use
* on all siblings in a CPU, and so it only takes effect when MMIDs are in use
* rather than ASIDs or when a TLB entry is marked global.
*/
# ifndef cpu_has_shared_ftlb_entries
# define cpu_has_shared_ftlb_entries \
(current_cpu_data.options & MIPS_CPU_SHARED_FTLB_ENTRIES)
# endif
#endif /* SMP && __mips_isa_rev >= 6 */
#ifndef cpu_has_shared_ftlb_ram
# define cpu_has_shared_ftlb_ram 0
#endif
#ifndef cpu_has_shared_ftlb_entries
# define cpu_has_shared_ftlb_entries 0
#endif
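/*
 * Illustrative sketch, not part of the patch: a hypothetical consumer
 * of the feature bits defined above. The extra TLB re-check mentioned
 * in the pull summary ("handle tlbex-tlbp race condition") only pays
 * off when a sibling can replace or evict our FTLB entry between the
 * refill exception and our own TLB write.
 */
static inline bool tlb_refill_needs_recheck(void)
{
	return cpu_has_shared_ftlb_ram || cpu_has_shared_ftlb_entries;
}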
/*
* Guest capabilities
*/


@@ -84,6 +84,7 @@ static inline int __pure __get_cpu_type(const int cpu_type)
#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
case CPU_I6400:
case CPU_I6500:
case CPU_P6600:
#endif


@@ -124,6 +124,7 @@
#define PRID_IMP_P5600 0xa800
#define PRID_IMP_I6400 0xa900
#define PRID_IMP_M6250 0xab00
#define PRID_IMP_I6500 0xb000
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -247,6 +248,7 @@
#define PRID_REV_LOONGSON3B_R1 0x0006
#define PRID_REV_LOONGSON3B_R2 0x0007
#define PRID_REV_LOONGSON3A_R2 0x0008
#define PRID_REV_LOONGSON3A_R3 0x0009
/*
* Older processors used to encode processor version and revision in two
@@ -322,7 +324,7 @@ enum cpu_type_enum {
*/
CPU_5KC, CPU_5KE, CPU_20KC, CPU_25KF, CPU_SB1, CPU_SB1A, CPU_LOONGSON2,
CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, CPU_I6500,
CPU_QEMU_GENERIC,
@@ -416,6 +418,10 @@ enum cpu_type_enum {
#define MIPS_CPU_GUESTID MBIT_ULL(51) /* CPU uses VZ ASE GuestID feature */
#define MIPS_CPU_DRG MBIT_ULL(52) /* CPU has VZ Direct Root to Guest (DRG) */
#define MIPS_CPU_UFR MBIT_ULL(53) /* CPU supports User mode FR switching */
#define MIPS_CPU_SHARED_FTLB_RAM \
MBIT_ULL(54) /* CPU shares FTLB RAM with another */
#define MIPS_CPU_SHARED_FTLB_ENTRIES \
MBIT_ULL(55) /* CPU shares FTLB entries with another */
/*
* CPU ASE encodings
@@ -430,5 +436,6 @@ enum cpu_type_enum {
#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
#define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3*/
#define MIPS_ASE_MIPS16E2 0x00000400 /* MIPS16e2 */
#endif /* _ASM_CPU_H */


@@ -18,7 +18,7 @@
#include <irq.h>
#define IRQ_STACK_SIZE THREAD_SIZE
-#define IRQ_STACK_START (IRQ_STACK_SIZE - sizeof(unsigned long))
+#define IRQ_STACK_START (IRQ_STACK_SIZE - 16)
extern void *irq_stack[NR_CPUS];


@@ -40,6 +40,7 @@
#endif
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -31,6 +31,7 @@
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -19,6 +19,7 @@
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -37,6 +37,7 @@
#endif
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -27,6 +27,7 @@
#define cpu_has_mcheck 0
#define cpu_has_ejtag 0
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -27,7 +27,7 @@ static inline void CMOS_WRITE(unsigned char data, unsigned long addr)
outb_p(data, RTC_PORT(1));
}
-#define RTC_ALWAYS_BCD 1
+#define RTC_ALWAYS_BCD 0
#ifndef mc146818_decode_year
#define mc146818_decode_year(year) ((year) < 70 ? (year) + 2000 : (year) + 1900)


@@ -19,6 +19,7 @@
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_divec 0
#define cpu_has_cache_cdex_p 1
#define cpu_has_prefetch 0


@@ -43,6 +43,7 @@
#define cpu_has_ejtag 0
#define cpu_has_llsc 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -16,6 +16,7 @@
*/
#define cpu_has_watch 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_divec 0
#define cpu_has_vce 0
#define cpu_has_cache_cdex_p 0


@@ -29,6 +29,7 @@
#define cpu_has_32fpr 1
#define cpu_has_counter 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_vce 0
#define cpu_has_cache_cdex_s 0
#define cpu_has_mcheck 0


@@ -23,6 +23,7 @@
#define cpu_has_ejtag 1
#define cpu_has_llsc 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -27,12 +27,22 @@ struct efi_memory_map_loongson {
} __packed;
enum loongson_cpu_type {
-Loongson_2E = 0,
-Loongson_2F = 1,
-Loongson_3A = 2,
-Loongson_3B = 3,
-Loongson_1A = 4,
-Loongson_1B = 5
+Legacy_2E = 0x0,
+Legacy_2F = 0x1,
+Legacy_3A = 0x2,
+Legacy_3B = 0x3,
+Legacy_1A = 0x4,
+Legacy_1B = 0x5,
+Legacy_2G = 0x6,
+Legacy_2H = 0x7,
+Loongson_1A = 0x100,
+Loongson_1B = 0x101,
+Loongson_2E = 0x200,
+Loongson_2F = 0x201,
+Loongson_2G = 0x202,
+Loongson_2H = 0x203,
+Loongson_3A = 0x300,
+Loongson_3B = 0x301
};
/*


@@ -32,6 +32,7 @@
#define cpu_has_mcheck 0
#define cpu_has_mdmx 0
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mips3d 0
#define cpu_has_mipsmt 0
#define cpu_has_smartmips 0


@@ -13,6 +13,7 @@
#define cpu_has_4k_cache 1
#define cpu_has_watch 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_counter 1
#define cpu_has_divec 1
#define cpu_has_vce 0


@@ -48,6 +48,7 @@
#define cpu_has_llsc 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -17,6 +17,7 @@
#define cpu_has_counter 1
#define cpu_has_watch 0
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_divec 0
#define cpu_has_cache_cdex_p 1
#define cpu_has_prefetch 0


@@ -13,6 +13,7 @@
*/
#define cpu_has_watch 1
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_divec 1
#define cpu_has_vce 0
#define cpu_has_cache_cdex_p 0


@@ -6,6 +6,7 @@
#define cpu_has_inclusive_pcaches 0
#define cpu_has_mips16 0
#define cpu_has_mips16e2 0
#define cpu_has_mdmx 0
#define cpu_has_mips3d 0
#define cpu_has_smartmips 0


@@ -60,4 +60,35 @@ mips_machine_is_compatible(const struct mips_machine *mach, const void *fdt)
return NULL;
}
/**
* struct mips_fdt_fixup - Describe a fixup to apply to an FDT
* @apply: applies the fixup to @fdt, returns zero on success else -errno
* @description: a short description of the fixup
*
* Describes a fixup applied to an FDT blob by the @apply function. The
* @description field provides a short description of the fixup intended for
* use in error messages if the @apply function returns non-zero.
*/
struct mips_fdt_fixup {
int (*apply)(void *fdt);
const char *description;
};
/**
* apply_mips_fdt_fixups() - apply fixups to an FDT blob
* @fdt_out: buffer in which to place the fixed-up FDT
* @fdt_out_size: the size of the @fdt_out buffer
* @fdt_in: the FDT blob
* @fixups: pointer to an array of fixups to be applied
*
* Loop through the array of fixups pointed to by @fixups, calling the apply
* function on each until either one returns an error or we reach the end of
* the list as indicated by an entry with a NULL apply field.
*
* Return: zero on success, else -errno
*/
extern int __init apply_mips_fdt_fixups(void *fdt_out, size_t fdt_out_size,
const void *fdt_in,
const struct mips_fdt_fixup *fixups);
#endif /* __MIPS_ASM_MACHINE_H__ */
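
A hedged usage sketch of the fixup API documented above; the board hooks, buffer size, and fixup list are illustrative rather than taken from any in-tree board:

/* Hypothetical board code chaining FDT fixups at boot. */
static __init int fixup_cmdline(void *fdt)
{
	return yamon_dt_append_cmdline(fdt);	/* see yamon-dt below */
}

static const struct mips_fdt_fixup board_fdt_fixups[] __initconst = {
	{ .apply = fixup_cmdline, .description = "append command line" },
	{ },	/* sentinel: the NULL apply field terminates the array */
};

static __init const void *board_fixup_fdt(const void *fdt_in)
{
	static unsigned char fdt_buf[16 << 10] __initdata;

	if (apply_mips_fdt_fixups(fdt_buf, sizeof(fdt_buf),
				  fdt_in, board_fdt_fixups))
		panic("Unable to fix up FDT");

	return fdt_buf;
}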


@@ -652,6 +652,7 @@
#define MIPS_CONF5_SBRI (_ULCAST_(1) << 6)
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_CA2 (_ULCAST_(1) << 14)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
#define MIPS_CONF5_CV (_ULCAST_(1) << 29)


@@ -47,8 +47,8 @@ typedef struct {
#define Elf_Mips_Rel Elf32_Rel
#define Elf_Mips_Rela Elf32_Rela
-#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM(rel.r_info)
-#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE(rel.r_info)
+#define ELF_MIPS_R_SYM(rel) ELF32_R_SYM((rel).r_info)
+#define ELF_MIPS_R_TYPE(rel) ELF32_R_TYPE((rel).r_info)
#endif
@@ -65,8 +65,8 @@ typedef struct {
#define Elf_Mips_Rel Elf64_Mips_Rel
#define Elf_Mips_Rela Elf64_Mips_Rela
-#define ELF_MIPS_R_SYM(rel) (rel.r_sym)
-#define ELF_MIPS_R_TYPE(rel) (rel.r_type)
+#define ELF_MIPS_R_SYM(rel) ((rel).r_sym)
+#define ELF_MIPS_R_TYPE(rel) ((rel).r_type)
#endif


@@ -9,431 +9,9 @@
#ifndef _ASM_SPINLOCK_H
#define _ASM_SPINLOCK_H
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* These are fair FIFO ticket locks
*
* (the type definitions are in asm/spinlock_types.h)
*/
/*
* Ticket locks are conceptually two parts, one indicating the current head of
* the queue, and the other indicating the current tail. The lock is acquired
* by atomically noting the tail and incrementing it by one (thus adding
* ourself to the queue and noting our position), then waiting until the head
* becomes equal to the initial value of the tail.
*/
static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
u32 counters = ACCESS_ONCE(lock->lock);
return ((counters >> 16) ^ counters) & 0xffff;
}
static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
return lock.h.serving_now == lock.h.ticket;
}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
u16 owner = READ_ONCE(lock->h.serving_now);
smp_rmb();
for (;;) {
arch_spinlock_t tmp = READ_ONCE(*lock);
if (tmp.h.serving_now == tmp.h.ticket ||
tmp.h.serving_now != owner)
break;
cpu_relax();
}
smp_acquire__after_ctrl_dep();
}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
u32 counters = ACCESS_ONCE(lock->lock);
return (((counters >> 16) - counters) & 0xffff) > 1;
}
#define arch_spin_is_contended arch_spin_is_contended
static inline void arch_spin_lock(arch_spinlock_t *lock)
{
int my_ticket;
int tmp;
int inc = 0x10000;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
" .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addu %[my_ticket], %[ticket], %[inc] \n"
" sc %[my_ticket], %[ticket_ptr] \n"
" beqzl %[my_ticket], 1b \n"
" nop \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
" \n"
"6: bnez %[ticket], 6b \n"
" subu %[ticket], 1 \n"
" \n"
" lhu %[ticket], %[serving_now_ptr] \n"
" beq %[ticket], %[my_ticket], 2b \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
" b 4b \n"
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
: [inc] "r" (inc));
} else {
__asm__ __volatile__ (
" .set push # arch_spin_lock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" addu %[my_ticket], %[ticket], %[inc] \n"
" sc %[my_ticket], %[ticket_ptr] \n"
" beqz %[my_ticket], 1b \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[ticket], %[ticket], 0xffff \n"
" bne %[ticket], %[my_ticket], 4f \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
"2: .insn \n"
" .subsection 2 \n"
"4: andi %[ticket], %[ticket], 0xffff \n"
" sll %[ticket], 5 \n"
" \n"
"6: bnez %[ticket], 6b \n"
" subu %[ticket], 1 \n"
" \n"
" lhu %[ticket], %[serving_now_ptr] \n"
" beq %[ticket], %[my_ticket], 2b \n"
" subu %[ticket], %[my_ticket], %[ticket] \n"
" b 4b \n"
" subu %[ticket], %[ticket], 1 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[serving_now_ptr] "+m" (lock->h.serving_now),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (my_ticket)
: [inc] "r" (inc));
}
smp_llsc_mb();
}
static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
unsigned int serving_now = lock->h.serving_now + 1;
wmb();
lock->h.serving_now = (u16)serving_now;
nudge_writes();
}
static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
{
int tmp, tmp2, tmp3;
int inc = 0x10000;
if (R10000_LLSC_WAR) {
__asm__ __volatile__ (
" .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[now_serving], %[ticket], 0xffff \n"
" bne %[my_ticket], %[now_serving], 3f \n"
" addu %[ticket], %[ticket], %[inc] \n"
" sc %[ticket], %[ticket_ptr] \n"
" beqzl %[ticket], 1b \n"
" li %[ticket], 1 \n"
"2: \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
: [inc] "r" (inc));
} else {
__asm__ __volatile__ (
" .set push # arch_spin_trylock \n"
" .set noreorder \n"
" \n"
"1: ll %[ticket], %[ticket_ptr] \n"
" srl %[my_ticket], %[ticket], 16 \n"
" andi %[now_serving], %[ticket], 0xffff \n"
" bne %[my_ticket], %[now_serving], 3f \n"
" addu %[ticket], %[ticket], %[inc] \n"
" sc %[ticket], %[ticket_ptr] \n"
" beqz %[ticket], 1b \n"
" li %[ticket], 1 \n"
"2: .insn \n"
" .subsection 2 \n"
"3: b 2b \n"
" li %[ticket], 0 \n"
" .previous \n"
" .set pop \n"
: [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
[ticket] "=&r" (tmp),
[my_ticket] "=&r" (tmp2),
[now_serving] "=&r" (tmp3)
: [inc] "r" (inc));
}
smp_llsc_mb();
return tmp;
}
/*
* Read-write spinlocks, allowing multiple readers but only one writer.
*
* NOTE! it is quite common to have readers in interrupts but no interrupt
* writers. For those circumstances we can "mix" irq-safe locks - any writer
* needs to get a irq-safe write-lock, but readers can get non-irqsafe
* read-locks.
*/
/*
* read_can_lock - would read_trylock() succeed?
* @lock: the rwlock in question.
*/
#define arch_read_can_lock(rw) ((rw)->lock >= 0)
/*
* write_can_lock - would write_trylock() succeed?
* @lock: the rwlock in question.
*/
#define arch_write_can_lock(rw) (!(rw)->lock)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set noreorder # arch_read_lock \n"
"1: ll %1, %2 \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_lock \n"
" bltz %1, 1b \n"
" addu %1, 1 \n"
"2: sc %1, %0 \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
smp_llsc_mb();
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned int tmp;
smp_mb__before_llsc();
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
" addiu %1, -1 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_read_unlock \n"
" addiu %1, -1 \n"
" sc %1, %0 \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
}
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned int tmp;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set noreorder # arch_write_lock \n"
"1: ll %1, %2 \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
" .set reorder \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
__asm__ __volatile__(
"1: ll %1, %2 # arch_write_lock \n"
" bnez %1, 1b \n"
" lui %1, 0x8000 \n"
"2: sc %1, %0 \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
}
smp_llsc_mb();
}
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb__before_llsc();
__asm__ __volatile__(
" # arch_write_unlock \n"
" sw $0, %0 \n"
: "=m" (rw->lock)
: "m" (rw->lock)
: "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" .set reorder \n"
" beqzl %1, 1b \n"
" nop \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
__asm__ __volatile__(
" .set noreorder # arch_read_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bltz %1, 2f \n"
" addu %1, 1 \n"
" sc %1, %0 \n"
" beqz %1, 1b \n"
" nop \n"
" .set reorder \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
"2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
}
return ret;
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned int tmp;
int ret;
if (R10000_LLSC_WAR) {
__asm__ __volatile__(
" .set noreorder # arch_write_trylock \n"
" li %2, 0 \n"
"1: ll %1, %3 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" beqzl %1, 1b \n"
" nop \n"
__WEAK_LLSC_MB
" li %2, 1 \n"
" .set reorder \n"
"2: \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} else {
do {
__asm__ __volatile__(
" ll %1, %3 # arch_write_trylock \n"
" li %2, 0 \n"
" bnez %1, 2f \n"
" lui %1, 0x8000 \n"
" sc %1, %0 \n"
" li %2, 1 \n"
"2: .insn \n"
: "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
"=&r" (ret)
: GCC_OFF_SMALL_ASM() (rw->lock)
: "memory");
} while (unlikely(!tmp));
smp_llsc_mb();
}
return ret;
}
#include <asm/qrwlock.h>
#include <asm/qspinlock.h>
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
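
For reference, the ticket-lock scheme described in the comment removed above (one counter for the head of the queue, one for the tail) boils down to the following shape. This is a plain C11 sketch of the concept, not the kernel code being deleted:

#include <stdatomic.h>

typedef struct {
	_Atomic unsigned short ticket;		/* next ticket to hand out (tail) */
	_Atomic unsigned short serving_now;	/* ticket being served (head) */
} ticket_lock;

static void ticket_lock_acquire(ticket_lock *l)
{
	/* Atomically take a ticket, joining the FIFO queue. */
	unsigned short me = atomic_fetch_add(&l->ticket, 1);

	/* Spin until the head of the queue reaches our ticket. */
	while (atomic_load(&l->serving_now) != me)
		;
}

static void ticket_lock_release(ticket_lock *l)
{
	/* Hand the lock to the next waiter in ticket order. */
	atomic_fetch_add(&l->serving_now, 1);
}

The queued spinlocks and rwlocks that replace it keep the FIFO fairness while avoiding every waiter spinning on the same cache line.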


@@ -1,37 +1,7 @@
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H
#ifndef __LINUX_SPINLOCK_TYPES_H
# error "please don't include this file directly"
#endif
#include <linux/types.h>
#include <asm/byteorder.h>
typedef union {
/*
* bits 0..15 : serving_now
* bits 16..31 : ticket
*/
u32 lock;
struct {
#ifdef __BIG_ENDIAN
u16 ticket;
u16 serving_now;
#else
u16 serving_now;
u16 ticket;
#endif
} h;
} arch_spinlock_t;
#define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0 }
typedef struct {
volatile unsigned int lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { 0 }
#include <asm-generic/qspinlock_types.h>
#include <asm-generic/qrwlock_types.h>
#endif


@@ -85,7 +85,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
{
if (error) {
regs->regs[2] = -error;
-regs->regs[7] = -1;
+regs->regs[7] = 1;
} else {
regs->regs[2] = val;
regs->regs[7] = 0;
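
For context: the MIPS syscall ABI reports failure out of band, with $a3 (regs[7]) set non-zero and $v0 (regs[2]) carrying a positive errno, so the fix above changes only the flag value, not the convention. A sketch of the userspace half of that contract (illustrative, not libc source):

#include <errno.h>

/* Turn a raw ($v0, $a3) syscall result into the usual C convention. */
static long mips_syscall_result(long v0, long a3)
{
	if (a3) {			/* kernel flagged an error */
		errno = (int)v0;	/* $v0 holds the positive errno */
		return -1;
	}
	return v0;
}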


@@ -72,9 +72,12 @@ Ip_u1u2s3(_beq);
Ip_u1u2s3(_beql);
Ip_u1s2(_bgez);
Ip_u1s2(_bgezl);
Ip_u1s2(_bgtz);
Ip_u1s2(_blez);
Ip_u1s2(_bltz);
Ip_u1s2(_bltzl);
Ip_u1u2s3(_bne);
Ip_u1(_break);
Ip_u2s3u1(_cache);
Ip_u1u2(_cfc1);
Ip_u2u1(_cfcmsa);
@@ -82,19 +85,28 @@ Ip_u1u2(_ctc1);
Ip_u2u1(_ctcmsa);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
Ip_u1u2(_ddivu);
Ip_u1(_di);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
Ip_u2u1msbu3(_dinsu);
Ip_u1u2(_divu);
Ip_u1u2u3(_dmfc0);
Ip_u1u2u3(_dmtc0);
Ip_u1u2(_dmultu);
Ip_u2u1u3(_drotr);
Ip_u2u1u3(_drotr32);
Ip_u2u1(_dsbh);
Ip_u2u1(_dshd);
Ip_u2u1u3(_dsll);
Ip_u2u1u3(_dsll32);
Ip_u3u2u1(_dsllv);
Ip_u2u1u3(_dsra);
Ip_u2u1u3(_dsra32);
Ip_u3u2u1(_dsrav);
Ip_u2u1u3(_dsrl);
Ip_u2u1u3(_dsrl32);
Ip_u3u2u1(_dsrlv);
Ip_u3u1u2(_dsubu);
Ip_0(_eret);
Ip_u2u1msbu3(_ext);
@@ -104,6 +116,7 @@ Ip_u1(_jal);
Ip_u2u1(_jalr);
Ip_u1(_jr);
Ip_u2s3u1(_lb);
Ip_u2s3u1(_lbu);
Ip_u2s3u1(_ld);
Ip_u3u1u2(_ldx);
Ip_u2s3u1(_lh);
@@ -112,27 +125,35 @@ Ip_u2s3u1(_ll);
Ip_u2s3u1(_lld);
Ip_u1s2(_lui);
Ip_u2s3u1(_lw);
Ip_u2s3u1(_lwu);
Ip_u3u1u2(_lwx);
Ip_u1u2u3(_mfc0);
Ip_u1u2u3(_mfhc0);
Ip_u1(_mfhi);
Ip_u1(_mflo);
Ip_u3u1u2(_movn);
Ip_u3u1u2(_movz);
Ip_u1u2u3(_mtc0);
Ip_u1u2u3(_mthc0);
Ip_u1(_mthi);
Ip_u1(_mtlo);
Ip_u3u1u2(_mul);
Ip_u1u2(_multu);
Ip_u3u1u2(_nor);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
Ip_u2s3u1(_pref);
Ip_0(_rfe);
Ip_u2u1u3(_rotr);
Ip_u2s3u1(_sb);
Ip_u2s3u1(_sc);
Ip_u2s3u1(_scd);
Ip_u2s3u1(_sd);
Ip_u2s3u1(_sh);
Ip_u2u1u3(_sll);
Ip_u3u2u1(_sllv);
Ip_s3s1s2(_slt);
Ip_u2u1s3(_slti);
Ip_u2u1s3(_sltiu);
Ip_u3u1u2(_sltu);
Ip_u2u1u3(_sra);
@@ -248,6 +269,15 @@ static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1,
uasm_i_dsrl32(p, a1, a2, a3 - 32);
}
static inline void uasm_i_dsra_safe(u32 **p, unsigned int a1,
unsigned int a2, unsigned int a3)
{
if (a3 < 32)
uasm_i_dsra(p, a1, a2, a3);
else
uasm_i_dsra32(p, a1, a2, a3 - 32);
}
/* Handle relocations. */
struct uasm_reloc {
u32 *addr;


@@ -79,8 +79,8 @@ union mips_vdso_data {
struct {
u64 xtime_sec;
u64 xtime_nsec;
-u32 wall_to_mono_sec;
-u32 wall_to_mono_nsec;
+u64 wall_to_mono_sec;
+u64 wall_to_mono_nsec;
u32 seq_count;
u32 cs_shift;
u8 clock_mode;
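
These fields are read under seq_count. Given a pointer data to the union above, the reader pattern looks roughly like this (a simplified sketch; the real do_monotonic() also folds in the clocksource delta):

/* Sketch: take a consistent snapshot guarded by seq_count. */
u32 start;
u64 sec, nsec;

do {
	start = READ_ONCE(data->seq_count);
	smp_rmb();	/* pairs with the update-side barriers */

	sec  = data->xtime_sec + data->wall_to_mono_sec;
	nsec = data->xtime_nsec + data->wall_to_mono_nsec;

	smp_rmb();
} while ((start & 1) || READ_ONCE(data->seq_count) != start);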


@@ -0,0 +1,64 @@
/*
* Copyright (C) 2016 Imagination Technologies
* Author: Paul Burton <paul.burton@imgtec.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __MIPS_ASM_YAMON_DT_H__
#define __MIPS_ASM_YAMON_DT_H__
#include <linux/types.h>
/**
* struct yamon_mem_region - Represents a contiguous range of physical RAM.
* @start: Start physical address.
* @size: Maximum size of region.
* @discard: Length of additional memory to discard after the region.
*/
struct yamon_mem_region {
phys_addr_t start;
phys_addr_t size;
phys_addr_t discard;
};
/**
* yamon_dt_append_cmdline() - Append YAMON-provided command line to /chosen
* @fdt: the FDT blob
*
* Write the YAMON-provided command line to the bootargs property of the
* /chosen node in @fdt.
*
* Return: 0 on success, else -errno
*/
extern __init int yamon_dt_append_cmdline(void *fdt);
/**
* yamon_dt_append_memory() - Append YAMON-provided memory info to /memory
* @fdt: the FDT blob
* @regions: zero size terminated array of physical memory regions
*
* Generate a /memory node in @fdt based upon memory size information provided
* by YAMON in its environment and the @regions array.
*
* Return: 0 on success, else -errno
*/
extern __init int yamon_dt_append_memory(void *fdt,
const struct yamon_mem_region *regions);
/**
* yamon_dt_serial_config() - Append YAMON-provided serial config to /chosen
* @fdt: the FDT blob
*
* Generate a stdout-path property in the /chosen node of @fdt, based upon
* information provided in the YAMON environment about the UART configuration
* of the system.
*
* Return: 0 on success, else -errno
*/
extern __init int yamon_dt_serial_config(void *fdt);
#endif /* __MIPS_ASM_YAMON_DT_H__ */
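
A hedged example of the zero-size-terminated @regions contract; the addresses, sizes, and board function below are illustrative:

#include <linux/sizes.h>

/* Hypothetical platform description of two banks of RAM. */
static const struct yamon_mem_region board_mem_regions[] __initconst = {
	{ .start = 0x00000000, .size = SZ_256M },
	{ .start = 0x90000000, .size = SZ_256M, .discard = SZ_64M },
	{ }	/* sentinel: zero size terminates the array */
};

static __init int board_fixup_memory(void *fdt)
{
	return yamon_dt_append_memory(fdt, board_mem_regions);
}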


@@ -276,11 +276,18 @@ enum lx_func {
*/
enum bshfl_func {
wsbh_op = 0x2,
-dshd_op = 0x5,
seb_op = 0x10,
seh_op = 0x18,
};
/*
* DBSHFL opcodes
*/
enum dbshfl_func {
dsbh_op = 0x2,
dshd_op = 0x5,
};
/*
* MSA minor opcodes.
*/
@@ -755,6 +762,16 @@ struct msa_mi10_format { /* MSA MI10 */
;))))))
};
struct dsp_format { /* SPEC3 DSP format instructions */
__BITFIELD_FIELD(unsigned int opcode : 6,
__BITFIELD_FIELD(unsigned int base : 5,
__BITFIELD_FIELD(unsigned int index : 5,
__BITFIELD_FIELD(unsigned int rd : 5,
__BITFIELD_FIELD(unsigned int op : 5,
__BITFIELD_FIELD(unsigned int func : 6,
;))))))
};
struct spec3_format { /* SPEC3 */
__BITFIELD_FIELD(unsigned int opcode:6,
__BITFIELD_FIELD(unsigned int rs:5,
@@ -1046,6 +1063,7 @@ union mips_instruction {
struct b_format b_format;
struct ps_format ps_format;
struct v_format v_format;
struct dsp_format dsp_format;
struct spec3_format spec3_format;
struct fb_format fb_format;
struct fp0_format fp0_format;