Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask into merge-rr-cpumask
Conflicts:
	arch/x86/kernel/io_apic.c
	kernel/rcuclassic.c
	kernel/sched.c
	kernel/time/tick-sched.c

Signed-off-by: Mike Travis <travis@sgi.com>
[ mingo@elte.hu: backmerged typo fix for io_apic.c ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -49,5 +49,16 @@
/* Check if an address can be reached in 29 bits */
#define IS_29BIT(a)	(((unsigned long)(a)) < 0x20000000)

#ifdef CONFIG_SH_STORE_QUEUES
/*
 * This is a special case for the SH-4 store queues, as pages for this
 * space still need to be faulted in before it's possible to flush the
 * store queue cache for writeout to the remapped region.
 */
#define P3_ADDR_MAX		(P4SEG_STORE_QUE + 0x04000000)
#else
#define P3_ADDR_MAX		P4SEG
#endif

#endif /* __KERNEL__ */
#endif /* __ASM_SH_ADDRSPACE_H */
@@ -166,4 +166,7 @@ static inline int test_and_change_bit(int nr, volatile void * addr)

	return retval;
}

#include <asm-generic/bitops/non-atomic.h>

#endif /* __ASM_SH_BITOPS_GRB_H */
@@ -1,91 +0,0 @@
#ifndef __ASM_SH_BITOPS_IRQ_H
#define __ASM_SH_BITOPS_IRQ_H

static inline void set_bit(int nr, volatile void *addr)
{
	int mask;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a |= mask;
	local_irq_restore(flags);
}

static inline void clear_bit(int nr, volatile void *addr)
{
	int mask;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a &= ~mask;
	local_irq_restore(flags);
}

static inline void change_bit(int nr, volatile void *addr)
{
	int mask;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	*a ^= mask;
	local_irq_restore(flags);
}

static inline int test_and_set_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a |= mask;
	local_irq_restore(flags);

	return retval;
}

static inline int test_and_clear_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a &= ~mask;
	local_irq_restore(flags);

	return retval;
}

static inline int test_and_change_bit(int nr, volatile void *addr)
{
	int mask, retval;
	volatile unsigned int *a = addr;
	unsigned long flags;

	a += nr >> 5;
	mask = 1 << (nr & 0x1f);
	local_irq_save(flags);
	retval = (mask & *a) != 0;
	*a ^= mask;
	local_irq_restore(flags);

	return retval;
}

#endif /* __ASM_SH_BITOPS_IRQ_H */
@@ -141,4 +141,6 @@ static inline int test_and_change_bit(int nr, volatile void * addr)
	return retval != 0;
}

#include <asm-generic/bitops/non-atomic.h>

#endif /* __ASM_SH_BITOPS_LLSC_H */
arch/sh/include/asm/bitops-op32.h (new file, 142 lines)
@@ -0,0 +1,142 @@
#ifndef __ASM_SH_BITOPS_OP32_H
#define __ASM_SH_BITOPS_OP32_H

/*
 * The bit modifying instructions on SH-2A are only capable of working
 * with a 3-bit immediate, which signifies the shift position for the bit
 * being worked on.
 */
#if defined(__BIG_ENDIAN)
#define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
#define BYTE_NUMBER(nr)		((nr ^ BITOP_LE_SWIZZLE) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)		((nr ^ BITOP_LE_SWIZZLE) % BITS_PER_BYTE)
#else
#define BYTE_NUMBER(nr)		((nr) / BITS_PER_BYTE)
#define BYTE_OFFSET(nr)		((nr) % BITS_PER_BYTE)
#endif

#define IS_IMMEDIATE(nr)	(__builtin_constant_p(nr))

static inline void __set_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		__asm__ __volatile__ (
			"bset.b %1, @(%O2,%0)	! __set_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)), "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p |= mask;
	}
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		__asm__ __volatile__ (
			"bclr.b %1, @(%O2,%0)	! __clear_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)),
			  "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p &= ~mask;
	}
}

/**
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(int nr, volatile unsigned long *addr)
{
	if (IS_IMMEDIATE(nr)) {
		__asm__ __volatile__ (
			"bxor.b %1, @(%O2,%0)	! __change_bit\n\t"
			: "+r" (addr)
			: "i" (BYTE_OFFSET(nr)),
			  "i" (BYTE_NUMBER(nr))
			: "t", "memory"
		);
	} else {
		unsigned long mask = BIT_MASK(nr);
		unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

		*p ^= mask;
	}
}

/**
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

/**
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two examples of this operation race, one can appear to succeed
 * but actually fail. You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old & ~mask;
	return (old & mask) != 0;
}

/* WARNING: non atomic and it can be reordered! */
static inline int __test_and_change_bit(int nr,
					volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
	unsigned long old = *p;

	*p = old ^ mask;
	return (old & mask) != 0;
}

/**
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}

#endif /* __ASM_SH_BITOPS_OP32_H */
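Since the bset.b/bclr.b forms above are only emitted when the bit number is a compile-time constant, the generic fallback is just a plain word-sized read-modify-write. A minimal user-space sketch of that fallback path, assuming the standard BIT_MASK()/BIT_WORD() definitions from the generic kernel headers (the demo_ names here are illustration only, not part of this commit):

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

/* Generic fallback path of __set_bit(): pure load/OR/store, no atomicity. */
static void demo_set_bit(int nr, unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = addr + BIT_WORD(nr);

	*p |= mask;
}

int main(void)
{
	unsigned long bitmap[2] = { 0, 0 };	/* hypothetical two-word bitmap */

	demo_set_bit(3, bitmap);			/* lands in word 0 */
	demo_set_bit(BITS_PER_LONG + 1, bitmap);	/* lands in word 1 */
	printf("%#lx %#lx\n", bitmap[0], bitmap[1]);
	return 0;
}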
@@ -13,21 +13,22 @@

#ifdef CONFIG_GUSA_RB
#include <asm/bitops-grb.h>
#elif defined(CONFIG_CPU_SH2A)
#include <asm-generic/bitops/atomic.h>
#include <asm/bitops-op32.h>
#elif defined(CONFIG_CPU_SH4A)
#include <asm/bitops-llsc.h>
#else
#include <asm/bitops-irq.h>
#include <asm-generic/bitops/atomic.h>
#include <asm-generic/bitops/non-atomic.h>
#endif

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()

#include <asm-generic/bitops/non-atomic.h>

#ifdef CONFIG_SUPERH32
static inline unsigned long ffz(unsigned long word)
{
@@ -25,7 +25,7 @@ static void __init check_bugs(void)
	case CPU_SH7619:
		*p++ = '2';
		break;
	case CPU_SH7203 ... CPU_MXG:
	case CPU_SH7201 ... CPU_MXG:
		*p++ = '2';
		*p++ = 'a';
		break;
@@ -108,13 +108,11 @@ typedef struct user_fpu_struct elf_fpregset_t;
#define elf_check_fdpic(x)		((x)->e_flags & EF_SH_FDPIC)
#define elf_check_const_displacement(x)	((x)->e_flags & EF_SH_PIC)

#ifdef CONFIG_SUPERH32
/*
 * Enable dump using regset.
 * This covers all of general/DSP/FPU regs.
 */
#define CORE_DUMP_USE_REGSET
#endif

#define USE_ELF_CORE_DUMP
#define ELF_FDPIC_CORE_EFLAGS	EF_SH_FDPIC
@@ -204,7 +202,7 @@ do { \
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
struct linux_binprm;
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
				       int executable_stack);
				       int uses_interp);

extern unsigned int vdso_enabled;
extern void __kernel_vsyscall;
@@ -1,8 +1,34 @@
#ifndef __ASM_SH_FTRACE_H
#define __ASM_SH_FTRACE_H

#ifdef CONFIG_FUNCTION_TRACER

#define MCOUNT_INSN_SIZE	4 /* sizeof mcount call */

#ifndef __ASSEMBLY__
extern void mcount(void);
#endif

#define MCOUNT_ADDR		((long)(mcount))

#ifdef CONFIG_DYNAMIC_FTRACE
#define CALLER_ADDR		((long)(ftrace_caller))
#define STUB_ADDR		((long)(ftrace_stub))

#define MCOUNT_INSN_OFFSET	((STUB_ADDR - CALLER_ADDR) >> 1)

struct dyn_arch_ftrace {
	/* No extra data needed on sh */
};

#endif /* CONFIG_DYNAMIC_FTRACE */

static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* 'addr' is the memory table address. */
	return addr;
}

#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

#endif /* __ASM_SH_FTRACE_H */
@@ -260,6 +260,10 @@ __ioremap_mode(unsigned long offset, unsigned long size, unsigned long flags)

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return __ioremap(offset, size, flags);
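The two lines added here just short-circuit the remap for P4 addresses above the store queues, which are always mapped. A rough user-space sketch of that decision, with an arbitrary stand-in value for P3_ADDR_MAX (the real value comes from the addrspace.h hunk earlier in this commit), behaves like:

#include <stdio.h>

/* Arbitrary stand-in for P3_ADDR_MAX from <asm/addrspace.h>; not the real value. */
#define DEMO_P3_ADDR_MAX	0xe4000000UL

/* Model of the added fast path in __ioremap_mode(): addresses at or above
 * P3_ADDR_MAX are already mapped, so they are returned untouched instead of
 * going through the real __ioremap(). */
static unsigned long demo_ioremap_mode(unsigned long offset)
{
	if (offset >= DEMO_P3_ADDR_MAX)
		return offset;		/* always-mapped P4 space */
	return 0UL;			/* would call __ioremap() here */
}

int main(void)
{
	printf("%#lx\n", demo_ioremap_mode(0xe5000000UL));	/* fast path */
	printf("%#lx\n", demo_ioremap_mode(0x10000000UL));	/* falls through */
	return 0;
}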
@@ -1,21 +1,7 @@
/*
 * May be copied or modified under the terms of the GNU General Public
 * License.  See linux/COPYING for more information.
 *
 * Based on original code by Glenn Engel, Jim Kingdon,
 * David Grothe <dave@gcom.com>, Tigran Aivazian, <tigran@sco.com> and
 * Amit S. Kale <akale@veritas.com>
 *
 * Super-H port based on sh-stub.c (Ben Lee and Steve Chamberlain) by
 * Henry Bell <henry.bell@st.com>
 *
 * Header file for low-level support for remote debug using GDB.
 *
 */

#ifndef __KGDB_H
#define __KGDB_H
#ifndef __ASM_SH_KGDB_H
#define __ASM_SH_KGDB_H

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

/* Same as pt_regs but has vbr in place of syscall_nr */
@@ -30,40 +16,26 @@ struct kgdb_regs {
	unsigned long vbr;
};

enum regnames {
	GDB_R0, GDB_R1, GDB_R2, GDB_R3, GDB_R4, GDB_R5, GDB_R6, GDB_R7,
	GDB_R8, GDB_R9, GDB_R10, GDB_R11, GDB_R12, GDB_R13, GDB_R14, GDB_R15,

	GDB_PC, GDB_PR, GDB_SR, GDB_GBR, GDB_MACH, GDB_MACL, GDB_VBR,
};

#define NUMREGBYTES	((GDB_VBR + 1) * 4)

static inline void arch_kgdb_breakpoint(void)
{
	__asm__ __volatile__ ("trapa #0x3c\n");
}

/* State info */
extern char kgdb_in_gdb_mode;
extern int kgdb_nofault;	/* Ignore bus errors (in gdb mem access) */
extern char in_nmi;		/* Debounce flag to prevent NMI reentry*/

/* SCI */
extern int kgdb_portnum;
extern int kgdb_baud;
extern char kgdb_parity;
extern char kgdb_bits;
#define BUFMAX 2048

/* Init and interface stuff */
extern int kgdb_init(void);
extern int (*kgdb_getchar)(void);
extern void (*kgdb_putchar)(int);
#define CACHE_FLUSH_IS_SAFE	1
#define BREAK_INSTR_SIZE	2

/* Trap functions */
typedef void (kgdb_debug_hook_t)(struct pt_regs *regs);
typedef void (kgdb_bus_error_hook_t)(void);
extern kgdb_debug_hook_t *kgdb_debug_hook;
extern kgdb_bus_error_hook_t *kgdb_bus_err_hook;

/* Console */
struct console;
void kgdb_console_write(struct console *co, const char *s, unsigned count);
extern int kgdb_console_setup(struct console *, char *);

/* Prototypes for jmp fns */
#define _JBLEN 9
typedef int jmp_buf[_JBLEN];
extern void longjmp(jmp_buf __jmpb, int __retval);
extern int setjmp(jmp_buf __jmpb);

/* Forced breakpoint */
#define breakpoint()	__asm__ __volatile__("trapa #0x3c")

#endif
#endif /* __ASM_SH_KGDB_H */
@@ -14,8 +14,6 @@
#include <linux/time.h>
#include <asm/machtypes.h>

struct device;

struct sh_machine_vector {
	void (*mv_setup)(char **cmdline_p);
	const char *mv_name;
@@ -45,9 +43,6 @@ struct sh_machine_vector {
	int (*mv_irq_demux)(int irq);

	void (*mv_init_irq)(void);
	void (*mv_init_pci)(void);

	void (*mv_heartbeat)(void);

	void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size);
	void (*mv_ioport_unmap)(void __iomem *);
@@ -22,7 +22,7 @@
#define MMU_CONTEXT_ASID_MASK		0x000000ff
#define MMU_CONTEXT_VERSION_MASK	0xffffff00
#define MMU_CONTEXT_FIRST_VERSION	0x00000100
#define NO_CONTEXT			0
#define NO_CONTEXT			0UL

/* ASID is 8-bit value, so it can't be 0x100 */
#define MMU_NO_ASID			0x100
@@ -130,7 +130,7 @@ static inline void switch_mm(struct mm_struct *prev,
#define destroy_context(mm)		do { } while (0)
#define set_asid(asid)			do { } while (0)
#define get_asid()			(0)
#define cpu_asid(cpu, mm)		({ (void)cpu; 0; })
#define cpu_asid(cpu, mm)		({ (void)cpu; NO_CONTEXT; })
#define switch_and_save_asid(asid)	(0)
#define set_TTB(pgd)			do { } while (0)
#define get_TTB()			(0)
arch/sh/include/asm/mutex-llsc.h (new file, 112 lines)
@@ -0,0 +1,112 @@
/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH4A is done like in ARMv6+ architectures,
 * with a bastardized atomic decrement (it is not a reliable atomic decrement
 * but it satisfies the defined semantics for our purpose, while being
 * smaller and faster than a real atomic decrement or atomic swap.
 * The idea is to attempt decrementing the lock value only once. If once
 * decremented it isn't zero, or if its store-back fails due to a dispute
 * on the exclusive store, we simply bail out immediately through the slow
 * path where the lock will be reattempted until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "t");

	__res |= !__ex_flag;
	if (unlikely(__res != 0))
		fail_fn(count);
}

static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"
		"add		#-1, %0	\n"
		"movco.l	%0, @%2	\n"
		"movt		%1	\n"
		: "=&z" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "t");

	__res |= !__ex_flag;
	if (unlikely(__res != 0))
		__res = fail_fn(count);

	return __res;
}

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __ex_flag, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n\t"
		"add		#1, %0	\n\t"
		"movco.l	%0, @%2	\n\t"
		"movt		%1	\n\t"
		: "=&z" (__res), "=&r" (__ex_flag)
		: "r" (&(count)->counter)
		: "t");

	__res |= !__ex_flag;
	if (unlikely(__res <= 0))
		fail_fn(count);
}

/*
 * If the unlock was done on a contended lock, or if the unlock simply fails
 * then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1

/*
 * For __mutex_fastpath_trylock we do an atomic decrement and check the
 * result and put it in the __res variable.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __res, __orig;

	__asm__ __volatile__ (
		"1: movli.l	@%2, %0		\n\t"
		"dt		%0		\n\t"
		"movco.l	%0,@%2		\n\t"
		"bf		1b		\n\t"
		"cmp/eq		#0,%0		\n\t"
		"bt		2f		\n\t"
		"mov		#0, %1		\n\t"
		"bf		3f		\n\t"
		"2: mov		#1, %1		\n\t"
		"3:				"
		: "=&z" (__orig), "=&r" (__res)
		: "r" (&count->counter)
		: "t");

	return __res;
}
#endif /* __ASM_SH_MUTEX_LLSC_H */
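The movli.l/movco.l sequence above is a load-locked/store-conditional pair: the fastpath makes exactly one decrement attempt and punts to the slow path on contention or a failed store. A rough user-space analogue of __mutex_fastpath_lock(), using a GCC __atomic compare-and-swap where the hardware uses movli.l/movco.l (so only the control flow carries over, not the exact atomicity model), is:

#include <stdio.h>

/* User-space model of __mutex_fastpath_lock(): one decrement attempt,
 * bail to the slow path if the result is non-zero or the store "fails". */
static void demo_fastpath_lock(int *count, void (*fail_fn)(int *))
{
	int old = __atomic_load_n(count, __ATOMIC_RELAXED);
	int new = old - 1;

	/* Stands in for movli.l/movco.l: succeeds only if nobody raced us. */
	int stored = __atomic_compare_exchange_n(count, &old, new, 0,
						 __ATOMIC_ACQUIRE, __ATOMIC_RELAXED);

	if (!stored || new != 0)
		fail_fn(count);		/* contended: take the slow path */
}

static void demo_slowpath(int *count)
{
	printf("slow path taken, count=%d\n", *count);
}

int main(void)
{
	int mutex_count = 1;		/* 1 == unlocked */

	demo_fastpath_lock(&mutex_count, demo_slowpath);	/* fast path */
	demo_fastpath_lock(&mutex_count, demo_slowpath);	/* now contended */
	return 0;
}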
@@ -5,5 +5,8 @@
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */

#if defined(CONFIG_CPU_SH4A)
#include <asm/mutex-llsc.h>
#else
#include <asm-generic/mutex-dec.h>
#endif
@@ -1,17 +0,0 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright 2006 (c) Andriy Skulysh <askulysh@gmail.com>
 *
 */
#ifndef __ASM_SH_PM_H
#define __ASM_SH_PM_H

extern u8 wakeup_start;
extern u8 wakeup_end;

void pm_enter(void);

#endif
@@ -18,7 +18,7 @@ enum cpu_type {
	CPU_SH7619,

	/* SH-2A types */
	CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG,
	CPU_SH7201, CPU_SH7203, CPU_SH7206, CPU_SH7263, CPU_MXG,

	/* SH-3 types */
	CPU_SH7705, CPU_SH7706, CPU_SH7707,
@@ -82,6 +82,9 @@ extern struct sh_cpuinfo cpu_data[];
#define current_cpu_data cpu_data[smp_processor_id()]
#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]

#define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax()	barrier()

/* Forward decl */
struct seq_operations;

@@ -175,6 +175,15 @@ static __inline__ void enable_fpu(void)

void show_trace(struct task_struct *tsk, unsigned long *sp,
		struct pt_regs *regs);

#ifdef CONFIG_DUMP_CODE
void show_code(struct pt_regs *regs);
#else
static inline void show_code(struct pt_regs *regs)
{
}
#endif

extern unsigned long get_wchan(struct task_struct *p);

#define KSTK_EIP(tsk)  (task_pt_regs(tsk)->pc)
@@ -182,9 +191,6 @@ extern unsigned long get_wchan(struct task_struct *p);

#define user_stack_pointer(regs) ((regs)->regs[15])

#define cpu_sleep()	__asm__ __volatile__ ("sleep" : : : "memory")
#define cpu_relax()	barrier()

#if defined(CONFIG_CPU_SH2A) || defined(CONFIG_CPU_SH3) || \
    defined(CONFIG_CPU_SH4)
#define PREFETCH_STRIDE		L1_CACHE_BYTES
@@ -226,9 +226,7 @@ extern unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk)  ((tsk)->thread.pc)
#define KSTK_ESP(tsk)  ((tsk)->thread.sp)

#define user_stack_pointer(regs) ((regs)->sp)

#define cpu_relax()	barrier()
#define user_stack_pointer(regs) ((regs)->regs[15])

#endif /* __ASSEMBLY__ */
#endif /* __ASM_SH_PROCESSOR_64_H */
@@ -86,6 +86,7 @@ struct pt_dspregs {
	unsigned long	re;
	unsigned long	mod;
};
#endif

#define	PTRACE_GETREGS		12	/* General registers */
#define	PTRACE_SETREGS		13
@@ -100,7 +101,6 @@ struct pt_dspregs {

#define PTRACE_GETDSPREGS	55	/* DSP registers */
#define PTRACE_SETDSPREGS	56
#endif

#ifdef __KERNEL__
#include <asm/addrspace.h>
@@ -10,7 +10,6 @@

extern void sh_bios_console_write(const char *buf, unsigned int len);
extern void sh_bios_char_out(char ch);
extern int sh_bios_in_gdb_mode(void);
extern void sh_bios_gdb_detach(void);

extern void sh_bios_get_node_addr(unsigned char *node_addr);
@@ -1,17 +1,20 @@
#ifndef __ASM_SH_STRING_64_H
#define __ASM_SH_STRING_64_H

/*
 * include/asm-sh/string_64.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifdef __KERNEL__

#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);

#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *dest, const void *src, size_t count);

#define __HAVE_ARCH_STRLEN
extern size_t strlen(const char *);

#define __HAVE_ARCH_STRCPY
extern char *strcpy(char *__dest, const char *__src);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_STRING_64_H */
@@ -5,7 +5,7 @@
#include <linux/sched.h>
#include <asm/ptrace.h>

/* The system call number is given by the user in %g1 */
/* The system call number is given by the user in R3 */
static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
{
@@ -1,6 +1,80 @@
#ifndef __ASM_SH_SYSCALL_64_H
#define __ASM_SH_SYSCALL_64_H

#include <asm-generic/syscall.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/ptrace.h>

/* The system call number is given by the user in R9 */
static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
{
	return (regs->syscall_nr >= 0) ? regs->regs[9] : -1L;
}

static inline void syscall_rollback(struct task_struct *task,
				    struct pt_regs *regs)
{
	/*
	 * XXX: This needs some thought. On SH we don't
	 * save away the original R9 value anywhere.
	 */
}

static inline bool syscall_has_error(struct pt_regs *regs)
{
	return (regs->sr & 0x1) ? true : false;
}
static inline void syscall_set_error(struct pt_regs *regs)
{
	regs->sr |= 0x1;
}
static inline void syscall_clear_error(struct pt_regs *regs)
{
	regs->sr &= ~0x1;
}

static inline long syscall_get_error(struct task_struct *task,
				     struct pt_regs *regs)
{
	return syscall_has_error(regs) ? regs->regs[9] : 0;
}

static inline long syscall_get_return_value(struct task_struct *task,
					    struct pt_regs *regs)
{
	return regs->regs[9];
}

static inline void syscall_set_return_value(struct task_struct *task,
					    struct pt_regs *regs,
					    int error, long val)
{
	if (error) {
		syscall_set_error(regs);
		regs->regs[9] = -error;
	} else {
		syscall_clear_error(regs);
		regs->regs[9] = val;
	}
}

static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
{
	BUG_ON(i + n > 6);
	memcpy(args, &regs->regs[2 + i], n * sizeof(args[0]));
}

static inline void syscall_set_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 const unsigned long *args)
{
	BUG_ON(i + n > 6);
	memcpy(&regs->regs[2 + i], args, n * sizeof(args[0]));
}

#endif /* __ASM_SH_SYSCALL_64_H */
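The convention encoded above is that SR bit 0 flags a syscall error and R9 carries either the negated errno or the return value. A minimal user-space model of syscall_set_return_value() following that convention (with a toy stand-in for struct pt_regs, not the real layout) is:

#include <stdio.h>

/* Toy stand-in for struct pt_regs: only the fields the helpers touch. */
struct demo_pt_regs {
	unsigned long long sr;
	long long regs[16];
};

static void demo_set_return_value(struct demo_pt_regs *regs, int error, long long val)
{
	if (error) {
		regs->sr |= 0x1;		/* syscall_set_error() */
		regs->regs[9] = -error;
	} else {
		regs->sr &= ~0x1;		/* syscall_clear_error() */
		regs->regs[9] = val;
	}
}

int main(void)
{
	struct demo_pt_regs regs = { 0 };

	demo_set_return_value(&regs, 0, 42);	/* success: R9 = 42, SR.0 clear */
	printf("sr=%llu r9=%lld\n", regs.sr & 1, regs.regs[9]);

	demo_set_return_value(&regs, 14, 0);	/* EFAULT-style error: R9 = -14, SR.0 set */
	printf("sr=%llu r9=%lld\n", regs.sr & 1, regs.regs[9]);
	return 0;
}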
@@ -175,6 +175,8 @@ asmlinkage void name##_trap_handler(unsigned int vec, struct pt_regs *regs)
BUILD_TRAP_HANDLER(address_error);
BUILD_TRAP_HANDLER(debug);
BUILD_TRAP_HANDLER(bug);
BUILD_TRAP_HANDLER(breakpoint);
BUILD_TRAP_HANDLER(singlestep);
BUILD_TRAP_HANDLER(fpu_error);
BUILD_TRAP_HANDLER(fpu_state_restore);

@@ -32,6 +32,7 @@
#define parent_node(node)	((void)(node),0)

#define node_to_cpumask(node)	((void)node, cpu_online_map)
#define cpumask_of_node(node)	((void)node, cpu_online_mask)
#define node_to_first_cpu(node)	((void)(node),0)

#define pcibus_to_node(bus)	((void)(bus), -1)
arch/sh/include/asm/unaligned-sh4a.h (new file, 258 lines)
@@ -0,0 +1,258 @@
#ifndef __ASM_SH_UNALIGNED_SH4A_H
#define __ASM_SH_UNALIGNED_SH4A_H

/*
 * SH-4A has support for unaligned 32-bit loads, and 32-bit loads only.
 * Support for 16 and 64-bit accesses is done through shifting and
 * masking relative to the endianness. Unaligned stores are not supported
 * by the instruction encoding, so these continue to use the packed
 * struct.
 *
 * The same note as with the movli.l/movco.l pair applies here, as long
 * as the load is guaranteed to be inlined, nothing else will hook in to
 * r0 and we get the return value for free.
 *
 * NOTE: Due to the fact we require r0 encoding, care should be taken to
 * avoid mixing these heavily with other r0 consumers, such as the atomic
 * ops. Failure to adhere to this can result in the compiler running out
 * of spill registers and blowing up when building at low optimization
 * levels. See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=34777.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static __always_inline u32 __get_unaligned_cpu32(const u8 *p)
{
	unsigned long unaligned;

	__asm__ __volatile__ (
		"movua.l	@%1, %0\n\t"
		: "=z" (unaligned)
		: "r" (p)
	);

	return unaligned;
}

struct __una_u16 { u16 x __attribute__((packed)); };
struct __una_u32 { u32 x __attribute__((packed)); };
struct __una_u64 { u64 x __attribute__((packed)); };

static inline u16 __get_unaligned_cpu16(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return __get_unaligned_cpu32(p) & 0xffff;
#else
	return __get_unaligned_cpu32(p) >> 16;
#endif
}

/*
 * Even though movua.l supports auto-increment on the read side, it can
 * only store to r0 due to instruction encoding constraints, so just let
 * the compiler sort it out on its own.
 */
static inline u64 __get_unaligned_cpu64(const u8 *p)
{
#ifdef __LITTLE_ENDIAN
	return (u64)__get_unaligned_cpu32(p + 4) << 32 |
		    __get_unaligned_cpu32(p);
#else
	return (u64)__get_unaligned_cpu32(p) << 32 |
		    __get_unaligned_cpu32(p + 4);
#endif
}

static inline u16 get_unaligned_le16(const void *p)
{
	return le16_to_cpu(__get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_le32(const void *p)
{
	return le32_to_cpu(__get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_le64(const void *p)
{
	return le64_to_cpu(__get_unaligned_cpu64(p));
}

static inline u16 get_unaligned_be16(const void *p)
{
	return be16_to_cpu(__get_unaligned_cpu16(p));
}

static inline u32 get_unaligned_be32(const void *p)
{
	return be32_to_cpu(__get_unaligned_cpu32(p));
}

static inline u64 get_unaligned_be64(const void *p)
{
	return be64_to_cpu(__get_unaligned_cpu64(p));
}

static inline void __put_le16_noalign(u8 *p, u16 val)
{
	*p++ = val;
	*p++ = val >> 8;
}

static inline void __put_le32_noalign(u8 *p, u32 val)
{
	__put_le16_noalign(p, val);
	__put_le16_noalign(p + 2, val >> 16);
}

static inline void __put_le64_noalign(u8 *p, u64 val)
{
	__put_le32_noalign(p, val);
	__put_le32_noalign(p + 4, val >> 32);
}

static inline void __put_be16_noalign(u8 *p, u16 val)
{
	*p++ = val >> 8;
	*p++ = val;
}

static inline void __put_be32_noalign(u8 *p, u32 val)
{
	__put_be16_noalign(p, val >> 16);
	__put_be16_noalign(p + 2, val);
}

static inline void __put_be64_noalign(u8 *p, u64 val)
{
	__put_be32_noalign(p, val >> 32);
	__put_be32_noalign(p + 4, val);
}

static inline void put_unaligned_le16(u16 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u16 *)p)->x = val;
#else
	__put_le16_noalign(p, val);
#endif
}

static inline void put_unaligned_le32(u32 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u32 *)p)->x = val;
#else
	__put_le32_noalign(p, val);
#endif
}

static inline void put_unaligned_le64(u64 val, void *p)
{
#ifdef __LITTLE_ENDIAN
	((struct __una_u64 *)p)->x = val;
#else
	__put_le64_noalign(p, val);
#endif
}

static inline void put_unaligned_be16(u16 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u16 *)p)->x = val;
#else
	__put_be16_noalign(p, val);
#endif
}

static inline void put_unaligned_be32(u32 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u32 *)p)->x = val;
#else
	__put_be32_noalign(p, val);
#endif
}

static inline void put_unaligned_be64(u64 val, void *p)
{
#ifdef __BIG_ENDIAN
	((struct __una_u64 *)p)->x = val;
#else
	__put_be64_noalign(p, val);
#endif
}

/*
 * Cause a link-time error if we try an unaligned access other than
 * 1,2,4 or 8 bytes long
 */
extern void __bad_unaligned_access_size(void);

#define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({			\
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)),	\
	__bad_unaligned_access_size()))));					\
	}))

#define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({			\
	__builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr),			\
	__builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)),	\
	__builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)),	\
	__bad_unaligned_access_size()))));					\
	}))

#define __put_unaligned_le(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_le16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_le32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_le64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })

#define __put_unaligned_be(val, ptr) ({					\
	void *__gu_p = (ptr);						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		*(u8 *)__gu_p = (__force u8)(val);			\
		break;							\
	case 2:								\
		put_unaligned_be16((__force u16)(val), __gu_p);		\
		break;							\
	case 4:								\
		put_unaligned_be32((__force u32)(val), __gu_p);		\
		break;							\
	case 8:								\
		put_unaligned_be64((__force u64)(val), __gu_p);		\
		break;							\
	default:							\
		__bad_unaligned_access_size();				\
		break;							\
	}								\
	(void)0; })

#ifdef __LITTLE_ENDIAN
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#else
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#endif

#endif /* __ASM_SH_UNALIGNED_SH4A_H */
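As the header comment notes, the 16- and 64-bit getters are synthesized from the 32-bit movua.l load by shifting and masking. A little-endian user-space sketch of that composition, using memcpy in place of movua.l (the instruction is SH-4A only) and demo_ names that are illustration only:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for __get_unaligned_cpu32(): any unaligned-safe 32-bit load. */
static uint32_t demo_get_cpu32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;
}

/* Little-endian composition used by __get_unaligned_cpu16() above
 * (note: like the original, this reads a full 32 bits even for 16). */
static uint16_t demo_get_cpu16(const uint8_t *p)
{
	return demo_get_cpu32(p) & 0xffff;
}

/* Little-endian composition used by __get_unaligned_cpu64() above. */
static uint64_t demo_get_cpu64(const uint8_t *p)
{
	return (uint64_t)demo_get_cpu32(p + 4) << 32 | demo_get_cpu32(p);
}

int main(void)
{
	/* Deliberately read from an odd offset into the buffer. */
	uint8_t buf[16] = { 0xff, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88, 0x99 };

	printf("%#x %#llx\n", demo_get_cpu16(buf + 1),
	       (unsigned long long)demo_get_cpu64(buf + 1));
	return 0;
}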
@@ -1,7 +1,11 @@
#ifndef _ASM_SH_UNALIGNED_H
#define _ASM_SH_UNALIGNED_H

/* SH can't handle unaligned accesses. */
#ifdef CONFIG_CPU_SH4A
/* SH-4A can handle unaligned loads in a relatively neutered fashion. */
#include <asm/unaligned-sh4a.h>
#else
/* Otherwise, SH can't handle unaligned accesses. */
#ifdef __LITTLE_ENDIAN__
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
@@ -15,5 +19,6 @@
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#endif
#endif

#endif /* _ASM_SH_UNALIGNED_H */