Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (144 commits)
  powerpc/44x: Support 16K/64K base page sizes on 44x
  powerpc: Force memory size to be a multiple of PAGE_SIZE
  powerpc/32: Wire up the trampoline code for kdump
  powerpc/32: Add the ability for a classic ppc kernel to be loaded at 32M
  powerpc/32: Allow __ioremap on RAM addresses for kdump kernel
  powerpc/32: Setup OF properties for kdump
  powerpc/32/kdump: Implement crash_setup_regs() using ppc_save_regs()
  powerpc: Prepare xmon_save_regs for use with kdump
  powerpc: Remove default kexec/crash_kernel ops assignments
  powerpc: Make default kexec/crash_kernel ops implicit
  powerpc: Setup OF properties for ppc32 kexec
  powerpc/pseries: Fix cpu hotplug
  powerpc: Fix KVM build on ppc440
  powerpc/cell: add QPACE as a separate Cell platform
  powerpc/cell: fix build breakage with CONFIG_SPUFS disabled
  powerpc/mpc5200: fix error paths in PSC UART probe function
  powerpc/mpc5200: add rts/cts handling in PSC UART driver
  powerpc/mpc5200: Make PSC UART driver update serial errors counters
  powerpc/mpc5200: Remove obsolete code from mpc5200 MDIO driver
  powerpc/mpc5200: Add MDMA/UDMA support to MPC5200 ATA driver
  ...

Fix trivial conflict in drivers/char/Makefile as per Paul's directions
@@ -111,7 +111,7 @@ static __inline__ void atomic_inc(atomic_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ int atomic_inc_return(atomic_t *v)
@@ -128,7 +128,7 @@ static __inline__ int atomic_inc_return(atomic_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -155,7 +155,7 @@ static __inline__ void atomic_dec(atomic_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ int atomic_dec_return(atomic_t *v)
@@ -172,7 +172,7 @@ static __inline__ int atomic_dec_return(atomic_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -346,7 +346,7 @@ static __inline__ void atomic64_inc(atomic64_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ long atomic64_inc_return(atomic64_t *v)
@@ -362,7 +362,7 @@ static __inline__ long atomic64_inc_return(atomic64_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -388,7 +388,7 @@ static __inline__ void atomic64_dec(atomic64_t *v)
 	bne-	1b"
 	: "=&r" (t), "+m" (v->counter)
 	: "r" (&v->counter)
-	: "cc");
+	: "cc", "xer");
 }
 
 static __inline__ long atomic64_dec_return(atomic64_t *v)
@@ -404,7 +404,7 @@ static __inline__ long atomic64_dec_return(atomic64_t *v)
 	ISYNC_ON_SMP
 	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -431,7 +431,7 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 	"\n\
 2:"	: "=&r" (t)
 	: "r" (&v->counter)
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }

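Reviewer note on the hunks above (not part of the diff): these loops use carrying arithmetic such as addic, which writes the CA bit in XER, so "xer" belongs in the clobber list or the compiler may assume the carry survives the asm. A minimal stand-alone sketch of the same constraint; it only builds with a powerpc toolchain and the names are illustrative:

#include <stdio.h>

/* addic (add immediate carrying) sets XER[CA]; the "xer" clobber tells
 * the compiler that the carry bit does not survive this asm. */
static inline int inc_carrying(int v)
{
	int t;
	__asm__ __volatile__(
		"addic %0,%1,1"		/* t = v + 1, updates XER[CA] */
		: "=r" (t)
		: "r" (v)
		: "xer");
	return t;
}

int main(void)
{
	printf("%d\n", inc_carrying(41));	/* prints 42 */
	return 0;
}
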
@@ -3,6 +3,7 @@
 #ifdef __KERNEL__
 
+#include <asm/asm-compat.h>
 
 /*
  * Define an illegal instr to trap on the bug.
  * We don't use 0 because that marks the end of a function
@@ -14,6 +15,7 @@
 #ifdef CONFIG_BUG
 
 #ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
 #ifdef CONFIG_DEBUG_BUGVERBOSE
 .macro EMIT_BUG_ENTRY addr,file,line,flags
 	 .section __bug_table,"a"
@@ -26,7 +28,7 @@
 	 .previous
 .endm
 #else
-.macro EMIT_BUG_ENTRY addr,file,line,flags
+.macro EMIT_BUG_ENTRY addr,file,line,flags
 	 .section __bug_table,"a"
 5001:	 PPC_LONG \addr
 	 .short \flags
@@ -113,6 +115,13 @@
 #define HAVE_ARCH_BUG_ON
 #define HAVE_ARCH_WARN_ON
 #endif /* __ASSEMBLY __ */
+#else
+#ifdef __ASSEMBLY__
+.macro EMIT_BUG_ENTRY addr,file,line,flags
+.endm
+#else /* !__ASSEMBLY__ */
+#define _EMIT_BUG_ENTRY
+#endif
 #endif /* CONFIG_BUG */
 
 #include <asm-generic/bug.h>

@@ -11,6 +11,8 @@
 #include <asm/types.h>
 #include <linux/compiler.h>
 
+#define __BIG_ENDIAN
+
 #ifdef __GNUC__
 #ifdef __KERNEL__
 
@@ -21,12 +23,19 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
 	__asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
 	return val;
 }
+#define __arch_swab16p ld_le16
 
 static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
 {
 	__asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
+static inline void __arch_swab16s(__u16 *addr)
+{
+	st_le16(addr, *addr);
+}
+#define __arch_swab16s __arch_swab16s
+
 static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 {
 	__u32 val;
@@ -34,13 +43,20 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 	__asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
 	return val;
 }
+#define __arch_swab32p ld_le32
 
 static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
 {
 	__asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
+static inline void __arch_swab32s(__u32 *addr)
+{
+	st_le32(addr, *addr);
+}
+#define __arch_swab32s __arch_swab32s
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
 {
 	__u16 result;
 
@@ -49,8 +65,9 @@ static __inline__ __attribute_const__ __u16 ___arch__swab16(__u16 value)
 	: "r" (value), "0" (value >> 8));
 	return result;
 }
+#define __arch_swab16 __arch_swab16
 
-static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
+static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
 {
 	__u32 result;
 
@@ -61,29 +78,16 @@ static __inline__ __attribute_const__ __u32 ___arch__swab32(__u32 value)
 	: "r" (value), "0" (value >> 24));
 	return result;
 }
-
-#define __arch__swab16(x) ___arch__swab16(x)
-#define __arch__swab32(x) ___arch__swab32(x)
-
-/* The same, but returns converted value from the location pointer by addr. */
-#define __arch__swab16p(addr) ld_le16(addr)
-#define __arch__swab32p(addr) ld_le32(addr)
-
-/* The same, but do the conversion in situ, ie. put the value back to addr. */
-#define __arch__swab16s(addr) st_le16(addr,*addr)
-#define __arch__swab32s(addr) st_le32(addr,*addr)
+#define __arch_swab32 __arch_swab32
 
 #endif /* __KERNEL__ */
 
-#ifndef __STRICT_ANSI__
-#define __BYTEORDER_HAS_U64__
 #ifndef __powerpc64__
 #define __SWAB_64_THRU_32__
 #endif /* __powerpc64__ */
-#endif /* __STRICT_ANSI__ */
 
 #endif /* __GNUC__ */
 
-#include <linux/byteorder/big_endian.h>
+#include <linux/byteorder.h>
 
 #endif /* _ASM_POWERPC_BYTEORDER_H */

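For context on the hunk above: the new linux/byteorder.h framework keys off per-arch __arch_swab* overrides (here backed by lhbrx/lwbrx) and supplies generic fallbacks for whatever an arch leaves undefined. A minimal, self-contained model of that dispatch, with hypothetical macro usage mirroring the kernel's convention rather than the kernel's actual header:

#include <stdint.h>

/* If the arch provided __arch_swab16 (as powerpc does above), the generic
 * layer uses it; otherwise it falls back to plain shifts like this. */
#ifndef __arch_swab16
static inline uint16_t __arch_swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));	/* generic byte swap */
}
#endif

int main(void)
{
	return __arch_swab16(0x1234) == 0x3412 ? 0 : 1;	/* exit 0 = OK */
}
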
@@ -82,6 +82,7 @@ struct cpu_spec {
 	char		*cpu_name;
 	unsigned long	cpu_features;		/* Kernel features */
 	unsigned int	cpu_user_features;	/* Userland features */
+	unsigned int	mmu_features;		/* MMU features */
 
 	/* cache line sizes */
 	unsigned int	icache_bsize;
@@ -144,17 +145,14 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_USE_TB			ASM_CONST(0x0000000000000040)
 #define CPU_FTR_L2CSR			ASM_CONST(0x0000000000000080)
 #define CPU_FTR_601			ASM_CONST(0x0000000000000100)
-#define CPU_FTR_HPTE_TABLE		ASM_CONST(0x0000000000000200)
 #define CPU_FTR_CAN_NAP			ASM_CONST(0x0000000000000400)
 #define CPU_FTR_L3CR			ASM_CONST(0x0000000000000800)
 #define CPU_FTR_L3_DISABLE_NAP		ASM_CONST(0x0000000000001000)
 #define CPU_FTR_NAP_DISABLE_L2_PR	ASM_CONST(0x0000000000002000)
 #define CPU_FTR_DUAL_PLL_750FX		ASM_CONST(0x0000000000004000)
 #define CPU_FTR_NO_DPM			ASM_CONST(0x0000000000008000)
-#define CPU_FTR_HAS_HIGH_BATS		ASM_CONST(0x0000000000010000)
 #define CPU_FTR_NEED_COHERENT		ASM_CONST(0x0000000000020000)
 #define CPU_FTR_NO_BTIC			ASM_CONST(0x0000000000040000)
-#define CPU_FTR_BIG_PHYS		ASM_CONST(0x0000000000080000)
 #define CPU_FTR_NODSISRALIGN		ASM_CONST(0x0000000000100000)
 #define CPU_FTR_PPC_LE			ASM_CONST(0x0000000000200000)
 #define CPU_FTR_REAL_LE			ASM_CONST(0x0000000000400000)
@@ -163,6 +161,8 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_SPE			ASM_CONST(0x0000000002000000)
 #define CPU_FTR_NEED_PAIRED_STWCX	ASM_CONST(0x0000000004000000)
 #define CPU_FTR_LWSYNC			ASM_CONST(0x0000000008000000)
+#define CPU_FTR_NOEXECUTE		ASM_CONST(0x0000000010000000)
+#define CPU_FTR_INDEXED_DCR		ASM_CONST(0x0000000020000000)
 
 /*
  * Add the 64-bit processor unique features in the top half of the word;
@@ -177,7 +177,6 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_SLB			LONG_ASM_CONST(0x0000000100000000)
 #define CPU_FTR_16M_PAGE		LONG_ASM_CONST(0x0000000200000000)
 #define CPU_FTR_TLBIEL			LONG_ASM_CONST(0x0000000400000000)
-#define CPU_FTR_NOEXECUTE		LONG_ASM_CONST(0x0000000800000000)
 #define CPU_FTR_IABR			LONG_ASM_CONST(0x0000002000000000)
 #define CPU_FTR_MMCRA			LONG_ASM_CONST(0x0000004000000000)
 #define CPU_FTR_CTRL			LONG_ASM_CONST(0x0000008000000000)
@@ -194,6 +193,7 @@ extern const char *powerpc_base_platform;
 #define CPU_FTR_VSX			LONG_ASM_CONST(0x0010000000000000)
 #define CPU_FTR_SAO			LONG_ASM_CONST(0x0020000000000000)
 #define CPU_FTR_CP_USE_DCBTZ		LONG_ASM_CONST(0x0040000000000000)
+#define CPU_FTR_UNALIGNED_LD_STD	LONG_ASM_CONST(0x0080000000000000)
 
 #ifndef __ASSEMBLY__
 
@@ -264,164 +264,159 @@ extern const char *powerpc_base_platform;
 	  !defined(CONFIG_POWER3) && !defined(CONFIG_POWER4) && \
 	  !defined(CONFIG_BOOKE))
 
-#define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | CPU_FTR_HPTE_TABLE | \
+#define CPU_FTRS_PPC601	(CPU_FTR_COMMON | CPU_FTR_601 | \
 	CPU_FTR_COHERENT_ICACHE | CPU_FTR_UNIFIED_ID_CACHE)
 #define CPU_FTRS_603	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_604	(CPU_FTR_COMMON | \
-	    CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | CPU_FTR_PPC_LE)
+	    CPU_FTR_USE_TB | CPU_FTR_PPC_LE)
 #define CPU_FTRS_740_NOTAU	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_740	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_PPC_LE)
 #define CPU_FTRS_750	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_HPTE_TABLE | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_TAU | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_PPC_LE)
-#define CPU_FTRS_750CL	(CPU_FTRS_750 | CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750CL	(CPU_FTRS_750)
 #define CPU_FTRS_750FX1	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | CPU_FTR_NO_DPM)
 #define CPU_FTRS_750FX2	(CPU_FTRS_750 | CPU_FTR_NO_DPM)
-#define CPU_FTRS_750FX	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX | \
-	    CPU_FTR_HAS_HIGH_BATS)
+#define CPU_FTRS_750FX	(CPU_FTRS_750 | CPU_FTR_DUAL_PLL_750FX)
 #define CPU_FTRS_750GX	(CPU_FTRS_750FX)
 #define CPU_FTRS_7400_NOTAU	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+	    CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7400	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | CPU_FTR_L2CR | \
-	    CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | CPU_FTR_HPTE_TABLE | \
+	    CPU_FTR_TAU | CPU_FTR_ALTIVEC_COMP | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7450_20	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7450_21	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7450_23	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455_1	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
 	    CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | CPU_FTR_L3CR | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | CPU_FTR_HAS_HIGH_BATS | \
-	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
+	    CPU_FTR_SPEC7450 | CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455_20	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | CPU_FTR_NEED_PAIRED_STWCX | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | \
 	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_L3_DISABLE_NAP | \
-	    CPU_FTR_NEED_COHERENT | CPU_FTR_HAS_HIGH_BATS | CPU_FTR_PPC_LE)
+	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE)
 #define CPU_FTRS_7455	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7447_10	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_NO_BTIC | CPU_FTR_PPC_LE | \
 	    CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7447	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_L3CR | CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	    CPU_FTR_L3CR | CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7447A	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	    CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	    CPU_FTR_NEED_COHERENT | CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_7448	(CPU_FTR_COMMON | \
 	    CPU_FTR_USE_TB | \
 	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_L2CR | CPU_FTR_ALTIVEC_COMP | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_SPEC7450 | \
-	    CPU_FTR_NAP_DISABLE_L2_PR | CPU_FTR_HAS_HIGH_BATS | \
+	    CPU_FTR_SPEC7450 | CPU_FTR_NAP_DISABLE_L2_PR | \
 	    CPU_FTR_PPC_LE | CPU_FTR_NEED_PAIRED_STWCX)
 #define CPU_FTRS_82XX	(CPU_FTR_COMMON | \
 	    CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB)
 #define CPU_FTRS_G2_LE	(CPU_FTR_COMMON | CPU_FTR_MAYBE_CAN_DOZE | \
-	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS)
+	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP)
 #define CPU_FTRS_E300	(CPU_FTR_MAYBE_CAN_DOZE | \
-	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_COMMON)
 #define CPU_FTRS_E300C2	(CPU_FTR_MAYBE_CAN_DOZE | \
-	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_HAS_HIGH_BATS | \
+	    CPU_FTR_USE_TB | CPU_FTR_MAYBE_CAN_NAP | \
 	    CPU_FTR_COMMON | CPU_FTR_FPU_UNAVAILABLE)
-#define CPU_FTRS_CLASSIC32	(CPU_FTR_COMMON | \
-	    CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE)
+#define CPU_FTRS_CLASSIC32	(CPU_FTR_COMMON | CPU_FTR_USE_TB)
 #define CPU_FTRS_8XX	(CPU_FTR_USE_TB)
-#define CPU_FTRS_40X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
-#define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN)
+#define CPU_FTRS_40X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_44X	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
+#define CPU_FTRS_440x6	(CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE | \
	    CPU_FTR_INDEXED_DCR)
 #define CPU_FTRS_E200	(CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
 	    CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
-	    CPU_FTR_UNIFIED_ID_CACHE)
+	    CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN)
+	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500_2	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | \
-	    CPU_FTR_NODSISRALIGN)
+	    CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \
+	    CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_E500MC	(CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
-	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_BIG_PHYS | CPU_FTR_NODSISRALIGN | \
-	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC)
+	    CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
+	    CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE)
 #define CPU_FTRS_GENERIC_32	(CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
 
 /* 64-bit CPUs */
 #define CPU_FTRS_POWER3	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | CPU_FTR_PPC_LE)
+	    CPU_FTR_IABR | CPU_FTR_PPC_LE)
 #define CPU_FTRS_RS64	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_IABR | \
+	    CPU_FTR_IABR | \
 	    CPU_FTR_MMCRA | CPU_FTR_CTRL)
 #define CPU_FTRS_POWER4	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ)
 #define CPU_FTRS_PPC970	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \
 	    CPU_FTR_CP_USE_DCBTZ)
 #define CPU_FTRS_POWER5	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR)
 #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
-	    CPU_FTR_DSCR)
+	    CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD)
 #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \
 	    CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \
 	    CPU_FTR_DSCR | CPU_FTR_SAO)
 #define CPU_FTRS_CELL	(CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
+	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
 	    CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \
-	    CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ)
+	    CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \
+	    CPU_FTR_UNALIGNED_LD_STD)
 #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
+	    CPU_FTR_PPCAS_ARCH_V2 | \
 	    CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \
 	    CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B)
-#define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | \
-	    CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
+#define CPU_FTRS_COMPATIBLE	(CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
 
 #ifdef __powerpc64__
 #define CPU_FTRS_POSSIBLE	\
@@ -452,7 +447,7 @@ enum {
 	    CPU_FTRS_40X |
 #endif
 #ifdef CONFIG_44x
-	    CPU_FTRS_44X |
+	    CPU_FTRS_44X | CPU_FTRS_440x6 |
 #endif
 #ifdef CONFIG_E200
 	    CPU_FTRS_E200 |
@@ -492,7 +487,7 @@ enum {
 	    CPU_FTRS_40X &
 #endif
 #ifdef CONFIG_44x
-	    CPU_FTRS_44X &
+	    CPU_FTRS_44X & CPU_FTRS_440x6 &
 #endif
 #ifdef CONFIG_E200
 	    CPU_FTRS_E200 &

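Aside on why these masks are structured this way: cpu_has_feature() tests a feature against the ALWAYS/POSSIBLE aggregates so the compiler can fold the check to a constant whenever every (or no) configured CPU type has the feature. A simplified, stand-alone model of that idiom; all names here are illustrative, not kernel symbols:

#define FTR_FOO		0x1UL
#define FTR_BAR		0x2UL
#define FTRS_ALWAYS	FTR_FOO			/* AND of all configured masks */
#define FTRS_POSSIBLE	(FTR_FOO | FTR_BAR)	/* OR of all configured masks */

static unsigned long cur_features = FTR_FOO;	/* runtime-detected set */

static inline int has_feature(unsigned long feature)
{
	if (FTRS_ALWAYS & feature)		/* folds to 1 at compile time */
		return 1;
	if (!(FTRS_POSSIBLE & feature))		/* folds to 0 at compile time */
		return 0;
	return (cur_features & feature) != 0;	/* real runtime test */
}

int main(void)
{
	return has_feature(FTR_BAR) ? 0 : 1;
}
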
@@ -23,6 +23,7 @@
 #ifndef __ASSEMBLY__
 
 #include <linux/spinlock.h>
+#include <asm/cputable.h>
 
 typedef struct {
 	unsigned int base;
@@ -39,23 +40,45 @@ static inline bool dcr_map_ok_native(dcr_host_native_t host)
 #define dcr_read_native(host, dcr_n)		mfdcr(dcr_n + host.base)
 #define dcr_write_native(host, dcr_n, value)	mtdcr(dcr_n + host.base, value)
 
-/* Device Control Registers */
-void __mtdcr(int reg, unsigned int val);
-unsigned int __mfdcr(int reg);
+/* Table based DCR accessors */
+extern void __mtdcr(unsigned int reg, unsigned int val);
+extern unsigned int __mfdcr(unsigned int reg);
+
+/* mfdcrx/mtdcrx instruction based accessors. We hand code
+ * the opcodes in order not to depend on newer binutils
+ */
+static inline unsigned int mfdcrx(unsigned int reg)
+{
+	unsigned int ret;
+	asm volatile(".long 0x7c000206 | (%0 << 21) | (%1 << 16)"
+		     : "=r" (ret) : "r" (reg));
+	return ret;
+}
+
+static inline void mtdcrx(unsigned int reg, unsigned int val)
+{
+	asm volatile(".long 0x7c000306 | (%0 << 21) | (%1 << 16)"
+		     : : "r" (val), "r" (reg));
+}
+
 #define mfdcr(rn)						\
 	({unsigned int rval;					\
-	if (__builtin_constant_p(rn))				\
+	if (__builtin_constant_p(rn) && rn < 1024)		\
 		asm volatile("mfdcr %0," __stringify(rn)	\
 		              : "=r" (rval));			\
+	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+		rval = mfdcrx(rn);				\
 	else							\
 		rval = __mfdcr(rn);				\
 	rval;})
 
 #define mtdcr(rn, v)						\
 do {								\
-	if (__builtin_constant_p(rn))				\
+	if (__builtin_constant_p(rn) && rn < 1024)		\
 		asm volatile("mtdcr " __stringify(rn) ",%0"	\
 			      : : "r" (v));			\
+	else if (likely(cpu_has_feature(CPU_FTR_INDEXED_DCR)))	\
+		mtdcrx(rn, v);					\
 	else							\
 		__mtdcr(rn, v);					\
 } while (0)
@@ -69,8 +92,13 @@ static inline unsigned __mfdcri(int base_addr, int base_data, int reg)
 	unsigned int val;
 
 	spin_lock_irqsave(&dcr_ind_lock, flags);
-	__mtdcr(base_addr, reg);
-	val = __mfdcr(base_data);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		val = mfdcrx(base_data);
+	} else {
+		__mtdcr(base_addr, reg);
+		val = __mfdcr(base_data);
+	}
 	spin_unlock_irqrestore(&dcr_ind_lock, flags);
 	return val;
 }
@@ -81,8 +109,13 @@ static inline void __mtdcri(int base_addr, int base_data, int reg,
 	unsigned long flags;
 
 	spin_lock_irqsave(&dcr_ind_lock, flags);
-	__mtdcr(base_addr, reg);
-	__mtdcr(base_data, val);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		mtdcrx(base_data, val);
+	} else {
+		__mtdcr(base_addr, reg);
+		__mtdcr(base_data, val);
+	}
 	spin_unlock_irqrestore(&dcr_ind_lock, flags);
 }
@@ -93,9 +126,15 @@ static inline void __dcri_clrset(int base_addr, int base_data, int reg,
 	unsigned int val;
 
 	spin_lock_irqsave(&dcr_ind_lock, flags);
-	__mtdcr(base_addr, reg);
-	val = (__mfdcr(base_data) & ~clr) | set;
-	__mtdcr(base_data, val);
+	if (cpu_has_feature(CPU_FTR_INDEXED_DCR)) {
+		mtdcrx(base_addr, reg);
+		val = (mfdcrx(base_data) & ~clr) | set;
+		mtdcrx(base_data, val);
+	} else {
+		__mtdcr(base_addr, reg);
+		val = (__mfdcr(base_data) & ~clr) | set;
+		__mtdcr(base_data, val);
+	}
 	spin_unlock_irqrestore(&dcr_ind_lock, flags);
 }

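On the hand-coded opcodes above: 0x7c000206 and 0x7c000306 are the mfdcrx/mtdcrx instruction words with both register fields zeroed, and on powerpc the asm operands expand to bare register numbers, which the .long expression shifts into bits 21-25 and 16-20. A quick host-side check of that arithmetic (illustrative only, not kernel code):

#include <stdio.h>

int main(void)
{
	unsigned int r1 = 3, r2 = 4;	/* the two operand GPR numbers */
	unsigned int mfdcrx_insn = 0x7c000206 | (r1 << 21) | (r2 << 16);
	unsigned int mtdcrx_insn = 0x7c000306 | (r1 << 21) | (r2 << 16);
	printf("mfdcrx: 0x%08x  mtdcrx: 0x%08x\n", mfdcrx_insn, mtdcrx_insn);
	return 0;
}
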
@@ -68,9 +68,9 @@ typedef dcr_host_mmio_t dcr_host_t;
  * additional helpers to read the DCR * base from the device-tree
  */
 struct device_node;
-extern unsigned int dcr_resource_start(struct device_node *np,
+extern unsigned int dcr_resource_start(const struct device_node *np,
 				       unsigned int index);
-extern unsigned int dcr_resource_len(struct device_node *np,
+extern unsigned int dcr_resource_len(const struct device_node *np,
 				     unsigned int index);
 #endif /* CONFIG_PPC_DCR */
 #endif /* __ASSEMBLY__ */

@@ -18,4 +18,16 @@ struct dev_archdata {
 	void		*dma_data;
 };
 
+static inline void dev_archdata_set_node(struct dev_archdata *ad,
+					 struct device_node *np)
+{
+	ad->of_node = np;
+}
+
+static inline struct device_node *
+dev_archdata_get_node(const struct dev_archdata *ad)
+{
+	return ad->of_node;
+}
+
 #endif /* _ASM_POWERPC_DEVICE_H */

@@ -60,12 +60,6 @@ struct dma_mapping_ops {
 				dma_addr_t *dma_handle, gfp_t flag);
 	void		(*free_coherent)(struct device *dev, size_t size,
 				void *vaddr, dma_addr_t dma_handle);
-	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
-				size_t size, enum dma_data_direction direction,
-				struct dma_attrs *attrs);
-	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
-				size_t size, enum dma_data_direction direction,
-				struct dma_attrs *attrs);
 	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
 				int nents, enum dma_data_direction direction,
 				struct dma_attrs *attrs);
@@ -82,6 +76,22 @@ struct dma_mapping_ops {
 				dma_addr_t dma_address, size_t size,
 				enum dma_data_direction direction,
 				struct dma_attrs *attrs);
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
+	void		(*sync_single_range_for_cpu)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void		(*sync_single_range_for_device)(struct device *hwdev,
+				dma_addr_t dma_handle, unsigned long offset,
+				size_t size,
+				enum dma_data_direction direction);
+	void		(*sync_sg_for_cpu)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
+	void		(*sync_sg_for_device)(struct device *hwdev,
+				struct scatterlist *sg, int nelems,
+				enum dma_data_direction direction);
+#endif
 };
 
 /*
@@ -149,10 +159,9 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 }
 
 /*
- * TODO: map_/unmap_single will ideally go away, to be completely
- * replaced by map/unmap_page.   Until then, we allow dma_ops to have
- * one or the other, or both by checking to see if the specific
- * function requested exists; and if not, falling back on the other set.
+ * map_/unmap_single actually call through to map/unmap_page now that all the
+ * dma_mapping_ops have been converted over. We just have to get the page and
+ * offset to pass through to map_page
  */
 static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 					      void *cpu_addr,
@@ -164,10 +173,6 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->map_single)
-		return dma_ops->map_single(dev, cpu_addr, size, direction,
-					   attrs);
-
 	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
 				 (unsigned long)cpu_addr % PAGE_SIZE, size,
 				 direction, attrs);
@@ -183,11 +188,6 @@ static inline void dma_unmap_single_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->unmap_single) {
-		dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
-		return;
-	}
-
 	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
 }
 
@@ -201,12 +201,7 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->map_page)
-		return dma_ops->map_page(dev, page, offset, size, direction,
-					 attrs);
-
-	return dma_ops->map_single(dev, page_address(page) + offset, size,
-				   direction, attrs);
+	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
 }
 
 static inline void dma_unmap_page_attrs(struct device *dev,
@@ -219,12 +214,7 @@ static inline void dma_unmap_page_attrs(struct device *dev,
 
 	BUG_ON(!dma_ops);
 
-	if (dma_ops->unmap_page) {
-		dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
-		return;
-	}
-
-	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
+	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
 }
 
 static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
@@ -308,48 +298,108 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
 }
 
+#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
 static inline void dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
+					   size, direction);
 }
 
 static inline void dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size,
 		enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-	__dma_sync(bus_to_virt(dma_handle), size, direction);
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle,
+					      0, size, direction);
 }
 
 static inline void dma_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents,
 		enum dma_data_direction direction)
 {
-	struct scatterlist *sg;
-	int i;
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
 
-	BUG_ON(direction == DMA_NONE);
-
-	for_each_sg(sgl, sg, nents, i)
-		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+	BUG_ON(!dma_ops);
+	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
 }
 
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
+					   offset, size, direction);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
+
+	BUG_ON(!dma_ops);
+	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
+					      size, direction);
+}
+#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
+static inline void dma_sync_single_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+		dma_addr_t dma_handle, size_t size,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+		dma_addr_t dma_handle, unsigned long offset, size_t size,
+		enum dma_data_direction direction)
+{
+}
+#endif
+
 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 #ifdef CONFIG_PPC64
@@ -382,22 +432,6 @@ static inline int dma_get_cache_alignment(void)
 #endif
 }
 
-static inline void dma_sync_single_range_for_cpu(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
-}
-
-static inline void dma_sync_single_range_for_device(struct device *dev,
-		dma_addr_t dma_handle, unsigned long offset, size_t size,
-		enum dma_data_direction direction)
-{
-	/* just sync everything for now */
-	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {

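Design note on the section above (not from the tree): with map_single/unmap_single removed, map_page/unmap_page become mandatory hooks, so the call sites drop their capability checks and just BUG_ON a missing ops table. A tiny stand-alone model of that "mandatory hook" shape; every name below is invented for illustration:

#include <stddef.h>

struct ops {
	int (*map_page)(unsigned long pfn, size_t off, size_t len);
};

static int demo_map_page(unsigned long pfn, size_t off, size_t len)
{
	(void)pfn; (void)off; (void)len;
	return 0;
}

static struct ops demo_ops = { .map_page = demo_map_page };

int main(void)
{
	/* After the conversion there is no "if (ops->map_single)" fallback:
	 * map_page is assumed present, mirroring the BUG_ON(!dma_ops) style. */
	return demo_ops.map_page(0x1234, 16, 256);
}
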
@@ -17,8 +17,8 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#ifndef _PPC64_EEH_H
-#define _PPC64_EEH_H
+#ifndef _POWERPC_EEH_H
+#define _POWERPC_EEH_H
 #ifdef __KERNEL__
 
 #include <linux/init.h>
@@ -110,6 +110,7 @@ static inline void eeh_remove_bus_device(struct pci_dev *dev) { }
 #define EEH_IO_ERROR_VALUE(size)	(-1UL)
 #endif /* CONFIG_EEH */
 
+#ifdef CONFIG_PPC64
 /*
  * MMIO read/write operations with EEH support.
  */
@@ -207,5 +208,6 @@ static inline void eeh_readsl(const volatile void __iomem *addr, void * buf,
 		eeh_check_failure(addr, *(u32*)buf);
 }
 
+#endif /* CONFIG_PPC64 */
 #endif /* __KERNEL__ */
-#endif /* _PPC64_EEH_H */
+#endif /* _POWERPC_EEH_H */

@@ -81,6 +81,36 @@ label##5: \
 #define ALT_FTR_SECTION_END_IFCLR(msk)	\
 	ALT_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
 
+/* MMU feature dependent sections */
+#define BEGIN_MMU_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
+#define BEGIN_MMU_FTR_SECTION			START_FTR_SECTION(97)
+
+#define END_MMU_FTR_SECTION_NESTED(msk, val, label)		\
+	FTR_SECTION_ELSE_NESTED(label)				\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+
+#define END_MMU_FTR_SECTION(msk, val)		\
+	END_MMU_FTR_SECTION_NESTED(msk, val, 97)
+
+#define END_MMU_FTR_SECTION_IFSET(msk)	END_MMU_FTR_SECTION((msk), (msk))
+#define END_MMU_FTR_SECTION_IFCLR(msk)	END_MMU_FTR_SECTION((msk), 0)
+
+/* MMU feature sections with alternatives, use BEGIN_FTR_SECTION to start */
+#define MMU_FTR_SECTION_ELSE_NESTED(label)	FTR_SECTION_ELSE_NESTED(label)
+#define MMU_FTR_SECTION_ELSE	MMU_FTR_SECTION_ELSE_NESTED(97)
+#define ALT_MMU_FTR_SECTION_END_NESTED(msk, val, label)	\
+	MAKE_FTR_SECTION_ENTRY(msk, val, label, __mmu_ftr_fixup)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, label)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, msk, label)
+#define ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, label)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, 0, label)
+#define ALT_MMU_FTR_SECTION_END(msk, val)	\
+	ALT_MMU_FTR_SECTION_END_NESTED(msk, val, 97)
+#define ALT_MMU_FTR_SECTION_END_IFSET(msk)	\
+	ALT_MMU_FTR_SECTION_END_NESTED_IFSET(msk, 97)
+#define ALT_MMU_FTR_SECTION_END_IFCLR(msk)	\
+	ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(msk, 97)
+
 /* Firmware feature dependent sections */
 #define BEGIN_FW_FTR_SECTION_NESTED(label)	START_FTR_SECTION(label)
 #define BEGIN_FW_FTR_SECTION		START_FTR_SECTION(97)

@@ -38,9 +38,24 @@ extern pte_t *pkmap_page_table;
  * easily, subsequent pte tables have to be allocated in one physical
  * chunk of RAM.
  */
-#define LAST_PKMAP	(1 << PTE_SHIFT)
-#define LAST_PKMAP_MASK (LAST_PKMAP-1)
+/*
+ * We use one full pte table with 4K pages. And with 16K/64K pages pte
+ * table covers enough memory (32MB and 512MB resp.) that both FIXMAP
+ * and PKMAP can be placed in single pte table. We use 1024 pages for
+ * PKMAP in case of 16K/64K pages.
+ */
+#ifdef CONFIG_PPC_4K_PAGES
+#define PKMAP_ORDER	PTE_SHIFT
+#else
+#define PKMAP_ORDER	10
+#endif
+#define LAST_PKMAP	(1 << PKMAP_ORDER)
+#ifndef CONFIG_PPC_4K_PAGES
+#define PKMAP_BASE	(FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1))
+#else
 #define PKMAP_BASE ((FIXADDR_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)
+#endif
+#define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
@@ -85,7 +100,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
 	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
-	flush_tlb_page(NULL, vaddr);
+	local_flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
 }
@@ -113,7 +128,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
 	 * this pte without first remap it
 	 */
 	pte_clear(&init_mm, vaddr, kmap_pte-idx);
-	flush_tlb_page(NULL, vaddr);
+	local_flush_tlb_page(NULL, vaddr);
 #endif
 	pagefault_enable();
 }

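Quick arithmetic check on the PKMAP sizing above (user-space, illustrative): with non-4K pages PKMAP_ORDER is 10, i.e. 1024 entries.

#include <stdio.h>

int main(void)
{
	unsigned long last_pkmap = 1UL << 10;	/* PKMAP_ORDER = 10 */
	unsigned long page_64k = 1UL << 16;	/* 64K page size */
	printf("%lu entries * 64K = %lu MB\n",
	       last_pkmap, (last_pkmap * page_64k) >> 20);
	return 0;	/* 1024 entries * 64K = 64 MB of pkmap space */
}
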
@@ -713,13 +713,6 @@ static inline void * phys_to_virt(unsigned long address)
  */
 #define page_to_phys(page)	((phys_addr_t)page_to_pfn(page) << PAGE_SHIFT)
 
-/* We do NOT want virtual merging, it would put too much pressure on
- * our iommu allocator. Instead, we want drivers to be smart enough
- * to coalesce sglists that happen to have been mapped in a contiguous
- * way by the iommu
- */
-#define BIO_VMERGE_BOUNDARY	0
-
 /*
  * 32 bits still uses virt_to_bus() for it's implementation of DMA
  * mappings se we have to keep it defined here. We also have some old

@@ -1,6 +1,8 @@
 #ifndef _PPC64_KDUMP_H
 #define _PPC64_KDUMP_H
 
+#include <asm/page.h>
+
 /* Kdump kernel runs at 32 MB, change at your peril. */
 #define KDUMP_KERNELBASE	0x2000000
 
@@ -11,8 +13,19 @@
 
 #ifdef CONFIG_CRASH_DUMP
 
+/*
+ * On PPC64 translation is disabled during trampoline setup, so we use
+ * physical addresses. Though on PPC32 translation is already enabled,
+ * so we can't do the same. Luckily create_trampoline() creates relative
+ * branches, so we can just add the PAGE_OFFSET and don't worry about it.
+ */
+#ifdef __powerpc64__
 #define KDUMP_TRAMPOLINE_START	0x0100
 #define KDUMP_TRAMPOLINE_END	0x3000
+#else
+#define KDUMP_TRAMPOLINE_START	(0x0100 + PAGE_OFFSET)
+#define KDUMP_TRAMPOLINE_END	(0x3000 + PAGE_OFFSET)
+#endif /* __powerpc64__ */
 
 #define KDUMP_MIN_TCE_ENTRIES	2048
 

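To make the 32-bit case concrete: assuming the common PAGE_OFFSET of 0xc0000000 (a configurable value, so an assumption here), the trampoline range works out to 0xc0000100-0xc0003000:

#include <stdio.h>

int main(void)
{
	unsigned long page_offset = 0xc0000000UL;	/* assumed, configurable */
	printf("start=0x%lx end=0x%lx\n",
	       0x0100 + page_offset, 0x3000 + page_offset);
	return 0;
}
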
@@ -33,12 +33,12 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/cpumask.h>
+#include <asm/reg.h>
 
 typedef void (*crash_shutdown_t)(void);
 
 #ifdef CONFIG_KEXEC
 
-#ifdef __powerpc64__
 /*
  * This function is responsible for capturing register states if coming
  * via panic or invoking dump using sysrq-trigger.
@@ -48,6 +48,7 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 {
 	if (oldregs)
 		memcpy(newregs, oldregs, sizeof(*newregs));
+#ifdef __powerpc64__
 	else {
 		/* FIXME Merge this with xmon_save_regs ?? */
 		unsigned long tmp1, tmp2;
@@ -100,15 +101,11 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
 		: "b" (newregs)
 		: "memory");
 	}
-}
-#else
-/*
- * Provide a dummy definition to avoid build failures. Will remain
- * empty till crash dump support is enabled.
- */
-static inline void crash_setup_regs(struct pt_regs *newregs,
-					struct pt_regs *oldregs) { }
-#endif /* !__powerpc64 __ */
+	else
+		ppc_save_regs(newregs);
+#endif /* __powerpc64__ */
+}
 
 extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
 					   master to copy new code to 0 */

@@ -67,7 +67,7 @@ static __inline__ long local_inc_return(local_t *l)
 	bne-	1b"
 	: "=&r" (t)
 	: "r" (&(l->a.counter))
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }
@@ -94,7 +94,7 @@ static __inline__ long local_dec_return(local_t *l)
 	bne-	1b"
 	: "=&r" (t)
 	: "r" (&(l->a.counter))
-	: "cc", "memory");
+	: "cc", "xer", "memory");
 
 	return t;
 }

@@ -133,7 +133,8 @@ struct lppaca {
 //=============================================================================
 // CACHE_LINE_4-5 0x0180 - 0x027F Contains PMC interrupt data
 //=============================================================================
-	u8	pmc_save_area[256];	// PMC interrupt Area           x00-xFF
+	u32	page_ins;		// CMO Hint - # page ins by OS  x00-x04
+	u8	pmc_save_area[252];	// PMC interrupt Area           x04-xFF
 } __attribute__((__aligned__(0x400)));
 
 extern struct lppaca lppaca[];

@@ -54,8 +54,9 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
-	unsigned long id;
-	unsigned long vdso_base;
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */

@@ -4,6 +4,8 @@
  * PPC440 support
  */
 
+#include <asm/page.h>
+
 #define PPC44x_MMUCR_TID	0x000000ff
 #define PPC44x_MMUCR_STS	0x00010000
 
@@ -56,8 +58,9 @@
 extern unsigned int tlb_44x_hwater;
 
 typedef struct {
-	unsigned long id;
-	unsigned long vdso_base;
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
@@ -73,4 +76,19 @@ typedef struct {
 /* Size of the TLBs used for pinning in lowmem */
 #define PPC_PIN_SIZE	(1 << 28)	/* 256M */
 
+#if (PAGE_SHIFT == 12)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
+#elif (PAGE_SHIFT == 14)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_16K
+#elif (PAGE_SHIFT == 16)
+#define PPC44x_TLBE_SIZE	PPC44x_TLB_64K
+#else
+#error "Unsupported PAGE_SIZE"
+#endif
+
+#define PPC44x_PGD_OFF_SHIFT	(32 - PGDIR_SHIFT + PGD_T_LOG2)
+#define PPC44x_PGD_OFF_MASK_BIT	(PGDIR_SHIFT - PGD_T_LOG2)
+#define PPC44x_PTE_ADD_SHIFT	(32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2)
+#define PPC44x_PTE_ADD_MASK_BIT	(32 - PTE_T_LOG2 - PTE_SHIFT)
+
 #endif /* _ASM_POWERPC_MMU_44X_H_ */

@@ -137,7 +137,8 @@
 
 #ifndef __ASSEMBLY__
 typedef struct {
-	unsigned long id;
+	unsigned int id;
+	unsigned int active;
 	unsigned long vdso_base;
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */

@@ -40,6 +40,8 @@
 #define MAS2_M			0x00000004
 #define MAS2_G			0x00000002
 #define MAS2_E			0x00000001
+#define MAS2_EPN_MASK(size)	(~0 << (2*(size) + 10))
+#define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
 
 #define MAS3_RPN		0xFFFFF000
 #define MAS3_U0			0x00000200
@@ -74,8 +76,9 @@
 #ifndef __ASSEMBLY__
 
 typedef struct {
-	unsigned long id;
-	unsigned long vdso_base;
+	unsigned int	id;
+	unsigned int	active;
+	unsigned long	vdso_base;
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */

@@ -2,6 +2,63 @@
 #define _ASM_POWERPC_MMU_H_
 #ifdef __KERNEL__
 
+#include <asm/asm-compat.h>
+#include <asm/feature-fixups.h>
+
+/*
+ * MMU features bit definitions
+ */
+
+/*
+ * First half is MMU families
+ */
+#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
+#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
+#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
+#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
+#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
+
+/*
+ * This is individual features
+ */
+
+/* Enable use of high BAT registers */
+#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)
+
+/* Enable >32-bit physical addresses on 32-bit processor, only used
+ * by CONFIG_6xx currently as BookE supports that from day 1
+ */
+#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)
+
+/* Enable use of broadcast TLB invalidations. We don't always set it
+ * on processors that support it due to other constraints with the
+ * use of such invalidations
+ */
+#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)
+
+/* Enable use of tlbilx invalidate-by-PID variant.
+ */
+#define MMU_FTR_USE_TLBILX_PID		ASM_CONST(0x00080000)
+
+/* This indicates that the processor cannot handle multiple outstanding
+ * broadcast tlbivax or tlbsync. This makes the code use a spinlock
+ * around such invalidate forms.
+ */
+#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)
+
+#ifndef __ASSEMBLY__
+#include <asm/cputable.h>
+
+static inline int mmu_has_feature(unsigned long feature)
+{
+	return (cur_cpu_spec->mmu_features & feature);
+}
+
+extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
+
+#endif /* !__ASSEMBLY__ */
+
+
 #ifdef CONFIG_PPC64
 /* 64-bit classic hash table MMU */
 # include <asm/mmu-hash64.h>

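Usage sketch for the new mmu_has_feature() above. The kernel version reads cur_cpu_spec->mmu_features; the mock below is self-contained and purely illustrative:

#include <stdio.h>

#define MMU_FTR_USE_HIGH_BATS	0x00010000UL

struct cpu_spec_mock { unsigned long mmu_features; };
static struct cpu_spec_mock mock = { .mmu_features = MMU_FTR_USE_HIGH_BATS };

static inline int mmu_has_feature_mock(unsigned long feature)
{
	return (mock.mmu_features & feature) != 0;
}

int main(void)
{
	if (mmu_has_feature_mock(MMU_FTR_USE_HIGH_BATS))
		printf("high BATs usable\n");
	return 0;
}
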
@@ -2,237 +2,26 @@
 #define __ASM_POWERPC_MMU_CONTEXT_H
 #ifdef __KERNEL__
 
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
 #include <asm/mmu.h>
 #include <asm/cputable.h>
 #include <asm-generic/mm_hooks.h>
+#include <asm/cputhreads.h>
 
-#ifndef CONFIG_PPC64
-#include <asm/atomic.h>
-#include <linux/bitops.h>
-
 /*
- * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
- * (virtual segment identifiers) for each context.  Although the
- * hardware supports 24-bit VSIDs, and thus >1 million contexts,
- * we only use 32,768 of them.  That is ample, since there can be
- * at most around 30,000 tasks in the system anyway, and it means
- * that we can use a bitmap to indicate which contexts are in use.
- * Using a bitmap means that we entirely avoid all of the problems
- * that we used to have when the context number overflowed,
- * particularly on SMP systems.
- *  -- paulus.
+ * Most if the context management is out of line
  */
-
-/*
- * This function defines the mapping from contexts to VSIDs (virtual
- * segment IDs).  We use a skew on both the context and the high 4 bits
- * of the 32-bit virtual address (the "effective segment ID") in order
- * to spread out the entries in the MMU hash table.  Note, if this
- * function is changed then arch/ppc/mm/hashtable.S will have to be
- * changed to correspond.
- */
-#define CTX_TO_VSID(ctx, va)	(((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
-				 & 0xffffff)
-
-/*
-   The MPC8xx has only 16 contexts.  We rotate through them on each
-   task switch.  A better way would be to keep track of tasks that
-   own contexts, and implement an LRU usage.  That way very active
-   tasks don't always have to pay the TLB reload overhead.  The
-   kernel pages are mapped shared, so the kernel can run on behalf
-   of any task that makes a kernel entry.  Shared does not mean they
-   are not protected, just that the ASID comparison is not performed.
-        -- Dan
-
-   The IBM4xx has 256 contexts, so we can just rotate through these
-   as a way of "switching" contexts.  If the TID of the TLB is zero,
-   the PID/TID comparison is disabled, so we can use a TID of zero
-   to represent all kernel pages as shared among all contexts.
-   	-- Dan
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
-#ifdef CONFIG_8xx
-#define NO_CONTEXT      	16
-#define LAST_CONTEXT    	15
-#define FIRST_CONTEXT    	0
-
-#elif defined(CONFIG_4xx)
-#define NO_CONTEXT      	256
-#define LAST_CONTEXT    	255
-#define FIRST_CONTEXT    	1
-
-#elif defined(CONFIG_E200) || defined(CONFIG_E500)
-#define NO_CONTEXT      	256
-#define LAST_CONTEXT    	255
-#define FIRST_CONTEXT    	1
-
-#else
-
-/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT      	((unsigned long) -1)
-#define LAST_CONTEXT    	32767
-#define FIRST_CONTEXT    	1
-#endif
-
-/*
- * Set the current MMU context.
- * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
- * loading up the segment registers for the user part of the address space.
- *
- * Since the PGD is immediately available, it is much faster to simply
- * pass this along as a second parameter, which is required for 8xx and
- * can be used for debugging on all processors (if you happen to have
- * an Abatron).
- */
-extern void set_context(unsigned long contextid, pgd_t *pgd);
-
-/*
- * Bitmap of contexts in use.
- * The size of this bitmap is LAST_CONTEXT + 1 bits.
- */
-extern unsigned long context_map[];
-
-/*
- * This caches the next context number that we expect to be free.
- * Its use is an optimization only, we can't rely on this context
- * number to be free, but it usually will be.
- */
-extern unsigned long next_mmu_context;
-
-/*
- * If we don't have sufficient contexts to give one to every task
- * that could be in the system, we need to be able to steal contexts.
- * These variables support that.
- */
-#if LAST_CONTEXT < 30000
-#define FEW_CONTEXTS	1
-extern atomic_t nr_free_contexts;
-extern struct mm_struct *context_mm[LAST_CONTEXT+1];
-extern void steal_context(void);
-#endif
-
-/*
- * Get a new mmu context for the address space described by `mm'.
- */
-static inline void get_mmu_context(struct mm_struct *mm)
-{
-	unsigned long ctx;
-
-	if (mm->context.id != NO_CONTEXT)
-		return;
-#ifdef FEW_CONTEXTS
-	while (atomic_dec_if_positive(&nr_free_contexts) < 0)
-		steal_context();
-#endif
-	ctx = next_mmu_context;
-	while (test_and_set_bit(ctx, context_map)) {
-		ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
-		if (ctx > LAST_CONTEXT)
-			ctx = 0;
-	}
-	next_mmu_context = (ctx + 1) & LAST_CONTEXT;
-	mm->context.id = ctx;
-#ifdef FEW_CONTEXTS
-	context_mm[ctx] = mm;
-#endif
-}
-
-/*
- * Set up the context for a new address space.
- */
-static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
-{
-	mm->context.id = NO_CONTEXT;
-	return 0;
-}
-
-/*
- * We're finished using the context for an address space.
- */
-static inline void destroy_context(struct mm_struct *mm)
-{
-	preempt_disable();
-	if (mm->context.id != NO_CONTEXT) {
-		clear_bit(mm->context.id, context_map);
-		mm->context.id = NO_CONTEXT;
-#ifdef FEW_CONTEXTS
-		atomic_inc(&nr_free_contexts);
-#endif
-	}
-	preempt_enable();
-}
-
-static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
-			     struct task_struct *tsk)
-{
-#ifdef CONFIG_ALTIVEC
-	if (cpu_has_feature(CPU_FTR_ALTIVEC))
-	asm volatile ("dssall;\n"
-#ifndef CONFIG_POWER4
-	     "sync;\n" /* G4 needs a sync here, G5 apparently not */
-#endif
-	     : : );
-#endif /* CONFIG_ALTIVEC */
-
-	tsk->thread.pgdir = next->pgd;
-
-	/* No need to flush userspace segments if the mm doesnt change */
-	if (prev == next)
-		return;
-
-	/* Setup new userspace context */
-	get_mmu_context(next);
-	set_context(next->context.id, next->pgd);
-}
-
-#define deactivate_mm(tsk,mm)	do { } while (0)
-
-/*
- * After we have set current->mm to a new value, this activates
- * the context for the new mm so we see the new mappings.
- */
-#define activate_mm(active_mm, mm)   switch_mm(active_mm, mm, current)
-
+extern void mmu_context_init(void);
-
-
-#else
-
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/sched.h>
-
-/*
- * Copyright (C) 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-static inline void enter_lazy_tlb(struct mm_struct *mm,
-				  struct task_struct *tsk)
-{
-}
-
-/*
- * The proto-VSID space has 2^35 - 1 segments available for user mappings.
- * Each segment contains 2^28 bytes.  Each context maps 2^44 bytes,
- * so we can support 2^19-1 contexts (19 == 35 + 28 - 44).
- */
-#define NO_CONTEXT	0
-#define MAX_CONTEXT	((1UL << 19) - 1)
-
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
+extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
 extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
 extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
+extern void set_context(unsigned long id, pgd_t *pgd);
 
 /*
  * switch_mm is the entry point called from the architecture independent
@@ -241,22 +30,39 @@ extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 			     struct task_struct *tsk)
 {
-	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
-		cpu_set(smp_processor_id(), next->cpu_vm_mask);
+	/* Mark this context has been used on the new CPU */
+	cpu_set(smp_processor_id(), next->cpu_vm_mask);
+
+	/* 32-bit keeps track of the current PGDIR in the thread struct */
+#ifdef CONFIG_PPC32
+	tsk->thread.pgdir = next->pgd;
+#endif /* CONFIG_PPC32 */
 
-	/* No need to flush userspace segments if the mm doesnt change */
+	/* Nothing else to do if we aren't actually switching */
 	if (prev == next)
 		return;
 
+	/* We must stop all altivec streams before changing the HW
+	 * context
+	 */
 #ifdef CONFIG_ALTIVEC
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		asm volatile ("dssall");
 #endif /* CONFIG_ALTIVEC */
 
+	/* The actual HW switching method differs between the various
+	 * sub architectures.
+	 */
+#ifdef CONFIG_PPC_STD_MMU_64
 	if (cpu_has_feature(CPU_FTR_SLB))
 		switch_slb(tsk, next);
 	else
 		switch_stab(tsk, next);
+#else
+	/* Out of line for now */
+	switch_mmu_context(prev, next);
+#endif
+
 }
 
 #define deactivate_mm(tsk,mm)	do { } while (0)
@@ -274,6 +80,11 @@ static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 	local_irq_restore(flags);
 }
 
-#endif /* CONFIG_PPC64 */
+/* We don't currently use enter_lazy_tlb() for anything */
|
||||
static inline void enter_lazy_tlb(struct mm_struct *mm,
|
||||
struct task_struct *tsk)
|
||||
{
|
||||
}
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */
|
||||
|
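The 32-bit get_mmu_context() above allocates context numbers round-robin out of a bitmap: start the search at next_mmu_context, atomically claim the first free bit, and wrap after LAST_CONTEXT. A minimal single-threaded userspace sketch of that search, assuming LAST_CTX is a power of two minus one; test_and_set() and find_zero() stand in for the kernel's atomic test_and_set_bit() and find_next_zero_bit(), the FEW_CONTEXTS stealing path is omitted (so a free context must exist), and all names here are illustrative:

#include <limits.h>
#include <stdio.h>

#define LAST_CTX 15UL   /* power of two minus one, like LAST_CONTEXT */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

static unsigned long ctx_map[(LAST_CTX + BITS_PER_LONG) / BITS_PER_LONG];
static unsigned long next_ctx = 1;      /* like FIRST_CONTEXT */

/* single-threaded stand-in for the kernel's atomic test_and_set_bit() */
static int test_and_set(unsigned long nr, unsigned long *map)
{
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long old = map[nr / BITS_PER_LONG];

        map[nr / BITS_PER_LONG] |= mask;
        return (old & mask) != 0;
}

/* linear stand-in for find_next_zero_bit(); returns size if none found */
static unsigned long find_zero(const unsigned long *map,
                               unsigned long size, unsigned long from)
{
        while (from < size &&
               (map[from / BITS_PER_LONG] & (1UL << (from % BITS_PER_LONG))))
                from++;
        return from;
}

static unsigned long get_ctx(void)
{
        unsigned long ctx = next_ctx;

        while (test_and_set(ctx, ctx_map)) {
                ctx = find_zero(ctx_map, LAST_CTX + 1, ctx);
                if (ctx > LAST_CTX)
                        ctx = 0;        /* wrap and rescan, as above */
        }
        next_ctx = (ctx + 1) & LAST_CTX;
        return ctx;
}

int main(void)
{
        ctx_map[0] |= 1UL;      /* reserve context 0, mirroring FIRST_CONTEXT */
        for (int i = 0; i < 5; i++)
                printf("allocated context %lu\n", get_ctx());
        return 0;
}
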
@@ -239,6 +239,25 @@ struct mpc52xx_cdm {
        u16 mclken_div_psc6;    /* CDM + 0x36  reg13 byte2,3 */
};

/* Interrupt controller register set */
struct mpc52xx_intr {
        u32 per_mask;           /* INTR + 0x00 */
        u32 per_pri1;           /* INTR + 0x04 */
        u32 per_pri2;           /* INTR + 0x08 */
        u32 per_pri3;           /* INTR + 0x0c */
        u32 ctrl;               /* INTR + 0x10 */
        u32 main_mask;          /* INTR + 0x14 */
        u32 main_pri1;          /* INTR + 0x18 */
        u32 main_pri2;          /* INTR + 0x1c */
        u32 reserved1;          /* INTR + 0x20 */
        u32 enc_status;         /* INTR + 0x24 */
        u32 crit_status;        /* INTR + 0x28 */
        u32 main_status;        /* INTR + 0x2c */
        u32 per_status;         /* INTR + 0x30 */
        u32 reserved2;          /* INTR + 0x34 */
        u32 per_error;          /* INTR + 0x38 */
};

#endif /* __ASSEMBLY__ */

@@ -68,12 +68,20 @@
#define MPC52xx_PSC_IMR_ORERR           0x1000
#define MPC52xx_PSC_IMR_IPC             0x8000

/* PSC input port change bit */
/* PSC input port change bits */
#define MPC52xx_PSC_CTS                 0x01
#define MPC52xx_PSC_DCD                 0x02
#define MPC52xx_PSC_D_CTS               0x10
#define MPC52xx_PSC_D_DCD               0x20

/* PSC acr bits */
#define MPC52xx_PSC_IEC_CTS             0x01
#define MPC52xx_PSC_IEC_DCD             0x02

/* PSC output port bits */
#define MPC52xx_PSC_OP_RTS              0x01
#define MPC52xx_PSC_OP_RES              0x02

/* PSC mode fields */
#define MPC52xx_PSC_MODE_5_BITS         0x00
#define MPC52xx_PSC_MODE_6_BITS         0x01
@@ -91,6 +99,7 @@
#define MPC52xx_PSC_MODE_ONE_STOP_5_BITS        0x00
#define MPC52xx_PSC_MODE_ONE_STOP               0x07
#define MPC52xx_PSC_MODE_TWO_STOP               0x0f
#define MPC52xx_PSC_MODE_TXCTS                  0x10

#define MPC52xx_PSC_RFNUM_MASK          0x01ff

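The PSC mode values above are bitfield encodings, so independent settings are ORed into a single mode-register write. A tiny check of that composition (pairing the stop-bit field with the new TXCTS enable in one register value is an assumption about the usual MPC52xx PSC layout, not something this hunk states):

#include <assert.h>
#include <stdint.h>

#define MPC52xx_PSC_MODE_ONE_STOP       0x07
#define MPC52xx_PSC_MODE_TXCTS          0x10

int main(void)
{
        /* the stop-bit field and the TXCTS enable occupy disjoint bits,
         * so both can be set with one OR */
        uint8_t mode = MPC52xx_PSC_MODE_ONE_STOP | MPC52xx_PSC_MODE_TXCTS;

        assert(mode == 0x17);
        return 0;
}
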
@@ -1,9 +1,134 @@
/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 * Optimised mutex implementation of include/asm-generic/mutex-dec.h algorithm
 */
#ifndef _ASM_POWERPC_MUTEX_H
#define _ASM_POWERPC_MUTEX_H

#include <asm-generic/mutex-dec.h>
static inline int __mutex_cmpxchg_lock(atomic_t *v, int old, int new)
{
        int t;

        __asm__ __volatile__ (
"1:     lwarx   %0,0,%1         # mutex trylock\n\
        cmpw    0,%0,%2\n\
        bne-    2f\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %3,0,%1\n\
        bne-    1b"
        ISYNC_ON_SMP
        "\n\
2:"
        : "=&r" (t)
        : "r" (&v->counter), "r" (old), "r" (new)
        : "cc", "memory");

        return t;
}

static inline int __mutex_dec_return_lock(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
"1:     lwarx   %0,0,%1         # mutex lock\n\
        addic   %0,%0,-1\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %0,0,%1\n\
        bne-    1b"
        ISYNC_ON_SMP
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}

static inline int __mutex_inc_return_unlock(atomic_t *v)
{
        int t;

        __asm__ __volatile__(
        LWSYNC_ON_SMP
"1:     lwarx   %0,0,%1         # mutex unlock\n\
        addic   %0,%0,1\n"
        PPC405_ERR77(0,%1)
"       stwcx.  %0,0,%1\n\
        bne-    1b"
        : "=&r" (t)
        : "r" (&v->counter)
        : "cc", "memory");

        return t;
}

/**
 * __mutex_fastpath_lock - try to take the lock by moving the count
 *                         from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function MUST leave the value lower than
 * 1 even when the "1" assertion wasn't true.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        if (unlikely(__mutex_dec_return_lock(count) < 0))
                fail_fn(count);
}

/**
 * __mutex_fastpath_lock_retval - try to take the lock by moving the count
 *                                from 1 to a 0 value
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 1
 *
 * Change the count from 1 to a value lower than 1, and call <fail_fn> if
 * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
 * or anything the slow path function returns.
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        if (unlikely(__mutex_dec_return_lock(count) < 0))
                return fail_fn(count);
        return 0;
}

/**
 * __mutex_fastpath_unlock - try to promote the count from 0 to 1
 * @count: pointer of type atomic_t
 * @fail_fn: function to call if the original value was not 0
 *
 * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
 * In the failure case, this function is allowed to either set the value to
 * 1, or to set it to a value lower than 1.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
        if (unlikely(__mutex_inc_return_unlock(count) <= 0))
                fail_fn(count);
}

#define __mutex_slowpath_needs_to_unlock()      1

/**
 * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
 * @count: pointer of type atomic_t
 * @fail_fn: fallback function
 *
 * Change the count from 1 to 0, and return 1 (success), or if the count
 * was not 1, then return 0 (failure).
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
        if (likely(__mutex_cmpxchg_lock(count, 1, 0) == 1))
                return 1;
        return 0;
}

#endif

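The lwarx/stwcx. loops above are PowerPC's load-reserve/store-conditional form of atomic read-modify-write; ISYNC_ON_SMP gives acquire semantics on the lock side and LWSYNC_ON_SMP gives release semantics on the unlock side. A portable C11 sketch of just the trylock/unlock protocol they implement (count 1 = unlocked, 0 = locked; the contended slow path is omitted and all names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

static bool fastpath_trylock(atomic_int *count)
{
        int expected = 1;

        /* like __mutex_cmpxchg_lock(count, 1, 0): succeed only if the
         * mutex is free; acquire pairs with the release in unlock */
        return atomic_compare_exchange_strong_explicit(
                        count, &expected, 0,
                        memory_order_acquire, memory_order_relaxed);
}

static void fastpath_unlock(atomic_int *count)
{
        /* release ordering plays the role LWSYNC_ON_SMP plays above */
        atomic_store_explicit(count, 1, memory_order_release);
}

int main(void)
{
        static atomic_int lock = 1;

        if (fastpath_trylock(&lock)) {
                /* critical section */
                fastpath_unlock(&lock);
        }
        return 0;
}
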
@@ -19,12 +19,15 @@
#include <asm/kdump.h>

/*
 * On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K pages
 * on PPC44x). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages, however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#ifdef CONFIG_PPC_64K_PAGES
#if defined(CONFIG_PPC_64K_PAGES)
#define PAGE_SHIFT              16
#elif defined(CONFIG_PPC_16K_PAGES)
#define PAGE_SHIFT              14
#else
#define PAGE_SHIFT              12
#endif
@@ -151,7 +154,7 @@ typedef struct { pte_basic_t pte; } pte_t;
/* 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#ifdef CONFIG_PPC_64K_PAGES
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
@@ -191,10 +194,10 @@ typedef pte_basic_t pte_t;
#define pte_val(x)      (x)
#define __pte(x)        (x)

#ifdef CONFIG_PPC_64K_PAGES
#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC_STD_MMU_64)
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef unsigned long real_pte_t;
typedef pte_t real_pte_t;
#endif

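With the three-way PAGE_SHIFT selection above, all the derived page constants follow mechanically. A compile-time check of the new 16K case, assuming only a C11 compiler (nothing here is kernel API):

#define PAGE_SHIFT      14                      /* the CONFIG_PPC_16K_PAGES case */
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))

_Static_assert(PAGE_SIZE == 16384, "16K base page size");
_Static_assert((0x12345678UL & PAGE_MASK) == 0x12344000UL,
               "PAGE_MASK drops the in-page offset");

int main(void) { return 0; }
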
@@ -19,6 +19,8 @@
#define PTE_FLAGS_OFFSET        0
#endif

#define PTE_SHIFT       (PAGE_SHIFT - PTE_T_LOG2)       /* full page */

#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
@@ -26,10 +28,8 @@
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT       (PAGE_SHIFT - 3)        /* 512 ptes per page */
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT       (PAGE_SHIFT - 2)        /* 1024 ptes per page */
#endif

struct page;
@@ -39,6 +39,9 @@ extern void copy_page(void *to, void *from);

#include <asm-generic/page.h>

#define PGD_T_LOG2      (__builtin_ffs(sizeof(pgd_t)) - 1)
#define PTE_T_LOG2      (__builtin_ffs(sizeof(pte_t)) - 1)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_32_H */

@@ -13,7 +13,6 @@

struct device_node;

extern unsigned int ppc_pci_flags;
enum {
        /* Force re-assigning all resources (ignore firmware
         * setup completely)
@@ -36,6 +35,31 @@ enum {
        /* ... except for domain 0 */
        PPC_PCI_COMPAT_DOMAIN_0 = 0x00000020,
};
#ifdef CONFIG_PCI
extern unsigned int ppc_pci_flags;

static inline void ppc_pci_set_flags(int flags)
{
        ppc_pci_flags = flags;
}

static inline void ppc_pci_add_flags(int flags)
{
        ppc_pci_flags |= flags;
}

static inline int ppc_pci_has_flag(int flag)
{
        return (ppc_pci_flags & flag);
}
#else
static inline void ppc_pci_set_flags(int flags) { }
static inline void ppc_pci_add_flags(int flags) { }
static inline int ppc_pci_has_flag(int flag)
{
        return 0;
}
#endif


/*
@@ -241,9 +265,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus);

/** Discover new pci devices under this bus, and add them */
extern void pcibios_add_pci_devices(struct pci_bus *bus);
extern void pcibios_fixup_new_pci_devices(struct pci_bus *bus);

extern int pcibios_remove_root_bus(struct pci_controller *phb);

static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus)
{
@@ -290,6 +311,7 @@ extern void pci_process_bridge_OF_ranges(struct pci_controller *hose,
/* Allocate & free a PCI host bridge structure */
extern struct pci_controller *pcibios_alloc_controller(struct device_node *dev);
extern void pcibios_free_controller(struct pci_controller *phb);
extern void pcibios_setup_phb_resources(struct pci_controller *hose);

#ifdef CONFIG_PCI
extern unsigned long pci_address_to_pio(phys_addr_t address);

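With the accessors above, platform code manipulates ppc_pci_flags only through ppc_pci_set_flags()/ppc_pci_add_flags()/ppc_pci_has_flag(), and the !CONFIG_PCI stubs let the same callers compile away to nothing. A hypothetical caller, as a fragment rather than a standalone program (the function name is illustrative; the two flag names are the ones declared in this header):

static void board_pci_setup(void)       /* hypothetical platform hook */
{
        /* ask for full bus renumbering, then test a behaviour flag later */
        ppc_pci_add_flags(PPC_PCI_REASSIGN_ALL_BUS);

        if (ppc_pci_has_flag(PPC_PCI_COMPAT_DOMAIN_0)) {
                /* keep domain 0 unnumbered for compatibility */
        }
}
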
@@ -38,8 +38,8 @@ struct pci_dev;
 * Set this to 1 if you want the kernel to re-assign all PCI
 * bus numbers (don't do that on ppc64 yet !)
 */
#define pcibios_assign_all_busses()     (ppc_pci_flags & \
                                         PPC_PCI_REASSIGN_ALL_BUS)
#define pcibios_assign_all_busses() \
        (ppc_pci_has_flag(PPC_PCI_REASSIGN_ALL_BUS))
#define pcibios_scan_all_fns(a, b)      0

static inline void pcibios_set_master(struct pci_dev *dev)
@@ -204,15 +204,14 @@ static inline struct resource *pcibios_select_root(struct pci_dev *pdev,
        return root;
}

extern void pcibios_setup_new_device(struct pci_dev *dev);

extern void pcibios_claim_one_bus(struct pci_bus *b);

extern void pcibios_allocate_bus_resources(struct pci_bus *bus);
extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);

extern void pcibios_resource_survey(void);

extern struct pci_controller *init_phb_dynamic(struct device_node *dn);
extern int remove_phb_dynamic(struct pci_controller *phb);

extern struct pci_dev *of_create_pci_dev(struct device_node *node,
                                         struct pci_bus *bus, int devfn);
@@ -221,6 +220,7 @@ extern void of_scan_pci_bridge(struct device_node *node,
                               struct pci_dev *dev);

extern void of_scan_bus(struct device_node *node, struct pci_bus *bus);
extern void of_rescan_bus(struct device_node *node, struct pci_bus *bus);

extern int pci_read_irq_line(struct pci_dev *dev);

@@ -235,9 +235,8 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
                                 const struct resource *rsrc,
                                 resource_size_t *start, resource_size_t *end);

extern void pcibios_do_bus_setup(struct pci_bus *bus);
extern void pcibios_fixup_of_probed_bus(struct pci_bus *bus);

extern void pcibios_setup_bus_devices(struct pci_bus *bus);
extern void pcibios_setup_bus_self(struct pci_bus *bus);

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_PCI_H */

@@ -3,6 +3,8 @@

#include <linux/threads.h>

#define PTE_NONCACHE_NUM        0  /* dummy for now to share code w/ppc64 */

extern void __bad_pte(pmd_t *pmd);

extern pgd_t *pgd_alloc(struct mm_struct *mm);
@@ -33,10 +35,13 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);
extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
extern void pte_free(struct mm_struct *mm, pgtable_t pte);

#define __pte_free_tlb(tlb, pte)        pte_free((tlb)->mm, (pte))
static inline void pgtable_free(pgtable_free_t pgf)
{
        void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);

        free_page((unsigned long)p);
}

#define check_pgt_cache()       do { } while (0)

@@ -7,7 +7,6 @@
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
@@ -108,31 +107,6 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
        return page;
}

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pgtable_page_dtor(ptepage);
        __free_page(ptepage);
}

#define PGF_CACHENUM_MASK       0x7

typedef struct pgtable_free {
        unsigned long val;
} pgtable_free_t;

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
                                                unsigned long mask)
{
        BUG_ON(cachenum > PGF_CACHENUM_MASK);

        return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

static inline void pgtable_free(pgtable_free_t pgf)
{
        void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
@@ -144,14 +118,6 @@ static inline void pgtable_free(pgtable_free_t pgf)
        kmem_cache_free(pgtable_cache[cachenum], p);
}

extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

#define __pte_free_tlb(tlb,ptepage)     \
do { \
        pgtable_page_dtor(ptepage); \
        pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
                PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#define __pmd_free_tlb(tlb, pmd)        \
        pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
                PMD_CACHE_NUM, PMD_TABLE_SIZE-1))

@@ -2,11 +2,52 @@
#define _ASM_POWERPC_PGALLOC_H
#ifdef __KERNEL__

#include <linux/mm.h>

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
        pgtable_page_dtor(ptepage);
        __free_page(ptepage);
}

typedef struct pgtable_free {
        unsigned long val;
} pgtable_free_t;

#define PGF_CACHENUM_MASK       0x7

static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
                                                unsigned long mask)
{
        BUG_ON(cachenum > PGF_CACHENUM_MASK);

        return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
}

#ifdef CONFIG_PPC64
#include <asm/pgalloc-64.h>
#else
#include <asm/pgalloc-32.h>
#endif

extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

#ifdef CONFIG_SMP
#define __pte_free_tlb(tlb,ptepage)     \
do { \
        pgtable_page_dtor(ptepage); \
        pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
                PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#else
#define __pte_free_tlb(tlb, pte)        pte_free((tlb)->mm, (pte))
#endif


#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PGALLOC_H */

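pgtable_free_cache() above packs a small cache index into the low, alignment-guaranteed bits of a page-table pointer, so a single unsigned long can carry both through a deferred (batched) TLB flush. A userspace demonstration of the same encode/decode round trip, assuming page-aligned allocations; the names are illustrative:

#include <assert.h>
#include <stdlib.h>

#define CACHENUM_MASK   0x7UL

static unsigned long pack(void *p, unsigned long cachenum)
{
        assert(((unsigned long)p & CACHENUM_MASK) == 0);  /* needs alignment */
        return (unsigned long)p | cachenum;
}

int main(void)
{
        void *pt = aligned_alloc(4096, 4096);   /* stand-in page table */
        unsigned long packed;

        if (!pt)
                return 1;
        packed = pack(pt, 2);
        assert((void *)(packed & ~CACHENUM_MASK) == pt);  /* pointer survives */
        assert((packed & CACHENUM_MASK) == 2);            /* so does the index */
        free(pt);
        return 0;
}
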
@@ -228,9 +228,10 @@ extern int icache_44x_need_flush;
 * - FILE *must* be in the bottom three bits because swap cache
 *   entries use the top 29 bits for TLB2.
 *
 * - CACHE COHERENT bit (M) has no effect on PPC440 core, because it
 *   doesn't support SMP. So we can use this as software bit, like
 *   DIRTY.
 * - CACHE COHERENT bit (M) has no effect on original PPC440 cores,
 *   because they don't support SMP. However, some later 460 variants
 *   have -some- form of SMP support and so I keep the bit there for
 *   future use.
 *
 * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used
 * for memory protection related functions (see PTE structure in
@@ -436,20 +437,23 @@ extern int icache_44x_need_flush;
                         _PAGE_USER | _PAGE_ACCESSED | \
                         _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | \
                         _PAGE_EXEC | _PAGE_HWEXEC)
/*
 * Note: the _PAGE_COHERENT bit automatically gets set in the hardware
 * PTE if CONFIG_SMP is defined (hash_page does this); there is no need
 * to have it in the Linux PTE, and in fact the bit could be reused for
 * another purpose. -- paulus.
 */

#ifdef CONFIG_44x
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_GUARDED)
/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_PPC_STD_MMU)
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_COHERENT)
#else
#define _PAGE_BASE      (_PAGE_PRESENT | _PAGE_ACCESSED)
#endif
#define _PAGE_BASE_NC   (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_NO_CACHE)

#define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY | _PAGE_HWWRITE)
#define _PAGE_KERNEL    (_PAGE_BASE | _PAGE_SHARED | _PAGE_WRENABLE)
#define _PAGE_KERNEL_NC (_PAGE_BASE_NC | _PAGE_SHARED | _PAGE_WRENABLE)

#ifdef CONFIG_PPC_STD_MMU
/* On standard PPC MMU, no user access implies kernel read/write access,
@@ -459,7 +463,7 @@ extern int icache_44x_need_flush;
#define _PAGE_KERNEL_RO (_PAGE_BASE | _PAGE_SHARED)
#endif

#define _PAGE_IO        (_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define _PAGE_IO        (_PAGE_KERNEL_NC | _PAGE_GUARDED)
#define _PAGE_RAM       (_PAGE_KERNEL | _PAGE_HWEXEC)

#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
@@ -552,9 +556,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE; }
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_wrprotect(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_RW | _PAGE_HWWRITE); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
@@ -693,10 +694,11 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
#endif
}


static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
                              pte_t *ptep, pte_t pte)
{
#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
        WARN_ON(pte_present(*ptep));
#endif
        __set_pte_at(mm, addr, ptep, pte);
@@ -760,16 +762,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
        __changed; \
})

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

@@ -100,7 +100,7 @@

#define _PAGE_WRENABLE  (_PAGE_RW | _PAGE_DIRTY)

/* __pgprot defined in arch/powerpc/incliude/asm/page.h */
/* __pgprot defined in arch/powerpc/include/asm/page.h */
#define PAGE_NONE       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)

#define PAGE_SHARED     __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER)
@@ -245,9 +245,6 @@ static inline int pte_young(pte_t pte) { return pte_val(pte) & _PAGE_ACCESSED;}
static inline int pte_file(pte_t pte) { return pte_val(pte) & _PAGE_FILE;}
static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; }

static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte)   { pte_val(pte) &= ~_PAGE_NO_CACHE; }

static inline pte_t pte_wrprotect(pte_t pte) {
        pte_val(pte) &= ~(_PAGE_RW); return pte; }
static inline pte_t pte_mkclean(pte_t pte) {
@@ -405,16 +402,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
        __changed; \
})

/*
 * Macro to mark a page protection value as "uncacheable".
 */
#define pgprot_noncached(prot)  (__pgprot(pgprot_val(prot) | _PAGE_NO_CACHE | _PAGE_GUARDED))

struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)   (((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)

@@ -16,6 +16,32 @@ struct mm_struct;
#endif

#ifndef __ASSEMBLY__

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
                         _PAGE_WRITETHRU)

#define pgprot_noncached(prot)    (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_NO_CACHE))

#define pgprot_cached(prot)       (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT))

#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
                                            _PAGE_COHERENT | _PAGE_WRITETHRU))


struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot);
#define __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas, etc.

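Each pgprot_*() macro above first clears the entire _PAGE_CACHE_CTL group and only then sets the attributes it wants, so stale cacheability bits can never leak through from the incoming protection value. The same mask-then-set idiom reduced to plain integers (the bit values below are illustrative, not the real PTE bits):

#include <assert.h>

enum {
        COHERENT  = 1 << 0,
        GUARDED   = 1 << 1,
        NO_CACHE  = 1 << 2,
        WRITETHRU = 1 << 3,
};
#define CACHE_CTL (COHERENT | GUARDED | NO_CACHE | WRITETHRU)

static unsigned long make_noncached(unsigned long prot)
{
        /* clear the whole group first, then set exactly what we want */
        return (prot & ~CACHE_CTL) | NO_CACHE | GUARDED;
}

int main(void)
{
        /* starting from a cached, write-through mapping... */
        unsigned long prot = COHERENT | WRITETHRU;

        /* ...noncached replaces the group rather than just adding bits */
        assert(make_noncached(prot) == (NO_CACHE | GUARDED));
        return 0;
}
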
@@ -425,14 +425,14 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define fromreal(rd)    tovirt(rd,rd)

#define tophys(rd,rs)                           \
0:      addis   rd,rs,-KERNELBASE@h;            \
0:      addis   rd,rs,-PAGE_OFFSET@h;           \
        .section ".vtop_fixup","aw";            \
        .align  1;                              \
        .long   0b;                             \
        .previous

#define tovirt(rd,rs)                           \
0:      addis   rd,rs,KERNELBASE@h;             \
0:      addis   rd,rs,PAGE_OFFSET@h;            \
        .section ".ptov_fixup","aw";            \
        .align  1;                              \
        .long   0b;                             \

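tophys()/tovirt() above convert between kernel virtual and physical addresses by adjusting only the high 16 bits with addis (the low bits of the offset are zero); switching the constant from KERNELBASE to PAGE_OFFSET separates the linear-map base from the kernel's own link address. The underlying arithmetic, checked in userspace with an illustrative 32-bit linear-map base:

#include <assert.h>

#define MY_PAGE_OFFSET 0xc0000000UL     /* illustrative linear-map base */

static unsigned long to_phys(unsigned long va) { return va - MY_PAGE_OFFSET; }
static unsigned long to_virt(unsigned long pa) { return pa + MY_PAGE_OFFSET; }

int main(void)
{
        assert(to_phys(0xc0100000UL) == 0x00100000UL);
        assert(to_virt(to_phys(0xc0123456UL)) == 0xc0123456UL);
        return 0;
}
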
@@ -69,8 +69,6 @@ extern int _prep_type;

#ifdef __KERNEL__

extern int have_of;

struct task_struct;
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
void release_thread(struct task_struct *);
@@ -207,6 +205,11 @@ struct thread_struct {
#define INIT_SP_LIMIT \
        (_ALIGN_UP(sizeof(init_thread_info), 16) + (unsigned long) &init_stack)

#ifdef CONFIG_SPE
#define SPEFSCR_INIT .spefscr = SPEFSCR_FINVE | SPEFSCR_FDBZE | SPEFSCR_FUNFE | SPEFSCR_FOVFE,
#else
#define SPEFSCR_INIT
#endif

#ifdef CONFIG_PPC32
#define INIT_THREAD { \
@@ -215,6 +218,7 @@ struct thread_struct {
        .fs = KERNEL_DS, \
        .pgdir = swapper_pg_dir, \
        .fpexc_mode = MSR_FE0 | MSR_FE1, \
        SPEFSCR_INIT \
}
#else
#define INIT_THREAD  { \

@@ -253,6 +253,9 @@ extern void kdump_move_device_tree(void);
/* CPU OF node matching */
struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);

/* cache lookup */
struct device_node *of_find_next_cache_node(struct device_node *np);

/* Get the MAC address */
extern const void *of_get_mac_address(struct device_node *np);

@@ -305,30 +305,34 @@ static inline const char* ps3_result(int result)
/* system bus routines */

enum ps3_match_id {
        PS3_MATCH_ID_EHCI               = 1,
        PS3_MATCH_ID_OHCI               = 2,
        PS3_MATCH_ID_GELIC              = 3,
        PS3_MATCH_ID_AV_SETTINGS        = 4,
        PS3_MATCH_ID_SYSTEM_MANAGER     = 5,
        PS3_MATCH_ID_STOR_DISK          = 6,
        PS3_MATCH_ID_STOR_ROM           = 7,
        PS3_MATCH_ID_STOR_FLASH         = 8,
        PS3_MATCH_ID_SOUND              = 9,
        PS3_MATCH_ID_GRAPHICS           = 10,
        PS3_MATCH_ID_LPM                = 11,
        PS3_MATCH_ID_EHCI               = 1,
        PS3_MATCH_ID_OHCI               = 2,
        PS3_MATCH_ID_GELIC              = 3,
        PS3_MATCH_ID_AV_SETTINGS        = 4,
        PS3_MATCH_ID_SYSTEM_MANAGER     = 5,
        PS3_MATCH_ID_STOR_DISK          = 6,
        PS3_MATCH_ID_STOR_ROM           = 7,
        PS3_MATCH_ID_STOR_FLASH         = 8,
        PS3_MATCH_ID_SOUND              = 9,
        PS3_MATCH_ID_GPU                = 10,
        PS3_MATCH_ID_LPM                = 11,
};

#define PS3_MODULE_ALIAS_EHCI           "ps3:1"
#define PS3_MODULE_ALIAS_OHCI           "ps3:2"
#define PS3_MODULE_ALIAS_GELIC          "ps3:3"
#define PS3_MODULE_ALIAS_AV_SETTINGS    "ps3:4"
#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5"
#define PS3_MODULE_ALIAS_STOR_DISK      "ps3:6"
#define PS3_MODULE_ALIAS_STOR_ROM       "ps3:7"
#define PS3_MODULE_ALIAS_STOR_FLASH     "ps3:8"
#define PS3_MODULE_ALIAS_SOUND          "ps3:9"
#define PS3_MODULE_ALIAS_GRAPHICS       "ps3:10"
#define PS3_MODULE_ALIAS_LPM            "ps3:11"
enum ps3_match_sub_id {
        PS3_MATCH_SUB_ID_GPU_FB         = 1,
};

#define PS3_MODULE_ALIAS_EHCI           "ps3:1:0"
#define PS3_MODULE_ALIAS_OHCI           "ps3:2:0"
#define PS3_MODULE_ALIAS_GELIC          "ps3:3:0"
#define PS3_MODULE_ALIAS_AV_SETTINGS    "ps3:4:0"
#define PS3_MODULE_ALIAS_SYSTEM_MANAGER "ps3:5:0"
#define PS3_MODULE_ALIAS_STOR_DISK      "ps3:6:0"
#define PS3_MODULE_ALIAS_STOR_ROM       "ps3:7:0"
#define PS3_MODULE_ALIAS_STOR_FLASH     "ps3:8:0"
#define PS3_MODULE_ALIAS_SOUND          "ps3:9:0"
#define PS3_MODULE_ALIAS_GPU_FB         "ps3:10:1"
#define PS3_MODULE_ALIAS_LPM            "ps3:11:0"

enum ps3_system_bus_device_type {
        PS3_DEVICE_TYPE_IOC0 = 1,
@@ -337,11 +341,6 @@ enum ps3_system_bus_device_type {
        PS3_DEVICE_TYPE_LPM,
};

enum ps3_match_sub_id {
        /* for PS3_MATCH_ID_GRAPHICS */
        PS3_MATCH_SUB_ID_FB             = 1,
};

/**
 * struct ps3_system_bus_device - a device on the system bus
 */
@@ -516,4 +515,7 @@ void ps3_sync_irq(int node);
u32 ps3_get_hw_thread_id(int cpu);
u64 ps3_get_spe_id(void *arg);

/* mutex synchronizing GPU accesses and video mode changes */
extern struct mutex ps3_gpu_mutex;

#endif

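The module aliases above gain a sub-id component: every plain device now ends in ":0" while the GPU framebuffer, match id 10 with sub-id 1, exports "ps3:10:1". A trivial sketch of composing such an alias string from the two enum values (the snprintf use is illustrative, not the kernel's MODULE_ALIAS machinery):

#include <stdio.h>

int main(void)
{
        unsigned int match_id = 10, match_sub_id = 1;   /* GPU, GPU_FB */
        char alias[32];

        snprintf(alias, sizeof(alias), "ps3:%u:%u", match_id, match_sub_id);
        printf("%s\n", alias);          /* prints ps3:10:1 */
        return 0;
}
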
@@ -740,8 +740,4 @@ extern int ps3av_audio_mute(int);
extern int ps3av_audio_mute_analog(int);
extern int ps3av_dev_open(void);
extern int ps3av_dev_close(void);
extern void ps3av_register_flip_ctl(void (*flip_ctl)(int on, void *data),
                                    void *flip_data);
extern void ps3av_flip_ctl(int on);

#endif /* _ASM_POWERPC_PS3AV_H_ */

@@ -783,6 +783,10 @@ extern void scom970_write(unsigned int address, unsigned long value);
#define __get_SP()      ({unsigned long sp; \
                        asm volatile("mr %0,1": "=r" (sp)); sp;})

struct pt_regs;

extern void ppc_save_regs(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_REG_H */

@@ -168,6 +168,7 @@ extern void rtas_os_term(char *str);
extern int rtas_get_sensor(int sensor, int index, int *state);
extern int rtas_get_power_level(int powerdomain, int *level);
extern int rtas_set_power_level(int powerdomain, int level, int *setlevel);
extern bool rtas_indicator_present(int token, int *maxindex);
extern int rtas_set_indicator(int indicator, int index, int new_value);
extern int rtas_set_indicator_fast(int indicator, int index, int new_value);
extern void rtas_progress(char *s, unsigned short hex);

@@ -82,7 +82,7 @@
#define _FP_MUL_MEAT_S(R,X,Y)   _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
#define _FP_MUL_MEAT_D(R,X,Y)   _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)

#define _FP_DIV_MEAT_S(R,X,Y)   _FP_DIV_MEAT_1_udiv(S,R,X,Y)
#define _FP_DIV_MEAT_S(R,X,Y)   _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
#define _FP_DIV_MEAT_D(R,X,Y)   _FP_DIV_MEAT_2_udiv(D,R,X,Y)

/* These macros define what NaN looks like. They're supposed to expand to
@@ -97,6 +97,20 @@

#define _FP_KEEPNANFRACP 1

#ifdef FP_EX_BOOKE_E500_SPE
#define FP_EX_INEXACT           (1 << 21)
#define FP_EX_INVALID           (1 << 20)
#define FP_EX_DIVZERO           (1 << 19)
#define FP_EX_UNDERFLOW         (1 << 18)
#define FP_EX_OVERFLOW          (1 << 17)
#define FP_INHIBIT_RESULTS      0

#define __FPU_FPSCR     (current->thread.spefscr)
#define __FPU_ENABLED_EXC               \
({                                      \
        (__FPU_FPSCR >> 2) & 0x1f;      \
})
#else
/* Exception flags.  We use the bit positions of the appropriate bits
   in the FPSCR, which also correspond to the FE_* bits.  This makes
   everything easier ;-).  */
@@ -111,22 +125,6 @@
#define FP_EX_DIVZERO           (1 << (31 - 5))
#define FP_EX_INEXACT           (1 << (31 - 6))

/* This macro appears to be called when both X and Y are NaNs, and
 * has to choose one and copy it to R. i386 goes for the larger of the
 * two, sparc64 just picks Y. I don't understand this at all so I'll
 * go with sparc64 because it's shorter :-> -- PMM
 */
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)      \
do {                                            \
        R##_s = Y##_s;                          \
        _FP_FRAC_COPY_##wc(R,Y);                \
        R##_c = FP_CLS_NAN;                     \
} while (0)


#include <linux/kernel.h>
#include <linux/sched.h>

#define __FPU_FPSCR     (current->thread.fpscr.val)

/* We only actually write to the destination register
@@ -137,6 +135,32 @@
        (__FPU_FPSCR >> 3) & 0x1f;      \
})

#endif

/*
 * If one NaN is signaling and the other is not,
 * we choose that one, otherwise we choose X.
 */
#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP)                      \
do {                                                            \
        if ((_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)      \
            && !(_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs)) \
        {                                                       \
                R##_s = X##_s;                                  \
                _FP_FRAC_COPY_##wc(R,X);                        \
        }                                                       \
        else                                                    \
        {                                                       \
                R##_s = Y##_s;                                  \
                _FP_FRAC_COPY_##wc(R,Y);                        \
        }                                                       \
        R##_c = FP_CLS_NAN;                                     \
} while (0)


#include <linux/kernel.h>
#include <linux/sched.h>

#define __FPU_TRAP_P(bits)      \
        ((__FPU_ENABLED_EXC & (bits)) != 0)

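The replacement _FP_CHOOSENAN above picks the signaling NaN whenever exactly one operand is signaling, instead of blindly propagating Y. Its decision mirrored for IEEE binary32 in a userspace sketch (the quiet bit is the top fraction bit; the constants below are standard binary32 encodings):

#include <assert.h>
#include <stdint.h>

#define QNAN_BIT 0x00400000u    /* binary32 quiet-NaN (top fraction) bit */

static uint32_t choose_nan(uint32_t x, uint32_t y)
{
        /* exactly the macro's test: take X only when Y is quiet and X
         * signals; in every other case take Y */
        if ((y & QNAN_BIT) && !(x & QNAN_BIT))
                return x;
        return y;
}

int main(void)
{
        uint32_t qnan = 0x7fc00001u, snan = 0x7f800001u;

        assert(choose_nan(snan, qnan) == snan);  /* only X signals -> X */
        assert(choose_nan(qnan, snan) == snan);  /* Y signals -> Y */
        assert(choose_nan(qnan, qnan) == qnan);  /* both quiet -> Y */
        return 0;
}
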
@@ -81,6 +81,13 @@ extern int cpu_to_core_id(int cpu);
#define PPC_MSG_CALL_FUNC_SINGLE 2
#define PPC_MSG_DEBUGGER_BREAK  3

/*
 * IRQ controllers that have dedicated IPIs per message and don't
 * need additional code in the action handler may use this.
 */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];

void smp_init_iSeries(void);
void smp_init_pSeries(void);
void smp_init_cell(void);

@@ -277,7 +277,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
        bne-    1b"
        : "=&r"(tmp)
        : "r"(&rw->lock)
        : "cr0", "memory");
        : "cr0", "xer", "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)

@@ -5,6 +5,10 @@
#include <linux/stringify.h>
#include <asm/feature-fixups.h>

#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
#define __SUBARCH_HAS_LWSYNC
#endif

#ifndef __ASSEMBLY__
extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
extern void do_lwsync_fixups(unsigned long value, void *fixup_start,

@@ -23,15 +23,17 @@
 * read_barrier_depends() prevents data-dependent loads being reordered
 * across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores. Lwsync is fine for
 * rmb(), though. Note that rmb() actually uses a sync on 32-bit
 * architectures.
 * *mb() variants without smp_ prefix must order all types of memory
 * operations with one another. sync is the only instruction sufficient
 * to do this.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight lwsync or eieio barrier
 * on SMP since it is only used to order updates to system memory.
 * For the smp_ barriers, ordering is for cacheable memory operations
 * only. We have to use the sync instruction for smp_mb(), since lwsync
 * doesn't order loads with respect to previous stores. Lwsync can be
 * used for smp_rmb() and smp_wmb().
 *
 * However, on CPUs that don't support lwsync, lwsync actually maps to a
 * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio.
 */
#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
@@ -45,14 +47,14 @@
#ifdef CONFIG_SMP

#ifdef __SUBARCH_HAS_LWSYNC
#    define SMPWMB      lwsync
#    define SMPWMB      LWSYNC
#else
#    define SMPWMB      eieio
#endif

#define smp_mb()        mb()
#define smp_rmb()       rmb()
#define smp_wmb()       __asm__ __volatile__ (__stringify(SMPWMB) : : :"memory")
#define smp_rmb()       __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory")
#define smp_wmb()       __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory")
#define smp_read_barrier_depends()      read_barrier_depends()
#else
#define smp_mb()        barrier()

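The rewritten comment above fixes the mapping: the full-strength mb()/rmb()/wmb() need sync, while smp_rmb()/smp_wmb() may use lwsync because they only order cacheable memory operations. The classic pattern those smp_ barriers exist for is store-payload/store-flag against load-flag/load-payload; in portable C11 terms lwsync's role here corresponds to release/acquire, sketched below:

#include <stdatomic.h>

static int payload;             /* plain data, published via the flag */
static atomic_int ready;

static void producer(void)
{
        payload = 42;
        /* smp_wmb() territory: lwsync orders the payload store first */
        atomic_store_explicit(&ready, 1, memory_order_release);
}

static int consumer(void)
{
        /* smp_rmb() territory: lwsync after the flag load */
        while (!atomic_load_explicit(&ready, memory_order_acquire))
                ;
        return payload;         /* guaranteed to observe 42 */
}

int main(void)
{
        producer();
        return consumer() == 42 ? 0 : 1;
}
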
@@ -48,26 +48,6 @@ extern unsigned long ppc_proc_freq;
extern unsigned long ppc_tb_freq;
#define DEFAULT_TB_FREQ         125000000UL

/*
 * By putting all of this stuff into a single struct we
 * reduce the number of cache lines touched by do_gettimeofday.
 * Both by collecting all of the data in one cache line and
 * by touching only one TOC entry on ppc64.
 */
struct gettimeofday_vars {
        u64 tb_to_xs;
        u64 stamp_xsec;
        u64 tb_orig_stamp;
};

struct gettimeofday_struct {
        unsigned long tb_ticks_per_sec;
        struct gettimeofday_vars vars[2];
        struct gettimeofday_vars * volatile varp;
        unsigned var_idx;
        unsigned tb_to_us;
};

struct div_result {
        u64 result_high;
        u64 result_low;

@@ -6,6 +6,9 @@
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - local_flush_tlb_mm(mm) flushes the specified mm context on
 *                           the local processor
 *  - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor
 *  - flush_tlb_page_nohash(vma, vmaddr) flushes one page if SW loaded TLB
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
@@ -17,7 +20,7 @@
 */
#ifdef __KERNEL__

#if defined(CONFIG_4xx) || defined(CONFIG_8xx) || defined(CONFIG_FSL_BOOKE)
#ifdef CONFIG_PPC_MMU_NOHASH
/*
 * TLB flushing for software loaded TLB chips
 *
@@ -28,63 +31,49 @@

#include <linux/mm.h>

extern void _tlbie(unsigned long address, unsigned int pid);
extern void _tlbil_all(void);
extern void _tlbil_pid(unsigned int pid);
extern void _tlbil_va(unsigned long address, unsigned int pid);
#define MMU_NO_CONTEXT          ((unsigned int)-1)

#if defined(CONFIG_40x) || defined(CONFIG_8xx)
#define _tlbia()        asm volatile ("tlbia; sync" : : : "memory")
#else /* CONFIG_44x || CONFIG_FSL_BOOKE */
extern void _tlbia(void);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

#ifdef CONFIG_SMP
extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
#else
#define flush_tlb_mm(mm)                local_flush_tlb_mm(mm)
#define flush_tlb_page(vma,addr)        local_flush_tlb_page(vma,addr)
#endif
#define flush_tlb_page_nohash(vma,addr) flush_tlb_page(vma,addr)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
        _tlbil_pid(mm->context.id);
}
#elif defined(CONFIG_PPC_STD_MMU_32)

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
        _tlbil_va(vmaddr, vma ? vma->vm_mm->context.id : 0);
}

static inline void flush_tlb_page_nohash(struct vm_area_struct *vma,
                                         unsigned long vmaddr)
{
        flush_tlb_page(vma, vmaddr);
}

static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
        _tlbil_pid(vma->vm_mm->context.id);
}

static inline void flush_tlb_kernel_range(unsigned long start,
                                          unsigned long end)
{
        _tlbil_pid(0);
}

#elif defined(CONFIG_PPC32)
/*
 * TLB flushing for "classic" hash-MMMU 32-bit CPUs, 6xx, 7xx, 7xxx
 * TLB flushing for "classic" hash-MMU 32-bit CPUs, 6xx, 7xx, 7xxx
 */
extern void _tlbie(unsigned long address);
extern void _tlbia(void);

extern void flush_tlb_mm(struct mm_struct *mm);
extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);
extern void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr);
extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
        flush_tlb_page(vma, vmaddr);
}
static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
        flush_tlb_mm(mm);
}

#elif defined(CONFIG_PPC_STD_MMU_64)

#else
/*
 * TLB flushing for 64-bit has-MMU CPUs
 * TLB flushing for 64-bit hash-MMU CPUs
 */

#include <linux/percpu.h>
@@ -134,10 +123,19 @@ extern void flush_hash_page(unsigned long va, real_pte_t pte, int psize,
extern void flush_hash_range(unsigned long number, int local);


static inline void local_flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void flush_tlb_mm(struct mm_struct *mm)
{
}

static inline void local_flush_tlb_page(struct vm_area_struct *vma,
                                        unsigned long vmaddr)
{
}

static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long vmaddr)
{
@@ -162,7 +160,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
extern void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
                                     unsigned long end);


#else
#error Unsupported MMU type
#endif

#endif /*__KERNEL__ */

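On the software-TLB (nohash) side above, flush_tlb_range() deliberately degrades to a PID-wide invalidate rather than looping a per-page tlbivax over the whole range. A stand-alone sketch of that policy decision, with _tlbil_pid() stubbed out for illustration:

#include <stdio.h>

static void tlbil_pid(unsigned int pid)         /* stub for _tlbil_pid() */
{
        printf("invalidate all TLB entries for pid %u\n", pid);
}

static void flush_range(unsigned int pid, unsigned long start,
                        unsigned long end)
{
        /* one PID-wide invalidate is cheaper than (end - start) / PAGE_SIZE
         * single-page invalidations on these embedded cores */
        (void)start;
        (void)end;
        tlbil_pid(pid);
}

int main(void)
{
        flush_range(7, 0x10000000UL, 0x10010000UL);
        return 0;
}
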
@@ -39,6 +39,7 @@
#ifndef __ASSEMBLY__

#include <linux/unistd.h>
#include <linux/time.h>

#define SYSCALL_MAP_SIZE      ((__NR_syscalls + 31) / 32)

@@ -83,6 +84,7 @@ struct vdso_data {
        __u32 icache_log_block_size;            /* L1 i-cache log block size */
        __s32 wtom_clock_sec;                   /* Wall to monotonic clock */
        __s32 wtom_clock_nsec;
        struct timespec stamp_xtime;            /* xtime as at tb_orig_stamp */
        __u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
        __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
};
@@ -102,6 +104,7 @@ struct vdso_data {
        __u32 tz_dsttime;                       /* Type of dst correction 0x5C */
        __s32 wtom_clock_sec;                   /* Wall to monotonic clock */
        __s32 wtom_clock_nsec;
        struct timespec stamp_xtime;            /* xtime as at tb_orig_stamp */
        __u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
        __u32 dcache_block_size;                /* L1 d-cache block size */
        __u32 icache_block_size;                /* L1 i-cache block size */