Merge branch 'master' into for-next

Conflicts:
	drivers/devfreq/exynos4_bus.c

Sync with Linus' tree to be able to apply patches that are
against newer code (mvneta).
Author: Jiri Kosina
Date: 2013-01-29 10:48:30 +01:00
11382 files changed, 531475 insertions(+), 303353 deletions(-)


@@ -1 +1,2 @@
# MIPS headers
generic-y += trace_clock.h


@@ -14,7 +14,6 @@
#endif
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/byteorder.h> /* sigh ... */
@@ -44,6 +43,24 @@
#define smp_mb__before_clear_bit() smp_mb__before_llsc()
#define smp_mb__after_clear_bit() smp_llsc_mb()
/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
*/
void __mips_set_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_clear_bit(unsigned long nr, volatile unsigned long *addr);
void __mips_change_bit(unsigned long nr, volatile unsigned long *addr);
int __mips_test_and_set_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr);
int __mips_test_and_change_bit(unsigned long nr,
volatile unsigned long *addr);
/*
* set_bit - Atomically set a bit in memory
* @nr: the bit to set
@@ -57,7 +74,7 @@
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -92,17 +109,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
__mips_set_bit(nr, addr);
}
/*
@@ -118,7 +126,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long temp;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -153,17 +161,8 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (~(1UL << bit)));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
__mips_clear_bit(nr, addr);
}
/*
@@ -191,7 +190,7 @@ static inline void clear_bit_unlock(unsigned long nr, volatile unsigned long *ad
*/
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -220,17 +219,8 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
: "=&r" (temp), "+m" (*m)
: "ir" (1UL << bit));
} while (unlikely(!temp));
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
__mips_change_bit(nr, addr);
}
/*
@@ -244,7 +234,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
static inline int test_and_set_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -281,18 +271,8 @@ static inline int test_and_set_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit(nr, addr);
smp_llsc_mb();
@@ -310,7 +290,7 @@ static inline int test_and_set_bit(unsigned long nr,
static inline int test_and_set_bit_lock(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
if (kernel_uses_llsc && R10000_LLSC_WAR) {
@@ -345,18 +325,8 @@ static inline int test_and_set_bit_lock(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_set_bit_lock(nr, addr);
smp_llsc_mb();
@@ -373,7 +343,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
static inline int test_and_clear_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -428,18 +398,8 @@ static inline int test_and_clear_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_clear_bit(nr, addr);
smp_llsc_mb();
@@ -457,7 +417,7 @@ static inline int test_and_clear_bit(unsigned long nr,
static inline int test_and_change_bit(unsigned long nr,
volatile unsigned long *addr)
{
unsigned short bit = nr & SZLONG_MASK;
int bit = nr & SZLONG_MASK;
unsigned long res;
smp_mb__before_llsc();
@@ -494,18 +454,8 @@ static inline int test_and_change_bit(unsigned long nr,
} while (unlikely(!res));
res = temp & (1UL << bit);
} else {
volatile unsigned long *a = addr;
unsigned long mask;
unsigned long flags;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
}
} else
res = __mips_test_and_change_bit(nr, addr);
smp_llsc_mb();
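
The bitops.h hunks above replace each open-coded raw_local_irq_{save,restore} fallback with a call into an out-of-line helper declared at the top of the file and defined in bitops.c. A minimal sketch of one such helper, reconstructed from the removed inline code (an assumption, not the verbatim bitops.c source), looks like this:

void __mips_set_bit(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	unsigned long flags;

	a += nr >> SZLONG_LOG;			/* word that holds the bit */
	mask = 1UL << (nr & SZLONG_MASK);	/* bit within that word */
	raw_local_irq_save(flags);		/* no LL/SC: mask IRQs for atomicity */
	*a |= mask;
	raw_local_irq_restore(flags);
}

The clear/change/test_and_* helpers follow the same pattern, using *a &= ~mask, *a ^= mask, or reading (mask & *a) into the result before the update, mirroring the removed blocks above.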


@@ -290,7 +290,7 @@ struct compat_shmid64_ds {
static inline int is_compat_task(void)
{
return test_thread_flag(TIF_32BIT);
return test_thread_flag(TIF_32BIT_ADDR);
}
#endif /* _ASM_COMPAT_H */


@@ -243,9 +243,9 @@ enum cpu_type_enum {
*/
CPU_R4000PC, CPU_R4000SC, CPU_R4000MC, CPU_R4200, CPU_R4300, CPU_R4310,
CPU_R4400PC, CPU_R4400SC, CPU_R4400MC, CPU_R4600, CPU_R4640, CPU_R4650,
CPU_R4700, CPU_R5000, CPU_R5000A, CPU_R5500, CPU_NEVADA, CPU_R5432,
CPU_R10000, CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121,
CPU_VR4122, CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
CPU_R4700, CPU_R5000, CPU_R5500, CPU_NEVADA, CPU_R5432, CPU_R10000,
CPU_R12000, CPU_R14000, CPU_VR41XX, CPU_VR4111, CPU_VR4121, CPU_VR4122,
CPU_VR4131, CPU_VR4133, CPU_VR4181, CPU_VR4181A, CPU_RM7000,
CPU_SR71000, CPU_RM9000, CPU_TX49XX,
/*


@@ -40,6 +40,8 @@ static inline int dma_supported(struct device *dev, u64 mask)
static inline int dma_mapping_error(struct device *dev, u64 mask)
{
struct dma_map_ops *ops = get_dma_ops(dev);
debug_dma_mapping_error(dev, mask);
return ops->mapping_error(dev, mask);
}


@@ -10,7 +10,7 @@
#define _ASM_ARC_TYPES_H
#ifdef CONFIG_ARC32
#ifdef CONFIG_FW_ARC32
typedef char CHAR;
typedef short SHORT;
@@ -33,9 +33,9 @@ typedef LONG _PUSHORT;
typedef LONG _PULONG;
typedef LONG _PVOID;
#endif /* CONFIG_ARC32 */
#endif /* CONFIG_FW_ARC32 */
#ifdef CONFIG_ARC64
#ifdef CONFIG_FW_ARC64
typedef char CHAR;
typedef short SHORT;
@@ -57,7 +57,7 @@ typedef USHORT *_PUSHORT;
typedef ULONG *_PULONG;
typedef VOID *_PVOID;
#endif /* CONFIG_ARC64 */
#endif /* CONFIG_FW_ARC64 */
typedef CHAR *PCHAR;
typedef SHORT *PSHORT;


@@ -161,31 +161,6 @@ ASMMACRO(back_to_back_c0_hazard,
)
#define instruction_hazard() do { } while (0)
#elif defined(CONFIG_CPU_RM9000)
/*
* RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
* use of the JTLB for instructions should not occur for 4 cpu cycles and use
* for data translations should not occur for 3 cpu cycles.
*/
ASMMACRO(mtc0_tlbw_hazard,
_ssnop; _ssnop; _ssnop; _ssnop
)
ASMMACRO(tlbw_use_hazard,
_ssnop; _ssnop; _ssnop; _ssnop
)
ASMMACRO(tlb_probe_hazard,
_ssnop; _ssnop; _ssnop; _ssnop
)
ASMMACRO(irq_enable_hazard,
)
ASMMACRO(irq_disable_hazard,
)
ASMMACRO(back_to_back_c0_hazard,
)
#define instruction_hazard() do { } while (0)
#elif defined(CONFIG_CPU_SB1)
/*


@@ -95,7 +95,17 @@ static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t pte,
int dirty)
{
return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
int changed = !pte_same(*ptep, pte);
if (changed) {
set_pte_at(vma->vm_mm, addr, ptep, pte);
/*
* There could be some standard sized pages in there,
* get them all.
*/
flush_tlb_range(vma, addr, addr + HPAGE_SIZE);
}
return changed;
}
static inline pte_t huge_ptep_get(pte_t *ptep)


@@ -15,6 +15,7 @@
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/addrspace.h>
#include <asm/bug.h>


@@ -16,6 +16,105 @@
#include <linux/compiler.h>
#include <asm/hazards.h>
#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
" di \n"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_disable(void)
{
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
" di \\result \n"
" andi \\result, 1 \n"
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
return flags;
}
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#if defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since day 1.
*/
" beqz \\flags, 1f \n"
" di \n"
" ei \n"
"1: \n"
#else
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
__asm__(
" .macro arch_local_irq_enable \n"
" .set push \n"
@@ -57,55 +156,6 @@ static inline void arch_local_irq_enable(void)
}
/*
* For cli() we have to insert nops to make sure that the new value
* has actually arrived in the status register before the end of this
* macro.
* R4000/R4400 need three nops, the R4600 two nops and the R10000 needs
* no nops at all.
*/
/*
* For TX49, operating only on the IE bit is not enough.
*
* If mfc0 $12 follows store and the mfc0 is last instruction of a
* page and fetching the next instruction causes TLB miss, the result
* of the mfc0 might wrongly contain EXL bit.
*
* ERT-TX49H2-027, ERT-TX49H3-012, ERT-TX49HL3-006, ERT-TX49H4-008
*
* Workaround: mask EXL bit of the result or place a nop before mfc0.
*/
__asm__(
" .macro arch_local_irq_disable\n"
" .set push \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \n"
#else
" mfc0 $1,$12 \n"
" ori $1,0x1f \n"
" xori $1,0x1f \n"
" .set noreorder \n"
" mtc0 $1,$12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_disable(void)
{
__asm__ __volatile__(
"arch_local_irq_disable"
: /* no outputs */
: /* no inputs */
: "memory");
}
__asm__(
" .macro arch_local_save_flags flags \n"
" .set push \n"
@@ -125,113 +175,6 @@ static inline unsigned long arch_local_save_flags(void)
return flags;
}
__asm__(
" .macro arch_local_irq_save result \n"
" .set push \n"
" .set reorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
" mfc0 \\result, $2, 1 \n"
" ori $1, \\result, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi \\result, \\result, 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
" di \\result \n"
" andi \\result, 1 \n"
#else
" mfc0 \\result, $12 \n"
" ori $1, \\result, 0x1f \n"
" xori $1, 0x1f \n"
" .set noreorder \n"
" mtc0 $1, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline unsigned long arch_local_irq_save(void)
{
unsigned long flags;
asm volatile("arch_local_irq_save\t%0"
: "=r" (flags)
: /* no inputs */
: "memory");
return flags;
}
__asm__(
" .macro arch_local_irq_restore flags \n"
" .set push \n"
" .set noreorder \n"
" .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC
"mfc0 $1, $2, 1 \n"
"andi \\flags, 0x400 \n"
"ori $1, 0x400 \n"
"xori $1, 0x400 \n"
"or \\flags, $1 \n"
"mtc0 \\flags, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/*
* Slow, but doesn't suffer from a relatively unlikely race
* condition we're having since day 1.
*/
" beqz \\flags, 1f \n"
" di \n"
" ei \n"
"1: \n"
#elif defined(CONFIG_CPU_MIPSR2)
/*
* Fast, dangerous. Life is fun, life is good.
*/
" mfc0 $1, $12 \n"
" ins $1, \\flags, 0, 1 \n"
" mtc0 $1, $12 \n"
#else
" mfc0 $1, $12 \n"
" andi \\flags, 1 \n"
" ori $1, 0x1f \n"
" xori $1, 0x1f \n"
" or \\flags, $1 \n"
" mtc0 \\flags, $12 \n"
#endif
" irq_disable_hazard \n"
" .set pop \n"
" .endm \n");
static inline void arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline void __arch_local_irq_restore(unsigned long flags)
{
unsigned long __tmp1;
__asm__ __volatile__(
"arch_local_irq_restore\t%0"
: "=r" (__tmp1)
: "0" (flags)
: "memory");
}
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
@@ -245,7 +188,7 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
#endif
}
#endif
#endif /* #ifndef __ASSEMBLY__ */
/*
* Do the CPU's IRQ-state tracing from assembly code.
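
For everything other than MIPSR2 without SMTC, the hunks above move arch_local_irq_disable(), arch_local_irq_save() and arch_local_irq_restore() out of line; as the new comment says, the variants that need preempt_{dis,en}able() now live in mips-atomic.c. A hedged sketch of the out-of-line disable path, assembled from the removed pre-R2 asm macro and bracketed with preemption control (a sketch only, not the verbatim mips-atomic.c code):

void arch_local_irq_disable(void)
{
	preempt_disable();

	__asm__ __volatile__(
	"	.set	push				\n"
	"	.set	noat				\n"
	"	mfc0	$1, $12				\n"	/* read CP0 Status */
	"	ori	$1, 0x1f			\n"
	"	xori	$1, 0x1f			\n"	/* clear the low 5 bits, IE among them */
	"	.set	noreorder			\n"
	"	mtc0	$1, $12				\n"
	"	irq_disable_hazard			\n"
	"	.set	pop				\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");

	preempt_enable();
}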


@@ -9,22 +9,43 @@
#ifndef _MIPS_KEXEC
# define _MIPS_KEXEC
#include <asm/stacktrace.h>
/* Maximum physical address we can use pages from */
#define KEXEC_SOURCE_MEMORY_LIMIT (0x20000000)
/* Maximum address we can reach in physical address mode */
#define KEXEC_DESTINATION_MEMORY_LIMIT (0x20000000)
/* Maximum address we can use for the control code buffer */
#define KEXEC_CONTROL_MEMORY_LIMIT (0x20000000)
#define KEXEC_CONTROL_PAGE_SIZE 4096
/* Reserve 3*4096 bytes for board-specific info */
#define KEXEC_CONTROL_PAGE_SIZE (4096 + 3*4096)
/* The native architecture */
#define KEXEC_ARCH KEXEC_ARCH_MIPS
#define MAX_NOTE_BYTES 1024
static inline void crash_setup_regs(struct pt_regs *newregs,
struct pt_regs *oldregs)
{
/* Dummy implementation for now */
if (oldregs)
memcpy(newregs, oldregs, sizeof(*newregs));
else
prepare_frametrace(newregs);
}
#ifdef CONFIG_KEXEC
struct kimage;
extern unsigned long kexec_args[4];
extern int (*_machine_kexec_prepare)(struct kimage *);
extern void (*_machine_kexec_shutdown)(void);
extern void (*_machine_crash_shutdown)(struct pt_regs *regs);
extern void default_machine_crash_shutdown(struct pt_regs *regs);
#ifdef CONFIG_SMP
extern const unsigned char kexec_smp_wait[];
extern unsigned long secondary_kexec_args[4];
extern void (*relocated_kexec_smp_wait) (void *);
extern atomic_t kexec_ready_to_reboot;
#endif
#endif
#endif /* !_MIPS_KEXEC */


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -44,8 +44,8 @@ union bcm47xx_bus {
extern union bcm47xx_bus bcm47xx_bus;
extern enum bcm47xx_bus_type bcm47xx_bus_type;
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix);
void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom, const char *prefix);
void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
bool fallback);
#ifdef CONFIG_BCM47XX_SSB
void bcm47xx_fill_ssb_boardinfo(struct ssb_boardinfo *boardinfo,


@@ -1,155 +1,17 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
*/
#ifndef __ASM_MIPS_MACH_BCM47XX_GPIO_H
#define __ASM_MIPS_MACH_BCM47XX_GPIO_H
#ifndef __BCM47XX_GPIO_H
#define __BCM47XX_GPIO_H
#include <asm-generic/gpio.h>
#include <linux/ssb/ssb_embedded.h>
#include <linux/bcma/bcma.h>
#include <asm/mach-bcm47xx/bcm47xx.h>
#define gpio_get_value __gpio_get_value
#define gpio_set_value __gpio_set_value
#define BCM47XX_EXTIF_GPIO_LINES 5
#define BCM47XX_CHIPCO_GPIO_LINES 16
#define gpio_cansleep __gpio_cansleep
#define gpio_to_irq __gpio_to_irq
extern int gpio_request(unsigned gpio, const char *label);
extern void gpio_free(unsigned gpio);
extern int gpio_to_irq(unsigned gpio);
static inline int gpio_get_value(unsigned gpio)
static inline int irq_to_gpio(unsigned int irq)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
return ssb_gpio_in(&bcm47xx_bus.ssb, 1 << gpio);
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
return bcma_chipco_gpio_in(&bcm47xx_bus.bcma.bus.drv_cc,
1 << gpio);
#endif
}
return -EINVAL;
}
#define gpio_get_value_cansleep gpio_get_value
static inline void gpio_set_value(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
return;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
value ? 1 << gpio : 0);
return;
#endif
}
}
#define gpio_set_value_cansleep gpio_set_value
static inline int gpio_cansleep(unsigned gpio)
{
return 0;
}
static inline int gpio_is_valid(unsigned gpio)
{
return gpio < (BCM47XX_EXTIF_GPIO_LINES + BCM47XX_CHIPCO_GPIO_LINES);
}
static inline int gpio_direction_input(unsigned gpio)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 0);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
0);
return 0;
#endif
}
return -EINVAL;
}
static inline int gpio_direction_output(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
/* first set the gpio out value */
ssb_gpio_out(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
/* then set the gpio mode */
ssb_gpio_outen(&bcm47xx_bus.ssb, 1 << gpio, 1 << gpio);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
/* first set the gpio out value */
bcma_chipco_gpio_out(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
value ? 1 << gpio : 0);
/* then set the gpio mode */
bcma_chipco_gpio_outen(&bcm47xx_bus.bcma.bus.drv_cc, 1 << gpio,
1 << gpio);
return 0;
#endif
}
return -EINVAL;
}
static inline int gpio_intmask(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_intmask(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_intmask(&bcm47xx_bus.bcma.bus.drv_cc,
1 << gpio, value ? 1 << gpio : 0);
return 0;
#endif
}
return -EINVAL;
}
static inline int gpio_polarity(unsigned gpio, int value)
{
switch (bcm47xx_bus_type) {
#ifdef CONFIG_BCM47XX_SSB
case BCM47XX_BUS_TYPE_SSB:
ssb_gpio_polarity(&bcm47xx_bus.ssb, 1 << gpio,
value ? 1 << gpio : 0);
return 0;
#endif
#ifdef CONFIG_BCM47XX_BCMA
case BCM47XX_BUS_TYPE_BCMA:
bcma_chipco_gpio_polarity(&bcm47xx_bus.bcma.bus.drv_cc,
1 << gpio, value ? 1 << gpio : 0);
return 0;
#endif
}
return -EINVAL;
}
#endif /* __BCM47XX_GPIO_H */


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -0,0 +1,35 @@
#ifndef BCM63XX_NVRAM_H
#define BCM63XX_NVRAM_H
#include <linux/types.h>
/**
* bcm63xx_nvram_init() - initializes nvram
* @nvram: address of the nvram data
*
* Initializes the local nvram copy from the target address and checks
* its checksum.
*
* Returns 0 on success.
*/
int __init bcm63xx_nvram_init(void *nvram);
/**
* bcm63xx_nvram_get_name() - returns the board name according to nvram
*
* Returns the board name field from nvram. Note that it might not be
* null terminated if it is exactly 16 bytes long.
*/
u8 *bcm63xx_nvram_get_name(void);
/**
* bcm63xx_nvram_get_mac_address() - register & return a new mac address
* @mac: pointer to array for allocated mac
*
* Registers and returns a mac address from the allocated macs from nvram.
*
* Returns 0 on success.
*/
int bcm63xx_nvram_get_mac_address(u8 *mac);
#endif /* BCM63XX_NVRAM_H */
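
The kernel-doc above is the whole of the new nvram API. A hypothetical board-setup caller (sketch only; the mapped flash address, the function name and the error codes are assumptions, not kernel code) would use it roughly as follows:

static int __init board_nvram_setup(void)
{
	u8 mac[6];
	void *nvram = (void *)KSEG1ADDR(0x1fc00580);	/* assumed nvram location in flash */

	if (bcm63xx_nvram_init(nvram))			/* copies and checksums the data */
		return -EINVAL;

	/* the name may not be NUL-terminated when it is exactly 16 bytes long */
	pr_info("board name: %.16s\n", bcm63xx_nvram_get_name());

	if (bcm63xx_nvram_get_mac_address(mac))		/* next MAC from the nvram pool */
		return -ENODEV;

	return 0;
}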


@@ -53,13 +53,18 @@
CKCTL_6338_SAR_EN | \
CKCTL_6338_SPI_EN)
#define CKCTL_6345_CPU_EN (1 << 0)
#define CKCTL_6345_BUS_EN (1 << 1)
#define CKCTL_6345_EBI_EN (1 << 2)
#define CKCTL_6345_UART_EN (1 << 3)
#define CKCTL_6345_ADSLPHY_EN (1 << 4)
#define CKCTL_6345_ENET_EN (1 << 7)
#define CKCTL_6345_USBH_EN (1 << 8)
/* BCM6345 clock bits are shifted by 16 on the left, because of the test
* control register which is 16-bits wide. That way we do not have any
* specific BCM6345 code for handling clocks, and writing 0 to the test
* control register is fine.
*/
#define CKCTL_6345_CPU_EN (1 << 16)
#define CKCTL_6345_BUS_EN (1 << 17)
#define CKCTL_6345_EBI_EN (1 << 18)
#define CKCTL_6345_UART_EN (1 << 19)
#define CKCTL_6345_ADSLPHY_EN (1 << 20)
#define CKCTL_6345_ENET_EN (1 << 23)
#define CKCTL_6345_USBH_EN (1 << 24)
#define CKCTL_6345_ALL_SAFE_EN (CKCTL_6345_ENET_EN | \
CKCTL_6345_USBH_EN | \
@@ -191,6 +196,7 @@
/* Soft Reset register */
#define PERF_SOFTRESET_REG 0x28
#define PERF_SOFTRESET_6328_REG 0x10
#define PERF_SOFTRESET_6358_REG 0x34
#define PERF_SOFTRESET_6368_REG 0x10
#define SOFTRESET_6328_SPI_MASK (1 << 0)
@@ -244,6 +250,15 @@
SOFTRESET_6348_ACLC_MASK | \
SOFTRESET_6348_ADSLMIPSPLL_MASK)
#define SOFTRESET_6358_SPI_MASK (1 << 0)
#define SOFTRESET_6358_ENET_MASK (1 << 2)
#define SOFTRESET_6358_MPI_MASK (1 << 3)
#define SOFTRESET_6358_EPHY_MASK (1 << 6)
#define SOFTRESET_6358_SAR_MASK (1 << 7)
#define SOFTRESET_6358_USBH_MASK (1 << 12)
#define SOFTRESET_6358_PCM_MASK (1 << 13)
#define SOFTRESET_6358_ADSL_MASK (1 << 14)
#define SOFTRESET_6368_SPI_MASK (1 << 0)
#define SOFTRESET_6368_MPI_MASK (1 << 3)
#define SOFTRESET_6368_EPHY_MASK (1 << 6)


@@ -0,0 +1,21 @@
#ifndef __BCM63XX_RESET_H
#define __BCM63XX_RESET_H
enum bcm63xx_core_reset {
BCM63XX_RESET_SPI,
BCM63XX_RESET_ENET,
BCM63XX_RESET_USBH,
BCM63XX_RESET_USBD,
BCM63XX_RESET_SAR,
BCM63XX_RESET_DSL,
BCM63XX_RESET_EPHY,
BCM63XX_RESET_ENETSW,
BCM63XX_RESET_PCM,
BCM63XX_RESET_MPI,
BCM63XX_RESET_PCIE,
BCM63XX_RESET_PCIE_EXT,
};
void bcm63xx_core_set_reset(enum bcm63xx_core_reset, int reset);
#endif


@@ -14,23 +14,6 @@
#define BCM963XX_CFE_VERSION_OFFSET 0x570
#define BCM963XX_NVRAM_OFFSET 0x580
/*
* nvram structure
*/
struct bcm963xx_nvram {
u32 version;
u8 reserved1[256];
u8 name[16];
u32 main_tp_number;
u32 psi_size;
u32 mac_addr_count;
u8 mac_addr_base[6];
u8 reserved2[2];
u32 checksum_old;
u8 reserved3[720];
u32 checksum_high;
};
/*
* board definition
*/


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -42,7 +42,6 @@ enum octeon_irq {
OCTEON_IRQ_TIMER3,
OCTEON_IRQ_USB0,
OCTEON_IRQ_USB1,
OCTEON_IRQ_BOOTDMA,
#ifndef CONFIG_PCI_MSI
OCTEON_IRQ_LAST = 127
#endif


@@ -18,7 +18,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -34,12 +34,6 @@
#endif
#endif
#ifdef CONFIG_IRQ_CPU_RM9K
#ifndef RM9K_CPU_IRQ_BASE
#define RM9K_CPU_IRQ_BASE (MIPS_CPU_IRQ_BASE+12)
#endif
#endif
#endif /* CONFIG_IRQ_CPU */
#endif /* __ASM_MACH_GENERIC_IRQ_H */


@@ -21,7 +21,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 1
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 1
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -16,7 +16,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -82,6 +82,9 @@ extern __iomem void *ltq_cgu_membase;
#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
/* allow booting xrx200 phys */
int xrx200_gphy_boot(struct device *dev, unsigned int id, dma_addr_t dev_addr);
/* request a non-gpio and set the PIO config */
#define PMU_PPE BIT(13)
extern void ltq_pmu_enable(unsigned int module);


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -18,6 +18,7 @@ extern struct platform_device ls1x_eth0_device;
extern struct platform_device ls1x_ehci_device;
extern struct platform_device ls1x_rtc_device;
void ls1x_serial_setup(void);
extern void __init ls1x_clk_init(void);
extern void __init ls1x_serial_setup(struct platform_device *pdev);
#endif /* __ASM_MACH_LOONGSON1_PLATFORM_H */


@@ -20,14 +20,15 @@
/* Clock PLL Divisor Register Bits */
#define DIV_DC_EN (0x1 << 31)
#define DIV_DC (0x1f << 26)
#define DIV_CPU_EN (0x1 << 25)
#define DIV_CPU (0x1f << 20)
#define DIV_DDR_EN (0x1 << 19)
#define DIV_DDR (0x1f << 14)
#define DIV_DC_SHIFT 26
#define DIV_CPU_SHIFT 20
#define DIV_DDR_SHIFT 14
#define DIV_DC_WIDTH 5
#define DIV_CPU_WIDTH 5
#define DIV_DDR_WIDTH 5
#endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */
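
With the mask macros above replaced by SHIFT/WIDTH pairs, a divisor field can be extracted generically: the CPU divisor sits in bits [24:20], so (val >> DIV_CPU_SHIFT) & ((1 << DIV_CPU_WIDTH) - 1) recovers the same 5-bit value the old DIV_CPU mask (0x1f << 20) selected.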


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 1
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -8,7 +8,9 @@
#ifndef __ASM_NETLOGIC_IRQ_H
#define __ASM_NETLOGIC_IRQ_H
#define NR_IRQS 64
#include <asm/mach-netlogic/multi-node.h>
#define NR_IRQS (64 * NLM_NR_NODES)
#define MIPS_CPU_IRQ_BASE 0
#endif /* __ASM_NETLOGIC_IRQ_H */


@@ -0,0 +1,54 @@
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NETLOGIC_MULTI_NODE_H_
#define _NETLOGIC_MULTI_NODE_H_
#ifndef CONFIG_NLM_MULTINODE
#define NLM_NR_NODES 1
#else
#if defined(CONFIG_NLM_MULTINODE_2)
#define NLM_NR_NODES 2
#elif defined(CONFIG_NLM_MULTINODE_4)
#define NLM_NR_NODES 4
#else
#define NLM_NR_NODES 1
#endif
#endif
#define NLM_CORES_PER_NODE 8
#define NLM_THREADS_PER_CORE 4
#define NLM_CPUS_PER_NODE (NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE)
#endif


@@ -18,7 +18,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -20,7 +20,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 1
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -21,7 +21,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 1
#define MIPS_CACHE_SYNC_WAR 1
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -33,7 +33,6 @@ extern int sb1250_m3_workaround_needed(void);
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 1
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0


@@ -1,48 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2003, 04, 07 Ralf Baechle (ralf@linux-mips.org)
*/
#ifndef __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H
/*
* Momentum Jaguar ATX always has the RM9000 processor.
*/
#define cpu_has_watch 1
#define cpu_has_mips16 0
#define cpu_has_divec 0
#define cpu_has_vce 0
#define cpu_has_cache_cdex_p 0
#define cpu_has_cache_cdex_s 0
#define cpu_has_prefetch 1
#define cpu_has_mcheck 0
#define cpu_has_ejtag 0
#define cpu_has_llsc 1
#define cpu_has_vtag_icache 0
#define cpu_has_dc_aliases 0
#define cpu_has_ic_fills_f_dc 0
#define cpu_has_dsp 0
#define cpu_has_dsp2 0
#define cpu_has_mipsmt 0
#define cpu_has_userlocal 0
#define cpu_icache_snoops_remote_store 0
#define cpu_has_nofpuex 0
#define cpu_has_64bits 1
#define cpu_has_inclusive_pcaches 0
#define cpu_dcache_line_size() 32
#define cpu_icache_line_size() 32
#define cpu_scache_line_size() 32
#define cpu_has_mips32r1 0
#define cpu_has_mips32r2 0
#define cpu_has_mips64r1 0
#define cpu_has_mips64r2 0
#endif /* __ASM_MACH_YOSEMITE_CPU_FEATURE_OVERRIDES_H */


@@ -1,25 +0,0 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
*/
#ifndef __ASM_MIPS_MACH_YOSEMITE_WAR_H
#define __ASM_MIPS_MACH_YOSEMITE_WAR_H
#define R4600_V1_INDEX_ICACHEOP_WAR 0
#define R4600_V1_HIT_CACHEOP_WAR 0
#define R4600_V2_HIT_CACHEOP_WAR 0
#define R5432_CP0_INTERRUPT_WAR 0
#define BCM1250_M3_WAR 0
#define SIBYTE_1956_WAR 0
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 1
#define ICACHE_REFILLS_WORKAROUND_WAR 1
#define R10000_LLSC_WAR 0
#define MIPS34K_MISSED_ITLB_WAR 0
#endif /* __ASM_MIPS_MACH_YOSEMITE_WAR_H */


@@ -240,7 +240,7 @@
#define PM_HUGE_MASK PM_64M
#elif defined(CONFIG_PAGE_SIZE_64KB)
#define PM_HUGE_MASK PM_256M
#elif defined(CONFIG_HUGETLB_PAGE)
#elif defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
#error Bad page size configuration for hugetlbfs!
#endif
@@ -977,10 +977,6 @@ do { \
#define read_c0_framemask() __read_32bit_c0_register($21, 0)
#define write_c0_framemask(val) __write_32bit_c0_register($21, 0, val)
/* RM9000 PerfControl performance counter control register */
#define read_c0_perfcontrol() __read_32bit_c0_register($22, 0)
#define write_c0_perfcontrol(val) __write_32bit_c0_register($22, 0, val)
#define read_c0_diag() __read_32bit_c0_register($22, 0)
#define write_c0_diag(val) __write_32bit_c0_register($22, 0, val)
@@ -1033,10 +1029,6 @@ do { \
#define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7)
#define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val)
/* RM9000 PerfCount performance counter register */
#define read_c0_perfcount() __read_64bit_c0_register($25, 0)
#define write_c0_perfcount(val) __write_64bit_c0_register($25, 0, val)
#define read_c0_ecc() __read_32bit_c0_register($26, 0)
#define write_c0_ecc(val) __write_32bit_c0_register($26, 0, val)


@@ -72,12 +72,6 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x10
#define ASID_MASK 0xff0
#elif defined(CONFIG_CPU_RM9000)
#define ASID_INC 0x1
#define ASID_MASK 0xfff
/* SMTC/34K debug hack - but maybe we'll keep it */
#elif defined(CONFIG_MIPS_MT_SMTC)
#define ASID_INC 0x1


@@ -120,8 +120,6 @@ search_module_dbetables(unsigned long addr)
#define MODULE_PROC_FAMILY "R10000 "
#elif defined CONFIG_CPU_RM7000
#define MODULE_PROC_FAMILY "RM7000 "
#elif defined CONFIG_CPU_RM9000
#define MODULE_PROC_FAMILY "RM9000 "
#elif defined CONFIG_CPU_SB1
#define MODULE_PROC_FAMILY "SB1 "
#elif defined CONFIG_CPU_LOONGSON1


@@ -45,15 +45,19 @@
#define BOOT_NMI_HANDLER 8
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <asm/irq.h>
#include <asm/mach-netlogic/multi-node.h>
struct irq_desc;
extern struct plat_smp_ops nlm_smp_ops;
extern char nlm_reset_entry[], nlm_reset_entry_end[];
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc);
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc);
void nlm_smp_irq_init(void);
void nlm_smp_irq_init(int hwcpuid);
void nlm_boot_secondary_cpus(void);
int nlm_wakeup_secondary_cpus(u32 wakeup_mask);
int nlm_wakeup_secondary_cpus(void);
void nlm_rmiboot_preboot(void);
void nlm_percpu_init(int hwcpuid);
static inline void
nlm_set_nmi_handler(void *handler)
@@ -68,9 +72,42 @@ nlm_set_nmi_handler(void *handler)
* Misc.
*/
unsigned int nlm_get_cpu_frequency(void);
void nlm_node_init(int node);
extern struct plat_smp_ops nlm_smp_ops;
extern char nlm_reset_entry[], nlm_reset_entry_end[];
extern unsigned long nlm_common_ebase;
extern int nlm_threads_per_core;
extern uint32_t nlm_cpumask, nlm_coremask;
extern unsigned int nlm_threads_per_core;
extern cpumask_t nlm_cpumask;
struct nlm_soc_info {
unsigned long coremask; /* cores enabled on the soc */
unsigned long ebase;
uint64_t irqmask;
uint64_t sysbase; /* only for XLP */
uint64_t picbase;
spinlock_t piclock;
};
#define nlm_get_node(i) (&nlm_nodes[i])
#ifdef CONFIG_CPU_XLR
#define nlm_current_node() (&nlm_nodes[0])
#else
#define nlm_current_node() (&nlm_nodes[nlm_nodeid()])
#endif
struct irq_data;
uint64_t nlm_pci_irqmask(int node);
void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *));
/*
* The NR_IRQS space is divided between the nodes; each node has a separate irq space
*/
static inline int nlm_irq_to_xirq(int node, int irq)
{
return node * NR_IRQS / NLM_NR_NODES + irq;
}
extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
extern int nlm_cpu_ready[];
#endif
#endif /* _NETLOGIC_COMMON_H_ */
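
As the comment above notes, the NR_IRQS space is split evenly between nodes, so each node owns an NR_IRQS / NLM_NR_NODES slice. For example, with CONFIG_NLM_MULTINODE_4 the headers give NR_IRQS = 64 * 4 = 256, each node gets 64 interrupts, and nlm_irq_to_xirq(2, 5) returns 2 * 64 + 5 = 133.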


@@ -39,7 +39,7 @@
#define IRQ_IPI_SMP_FUNCTION 3
#define IRQ_IPI_SMP_RESCHEDULE 4
#define IRQ_MSGRING 6
#define IRQ_FMN 5
#define IRQ_TIMER 7
#endif


@@ -73,4 +73,146 @@ static inline int hard_smp_processor_id(void)
return __read_32bit_c0_register($15, 1) & 0x3ff;
}
static inline int nlm_nodeid(void)
{
return (__read_32bit_c0_register($15, 1) >> 5) & 0x3;
}
static inline unsigned int nlm_core_id(void)
{
return (read_c0_ebase() & 0x1c) >> 2;
}
static inline unsigned int nlm_thread_id(void)
{
return read_c0_ebase() & 0x3;
}
#define __read_64bit_c2_split(source, sel) \
({ \
unsigned long long __val; \
unsigned long __flags; \
\
local_irq_save(__flags); \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%M0, " #source "\n\t" \
"dsll\t%L0, %M0, 32\n\t" \
"dsra\t%M0, %M0, 32\n\t" \
"dsra\t%L0, %L0, 32\n\t" \
".set\tmips0\n\t" \
: "=r" (__val)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%M0, " #source ", " #sel "\n\t" \
"dsll\t%L0, %M0, 32\n\t" \
"dsra\t%M0, %M0, 32\n\t" \
"dsra\t%L0, %L0, 32\n\t" \
".set\tmips0\n\t" \
: "=r" (__val)); \
local_irq_restore(__flags); \
\
__val; \
})
#define __write_64bit_c2_split(source, sel, val) \
do { \
unsigned long __flags; \
\
local_irq_save(__flags); \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc2\t%L0, " #source "\n\t" \
".set\tmips0\n\t" \
: : "r" (val)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dsll\t%L0, %L0, 32\n\t" \
"dsrl\t%L0, %L0, 32\n\t" \
"dsll\t%M0, %M0, 32\n\t" \
"or\t%L0, %L0, %M0\n\t" \
"dmtc2\t%L0, " #source ", " #sel "\n\t" \
".set\tmips0\n\t" \
: : "r" (val)); \
local_irq_restore(__flags); \
} while (0)
#define __read_32bit_c2_register(source, sel) \
({ uint32_t __res; \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mfc2\t%0, " #source "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
else \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mfc2\t%0, " #source ", " #sel "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
__res; \
})
#define __read_64bit_c2_register(source, sel) \
({ unsigned long long __res; \
if (sizeof(unsigned long) == 4) \
__res = __read_64bit_c2_split(source, sel); \
else if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%0, " #source "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmfc2\t%0, " #source ", " #sel "\n\t" \
".set\tmips0\n\t" \
: "=r" (__res)); \
__res; \
})
#define __write_64bit_c2_register(register, sel, value) \
do { \
if (sizeof(unsigned long) == 4) \
__write_64bit_c2_split(register, sel, value); \
else if (sel == 0) \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmtc2\t%z0, " #register "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
else \
__asm__ __volatile__( \
".set\tmips64\n\t" \
"dmtc2\t%z0, " #register ", " #sel "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
} while (0)
#define __write_32bit_c2_register(reg, sel, value) \
({ \
if (sel == 0) \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mtc2\t%z0, " #reg "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
else \
__asm__ __volatile__( \
".set\tmips32\n\t" \
"mtc2\t%z0, " #reg ", " #sel "\n\t" \
".set\tmips0\n\t" \
: : "Jr" (value)); \
})
#endif /*_ASM_NLM_MIPS_EXTS_H */


@@ -273,36 +273,16 @@ nlm_pic_read_irt(uint64_t base, int irt_index)
return nlm_read_pic_reg(base, PIC_IRT(irt_index));
}
static inline uint64_t
nlm_pic_read_control(uint64_t base)
{
return nlm_read_pic_reg(base, PIC_CTRL);
}
static inline void
nlm_pic_write_control(uint64_t base, uint64_t control)
{
nlm_write_pic_reg(base, PIC_CTRL, control);
}
static inline void
nlm_pic_update_control(uint64_t base, uint64_t control)
{
uint64_t val;
val = nlm_read_pic_reg(base, PIC_CTRL);
nlm_write_pic_reg(base, PIC_CTRL, control | val);
}
static inline void
nlm_set_irt_to_cpu(uint64_t base, int irt, int cpu)
{
uint64_t val;
val = nlm_read_pic_reg(base, PIC_IRT(irt));
val |= cpu & 0xf;
if (cpu > 15)
val |= 1 << 16;
/* clear cpuset and mask */
val &= ~((0x7ull << 16) | 0xffff);
/* set DB, cpuset and cpumask */
val |= (1 << 19) | ((cpu >> 4) << 16) | (1 << (cpu & 0xf));
nlm_write_pic_reg(base, PIC_IRT(irt), val);
}
@@ -369,7 +349,7 @@ nlm_pic_enable_irt(uint64_t base, int irt)
static inline void
nlm_pic_disable_irt(uint64_t base, int irt)
{
uint32_t reg;
uint64_t reg;
reg = nlm_read_pic_reg(base, PIC_IRT(irt));
nlm_write_pic_reg(base, PIC_IRT(irt), reg & ~((uint64_t)1 << 31));
@@ -379,15 +359,9 @@ static inline void
nlm_pic_send_ipi(uint64_t base, int hwt, int irq, int nmi)
{
uint64_t ipi;
int node, ncpu;
node = hwt / 32;
ncpu = hwt & 0x1f;
ipi = ((uint64_t)nmi << 31) | (irq << 20) | (node << 17) |
(1 << (ncpu & 0xf));
if (ncpu > 15)
ipi |= 0x10000; /* Setting bit 16 to select cpus 16-31 */
ipi = (nmi << 31) | (irq << 20);
ipi |= ((hwt >> 4) << 16) | (1 << (hwt & 0xf)); /* cpuset and mask */
nlm_write_pic_reg(base, PIC_IPI_CTL, ipi);
}
@@ -404,12 +378,10 @@ nlm_pic_ack(uint64_t base, int irt_num)
static inline void
nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
{
nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, 0);
nlm_pic_write_irt_direct(base, irt, 0, 0, 0, irq, hwt);
}
extern uint64_t nlm_pic_base;
int nlm_irq_to_irt(int irq);
int nlm_irt_to_irq(int irt);
#endif /* __ASSEMBLY__ */
#endif /* _NLM_HAL_PIC_H */
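
The reworked nlm_set_irt_to_cpu() above encodes the destination as a 3-bit cpuset plus a 16-bit thread mask instead of the old single cpu number: for hardware thread 21, for instance, it ORs in the DB bit (1 << 19), the cpuset (21 >> 4) << 16 = 1 << 16 and the mask 1 << (21 & 0xf) = 1 << 5, i.e. 0x90020 in total; nlm_pic_send_ipi() now builds its destination bits the same way.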


@@ -124,6 +124,5 @@
#define nlm_get_sys_pcibase(node) nlm_pcicfg_base(XLP_IO_SYS_OFFSET(node))
#define nlm_get_sys_regbase(node) (nlm_get_sys_pcibase(node) + XLP_IO_PCI_HDRSZ)
extern uint64_t nlm_sys_base;
#endif
#endif


@@ -0,0 +1,363 @@
/*
* Copyright (c) 2003-2012 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _NLM_FMN_H_
#define _NLM_FMN_H_
#include <asm/netlogic/mips-extns.h> /* for COP2 access */
/* Station IDs */
#define FMN_STNID_CPU0 0x00
#define FMN_STNID_CPU1 0x08
#define FMN_STNID_CPU2 0x10
#define FMN_STNID_CPU3 0x18
#define FMN_STNID_CPU4 0x20
#define FMN_STNID_CPU5 0x28
#define FMN_STNID_CPU6 0x30
#define FMN_STNID_CPU7 0x38
#define FMN_STNID_XGS0_TX 64
#define FMN_STNID_XMAC0_00_TX 64
#define FMN_STNID_XMAC0_01_TX 65
#define FMN_STNID_XMAC0_02_TX 66
#define FMN_STNID_XMAC0_03_TX 67
#define FMN_STNID_XMAC0_04_TX 68
#define FMN_STNID_XMAC0_05_TX 69
#define FMN_STNID_XMAC0_06_TX 70
#define FMN_STNID_XMAC0_07_TX 71
#define FMN_STNID_XMAC0_08_TX 72
#define FMN_STNID_XMAC0_09_TX 73
#define FMN_STNID_XMAC0_10_TX 74
#define FMN_STNID_XMAC0_11_TX 75
#define FMN_STNID_XMAC0_12_TX 76
#define FMN_STNID_XMAC0_13_TX 77
#define FMN_STNID_XMAC0_14_TX 78
#define FMN_STNID_XMAC0_15_TX 79
#define FMN_STNID_XGS1_TX 80
#define FMN_STNID_XMAC1_00_TX 80
#define FMN_STNID_XMAC1_01_TX 81
#define FMN_STNID_XMAC1_02_TX 82
#define FMN_STNID_XMAC1_03_TX 83
#define FMN_STNID_XMAC1_04_TX 84
#define FMN_STNID_XMAC1_05_TX 85
#define FMN_STNID_XMAC1_06_TX 86
#define FMN_STNID_XMAC1_07_TX 87
#define FMN_STNID_XMAC1_08_TX 88
#define FMN_STNID_XMAC1_09_TX 89
#define FMN_STNID_XMAC1_10_TX 90
#define FMN_STNID_XMAC1_11_TX 91
#define FMN_STNID_XMAC1_12_TX 92
#define FMN_STNID_XMAC1_13_TX 93
#define FMN_STNID_XMAC1_14_TX 94
#define FMN_STNID_XMAC1_15_TX 95
#define FMN_STNID_GMAC 96
#define FMN_STNID_GMACJFR_0 96
#define FMN_STNID_GMACRFR_0 97
#define FMN_STNID_GMACTX0 98
#define FMN_STNID_GMACTX1 99
#define FMN_STNID_GMACTX2 100
#define FMN_STNID_GMACTX3 101
#define FMN_STNID_GMACJFR_1 102
#define FMN_STNID_GMACRFR_1 103
#define FMN_STNID_DMA 104
#define FMN_STNID_DMA_0 104
#define FMN_STNID_DMA_1 105
#define FMN_STNID_DMA_2 106
#define FMN_STNID_DMA_3 107
#define FMN_STNID_XGS0FR 112
#define FMN_STNID_XMAC0JFR 112
#define FMN_STNID_XMAC0RFR 113
#define FMN_STNID_XGS1FR 114
#define FMN_STNID_XMAC1JFR 114
#define FMN_STNID_XMAC1RFR 115
#define FMN_STNID_SEC 120
#define FMN_STNID_SEC0 120
#define FMN_STNID_SEC1 121
#define FMN_STNID_SEC2 122
#define FMN_STNID_SEC3 123
#define FMN_STNID_PK0 124
#define FMN_STNID_SEC_RSA 124
#define FMN_STNID_SEC_RSVD0 125
#define FMN_STNID_SEC_RSVD1 126
#define FMN_STNID_SEC_RSVD2 127
#define FMN_STNID_GMAC1 80
#define FMN_STNID_GMAC1_FR_0 81
#define FMN_STNID_GMAC1_TX0 82
#define FMN_STNID_GMAC1_TX1 83
#define FMN_STNID_GMAC1_TX2 84
#define FMN_STNID_GMAC1_TX3 85
#define FMN_STNID_GMAC1_FR_1 87
#define FMN_STNID_GMAC0 96
#define FMN_STNID_GMAC0_FR_0 97
#define FMN_STNID_GMAC0_TX0 98
#define FMN_STNID_GMAC0_TX1 99
#define FMN_STNID_GMAC0_TX2 100
#define FMN_STNID_GMAC0_TX3 101
#define FMN_STNID_GMAC0_FR_1 103
#define FMN_STNID_CMP_0 108
#define FMN_STNID_CMP_1 109
#define FMN_STNID_CMP_2 110
#define FMN_STNID_CMP_3 111
#define FMN_STNID_PCIE_0 116
#define FMN_STNID_PCIE_1 117
#define FMN_STNID_PCIE_2 118
#define FMN_STNID_PCIE_3 119
#define FMN_STNID_XLS_PK0 121
#define nlm_read_c2_cc0(s) __read_32bit_c2_register($16, s)
#define nlm_read_c2_cc1(s) __read_32bit_c2_register($17, s)
#define nlm_read_c2_cc2(s) __read_32bit_c2_register($18, s)
#define nlm_read_c2_cc3(s) __read_32bit_c2_register($19, s)
#define nlm_read_c2_cc4(s) __read_32bit_c2_register($20, s)
#define nlm_read_c2_cc5(s) __read_32bit_c2_register($21, s)
#define nlm_read_c2_cc6(s) __read_32bit_c2_register($22, s)
#define nlm_read_c2_cc7(s) __read_32bit_c2_register($23, s)
#define nlm_read_c2_cc8(s) __read_32bit_c2_register($24, s)
#define nlm_read_c2_cc9(s) __read_32bit_c2_register($25, s)
#define nlm_read_c2_cc10(s) __read_32bit_c2_register($26, s)
#define nlm_read_c2_cc11(s) __read_32bit_c2_register($27, s)
#define nlm_read_c2_cc12(s) __read_32bit_c2_register($28, s)
#define nlm_read_c2_cc13(s) __read_32bit_c2_register($29, s)
#define nlm_read_c2_cc14(s) __read_32bit_c2_register($30, s)
#define nlm_read_c2_cc15(s) __read_32bit_c2_register($31, s)
#define nlm_write_c2_cc0(s, v) __write_32bit_c2_register($16, s, v)
#define nlm_write_c2_cc1(s, v) __write_32bit_c2_register($17, s, v)
#define nlm_write_c2_cc2(s, v) __write_32bit_c2_register($18, s, v)
#define nlm_write_c2_cc3(s, v) __write_32bit_c2_register($19, s, v)
#define nlm_write_c2_cc4(s, v) __write_32bit_c2_register($20, s, v)
#define nlm_write_c2_cc5(s, v) __write_32bit_c2_register($21, s, v)
#define nlm_write_c2_cc6(s, v) __write_32bit_c2_register($22, s, v)
#define nlm_write_c2_cc7(s, v) __write_32bit_c2_register($23, s, v)
#define nlm_write_c2_cc8(s, v) __write_32bit_c2_register($24, s, v)
#define nlm_write_c2_cc9(s, v) __write_32bit_c2_register($25, s, v)
#define nlm_write_c2_cc10(s, v) __write_32bit_c2_register($26, s, v)
#define nlm_write_c2_cc11(s, v) __write_32bit_c2_register($27, s, v)
#define nlm_write_c2_cc12(s, v) __write_32bit_c2_register($28, s, v)
#define nlm_write_c2_cc13(s, v) __write_32bit_c2_register($29, s, v)
#define nlm_write_c2_cc14(s, v) __write_32bit_c2_register($30, s, v)
#define nlm_write_c2_cc15(s, v) __write_32bit_c2_register($31, s, v)
#define nlm_read_c2_status(sel) __read_32bit_c2_register($2, 0)
#define nlm_read_c2_config() __read_32bit_c2_register($3, 0)
#define nlm_write_c2_config(v) __write_32bit_c2_register($3, 0, v)
#define nlm_read_c2_bucksize(b) __read_32bit_c2_register($4, b)
#define nlm_write_c2_bucksize(b, v) __write_32bit_c2_register($4, b, v)
#define nlm_read_c2_rx_msg0() __read_64bit_c2_register($1, 0)
#define nlm_read_c2_rx_msg1() __read_64bit_c2_register($1, 1)
#define nlm_read_c2_rx_msg2() __read_64bit_c2_register($1, 2)
#define nlm_read_c2_rx_msg3() __read_64bit_c2_register($1, 3)
#define nlm_write_c2_tx_msg0(v) __write_64bit_c2_register($0, 0, v)
#define nlm_write_c2_tx_msg1(v) __write_64bit_c2_register($0, 1, v)
#define nlm_write_c2_tx_msg2(v) __write_64bit_c2_register($0, 2, v)
#define nlm_write_c2_tx_msg3(v) __write_64bit_c2_register($0, 3, v)
#define FMN_STN_RX_QSIZE 256
#define FMN_NSTATIONS 128
#define FMN_CORE_NBUCKETS 8
static inline void nlm_msgsnd(unsigned int stid)
{
__asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
"move $1, %0\n"
"c2 0x10001\n" /* msgsnd $1 */
".set pop\n"
: : "r" (stid) : "$1"
);
}
static inline void nlm_msgld(unsigned int pri)
{
__asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
"move $1, %0\n"
"c2 0x10002\n" /* msgld $1 */
".set pop\n"
: : "r" (pri) : "$1"
);
}
static inline void nlm_msgwait(unsigned int mask)
{
__asm__ volatile (
".set push\n"
".set noreorder\n"
".set noat\n"
"move $8, %0\n"
"c2 0x10003\n" /* msgwait $1 */
".set pop\n"
: : "r" (mask) : "$1"
);
}
/*
* Disable interrupts and enable COP2 access
*/
static inline uint32_t nlm_cop2_enable(void)
{
uint32_t sr = read_c0_status();
write_c0_status((sr & ~ST0_IE) | ST0_CU2);
return sr;
}
static inline void nlm_cop2_restore(uint32_t sr)
{
write_c0_status(sr);
}
static inline void nlm_fmn_setup_intr(int irq, unsigned int tmask)
{
uint32_t config;
config = (1 << 24) /* interrupt water mark - 1 msg */
| (irq << 16) /* irq */
| (tmask << 8) /* thread mask */
| 0x2; /* enable watermark intr, disable empty intr */
nlm_write_c2_config(config);
}
struct nlm_fmn_msg {
uint64_t msg0;
uint64_t msg1;
uint64_t msg2;
uint64_t msg3;
};
static inline int nlm_fmn_send(unsigned int size, unsigned int code,
unsigned int stid, struct nlm_fmn_msg *msg)
{
unsigned int dest;
uint32_t status;
int i;
/*
* Make sure that all the writes pending at the cpu are flushed.
* Any writes pending on the CPU will not be seen by devices. L1/L2
* caches are coherent with IO, so no cache flush needed.
*/
__asm __volatile("sync");
/* Load TX message buffers */
nlm_write_c2_tx_msg0(msg->msg0);
nlm_write_c2_tx_msg1(msg->msg1);
nlm_write_c2_tx_msg2(msg->msg2);
nlm_write_c2_tx_msg3(msg->msg3);
dest = ((size - 1) << 16) | (code << 8) | stid;
/*
* Retry a few times on credit failure; this should be a
* transient condition unless there is a configuration
* failure or the receiver is stuck.
*/
for (i = 0; i < 8; i++) {
nlm_msgsnd(dest);
status = nlm_read_c2_status(0);
if (status & 0x2)
pr_info("Send pending fail!\n");
if ((status & 0x4) == 0)
return 0;
}
/* If there is a credit failure, return error */
return status & 0x06;
}
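/*
 * Usage sketch (editorial, illustrative names): sending a two-word
 * message to station "dst_stid" with software code "code", using the
 * COP2 enable/restore guard shown earlier.
 */
static inline int example_fmn_send_two(unsigned int dst_stid,
                unsigned int code, uint64_t w0, uint64_t w1)
{
        struct nlm_fmn_msg m = { .msg0 = w0, .msg1 = w1 };
        uint32_t old_sr;
        int ret;

        old_sr = nlm_cop2_enable();
        ret = nlm_fmn_send(2, code, dst_stid, &m);      /* size = 2 words */
        nlm_cop2_restore(old_sr);
        return ret;     /* 0 on success, credit/pending bits otherwise */
}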
static inline int nlm_fmn_receive(int bucket, int *size, int *code, int *stid,
struct nlm_fmn_msg *msg)
{
uint32_t status, tmp;
nlm_msgld(bucket);
/* wait for load pending to clear */
do {
status = nlm_read_c2_status(1);
} while ((status & 0x08) != 0);
/* receive error bits */
tmp = status & 0x30;
if (tmp != 0)
return tmp;
*size = ((status & 0xc0) >> 6) + 1;
*code = (status & 0xff00) >> 8;
*stid = (status & 0x7f0000) >> 16;
msg->msg0 = nlm_read_c2_rx_msg0();
msg->msg1 = nlm_read_c2_rx_msg1();
msg->msg2 = nlm_read_c2_rx_msg2();
msg->msg3 = nlm_read_c2_rx_msg3();
return 0;
}
struct xlr_fmn_info {
int num_buckets;
int start_stn_id;
int end_stn_id;
int credit_config[128];
};
struct xlr_board_fmn_config {
int bucket_size[128]; /* size of buckets for all stations */
struct xlr_fmn_info cpu[8];
struct xlr_fmn_info gmac[2];
struct xlr_fmn_info dma;
struct xlr_fmn_info cmp;
struct xlr_fmn_info sae;
struct xlr_fmn_info xgmac[2];
};
extern int nlm_register_fmn_handler(int start, int end,
void (*fn)(int, int, int, int, struct nlm_fmn_msg *, void *),
void *arg);
extern void xlr_percpu_fmn_init(void);
extern void nlm_setup_fmn_irq(void);
extern void xlr_board_info_setup(void);
extern struct xlr_board_fmn_config xlr_board_fmn_config;
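/*
 * Registration sketch (editorial): a driver claiming a station range
 * supplies a handler matching the prototype above; the station range,
 * parameter names and handler body here are placeholders, with the
 * parameter meanings inferred from the receive path above.
 */
static void example_fmn_handler(int bucket, int size, int code, int stid,
                struct nlm_fmn_msg *msg, void *arg)
{
        /* consume msg->msg0 .. msg->msg3 as indicated by size and code */
}

static inline void example_fmn_register(void)
{
        nlm_register_fmn_handler(0, 7, example_fmn_handler, NULL);
}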
#endif

View File

@@ -258,7 +258,5 @@ nlm_pic_init_irt(uint64_t base, int irt, int irq, int hwt)
nlm_write_reg(base, PIC_IRT_1(irt),
(1 << 30) | (1 << 6) | irq);
}
extern uint64_t nlm_pic_base;
#endif
#endif /* _ASM_NLM_XLR_PIC_H */

View File

@@ -51,10 +51,8 @@ static inline unsigned int nlm_chip_is_xls_b(void)
return ((prid & 0xf000) == 0x4000);
}
/*
* XLR chip types
*/
/* The XLS product line has chip versions 0x[48c]? */
/* XLR chip types */
/* The XLS product line has chip versions 0x[48c]? */
static inline unsigned int nlm_chip_is_xls(void)
{
uint32_t prid = read_c0_prid();

View File

@@ -370,4 +370,6 @@ void cvmx_bootmem_lock(void);
*/
void cvmx_bootmem_unlock(void);
extern struct cvmx_bootmem_desc *cvmx_bootmem_get_desc(void);
#endif /* __CVMX_BOOTMEM_H__ */

File diff suppressed because it is too large

View File

@@ -218,6 +218,12 @@
#define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
#define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
/* These are used to cover entire families of OCTEON processors */
#define OCTEON_FAM_1 (OCTEON_CN3XXX)
#define OCTEON_FAM_PLUS (OCTEON_CN5XXX)
#define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
#define OCTEON_FAM_2 (OCTEON_CN6XXX)
/* The revision byte (low byte) has two different encodings.
* CN3XXX:
*

View File

@@ -209,13 +209,6 @@ union octeon_cvmemctl {
} s;
};
struct octeon_cf_data {
unsigned long base_region_bias;
unsigned int base_region; /* The chip select region used by CF */
int is16bit; /* 0 - 8bit, !0 - 16bit */
int dma_engine; /* -1 for no DMA */
};
extern void octeon_write_lcd(const char *s);
extern void octeon_check_cpu_bist(void);
extern int octeon_get_boot_debug_flag(void);

View File

@@ -31,21 +31,19 @@
#define PAGE_SHIFT 16
#endif
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#define PAGE_MASK (~(PAGE_SIZE - 1))
#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
#define HPAGE_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3)
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
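/*
 * Worked example (editorial): with HPAGE_SHIFT = 2 * PAGE_SHIFT - 3, a
 * 4 KB base page (PAGE_SHIFT = 12) gives 2 MB huge pages, a 16 KB base
 * page gives 32 MB, and a 64 KB base page gives 512 MB.
 */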
#else /* !CONFIG_HUGETLB_PAGE */
#else /* !CONFIG_MIPS_HUGE_TLB_SUPPORT */
#define HPAGE_SHIFT ({BUILD_BUG(); 0; })
#define HPAGE_SIZE ({BUILD_BUG(); 0; })
#define HPAGE_MASK ({BUILD_BUG(); 0; })
#define HUGETLB_PAGE_ORDER ({BUILD_BUG(); 0; })
#endif /* CONFIG_HUGETLB_PAGE */
#ifndef __ASSEMBLY__
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
#include <linux/pfn.h>
#include <asm/io.h>
@@ -139,8 +137,6 @@ typedef struct { unsigned long pgprot; } pgprot_t;
*/
#define ptep_buddy(x) ((pte_t *)((unsigned long)(x) ^ sizeof(pte_t)))
#endif /* !__ASSEMBLY__ */
/*
* __pa()/__va() should be used only during mem init.
*/
@@ -202,7 +198,10 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#endif
#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr)))
#define virt_addr_valid(kaddr) pfn_valid(PFN_DOWN(virt_to_phys(kaddr)))
extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
__virt_addr_valid((const volatile void *) (kaddr))
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

View File

@@ -145,7 +145,7 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
extern char * (*pcibios_plat_setup)(char *str);
/* this function parses memory ranges from a device node */
extern void __devinit pci_load_of_ranges(struct pci_controller *hose,
struct device_node *node);
extern void pci_load_of_ranges(struct pci_controller *hose,
struct device_node *node);
#endif /* _ASM_PCI_H */

View File

@@ -175,7 +175,7 @@ static inline int pmd_none(pmd_t pmd)
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* pmd_huge(pmd) but inline */
if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
return 0;

View File

@@ -34,38 +34,72 @@
*/
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
#define _PAGE_PRESENT (1<<6) /* implemented in software */
#define _PAGE_READ (1<<7) /* implemented in software */
#define _PAGE_WRITE (1<<8) /* implemented in software */
#define _PAGE_ACCESSED (1<<9) /* implemented in software */
#define _PAGE_MODIFIED (1<<10) /* implemented in software */
#define _PAGE_FILE (1<<10) /* set:pagecache unset:swap */
/*
* The following bits are directly used by the TLB hardware
*/
#define _PAGE_R4KBUG (1 << 0) /* workaround for r4k bug */
#define _PAGE_GLOBAL (1 << 0)
#define _PAGE_VALID_SHIFT 1
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_SILENT_READ (1 << 1) /* synonym */
#define _PAGE_DIRTY_SHIFT 2
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) /* The MIPS dirty bit */
#define _PAGE_SILENT_WRITE (1 << 2)
#define _CACHE_SHIFT 3
#define _CACHE_MASK (7 << 3)
#define _PAGE_R4KBUG (1<<0) /* workaround for r4k bug */
#define _PAGE_GLOBAL (1<<0)
#define _PAGE_VALID (1<<1)
#define _PAGE_SILENT_READ (1<<1) /* synonym */
#define _PAGE_DIRTY (1<<2) /* The MIPS dirty bit */
#define _PAGE_SILENT_WRITE (1<<2)
#define _CACHE_SHIFT 3
#define _CACHE_MASK (7<<3)
/*
* The following bits are implemented in software
*
* _PAGE_FILE semantics: set:pagecache unset:swap
*/
#define _PAGE_PRESENT_SHIFT 6
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT 7
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT 8
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT 9
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT 10
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
#define _PAGE_FILE (1 << 10)
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define _PAGE_PRESENT (1<<0) /* implemented in software */
#define _PAGE_READ (1<<1) /* implemented in software */
#define _PAGE_WRITE (1<<2) /* implemented in software */
#define _PAGE_ACCESSED (1<<3) /* implemented in software */
#define _PAGE_MODIFIED (1<<4) /* implemented in software */
#define _PAGE_FILE (1<<4) /* set:pagecache unset:swap */
/*
* The following are implemented by software
*
* _PAGE_FILE semantics: set:pagecache unset:swap
*/
#define _PAGE_PRESENT_SHIFT 0
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT 1
#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT 2
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT 3
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT 4
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
#define _PAGE_FILE_SHIFT 4
#define _PAGE_FILE (1 << _PAGE_FILE_SHIFT)
#define _PAGE_GLOBAL (1<<8)
#define _PAGE_VALID (1<<9)
#define _PAGE_SILENT_READ (1<<9) /* synonym */
#define _PAGE_DIRTY (1<<10) /* The MIPS dirty bit */
#define _PAGE_SILENT_WRITE (1<<10)
#define _CACHE_UNCACHED (1<<11)
#define _CACHE_MASK (1<<11)
/*
* And these are the hardware TLB bits
*/
#define _PAGE_GLOBAL_SHIFT 8
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT 9
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
#define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */
#define _PAGE_DIRTY_SHIFT 10
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
#define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_UNCACHED_SHIFT 11
#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
#define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT)
#else /* 'Normal' r4K case */
/*
@@ -76,25 +110,25 @@
* which is more than we need right now.
*/
/* implemented in software */
/*
* The following bits are implemented in software
*
* _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
* _PAGE_FILE semantics: set:pagecache unset:swap
*/
#define _PAGE_PRESENT_SHIFT (0)
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
/* implemented in software, should be unused if cpu_has_rixi. */
#define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
/* implemented in software */
#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
/* implemented in software */
#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
/* implemented in software */
#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
/* set:pagecache unset:swap */
#define _PAGE_FILE (_PAGE_MODIFIED)
#ifdef CONFIG_HUGETLB_PAGE
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
@@ -103,8 +137,17 @@
#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
#endif
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
/* huge tlb page */
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
#else
#define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
#endif
/* Page cannot be executed */
#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_HUGE_SHIFT + 1 : _PAGE_HUGE_SHIFT)
#define _PAGE_NO_EXEC_SHIFT (cpu_has_rixi ? _PAGE_SPLITTING_SHIFT + 1 : _PAGE_SPLITTING_SHIFT)
#define _PAGE_NO_EXEC ({BUG_ON(!cpu_has_rixi); 1 << _PAGE_NO_EXEC_SHIFT; })
/* Page cannot be read */
@@ -192,20 +235,6 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#elif defined(CONFIG_CPU_RM9000)
#define _CACHE_WT (0<<_CACHE_SHIFT)
#define _CACHE_WTWA (1<<_CACHE_SHIFT)
#define _CACHE_UC_B (2<<_CACHE_SHIFT)
#define _CACHE_WB (3<<_CACHE_SHIFT)
#define _CACHE_CWBEA (4<<_CACHE_SHIFT)
#define _CACHE_CWB (5<<_CACHE_SHIFT)
#define _CACHE_UCNB (6<<_CACHE_SHIFT)
#define _CACHE_FPC (7<<_CACHE_SHIFT)
#define _CACHE_UNCACHED _CACHE_UC_B
#define _CACHE_CACHABLE_NONCOHERENT _CACHE_WB
#else
#define _CACHE_CACHABLE_NO_WA (0<<_CACHE_SHIFT) /* R4600 only */

View File

@@ -8,6 +8,7 @@
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
@@ -76,16 +77,7 @@ extern unsigned long zero_page_mask;
#define ZERO_PAGE(vaddr) \
(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
extern unsigned long zero_pfn;
unsigned long offset_from_zero_pfn = pfn - zero_pfn;
return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}
#define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr))
#define __HAVE_COLOR_ZERO_PAGE
extern void paging_init(void);
@@ -94,7 +86,12 @@ extern void paging_init(void);
* and a page entry and page directory to the page they refer to.
*/
#define pmd_phys(pmd) virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#define __pmd_page(pmd) (pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd) __pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#define pmd_page_vaddr(pmd) pmd_val(pmd)
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
@@ -107,7 +104,6 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
ptep->pte_high = pte.pte_high;
smp_wmb();
ptep->pte_low = pte.pte_low;
//printk("pte_high %x pte_low %x\n", ptep->pte_high, ptep->pte_low);
if (pte.pte_low & _PAGE_GLOBAL) {
pte_t *buddy = ptep_buddy(ptep);
@@ -375,6 +371,14 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
__update_cache(vma, address, pte);
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pte_t pte = *(pte_t *)pmdp;
__update_tlb(vma, address, pte);
}
#define kern_addr_valid(addr) (1)
#ifdef CONFIG_64BIT_PHYS_ADDR
@@ -394,6 +398,157 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
remap_pfn_range(vma, vaddr, pfn, size, prot)
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int has_transparent_hugepage(void);
static inline int pmd_trans_huge(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_HUGE);
}
static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_HUGE;
return pmd;
}
static inline int pmd_trans_splitting(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_SPLITTING);
}
static inline pmd_t pmd_mksplitting(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_SPLITTING;
return pmd;
}
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
/* Extern to avoid header file madness */
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmdp);
#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_WRITE);
}
static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
return pmd;
}
static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_WRITE;
if (pmd_val(pmd) & _PAGE_MODIFIED)
pmd_val(pmd) |= _PAGE_SILENT_WRITE;
return pmd;
}
static inline int pmd_dirty(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}
static inline pmd_t pmd_mkclean(pmd_t pmd)
{
pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
return pmd;
}
static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_MODIFIED;
if (pmd_val(pmd) & _PAGE_WRITE)
pmd_val(pmd) |= _PAGE_SILENT_WRITE;
return pmd;
}
static inline int pmd_young(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}
static inline pmd_t pmd_mkold(pmd_t pmd)
{
pmd_val(pmd) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
return pmd;
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_ACCESSED;
if (cpu_has_rixi) {
if (!(pmd_val(pmd) & _PAGE_NO_READ))
pmd_val(pmd) |= _PAGE_SILENT_READ;
} else {
if (pmd_val(pmd) & _PAGE_READ)
pmd_val(pmd) |= _PAGE_SILENT_READ;
}
return pmd;
}
/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);
static inline unsigned long pmd_pfn(pmd_t pmd)
{
return pmd_val(pmd) >> _PFN_SHIFT;
}
static inline struct page *pmd_page(pmd_t pmd)
{
if (pmd_trans_huge(pmd))
return pfn_to_page(pmd_pfn(pmd));
return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
return pmd;
}
static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);
return pmd;
}
/*
* The generic version pmdp_get_and_clear uses a version of pmd_clear() with a
* different prototype.
*/
#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long address, pmd_t *pmdp)
{
pmd_t old = *pmdp;
pmd_clear(pmdp);
return old;
}
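/*
 * Composition sketch (editorial): the helpers above combine the same
 * way the pte helpers do; building a huge, young, writable, dirty pmd
 * for page "p" with protection "prot" might look like
 *
 *      pmd_t pmd = pmd_mkhuge(mk_pmd(p, prot));
 *      pmd = pmd_mkdirty(pmd_mkwrite(pmd_mkyoung(pmd)));
 *      set_pmd_at(mm, addr, pmdp, pmd);
 *
 * where mm, addr and pmdp stand for the caller's context.
 */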
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#include <asm-generic/pgtable.h>
/*

View File

@@ -17,7 +17,6 @@
#define MIPS4K_ICACHE_REFILL_WAR 0
#define MIPS_CACHE_SYNC_WAR 0
#define TX49XX_ICACHE_INDEX_INV_WAR 0
#define RM9000_CDEX_SMP_WAR 0
#define ICACHE_REFILLS_WORKAROUND_WAR 0
#define R10000_LLSC_WAR 0
#if defined(CONFIG_PMC_MSP7120_EVAL) || defined(CONFIG_PMC_MSP7120_GW) || \

View File

@@ -226,8 +226,6 @@ struct thread_struct {
unsigned long cp0_badvaddr; /* Last user fault */
unsigned long cp0_baduaddr; /* Last kernel fault accessing USEG */
unsigned long error_code;
unsigned long irix_trampoline; /* Wheee... */
unsigned long irix_oldctx;
#ifdef CONFIG_CPU_CAVIUM_OCTEON
struct octeon_cop2_state cp2 __attribute__ ((__aligned__(128)));
struct octeon_cvmseg_state cvmseg __attribute__ ((__aligned__(128)));
@@ -297,8 +295,6 @@ struct thread_struct {
.cp0_badvaddr = 0, \
.cp0_baduaddr = 0, \
.error_code = 0, \
.irix_trampoline = 0, \
.irix_oldctx = 0, \
/* \
* Cavium Octeon specifics (null if not Octeon) \
*/ \
@@ -310,8 +306,6 @@ struct task_struct;
/* Free all resources held by a thread. */
#define release_thread(thread) do { } while(0)
extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern unsigned long thread_saved_pc(struct task_struct *tsk);
/*

View File

@@ -49,6 +49,7 @@ static inline long regs_return_value(struct pt_regs *regs)
#define instruction_pointer(regs) ((regs)->cp0_epc)
#define profile_pc(regs) instruction_pointer(regs)
#define user_stack_pointer(r) ((r)->regs[29])
extern asmlinkage void syscall_trace_enter(struct pt_regs *regs);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
@@ -61,4 +62,10 @@ static inline void die_if_kernel(const char *str, struct pt_regs *regs)
die(str, regs);
}
#define current_pt_regs() \
({ \
unsigned long sp = (unsigned long)__builtin_frame_address(0); \
(struct pt_regs *)((sp | (THREAD_SIZE - 1)) + 1 - 32) - 1; \
})
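/*
 * Editorial note: the expression above rounds the current stack pointer
 * up to the top of the THREAD_SIZE-aligned kernel stack, skips the 32
 * bytes reserved at the stack top, and steps back by one struct pt_regs
 * to reach the saved register frame.
 */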
#endif /* _ASM_PTRACE_H */

View File

@@ -366,7 +366,7 @@ struct linux_smonblock {
* Macros for calling a 32-bit ARC implementation from 64-bit code
*/
#if defined(CONFIG_64BIT) && defined(CONFIG_ARC32)
#if defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32)
#define __arc_clobbers \
"$2", "$3" /* ... */, "$8", "$9", "$10", "$11", \
@@ -475,10 +475,10 @@ struct linux_smonblock {
__res; \
})
#endif /* defined(CONFIG_64BIT) && defined(CONFIG_ARC32) */
#endif /* defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC32) */
#if (defined(CONFIG_32BIT) && defined(CONFIG_ARC32)) || \
(defined(CONFIG_64BIT) && defined(CONFIG_ARC64))
#if (defined(CONFIG_32BIT) && defined(CONFIG_FW_ARC32)) || \
(defined(CONFIG_64BIT) && defined(CONFIG_FW_ARC64))
#define ARC_CALL0(dest) \
({ long __res; \

View File

@@ -21,6 +21,4 @@
#include <asm/sigcontext.h>
#include <asm/siginfo.h>
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* _ASM_SIGNAL_H */

View File

@@ -40,6 +40,8 @@ extern int __cpu_logical_map[NR_CPUS];
#define SMP_CALL_FUNCTION 0x2
/* Octeon - Tell another core to flush its icache */
#define SMP_ICACHE_FLUSH 0x4
/* Used by kexec crashdump to save all cpu's state */
#define SMP_DUMP 0x8
extern volatile cpumask_t cpu_callin_map;
@@ -91,4 +93,8 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
#if defined(CONFIG_KEXEC)
extern void (*dump_ipi_function_ptr)(void *);
void dump_send_ipi(void (*dump_ipi_callback)(void *));
#endif
#endif /* __ASM_SMP_H */

View File

@@ -1,19 +0,0 @@
#ifndef _ASM_SMVP_H
#define _ASM_SMVP_H
/*
* Definitions for SMVP multitasking on MIPS MT cores
*/
struct task_struct;
extern void smvp_smp_setup(void);
extern void smvp_smp_finish(void);
extern void smvp_boot_secondary(int cpu, struct task_struct *t);
extern void smvp_init_secondary(void);
extern void smvp_smp_finish(void);
extern void smvp_cpus_done(void);
extern void smvp_prepare_cpus(unsigned int max_cpus);
/* This is platform specific */
extern void smvp_send_ipi(int cpu, unsigned int action);
#endif /* _ASM_SMVP_H */

View File

@@ -6,7 +6,7 @@
* SECTION_SIZE_BITS 2^N: how big each section will be
* MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
*/
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT) && defined(CONFIG_PAGE_SIZE_64KB)
# define SECTION_SIZE_BITS 29
#else
# define SECTION_SIZE_BITS 28

View File

@@ -29,10 +29,11 @@ struct thread_info {
__u32 cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
mm_segment_t addr_limit; /* thread address space:
0-0xBFFFFFFF for user-thread
0-0xFFFFFFFF for kernel-thread
*/
mm_segment_t addr_limit; /*
* thread address space limit:
* 0x7fffffff for user-thread
* 0xffffffff for kernel-thread
*/
struct restart_block restart_block;
struct pt_regs *regs;
};
@@ -112,12 +113,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define TIF_LOAD_WATCH 25 /* If set, load watch registers */
#define TIF_SYSCALL_TRACE 31 /* syscall trace active */
#ifdef CONFIG_MIPS32_O32
#define TIF_32BIT TIF_32BIT_REGS
#elif defined(CONFIG_MIPS32_N32)
#define TIF_32BIT _TIF_32BIT_ADDR
#endif /* CONFIG_MIPS32_O32 */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)

View File

@@ -50,10 +50,8 @@ extern int (*perf_irq)(void);
/*
* Initialize the calling CPU's compare interrupt as clockevent device
*/
#ifdef CONFIG_CEVT_R4K_LIB
extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void);
#endif
static inline int mips_clockevent_init(void)
{
@@ -71,7 +69,7 @@ static inline int mips_clockevent_init(void)
/*
* Initialize the count register as a clocksource
*/
#ifdef CONFIG_CSRC_R4K_LIB
#ifdef CONFIG_CSRC_R4K
extern int init_r4k_clocksource(void);
#endif

View File

@@ -1,231 +0,0 @@
/*
* Copyright 2003 PMC-Sierra
* Author: Manish Lachwani (lachwani@pmc-sierra.com)
*
* Board specific definitions for the PMC-Sierra Yosemite
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#ifndef __TITAN_DEP_H__
#define __TITAN_DEP_H__
#include <asm/addrspace.h> /* for KSEG1ADDR() */
#include <asm/byteorder.h> /* for cpu_to_le32() */
#define TITAN_READ(ofs) \
(*(volatile u32 *)(ocd_base+(ofs)))
#define TITAN_READ_16(ofs) \
(*(volatile u16 *)(ocd_base+(ofs)))
#define TITAN_READ_8(ofs) \
(*(volatile u8 *)(ocd_base+(ofs)))
#define TITAN_WRITE(ofs, data) \
do { *(volatile u32 *)(ocd_base+(ofs)) = (data); } while (0)
#define TITAN_WRITE_16(ofs, data) \
do { *(volatile u16 *)(ocd_base+(ofs)) = (data); } while (0)
#define TITAN_WRITE_8(ofs, data) \
do { *(volatile u8 *)(ocd_base+(ofs)) = (data); } while (0)
/*
* PCI specific defines
*/
#define TITAN_PCI_0_CONFIG_ADDRESS 0x780
#define TITAN_PCI_0_CONFIG_DATA 0x784
/*
* HT specific defines
*/
#define RM9000x2_HTLINK_REG 0xbb000644
#define RM9000x2_BASE_ADDR 0xbb000000
#define OCD_BASE 0xfb000000UL
#define OCD_SIZE 0x3000UL
extern unsigned long ocd_base;
/*
* OCD Registers
*/
#define RM9000x2_OCD_LKB5 0x0128 /* Ethernet */
#define RM9000x2_OCD_LKM5 0x012c
#define RM9000x2_OCD_LKB7 0x0138 /* HT Region 0 */
#define RM9000x2_OCD_LKM7 0x013c
#define RM9000x2_OCD_LKB8 0x0140 /* HT Region 1 */
#define RM9000x2_OCD_LKM8 0x0144
#define RM9000x2_OCD_LKB9 0x0148 /* Local Bus */
#define RM9000x2_OCD_LKM9 0x014c
#define RM9000x2_OCD_LKB10 0x0150
#define RM9000x2_OCD_LKM10 0x0154
#define RM9000x2_OCD_LKB11 0x0158
#define RM9000x2_OCD_LKM11 0x015c
#define RM9000x2_OCD_LKB12 0x0160
#define RM9000x2_OCD_LKM12 0x0164
#define RM9000x2_OCD_LKB13 0x0168 /* Scratch RAM */
#define RM9000x2_OCD_LKM13 0x016c
#define RM9000x2_OCD_LPD0 0x0200 /* Local Bus */
#define RM9000x2_OCD_LPD1 0x0210
#define RM9000x2_OCD_LPD2 0x0220
#define RM9000x2_OCD_LPD3 0x0230
#define RM9000x2_OCD_HTDVID 0x0600 /* HT Device Header */
#define RM9000x2_OCD_HTSC 0x0604
#define RM9000x2_OCD_HTCCR 0x0608
#define RM9000x2_OCD_HTBHL 0x060c
#define RM9000x2_OCD_HTBAR0 0x0610
#define RM9000x2_OCD_HTBAR1 0x0614
#define RM9000x2_OCD_HTBAR2 0x0618
#define RM9000x2_OCD_HTBAR3 0x061c
#define RM9000x2_OCD_HTBAR4 0x0620
#define RM9000x2_OCD_HTBAR5 0x0624
#define RM9000x2_OCD_HTCBCPT 0x0628
#define RM9000x2_OCD_HTSDVID 0x062c
#define RM9000x2_OCD_HTXRA 0x0630
#define RM9000x2_OCD_HTCAP1 0x0634
#define RM9000x2_OCD_HTIL 0x063c
#define RM9000x2_OCD_HTLCC 0x0640 /* HT Capability Block */
#define RM9000x2_OCD_HTLINK 0x0644
#define RM9000x2_OCD_HTFQREV 0x0648
#define RM9000x2_OCD_HTERCTL 0x0668 /* HT Controller */
#define RM9000x2_OCD_HTRXDB 0x066c
#define RM9000x2_OCD_HTIMPED 0x0670
#define RM9000x2_OCD_HTSWIMP 0x0674
#define RM9000x2_OCD_HTCAL 0x0678
#define RM9000x2_OCD_HTBAA30 0x0680
#define RM9000x2_OCD_HTBAA54 0x0684
#define RM9000x2_OCD_HTMASK0 0x0688
#define RM9000x2_OCD_HTMASK1 0x068c
#define RM9000x2_OCD_HTMASK2 0x0690
#define RM9000x2_OCD_HTMASK3 0x0694
#define RM9000x2_OCD_HTMASK4 0x0698
#define RM9000x2_OCD_HTMASK5 0x069c
#define RM9000x2_OCD_HTIFCTL 0x06a0
#define RM9000x2_OCD_HTPLL 0x06a4
#define RM9000x2_OCD_HTSRI 0x06b0
#define RM9000x2_OCD_HTRXNUM 0x06b4
#define RM9000x2_OCD_HTTXNUM 0x06b8
#define RM9000x2_OCD_HTTXCNT 0x06c8
#define RM9000x2_OCD_HTERROR 0x06d8
#define RM9000x2_OCD_HTRCRCE 0x06dc
#define RM9000x2_OCD_HTEOI 0x06e0
#define RM9000x2_OCD_CRCR 0x06f0
#define RM9000x2_OCD_HTCFGA 0x06f8
#define RM9000x2_OCD_HTCFGD 0x06fc
#define RM9000x2_OCD_INTMSG 0x0a00
#define RM9000x2_OCD_INTPIN0 0x0a40
#define RM9000x2_OCD_INTPIN1 0x0a44
#define RM9000x2_OCD_INTPIN2 0x0a48
#define RM9000x2_OCD_INTPIN3 0x0a4c
#define RM9000x2_OCD_INTPIN4 0x0a50
#define RM9000x2_OCD_INTPIN5 0x0a54
#define RM9000x2_OCD_INTPIN6 0x0a58
#define RM9000x2_OCD_INTPIN7 0x0a5c
#define RM9000x2_OCD_SEM 0x0a60
#define RM9000x2_OCD_SEMSET 0x0a64
#define RM9000x2_OCD_SEMCLR 0x0a68
#define RM9000x2_OCD_TKT 0x0a70
#define RM9000x2_OCD_TKTINC 0x0a74
#define RM9000x2_OCD_NMICONFIG 0x0ac0 /* Interrupts */
#define RM9000x2_OCD_INTP0PRI 0x1a80
#define RM9000x2_OCD_INTP1PRI 0x1a80
#define RM9000x2_OCD_INTP0STATUS0 0x1b00
#define RM9000x2_OCD_INTP0MASK0 0x1b04
#define RM9000x2_OCD_INTP0SET0 0x1b08
#define RM9000x2_OCD_INTP0CLEAR0 0x1b0c
#define RM9000x2_OCD_INTP0STATUS1 0x1b10
#define RM9000x2_OCD_INTP0MASK1 0x1b14
#define RM9000x2_OCD_INTP0SET1 0x1b18
#define RM9000x2_OCD_INTP0CLEAR1 0x1b1c
#define RM9000x2_OCD_INTP0STATUS2 0x1b20
#define RM9000x2_OCD_INTP0MASK2 0x1b24
#define RM9000x2_OCD_INTP0SET2 0x1b28
#define RM9000x2_OCD_INTP0CLEAR2 0x1b2c
#define RM9000x2_OCD_INTP0STATUS3 0x1b30
#define RM9000x2_OCD_INTP0MASK3 0x1b34
#define RM9000x2_OCD_INTP0SET3 0x1b38
#define RM9000x2_OCD_INTP0CLEAR3 0x1b3c
#define RM9000x2_OCD_INTP0STATUS4 0x1b40
#define RM9000x2_OCD_INTP0MASK4 0x1b44
#define RM9000x2_OCD_INTP0SET4 0x1b48
#define RM9000x2_OCD_INTP0CLEAR4 0x1b4c
#define RM9000x2_OCD_INTP0STATUS5 0x1b50
#define RM9000x2_OCD_INTP0MASK5 0x1b54
#define RM9000x2_OCD_INTP0SET5 0x1b58
#define RM9000x2_OCD_INTP0CLEAR5 0x1b5c
#define RM9000x2_OCD_INTP0STATUS6 0x1b60
#define RM9000x2_OCD_INTP0MASK6 0x1b64
#define RM9000x2_OCD_INTP0SET6 0x1b68
#define RM9000x2_OCD_INTP0CLEAR6 0x1b6c
#define RM9000x2_OCD_INTP0STATUS7 0x1b70
#define RM9000x2_OCD_INTP0MASK7 0x1b74
#define RM9000x2_OCD_INTP0SET7 0x1b78
#define RM9000x2_OCD_INTP0CLEAR7 0x1b7c
#define RM9000x2_OCD_INTP1STATUS0 0x2b00
#define RM9000x2_OCD_INTP1MASK0 0x2b04
#define RM9000x2_OCD_INTP1SET0 0x2b08
#define RM9000x2_OCD_INTP1CLEAR0 0x2b0c
#define RM9000x2_OCD_INTP1STATUS1 0x2b10
#define RM9000x2_OCD_INTP1MASK1 0x2b14
#define RM9000x2_OCD_INTP1SET1 0x2b18
#define RM9000x2_OCD_INTP1CLEAR1 0x2b1c
#define RM9000x2_OCD_INTP1STATUS2 0x2b20
#define RM9000x2_OCD_INTP1MASK2 0x2b24
#define RM9000x2_OCD_INTP1SET2 0x2b28
#define RM9000x2_OCD_INTP1CLEAR2 0x2b2c
#define RM9000x2_OCD_INTP1STATUS3 0x2b30
#define RM9000x2_OCD_INTP1MASK3 0x2b34
#define RM9000x2_OCD_INTP1SET3 0x2b38
#define RM9000x2_OCD_INTP1CLEAR3 0x2b3c
#define RM9000x2_OCD_INTP1STATUS4 0x2b40
#define RM9000x2_OCD_INTP1MASK4 0x2b44
#define RM9000x2_OCD_INTP1SET4 0x2b48
#define RM9000x2_OCD_INTP1CLEAR4 0x2b4c
#define RM9000x2_OCD_INTP1STATUS5 0x2b50
#define RM9000x2_OCD_INTP1MASK5 0x2b54
#define RM9000x2_OCD_INTP1SET5 0x2b58
#define RM9000x2_OCD_INTP1CLEAR5 0x2b5c
#define RM9000x2_OCD_INTP1STATUS6 0x2b60
#define RM9000x2_OCD_INTP1MASK6 0x2b64
#define RM9000x2_OCD_INTP1SET6 0x2b68
#define RM9000x2_OCD_INTP1CLEAR6 0x2b6c
#define RM9000x2_OCD_INTP1STATUS7 0x2b70
#define RM9000x2_OCD_INTP1MASK7 0x2b74
#define RM9000x2_OCD_INTP1SET7 0x2b78
#define RM9000x2_OCD_INTP1CLEAR7 0x2b7c
#define OCD_READ(reg) (*(volatile unsigned int *)(ocd_base + (reg)))
#define OCD_WRITE(reg, val) \
do { *(volatile unsigned int *)(ocd_base + (reg)) = (val); } while (0)
/*
* Hypertransport specific macros
*/
#define RM9K_WRITE(ofs, data) *(volatile u_int32_t *)(RM9000x2_BASE_ADDR+ofs) = data
#define RM9K_WRITE_8(ofs, data) *(volatile u8 *)(RM9000x2_BASE_ADDR+ofs) = data
#define RM9K_WRITE_16(ofs, data) *(volatile u16 *)(RM9000x2_BASE_ADDR+ofs) = data
#define RM9K_READ(ofs, val) *(val) = *(volatile u_int32_t *)(RM9000x2_BASE_ADDR+ofs)
#define RM9K_READ_8(ofs, val) *(val) = *(volatile u8 *)(RM9000x2_BASE_ADDR+ofs)
#define RM9K_READ_16(ofs, val) *(val) = *(volatile u16 *)(RM9000x2_BASE_ADDR+ofs)
#endif

View File

@@ -208,14 +208,6 @@
#error Check setting of TX49XX_ICACHE_INDEX_INV_WAR for your platform
#endif
/*
* On the RM9000 there is a problem which makes the CreateDirtyExclusive
* cache operation unusable on SMP systems.
*/
#ifndef RM9000_CDEX_SMP_WAR
#error Check setting of RM9000_CDEX_SMP_WAR for your platform
#endif
/*
* The RM7000 processors and the E9000 cores have a bug (though PMC-Sierra
* opposes it being called that) where invalid instructions in the same

View File

@@ -86,6 +86,9 @@
#define TIOCGDEV _IOR('T', 0x32, unsigned int) /* Get primary device node of /dev/console */
#define TIOCSIG _IOW('T', 0x36, int) /* Generate signal on Pty slave */
#define TIOCVHANGUP 0x5437
#define TIOCGPKT _IOR('T', 0x38, int) /* Get packet mode state */
#define TIOCGPTLCK _IOR('T', 0x39, int) /* Get Pty lock state */
#define TIOCGEXCL _IOR('T', 0x40, int) /* Get exclusive mode state */
/* I hope the range from 0x5480 on is free ... */
#define TIOCSCTTY 0x5480 /* become controlling tty */

View File

@@ -87,4 +87,15 @@
/* compatibility flags */
#define MAP_FILE 0
/*
* When MAP_HUGETLB is set bits [26:31] encode the log2 of the huge page size.
* This gives us 6 bits, which is enough until someone invents 128 bit address
* spaces.
*
* Assume these are all power of twos.
* When 0 use the default page size.
*/
#define MAP_HUGE_SHIFT 26
#define MAP_HUGE_MASK 0x3f
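/*
 * Illustrative encoding (editorial, not part of this patch): asking for
 * 2 MB huge pages means passing log2(2 MB) = 21 in these bits, e.g.
 *
 *      flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
 *              (21 << MAP_HUGE_SHIFT);
 *
 * Leaving the field zero selects the default huge page size.
 */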
#endif /* _ASM_MMAN_H */

View File

@@ -86,12 +86,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SA_RESTORER 0x04000000 /* Only for o32 */
/*
* sigaltstack controls
*/
#define SS_ONSTACK 1
#define SS_DISABLE 2
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192

View File

@@ -63,6 +63,7 @@ To add: #define SO_REUSEPORT 0x0200 /* Allow local address and port reuse. */
/* Socket filtering */
#define SO_ATTACH_FILTER 26
#define SO_DETACH_FILTER 27
#define SO_GET_FILTER SO_ATTACH_FILTER
#define SO_PEERNAME 28
#define SO_TIMESTAMP 29

View File

@@ -368,16 +368,17 @@
#define __NR_process_vm_readv (__NR_Linux + 345)
#define __NR_process_vm_writev (__NR_Linux + 346)
#define __NR_kcmp (__NR_Linux + 347)
#define __NR_finit_module (__NR_Linux + 348)
/*
* Offset of the last Linux o32 flavoured syscall
*/
#define __NR_Linux_syscalls 347
#define __NR_Linux_syscalls 348
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
#define __NR_O32_Linux_syscalls 347
#define __NR_O32_Linux_syscalls 348
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -692,16 +693,17 @@
#define __NR_process_vm_readv (__NR_Linux + 304)
#define __NR_process_vm_writev (__NR_Linux + 305)
#define __NR_kcmp (__NR_Linux + 306)
#define __NR_finit_module (__NR_Linux + 307)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
#define __NR_Linux_syscalls 306
#define __NR_Linux_syscalls 307
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
#define __NR_64_Linux_syscalls 306
#define __NR_64_Linux_syscalls 307
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1021,15 +1023,16 @@
#define __NR_process_vm_readv (__NR_Linux + 309)
#define __NR_process_vm_writev (__NR_Linux + 310)
#define __NR_kcmp (__NR_Linux + 311)
#define __NR_finit_module (__NR_Linux + 312)
/*
* Offset of the last N32 flavoured syscall
*/
#define __NR_Linux_syscalls 311
#define __NR_Linux_syscalls 312
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
#define __NR_N32_Linux_syscalls 311
#define __NR_N32_Linux_syscalls 312
#endif /* _UAPI_ASM_UNISTD_H */