Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next

Pull sparc updates from David Miller:

1) Kill off support for sun4c and Cypress sun4m chips.

   And as a result we were able to also kill off that ugly btfixup thing
   that required multi-stage links of the final vmlinux image in the
   Kbuild system.  This should make the kbuild maintainers really happy.

   Thanks a lot to Sam Ravnborg for his tireless efforts to get this
   going.
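
   To make the removal concrete, here is the shape of the pattern
   that is going away -- a sketch only, not the whole asm/btfixup.h
   API.  A call site was declared patchable, collected into a
   dedicated section, and rewritten at boot once the CPU type was
   known (the declaration below appears verbatim in the iommu.c hunk
   further down):

       BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
       #define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)

       /* boot-time selection of the real implementation: */
       BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush,
                       BTFIXUPCALL_NORM);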

2) Convert sparc64 to nobootmem.  I suspect now with sparc32 being a lot
   cleaner, it should be able to fall in line and modernize in this area
   too.
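
   As a point of reference, the nobootmem scheme (visible in the
   init_64.c hunk below) hands allocations straight to memblock
   instead of first carving out a per-node bootmem bitmap:

       paddr = memblock_alloc_try_nid(sizeof(struct pglist_data),
                                      SMP_CACHE_BYTES, nid);
       if (!paddr) {
               prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
               prom_halt();
       }
       NODE_DATA(nid) = __va(paddr);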

3) Make sparc32 use generic clockevents, from Tkhai Kirill.
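
   The timer rework itself lands in files not shown in this diff; in
   rough outline -- the names and helpers below are placeholders, not
   the actual sparc32 code -- a driver converted to generic
   clockevents fills in a clock_event_device and lets the core
   timekeeping code drive it:

       static struct clock_event_device ce = {
               .name           = "timer",              /* placeholder */
               .features       = CLOCK_EVT_FEAT_PERIODIC |
                                 CLOCK_EVT_FEAT_ONESHOT,
               .set_next_event = timer_set_next_event, /* hypothetical */
               .set_mode       = timer_set_mode,       /* hypothetical */
       };

       clockevents_config_and_register(&ce, freq, min_delta, max_delta);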

[ I fixed up the BPF rules, and tried to clean up the build rules too.
  But I don't have - or want - a sparc cross-build environment, so the
  BPF rule bug and the related build cleanup was all done with just a
  bare "make -n" pseudo-test.      - Linus ]

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next: (110 commits)
  sparc32: use flushi when run-time patching in per_cpu_patch
  sparc32: fix cpuid_patch run-time patching
  sparc32: drop unused inline functions in srmmu.c
  sparc32: drop unused functions in pgtsrmmu.h
  sparc32,leon: move leon mmu functions to leon_mm.c
  sparc32,leon: remove duplicate definitions in leon.h
  sparc32,leon: remove duplicate UART register definitions
  sparc32,leon: move leon ASI definitions to asi.h
  sparc32: move trap table to a separate file
  sparc64: renamed ttable.S to ttable_64.S
  sparc32: Remove asm/sysen.h header.
  sparc32: Delete asm/smpprim.h
  sparc32: Remove unused empty_bad_page{,_table} declarations.
  sparc32: Kill boot_cpu_id4
  sparc32: Move GET_PROCESSOR*_ID() out of asm/asmmacro.h
  sparc32: Remove completely unused code from asm/cache.h
  sparc32: Add ucmpdi2.o to obj-y instead of lib-y.
  sparc32: add ucmpdi2
  sparc: introduce arch/sparc/Kbuild
  sparc: remove obsolete documentation
  ...
Committer: Linus Torvalds
Date:      2012-05-21 10:32:01 -07:00

137 changed files with 2257 additions and 10787 deletions

arch/sparc/mm/Makefile

@@ -7,8 +7,7 @@ ccflags-y := -Werror
obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
obj-y += fault_$(BITS).o
obj-y += init_$(BITS).o
obj-$(CONFIG_SPARC32) += loadmmu.o
obj-$(CONFIG_SPARC32) += extable.o btfixup.o srmmu.o iommu.o io-unit.o
obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
obj-$(CONFIG_SPARC_LEON)+= leon_mm.o
@@ -17,9 +16,3 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
# Only used by sparc32
obj-$(CONFIG_HIGHMEM) += highmem.o
ifdef CONFIG_SMP
obj-$(CONFIG_SPARC32) += nosun4c.o
else
obj-$(CONFIG_SPARC32) += sun4c.o
endif

arch/sparc/mm/btfixup.c (deleted)

@@ -1,328 +0,0 @@
/* btfixup.c: Boot time code fixup and relocator, so that
* we can get rid of most indirect calls to achieve single
* image sun4c and srmmu kernel.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/btfixup.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/cacheflush.h>
#define BTFIXUP_OPTIMIZE_NOP
#define BTFIXUP_OPTIMIZE_OTHER
extern char *srmmu_name;
static char version[] __initdata = "Boot time fixup v1.6. 4/Mar/98 Jakub Jelinek (jj@ultra.linux.cz). Patching kernel for ";
static char str_sun4c[] __initdata = "sun4c\n";
static char str_srmmu[] __initdata = "srmmu[%s]/";
static char str_iommu[] __initdata = "iommu\n";
static char str_iounit[] __initdata = "io-unit\n";
static int visited __initdata = 0;
extern unsigned int ___btfixup_start[], ___btfixup_end[], __init_begin[], __init_end[], __init_text_end[];
extern unsigned int _stext[], _end[], __start___ksymtab[], __stop___ksymtab[];
static char wrong_f[] __initdata = "Trying to set f fixup %p to invalid function %08x\n";
static char wrong_b[] __initdata = "Trying to set b fixup %p to invalid function %08x\n";
static char wrong_s[] __initdata = "Trying to set s fixup %p to invalid value %08x\n";
static char wrong_h[] __initdata = "Trying to set h fixup %p to invalid value %08x\n";
static char wrong_a[] __initdata = "Trying to set a fixup %p to invalid value %08x\n";
static char wrong[] __initdata = "Wrong address for %c fixup %p\n";
static char insn_f[] __initdata = "Fixup f %p refers to weird instructions at %p[%08x,%08x]\n";
static char insn_b[] __initdata = "Fixup b %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]\n";
static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
#ifdef BTFIXUP_OPTIMIZE_OTHER
static void __init set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
if (!fmangled)
*addr = value;
else {
unsigned int *q = (unsigned int *)q1;
if (*addr == 0x01000000) {
/* Noped */
*q = value;
} else if (addr[-1] == *q) {
/* Moved */
addr[-1] = value;
*q = value;
} else {
prom_printf(wrong_setaddr, addr-1, addr[-1], *addr, *q, value);
prom_halt();
}
}
}
#else
static inline void set_addr(unsigned int *addr, unsigned int q1, int fmangled, unsigned int value)
{
*addr = value;
}
#endif
void __init btfixup(void)
{
unsigned int *p, *q;
int type, count;
unsigned insn;
unsigned *addr;
int fmangled = 0;
void (*flush_cacheall)(void);
if (!visited) {
visited++;
printk(version);
if (ARCH_SUN4C)
printk(str_sun4c);
else {
printk(str_srmmu, srmmu_name);
if (sparc_cpu_model == sun4d)
printk(str_iounit);
else
printk(str_iommu);
}
}
for (p = ___btfixup_start; p < ___btfixup_end; ) {
count = p[2];
q = p + 3;
switch (type = *(unsigned char *)p) {
case 'f':
count = p[3];
q = p + 4;
if (((p[0] & 1) || p[1])
&& ((p[1] & 3) || (unsigned *)(p[1]) < _stext || (unsigned *)(p[1]) >= _end)) {
prom_printf(wrong_f, p, p[1]);
prom_halt();
}
break;
case 'b':
if (p[1] < (unsigned long)__init_begin || p[1] >= (unsigned long)__init_text_end || (p[1] & 3)) {
prom_printf(wrong_b, p, p[1]);
prom_halt();
}
break;
case 's':
if (p[1] + 0x1000 >= 0x2000) {
prom_printf(wrong_s, p, p[1]);
prom_halt();
}
break;
case 'h':
if (p[1] & 0x3ff) {
prom_printf(wrong_h, p, p[1]);
prom_halt();
}
break;
case 'a':
if (p[1] + 0x1000 >= 0x2000 && (p[1] & 0x3ff)) {
prom_printf(wrong_a, p, p[1]);
prom_halt();
}
break;
}
if (p[0] & 1) {
p[0] &= ~1;
while (count) {
fmangled = 0;
addr = (unsigned *)*q;
if (addr < _stext || addr >= _end) {
prom_printf(wrong, type, p);
prom_halt();
}
insn = *addr;
#ifdef BTFIXUP_OPTIMIZE_OTHER
if (type != 'f' && q[1]) {
insn = *(unsigned int *)q[1];
if (!insn || insn == 1)
insn = *addr;
else
fmangled = 1;
}
#endif
switch (type) {
case 'f': /* CALL */
if (addr >= __start___ksymtab && addr < __stop___ksymtab) {
*addr = p[1];
break;
} else if (!q[1]) {
if ((insn & 0xc1c00000) == 0x01000000) { /* SETHI */
*addr = (insn & 0xffc00000) | (p[1] >> 10); break;
} else if ((insn & 0xc1f82000) == 0x80102000) { /* OR X, %LO(i), Y */
*addr = (insn & 0xffffe000) | (p[1] & 0x3ff); break;
} else if ((insn & 0xc0000000) != 0x40000000) { /* !CALL */
bad_f:
prom_printf(insn_f, p, addr, insn, addr[1]);
prom_halt();
}
} else if (q[1] != 1)
addr[1] = q[1];
if (p[2] == BTFIXUPCALL_NORM) {
norm_f:
*addr = 0x40000000 | ((p[1] - (unsigned)addr) >> 2);
q[1] = 0;
break;
}
#ifndef BTFIXUP_OPTIMIZE_NOP
goto norm_f;
#else
if (!(addr[1] & 0x80000000)) {
if ((addr[1] & 0xc1c00000) != 0x01000000) /* !SETHI */
goto bad_f; /* CALL, Bicc, FBfcc, CBccc are weird in delay slot, aren't they? */
} else {
if ((addr[1] & 0x01800000) == 0x01800000) {
if ((addr[1] & 0x01f80000) == 0x01e80000) {
/* RESTORE */
goto norm_f; /* It is dangerous to patch that */
}
goto bad_f;
}
if ((addr[1] & 0xffffe003) == 0x9e03e000) {
/* ADD %O7, XX, %o7 */
int displac = (addr[1] << 19);
displac = (displac >> 21) + 2;
*addr = (0x10800000) + (displac & 0x3fffff);
q[1] = addr[1];
addr[1] = p[2];
break;
}
if ((addr[1] & 0x201f) == 0x200f || (addr[1] & 0x7c000) == 0x3c000)
goto norm_f; /* Someone is playing bad tricks with us: rs1 or rs2 is o7 */
if ((addr[1] & 0x3e000000) == 0x1e000000)
goto norm_f; /* rd is %o7. We'd better take care. */
}
if (p[2] == BTFIXUPCALL_NOP) {
*addr = 0x01000000;
q[1] = 1;
break;
}
#ifndef BTFIXUP_OPTIMIZE_OTHER
goto norm_f;
#else
if (addr[1] == 0x01000000) { /* NOP in the delay slot */
q[1] = addr[1];
*addr = p[2];
break;
}
if ((addr[1] & 0xc0000000) != 0xc0000000) {
/* Not a memory operation */
if ((addr[1] & 0x30000000) == 0x10000000) {
/* Ok, non-memory op with rd %oX */
if ((addr[1] & 0x3e000000) == 0x1c000000)
goto bad_f; /* Aiee. Someone is playing strange %sp tricks */
if ((addr[1] & 0x3e000000) > 0x12000000 ||
((addr[1] & 0x3e000000) == 0x12000000 &&
p[2] != BTFIXUPCALL_STO1O0 && p[2] != BTFIXUPCALL_SWAPO0O1) ||
((p[2] & 0xffffe000) == BTFIXUPCALL_RETINT(0))) {
/* Nobody uses the result. We can nop it out. */
*addr = p[2];
q[1] = addr[1];
addr[1] = 0x01000000;
break;
}
if ((addr[1] & 0xf1ffffe0) == 0x90100000) {
/* MOV %reg, %Ox */
if ((addr[1] & 0x3e000000) == 0x10000000 &&
(p[2] & 0x7c000) == 0x20000) {
/* Ok, it is call xx; mov reg, %o0 and call optimizes
to doing something on %o0. Patch the patch. */
*addr = (p[2] & ~0x7c000) | ((addr[1] & 0x1f) << 14);
q[1] = addr[1];
addr[1] = 0x01000000;
break;
}
if ((addr[1] & 0x3e000000) == 0x12000000 &&
p[2] == BTFIXUPCALL_STO1O0) {
*addr = (p[2] & ~0x3e000000) | ((addr[1] & 0x1f) << 25);
q[1] = addr[1];
addr[1] = 0x01000000;
break;
}
}
}
}
*addr = addr[1];
q[1] = addr[1];
addr[1] = p[2];
break;
#endif /* BTFIXUP_OPTIMIZE_OTHER */
#endif /* BTFIXUP_OPTIMIZE_NOP */
case 'b': /* BLACKBOX */
/* Has to be sethi i, xx */
if ((insn & 0xc1c00000) != 0x01000000) {
prom_printf(insn_b, p, addr, insn);
prom_halt();
} else {
void (*do_fixup)(unsigned *);
do_fixup = (void (*)(unsigned *))p[1];
do_fixup(addr);
}
break;
case 's': /* SIMM13 */
/* Has to be or %g0, i, xx */
if ((insn & 0xc1ffe000) != 0x80102000) {
prom_printf(insn_s, p, addr, insn);
prom_halt();
}
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x1fff));
break;
case 'h': /* SETHI */
/* Has to be sethi i, xx */
if ((insn & 0xc1c00000) != 0x01000000) {
prom_printf(insn_h, p, addr, insn);
prom_halt();
}
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
break;
case 'a': /* HALF */
/* Has to be sethi i, xx or or %g0, i, xx */
if ((insn & 0xc1c00000) != 0x01000000 &&
(insn & 0xc1ffe000) != 0x80102000) {
prom_printf(insn_a, p, addr, insn);
prom_halt();
}
if (p[1] & 0x3ff)
set_addr(addr, q[1], fmangled,
(insn & 0x3e000000) | 0x80102000 | (p[1] & 0x1fff));
else
set_addr(addr, q[1], fmangled,
(insn & 0x3e000000) | 0x01000000 | (p[1] >> 10));
break;
case 'i': /* INT */
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
else if ((insn & 0x80002000) == 0x80002000) /* %LO */
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
else {
prom_printf(insn_i, p, addr, insn);
prom_halt();
}
break;
}
count -= 2;
q += 2;
}
} else
p = q + count;
}
#ifdef CONFIG_SMP
flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
#else
flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
#endif
if (!flush_cacheall) {
prom_printf(fca_und);
prom_halt();
}
(*flush_cacheall)();
}
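
For readers following the loop above: each fixup record appears to be
laid out as a type word (the fixup letter in its first byte, bit 0
doubling as a "pending" flag), the value or function address to patch
in, an instruction/count word ('f' fixups keep their default
instruction here, with the count one word later), followed by
(call-site address, saved insn) pairs.  As a hypothetical reading aid
only -- no such struct exists in the source:

    struct btfixup_record {
            unsigned int type;   /* 'f','b','s','h','a','i'; bit 0 = pending */
            unsigned int value;  /* target address or immediate */
            unsigned int count;  /* for 'f': default insn, count follows */
            unsigned int refs[]; /* (site address, saved insn) pairs */
    };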

arch/sparc/mm/fault_32.c

@@ -24,29 +24,19 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/memreg.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
extern int prom_node_root;
int show_unhandled_signals = 1;
/* At boot time we determine these two values necessary for setting
* up the segment maps and page table entries (pte's).
*/
int num_segmaps, num_contexts;
int invalid_segment;
/* various Virtual Address Cache parameters we find at boot time... */
int vac_size, vac_linesize, vac_do_hw_vac_flushes;
int vac_entries_per_context, vac_entries_per_segment;
int vac_entries_per_page;
int num_contexts;
/* Return how much physical memory we have. */
unsigned long probe_memory(void)
@@ -60,55 +50,36 @@ unsigned long probe_memory(void)
return total;
}
extern void sun4c_complete_all_stores(void);
/* Whee, a level 15 NMI interrupt memory error. Let's have fun... */
asmlinkage void sparc_lvl15_nmi(struct pt_regs *regs, unsigned long serr,
unsigned long svaddr, unsigned long aerr,
unsigned long avaddr)
{
sun4c_complete_all_stores();
printk("FAULT: NMI received\n");
printk("SREGS: Synchronous Error %08lx\n", serr);
printk(" Synchronous Vaddr %08lx\n", svaddr);
printk(" Asynchronous Error %08lx\n", aerr);
printk(" Asynchronous Vaddr %08lx\n", avaddr);
if (sun4c_memerr_reg)
printk(" Memory Parity Error %08lx\n", *sun4c_memerr_reg);
printk("REGISTER DUMP:\n");
show_regs(regs);
prom_halt();
}
static void unhandled_fault(unsigned long, struct task_struct *,
struct pt_regs *) __attribute__ ((noreturn));
static void unhandled_fault(unsigned long address, struct task_struct *tsk,
struct pt_regs *regs)
static void __noreturn unhandled_fault(unsigned long address,
struct task_struct *tsk,
struct pt_regs *regs)
{
if((unsigned long) address < PAGE_SIZE) {
if ((unsigned long) address < PAGE_SIZE) {
printk(KERN_ALERT
"Unable to handle kernel NULL pointer dereference\n");
} else {
printk(KERN_ALERT "Unable to handle kernel paging request "
"at virtual address %08lx\n", address);
printk(KERN_ALERT "Unable to handle kernel paging request at virtual address %08lx\n",
address);
}
printk(KERN_ALERT "tsk->{mm,active_mm}->context = %08lx\n",
(tsk->mm ? tsk->mm->context : tsk->active_mm->context));
printk(KERN_ALERT "tsk->{mm,active_mm}->pgd = %08lx\n",
(tsk->mm ? (unsigned long) tsk->mm->pgd :
(unsigned long) tsk->active_mm->pgd));
(unsigned long) tsk->active_mm->pgd));
die_if_kernel("Oops", regs);
}
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
unsigned long address)
{
struct pt_regs regs;
unsigned long g2;
unsigned int insn;
int i;
i = search_extables_range(ret_pc, &g2);
switch (i) {
case 3:
@@ -128,14 +99,14 @@ asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
/* for _from_ macros */
insn = *((unsigned int *) pc);
if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
return 2;
break;
return 2;
break;
default:
break;
}
memset(&regs, 0, sizeof (regs));
memset(&regs, 0, sizeof(regs));
regs.pc = pc;
regs.npc = pc + 4;
__asm__ __volatile__(
@@ -198,11 +169,10 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
if (text_fault)
return regs->pc;
if (regs->psr & PSR_PS) {
if (regs->psr & PSR_PS)
insn = *(unsigned int *) regs->pc;
} else {
else
__get_user(insn, (unsigned int *) regs->pc);
}
return safe_compute_effective_address(regs, insn);
}
@@ -228,7 +198,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
(write ? FAULT_FLAG_WRITE : 0));
if(text_fault)
if (text_fault)
address = regs->pc;
/*
@@ -241,36 +211,32 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
* nothing more.
*/
code = SEGV_MAPERR;
if (!ARCH_SUN4C && address >= TASK_SIZE)
if (address >= TASK_SIZE)
goto vmalloc_fault;
/*
* If we're in an interrupt or have no user
* context, we must not take the fault..
*/
if (in_atomic() || !mm)
goto no_context;
if (in_atomic() || !mm)
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
down_read(&mm->mmap_sem);
/*
* The kernel referencing a bad kernel pointer can lock up
* a sun4c machine completely, so we must attempt recovery.
*/
if(!from_user && address >= PAGE_OFFSET)
if (!from_user && address >= PAGE_OFFSET)
goto bad_area;
vma = find_vma(mm, address);
if(!vma)
if (!vma)
goto bad_area;
if(vma->vm_start <= address)
if (vma->vm_start <= address)
goto good_area;
if(!(vma->vm_flags & VM_GROWSDOWN))
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if(expand_stack(vma, address))
if (expand_stack(vma, address))
goto bad_area;
/*
* Ok, we have a good vm_area for this memory access, so
@@ -278,12 +244,12 @@ retry:
*/
good_area:
code = SEGV_ACCERR;
if(write) {
if(!(vma->vm_flags & VM_WRITE))
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
/* Allow reads even for write-only mappings */
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
@@ -349,14 +315,16 @@ no_context:
g2 = regs->u_regs[UREG_G2];
if (!from_user) {
fixup = search_extables_range(regs->pc, &g2);
if (fixup > 10) { /* Values below are reserved for other things */
/* Values below 10 are reserved for other things */
if (fixup > 10) {
extern const unsigned __memset_start[];
extern const unsigned __memset_end[];
extern const unsigned __csum_partial_copy_start[];
extern const unsigned __csum_partial_copy_end[];
#ifdef DEBUG_EXCEPTIONS
printk("Exception: PC<%08lx> faddr<%08lx>\n", regs->pc, address);
printk("Exception: PC<%08lx> faddr<%08lx>\n",
regs->pc, address);
printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
regs->pc, fixup, g2);
#endif
@@ -364,7 +332,7 @@ no_context:
regs->pc < (unsigned long)__memset_end) ||
(regs->pc >= (unsigned long)__csum_partial_copy_start &&
regs->pc < (unsigned long)__csum_partial_copy_end)) {
regs->u_regs[UREG_I4] = address;
regs->u_regs[UREG_I4] = address;
regs->u_regs[UREG_I5] = regs->pc;
}
regs->u_regs[UREG_G2] = g2;
@@ -373,8 +341,8 @@ no_context:
return;
}
}
unhandled_fault (address, tsk, regs);
unhandled_fault(address, tsk, regs);
do_exit(SIGKILL);
/*
@@ -420,97 +388,12 @@ vmalloc_fault:
if (pmd_present(*pmd) || !pmd_present(*pmd_k))
goto bad_area_nosemaphore;
*pmd = *pmd_k;
return;
}
}
asmlinkage void do_sun4c_fault(struct pt_regs *regs, int text_fault, int write,
unsigned long address)
{
extern void sun4c_update_mmu_cache(struct vm_area_struct *,
unsigned long,pte_t *);
extern pte_t *sun4c_pte_offset_kernel(pmd_t *,unsigned long);
struct task_struct *tsk = current;
struct mm_struct *mm = tsk->mm;
pgd_t *pgdp;
pte_t *ptep;
if (text_fault) {
address = regs->pc;
} else if (!write &&
!(regs->psr & PSR_PS)) {
unsigned int insn, __user *ip;
ip = (unsigned int __user *)regs->pc;
if (!get_user(insn, ip)) {
if ((insn & 0xc1680000) == 0xc0680000)
write = 1;
}
}
if (!mm) {
/* We are oopsing. */
do_sparc_fault(regs, text_fault, write, address);
BUG(); /* P3 Oops already, you bitch */
}
pgdp = pgd_offset(mm, address);
ptep = sun4c_pte_offset_kernel((pmd_t *) pgdp, address);
if (pgd_val(*pgdp)) {
if (write) {
if ((pte_val(*ptep) & (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_WRITE|_SUN4C_PAGE_PRESENT)) {
unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_MODIFIED |
_SUN4C_PAGE_VALID |
_SUN4C_PAGE_DIRTY);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
local_irq_restore(flags);
return;
}
local_irq_restore(flags);
}
} else {
if ((pte_val(*ptep) & (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT))
== (_SUN4C_PAGE_READ|_SUN4C_PAGE_PRESENT)) {
unsigned long flags;
*ptep = __pte(pte_val(*ptep) | _SUN4C_PAGE_ACCESSED |
_SUN4C_PAGE_VALID);
local_irq_save(flags);
if (sun4c_get_segmap(address) != invalid_segment) {
sun4c_put_pte(address, pte_val(*ptep));
local_irq_restore(flags);
return;
}
local_irq_restore(flags);
}
}
}
/* This conditional is 'interesting'. */
if (pgd_val(*pgdp) && !(write && !(pte_val(*ptep) & _SUN4C_PAGE_WRITE))
&& (pte_val(*ptep) & _SUN4C_PAGE_VALID))
/* Note: It is safe to not grab the MMAP semaphore here because
* we know that update_mmu_cache() will not sleep for
* any reason (at least not in the current implementation)
* and therefore there is no danger of another thread getting
* on the CPU and doing a shrink_mmap() on this vma.
*/
sun4c_update_mmu_cache (find_vma(current->mm, address), address,
ptep);
else
do_sparc_fault(regs, text_fault, write, address);
}
/* This always deals with user addresses. */
static void force_user_fault(unsigned long address, int write)
{
@@ -523,21 +406,21 @@ static void force_user_fault(unsigned long address, int write)
down_read(&mm->mmap_sem);
vma = find_vma(mm, address);
if(!vma)
if (!vma)
goto bad_area;
if(vma->vm_start <= address)
if (vma->vm_start <= address)
goto good_area;
if(!(vma->vm_flags & VM_GROWSDOWN))
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
if(expand_stack(vma, address))
if (expand_stack(vma, address))
goto bad_area;
good_area:
code = SEGV_ACCERR;
if(write) {
if(!(vma->vm_flags & VM_WRITE))
if (write) {
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
} else {
if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
@@ -568,7 +451,7 @@ void window_overflow_fault(void)
unsigned long sp;
sp = current_thread_info()->rwbuf_stkptrs[0];
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 1);
force_user_fault(sp, 1);
@@ -577,7 +460,7 @@ void window_overflow_fault(void)
void window_underflow_fault(unsigned long sp)
{
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);
@@ -589,7 +472,7 @@ void window_ret_fault(struct pt_regs *regs)
unsigned long sp;
sp = regs->u_regs[UREG_FP];
if(((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
if (((sp + 0x38) & PAGE_MASK) != (sp & PAGE_MASK))
force_user_fault(sp + 0x38, 0);
force_user_fault(sp, 0);

arch/sparc/mm/init_32.c

@@ -27,7 +27,6 @@
#include <linux/gfp.h>
#include <asm/sections.h>
#include <asm/vac-ops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/vaddrs.h>
@@ -45,9 +44,6 @@ EXPORT_SYMBOL(phys_base);
unsigned long pfn_base;
EXPORT_SYMBOL(pfn_base);
unsigned long page_kernel;
EXPORT_SYMBOL(page_kernel);
struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS+1];
unsigned long sparc_unmapped_base;
@@ -286,45 +282,17 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
return max_pfn;
}
/*
* check_pgt_cache
*
* This is called at the end of unmapping of VMA (zap_page_range),
* to rescan the page cache for architecture specific things,
* presumably something like sun4/sun4c PMEGs. Most architectures
* define check_pgt_cache empty.
*
* We simply copy the 2.4 implementation for now.
*/
static int pgt_cache_water[2] = { 25, 50 };
void check_pgt_cache(void)
{
do_check_pgt_cache(pgt_cache_water[0], pgt_cache_water[1]);
}
/*
* paging_init() sets up the page tables: We call the MMU specific
* init routine based upon the Sun model type on the Sparc.
*
*/
extern void sun4c_paging_init(void);
extern void srmmu_paging_init(void);
extern void device_scan(void);
pgprot_t PAGE_SHARED __read_mostly;
EXPORT_SYMBOL(PAGE_SHARED);
void __init paging_init(void)
{
switch(sparc_cpu_model) {
case sun4c:
case sun4e:
case sun4:
sun4c_paging_init();
sparc_unmapped_base = 0xe0000000;
BTFIXUPSET_SETHI(sparc_unmapped_base, 0xe0000000);
break;
case sparc_leon:
leon_init();
/* fall through */
@@ -332,7 +300,6 @@ void __init paging_init(void)
case sun4d:
srmmu_paging_init();
sparc_unmapped_base = 0x50000000;
BTFIXUPSET_SETHI(sparc_unmapped_base, 0x50000000);
break;
default:
prom_printf("paging_init: Cannot init paging on this Sparc\n");
@@ -341,24 +308,6 @@ void __init paging_init(void)
prom_halt();
}
/* Initialize the protection map with non-constant, MMU dependent values. */
protection_map[0] = PAGE_NONE;
protection_map[1] = PAGE_READONLY;
protection_map[2] = PAGE_COPY;
protection_map[3] = PAGE_COPY;
protection_map[4] = PAGE_READONLY;
protection_map[5] = PAGE_READONLY;
protection_map[6] = PAGE_COPY;
protection_map[7] = PAGE_COPY;
protection_map[8] = PAGE_NONE;
protection_map[9] = PAGE_READONLY;
protection_map[10] = PAGE_SHARED;
protection_map[11] = PAGE_SHARED;
protection_map[12] = PAGE_READONLY;
protection_map[13] = PAGE_READONLY;
protection_map[14] = PAGE_SHARED;
protection_map[15] = PAGE_SHARED;
btfixup();
prom_build_devicetree();
of_fill_in_cpu_data();
device_scan();

arch/sparc/mm/init_64.c

@@ -741,7 +741,6 @@ static void __init find_ramdisk(unsigned long phys_base)
struct node_mem_mask {
unsigned long mask;
unsigned long val;
unsigned long bootmem_paddr;
};
static struct node_mem_mask node_masks[MAX_NUMNODES];
static int num_node_masks;
@@ -806,12 +805,6 @@ static u64 memblock_nid_range(u64 start, u64 end, int *nid)
return start;
}
#else
static u64 memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = 0;
return end;
}
#endif
/* This must be invoked after performing all of the necessary
@@ -820,10 +813,11 @@ static u64 memblock_nid_range(u64 start, u64 end, int *nid)
*/
static void __init allocate_node_data(int nid)
{
unsigned long paddr, num_pages, start_pfn, end_pfn;
struct pglist_data *p;
unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
unsigned long paddr;
paddr = memblock_alloc_try_nid(sizeof(struct pglist_data), SMP_CACHE_BYTES, nid);
if (!paddr) {
prom_printf("Cannot allocate pglist_data for nid[%d]\n", nid);
@@ -832,7 +826,7 @@ static void __init allocate_node_data(int nid)
NODE_DATA(nid) = __va(paddr);
memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
NODE_DATA(nid)->node_id = nid;
#endif
p = NODE_DATA(nid);
@@ -840,18 +834,6 @@ static void __init allocate_node_data(int nid)
get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
p->node_start_pfn = start_pfn;
p->node_spanned_pages = end_pfn - start_pfn;
if (p->node_spanned_pages) {
num_pages = bootmem_bootmap_pages(p->node_spanned_pages);
paddr = memblock_alloc_try_nid(num_pages << PAGE_SHIFT, PAGE_SIZE, nid);
if (!paddr) {
prom_printf("Cannot allocate bootmap for nid[%d]\n",
nid);
prom_halt();
}
node_masks[nid].bootmem_paddr = paddr;
}
}
static void init_node_masks_nonnuma(void)
@@ -1292,75 +1274,9 @@ static void __init bootmem_init_nonnuma(void)
node_set_online(0);
}
static void __init reserve_range_in_node(int nid, unsigned long start,
unsigned long end)
{
numadbg(" reserve_range_in_node(nid[%d],start[%lx],end[%lx]\n",
nid, start, end);
while (start < end) {
unsigned long this_end;
int n;
this_end = memblock_nid_range(start, end, &n);
if (n == nid) {
numadbg(" MATCH reserving range [%lx:%lx]\n",
start, this_end);
reserve_bootmem_node(NODE_DATA(nid), start,
(this_end - start), BOOTMEM_DEFAULT);
} else
numadbg(" NO MATCH, advancing start to %lx\n",
this_end);
start = this_end;
}
}
static void __init trim_reserved_in_node(int nid)
{
struct memblock_region *reg;
numadbg(" trim_reserved_in_node(%d)\n", nid);
for_each_memblock(reserved, reg)
reserve_range_in_node(nid, reg->base, reg->base + reg->size);
}
static void __init bootmem_init_one_node(int nid)
{
struct pglist_data *p;
numadbg("bootmem_init_one_node(%d)\n", nid);
p = NODE_DATA(nid);
if (p->node_spanned_pages) {
unsigned long paddr = node_masks[nid].bootmem_paddr;
unsigned long end_pfn;
end_pfn = p->node_start_pfn + p->node_spanned_pages;
numadbg(" init_bootmem_node(%d, %lx, %lx, %lx)\n",
nid, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);
init_bootmem_node(p, paddr >> PAGE_SHIFT,
p->node_start_pfn, end_pfn);
numadbg(" free_bootmem_with_active_regions(%d, %lx)\n",
nid, end_pfn);
free_bootmem_with_active_regions(nid, end_pfn);
trim_reserved_in_node(nid);
numadbg(" sparse_memory_present_with_active_regions(%d)\n",
nid);
sparse_memory_present_with_active_regions(nid);
}
}
static unsigned long __init bootmem_init(unsigned long phys_base)
{
unsigned long end_pfn;
int nid;
end_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
max_pfn = max_low_pfn = end_pfn;
@@ -1369,11 +1285,12 @@ static unsigned long __init bootmem_init(unsigned long phys_base)
if (bootmem_init_numa() < 0)
bootmem_init_nonnuma();
/* Dump memblock with node info. */
memblock_dump_all();
/* XXX cpu notifier XXX */
for_each_online_node(nid)
bootmem_init_one_node(nid);
sparse_memory_present_with_active_regions(MAX_NUMNODES);
sparse_init();
return end_pfn;
@@ -1701,6 +1618,7 @@ void __init paging_init(void)
{
unsigned long end_pfn, shift, phys_base;
unsigned long real_end, i;
int node;
/* These build time checkes make sure that the dcache_dirty_cpu()
* page->flags usage will work.
@@ -1826,22 +1744,24 @@ void __init paging_init(void)
#endif
}
/* Setup bootmem... */
last_valid_pfn = end_pfn = bootmem_init(phys_base);
/* Once the OF device tree and MDESC have been setup, we know
* the list of possible cpus. Therefore we can allocate the
* IRQ stacks.
*/
for_each_possible_cpu(i) {
/* XXX Use node local allocations... XXX */
softirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
hardirq_stack[i] = __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
node = cpu_to_node(i);
softirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
THREAD_SIZE,
THREAD_SIZE, 0);
hardirq_stack[i] = __alloc_bootmem_node(NODE_DATA(node),
THREAD_SIZE,
THREAD_SIZE, 0);
}
/* Setup bootmem... */
last_valid_pfn = end_pfn = bootmem_init(phys_base);
#ifndef CONFIG_NEED_MULTIPLE_NODES
max_mapnr = last_valid_pfn;
#endif
kernel_physical_mapping_init();
{
@@ -1973,6 +1893,7 @@ void __init mem_init(void)
free_all_bootmem_node(NODE_DATA(i));
}
}
totalram_pages += free_low_memory_core_early(MAX_NUMNODES);
}
#else
totalram_pages = free_all_bootmem();

arch/sparc/mm/io-unit.c

@@ -197,7 +197,7 @@ static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg,
}
#ifdef CONFIG_SBUS
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
{
struct iounit_struct *iounit = dev->archdata.iommu;
unsigned long page, end;
@@ -242,29 +242,18 @@ static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int le
}
#endif
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
return vaddr;
}
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}
static const struct sparc32_dma_ops iounit_dma_ops = {
.get_scsi_one = iounit_get_scsi_one,
.get_scsi_sgl = iounit_get_scsi_sgl,
.release_scsi_one = iounit_release_scsi_one,
.release_scsi_sgl = iounit_release_scsi_sgl,
#ifdef CONFIG_SBUS
.map_dma_area = iounit_map_dma_area,
.unmap_dma_area = iounit_unmap_dma_area,
#endif
};
void __init ld_mmu_iounit(void)
{
BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);
BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);
#ifdef CONFIG_SBUS
BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
sparc32_dma_ops = &iounit_dma_ops;
}
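
With the table above in place, callers reach the MMU DMA hooks through
the sparc32_dma_ops pointer rather than a boot-patched call.  A sketch
of the consumer side -- the wrapper shape here is an assumption; only
the ops struct itself comes from this diff:

    extern const struct sparc32_dma_ops *sparc32_dma_ops;

    static inline __u32 mmu_get_scsi_one(struct device *dev, char *vaddr,
                                         unsigned long len)
    {
            return sparc32_dma_ops->get_scsi_one(dev, vaddr, len);
    }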

arch/sparc/mm/iommu.c

@@ -39,8 +39,6 @@
/* srmmu.c */
extern int viking_mxcc_present;
BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
extern int flush_page_for_dma_global;
static int viking_flush;
/* viking.S */
@@ -143,7 +141,6 @@ static int __init iommu_init(void)
subsys_initcall(iommu_init);
/* This begs to be btfixup-ed by srmmu. */
/* Flush the iotlb entries to ram. */
/* This could be better if we didn't have to flush whole pages. */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
@@ -216,11 +213,6 @@ static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
return busa + off;
}
static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
{
return iommu_get_scsi_one(dev, vaddr, len);
}
static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
{
flush_page_for_dma(0);
@@ -238,19 +230,6 @@ static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned
return iommu_get_scsi_one(dev, vaddr, len);
}
static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
{
int n;
while (sz != 0) {
--sz;
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
sg->dma_length = sg->length;
sg = sg_next(sg);
}
}
static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
{
int n;
@@ -426,40 +405,36 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len
}
#endif
static char *iommu_lockarea(char *vaddr, unsigned long len)
{
return vaddr;
}
static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
.get_scsi_one = iommu_get_scsi_one_gflush,
.get_scsi_sgl = iommu_get_scsi_sgl_gflush,
.release_scsi_one = iommu_release_scsi_one,
.release_scsi_sgl = iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
.map_dma_area = iommu_map_dma_area,
.unmap_dma_area = iommu_unmap_dma_area,
#endif
};
static void iommu_unlockarea(char *vaddr, unsigned long len)
{
}
static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
.get_scsi_one = iommu_get_scsi_one_pflush,
.get_scsi_sgl = iommu_get_scsi_sgl_pflush,
.release_scsi_one = iommu_release_scsi_one,
.release_scsi_sgl = iommu_release_scsi_sgl,
#ifdef CONFIG_SBUS
.map_dma_area = iommu_map_dma_area,
.unmap_dma_area = iommu_unmap_dma_area,
#endif
};
void __init ld_mmu_iommu(void)
{
viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
BTFIXUPSET_CALL(mmu_lockarea, iommu_lockarea, BTFIXUPCALL_RETO0);
BTFIXUPSET_CALL(mmu_unlockarea, iommu_unlockarea, BTFIXUPCALL_NOP);
if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
/* IO coherent chip */
BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_noflush, BTFIXUPCALL_RETO0);
BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_noflush, BTFIXUPCALL_NORM);
} else if (flush_page_for_dma_global) {
if (flush_page_for_dma_global) {
/* flush_page_for_dma flushes everything, no matter of what page is it */
BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_gflush, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_gflush, BTFIXUPCALL_NORM);
sparc32_dma_ops = &iommu_dma_gflush_ops;
} else {
BTFIXUPSET_CALL(mmu_get_scsi_one, iommu_get_scsi_one_pflush, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_get_scsi_sgl, iommu_get_scsi_sgl_pflush, BTFIXUPCALL_NORM);
sparc32_dma_ops = &iommu_dma_pflush_ops;
}
BTFIXUPSET_CALL(mmu_release_scsi_one, iommu_release_scsi_one, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_release_scsi_sgl, iommu_release_scsi_sgl, BTFIXUPCALL_NORM);
#ifdef CONFIG_SBUS
BTFIXUPSET_CALL(mmu_map_dma_area, iommu_map_dma_area, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(mmu_unmap_dma_area, iommu_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);

arch/sparc/mm/leon_mm.c

@@ -15,9 +15,23 @@
#include <asm/leon.h>
#include <asm/tlbflush.h>
#include "srmmu.h"
int leon_flush_during_switch = 1;
int srmmu_swprobe_trace;
static inline unsigned long leon_get_ctable_ptr(void)
{
unsigned int retval;
__asm__ __volatile__("lda [%1] %2, %0\n\t" :
"=r" (retval) :
"r" (SRMMU_CTXTBL_PTR),
"i" (ASI_LEON_MMUREGS));
return (retval & SRMMU_CTX_PMASK) << 4;
}
unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
{
@@ -33,10 +47,10 @@ unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr)
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: trace on\n");
ctxtbl = srmmu_get_ctable_ptr();
ctxtbl = leon_get_ctable_ptr();
if (!(ctxtbl)) {
if (srmmu_swprobe_trace)
printk(KERN_INFO "swprobe: srmmu_get_ctable_ptr returned 0=>0\n");
printk(KERN_INFO "swprobe: leon_get_ctable_ptr returned 0=>0\n");
return 0;
}
if (!_pfn_valid(PFN(ctxtbl))) {
@@ -258,3 +272,80 @@ void leon_switch_mm(void)
if (leon_flush_during_switch)
leon_flush_cache_all();
}
static void leon_flush_cache_mm(struct mm_struct *mm)
{
leon_flush_cache_all();
}
static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
leon_flush_pcache_all(vma, page);
}
static void leon_flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
leon_flush_cache_all();
}
static void leon_flush_tlb_mm(struct mm_struct *mm)
{
leon_flush_tlb_all();
}
static void leon_flush_tlb_page(struct vm_area_struct *vma,
unsigned long page)
{
leon_flush_tlb_all();
}
static void leon_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
leon_flush_tlb_all();
}
static void leon_flush_page_to_ram(unsigned long page)
{
leon_flush_cache_all();
}
static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
leon_flush_cache_all();
}
static void leon_flush_page_for_dma(unsigned long page)
{
leon_flush_dcache_all();
}
void __init poke_leonsparc(void)
{
}
static const struct sparc32_cachetlb_ops leon_ops = {
.cache_all = leon_flush_cache_all,
.cache_mm = leon_flush_cache_mm,
.cache_page = leon_flush_cache_page,
.cache_range = leon_flush_cache_range,
.tlb_all = leon_flush_tlb_all,
.tlb_mm = leon_flush_tlb_mm,
.tlb_page = leon_flush_tlb_page,
.tlb_range = leon_flush_tlb_range,
.page_to_ram = leon_flush_page_to_ram,
.sig_insns = leon_flush_sig_insns,
.page_for_dma = leon_flush_page_for_dma,
};
void __init init_leon(void)
{
srmmu_name = "LEON";
sparc32_cachetlb_ops = &leon_ops;
poke_srmmu = poke_leonsparc;
leon_flush_during_switch = leon_flush_needed();
}
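
Once init_leon() has installed leon_ops, the former BTFIXUP_CALL()
sites reduce to ordinary indirect calls through the table.  A sketch of
the consumer side -- the exact wrapper macros are an assumption:

    extern const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

    #define flush_cache_all()   sparc32_cachetlb_ops->cache_all()
    #define flush_tlb_all()     sparc32_cachetlb_ops->tlb_all()
    #define flush_tlb_mm(mm)    sparc32_cachetlb_ops->tlb_mm(mm)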

arch/sparc/mm/loadmmu.c (deleted)

@@ -1,43 +0,0 @@
/*
* loadmmu.c: This code loads up all the mm function pointers once the
* machine type has been determined. It also sets the static
* mmu values such as PAGE_NONE, etc.
*
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/oplib.h>
struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;
extern void ld_mmu_sun4c(void);
extern void ld_mmu_srmmu(void);
void __init load_mmu(void)
{
switch(sparc_cpu_model) {
case sun4c:
case sun4:
ld_mmu_sun4c();
break;
case sun4m:
case sun4d:
case sparc_leon:
ld_mmu_srmmu();
break;
default:
prom_printf("load_mmu: %d unsupported\n", (int)sparc_cpu_model);
prom_halt();
}
btfixup();
}

arch/sparc/mm/nosun4c.c (deleted)

@@ -1,77 +0,0 @@
/*
* nosun4c.c: This file is a bunch of dummies for SMP compiles,
* so that it does not need sun4c and avoid ifdefs.
*
* Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/pgtable.h>
static char shouldnothappen[] __initdata = "32bit SMP kernel only supports sun4m and sun4d\n";
/* Dummies */
struct sun4c_mmu_ring {
unsigned long xxx1[3];
unsigned char xxx2[2];
int xxx3;
};
struct sun4c_mmu_ring sun4c_kernel_ring;
struct sun4c_mmu_ring sun4c_kfree_ring;
unsigned long sun4c_kernel_faults;
unsigned long *sun4c_memerr_reg;
static void __init should_not_happen(void)
{
prom_printf(shouldnothappen);
prom_halt();
}
unsigned long __init sun4c_paging_init(unsigned long start_mem, unsigned long end_mem)
{
should_not_happen();
return 0;
}
void __init ld_mmu_sun4c(void)
{
should_not_happen();
}
void sun4c_mapioaddr(unsigned long physaddr, unsigned long virt_addr, int bus_type, int rdonly)
{
}
void sun4c_unmapioaddr(unsigned long virt_addr)
{
}
void sun4c_complete_all_stores(void)
{
}
pte_t *sun4c_pte_offset(pmd_t * dir, unsigned long address)
{
return NULL;
}
pte_t *sun4c_pte_offset_kernel(pmd_t *dir, unsigned long address)
{
return NULL;
}
void sun4c_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
}
void __init sun4c_probe_vac(void)
{
should_not_happen();
}
void __init sun4c_probe_memerr_reg(void)
{
should_not_happen();
}

arch/sparc/mm/srmmu.c (file diff suppressed because it is too large)

arch/sparc/mm/srmmu.h (new file, +4 lines)

@@ -0,0 +1,4 @@
/* srmmu.c */
extern char *srmmu_name;
extern void (*poke_srmmu)(void);

arch/sparc/mm/sun4c.c (file diff suppressed because it is too large)

arch/sparc/mm/viking.S

@@ -14,7 +14,6 @@
#include <asm/page.h>
#include <asm/pgtsrmmu.h>
#include <asm/viking.h>
#include <asm/btfixup.h>
#ifdef CONFIG_SMP
.data