Merge branch 'highmem' into devel
Conflicts:
	arch/arm/mach-clps7500/include/mach/memory.h
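Every conversion below follows one mechanical pattern: the per-CPU cpu_user_fns entries stop taking pre-mapped kernel virtual addresses and start taking struct page pointers, doing the kmap_atomic()/kunmap_atomic() dance themselves so the pages are allowed to live in highmem. A minimal sketch of that pattern, using hypothetical my_* names rather than code from the tree:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Old style: the caller passed kernel virtual addresses, so highmem
 * pages could not be handled. */
static void my_copy_user_page(void *kto, const void *kfrom)
{
	copy_page(kto, kfrom);		/* stand-in for the optimised asm */
}

/* New style: the function receives struct page pointers and maps
 * them itself, so the pages may live in highmem. */
void my_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr)
{
	void *kto = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	my_copy_user_page(kto, kfrom);

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}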
arch/arm/lib/copypage-feroceon.S (deleted, 95 lines)
@@ -1,95 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-feroceon.S
- *
- *  Copyright (C) 2008 Marvell Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles copy_user_page and clear_user_page on Feroceon
- * more optimally than the generic implementations.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-
-ENTRY(feroceon_copy_user_page)
-	stmfd	sp!, {r4-r9, lr}
-	mov	ip, #PAGE_SZ
-1:	mov	lr, r1
-	ldmia	r1!, {r2 - r9}
-	pld	[lr, #32]
-	pld	[lr, #64]
-	pld	[lr, #96]
-	pld	[lr, #128]
-	pld	[lr, #160]
-	pld	[lr, #192]
-	pld	[lr, #224]
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	ldmia	r1!, {r2 - r9}
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	stmia	r0, {r2 - r9}
-	subs	ip, ip, #(32 * 8)
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, ip, c7, c10, 4	@ drain WB
-	ldmfd	sp!, {r4-r9, pc}
-
-	.align	5
-
-ENTRY(feroceon_clear_user_page)
-	stmfd	sp!, {r4-r7, lr}
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-	mov	r4, #0
-	mov	r5, #0
-	mov	r6, #0
-	mov	r7, #0
-	mov	ip, #0
-	mov	lr, #0
-1:	stmia	r0, {r2-r7, ip, lr}
-	subs	r1, r1, #1
-	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line
-	add	r0, r0, #32
-	bne	1b
-	mcr	p15, 0, r1, c7, c10, 4	@ drain WB
-	ldmfd	sp!, {r4-r7, pc}
-
-	__INITDATA
-
-	.type	feroceon_user_fns, #object
-ENTRY(feroceon_user_fns)
-	.long	feroceon_clear_user_page
-	.long	feroceon_copy_user_page
-	.size	feroceon_user_fns, . - feroceon_user_fns
arch/arm/mm/copypage-feroceon.c (new file, 111 lines)
@@ -0,0 +1,111 @@
+/*
+ *  linux/arch/arm/mm/copypage-feroceon.S
+ *
+ *  Copyright (C) 2008 Marvell Semiconductors
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This handles copy_user_highpage and clear_user_page on Feroceon
+ * more optimally than the generic implementations.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+static void __attribute__((naked))
+feroceon_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4-r9, lr}	\n\
+	mov	ip, %0			\n\
+1:	mov	lr, r1			\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	pld	[lr, #32]		\n\
+	pld	[lr, #64]		\n\
+	pld	[lr, #96]		\n\
+	pld	[lr, #128]		\n\
+	pld	[lr, #160]		\n\
+	pld	[lr, #192]		\n\
+	pld	[lr, #224]		\n\
+	stmia	r0, {r2 - r9}		\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	stmia	r0, {r2 - r9}		\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	stmia	r0, {r2 - r9}		\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	stmia	r0, {r2 - r9}		\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	stmia	r0, {r2 - r9}		\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	stmia	r0, {r2 - r9}		\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	stmia	r0, {r2 - r9}		\n\
+	ldmia	r1!, {r2 - r9}		\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	stmia	r0, {r2 - r9}		\n\
+	subs	ip, ip, #(32 * 8)	\n\
+	mcr	p15, 0, r0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	r0, r0, #32		\n\
+	bne	1b			\n\
+	mcr	p15, 0, ip, c7, c10, 4	@ drain WB\n\
+	ldmfd	sp!, {r4-r9, pc}"
+	:
+	: "I" (PAGE_SIZE));
+}
+
+void feroceon_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	feroceon_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2			\n\
+	mov	r2, #0			\n\
+	mov	r3, #0			\n\
+	mov	r4, #0			\n\
+	mov	r5, #0			\n\
+	mov	r6, #0			\n\
+	mov	r7, #0			\n\
+	mov	ip, #0			\n\
+	mov	lr, #0			\n\
+1:	stmia	%0, {r2-r7, ip, lr}	\n\
+	subs	r1, r1, #1		\n\
+	mcr	p15, 0, %0, c7, c14, 1	@ clean and invalidate D line\n\
+	add	%0, %0, #32		\n\
+	bne	1b			\n\
+	mcr	p15, 0, r1, c7, c10, 4	@ drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns feroceon_user_fns __initdata = {
+	.cpu_clear_user_highpage = feroceon_clear_user_highpage,
+	.cpu_copy_user_highpage = feroceon_copy_user_highpage,
+};
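The new C files keep the original hand-scheduled assembly by marking the wrapper __attribute__((naked)): GCC emits no prologue or epilogue, so on entry r0 and r1 still hold the first two arguments per the ARM calling convention and the asm body may use them directly, returning by itself. A hedged standalone illustration of the idiom, not kernel code (32-bit ARM, GCC only):

/* Naked: no compiler-generated prologue/epilogue; arguments arrive in
 * r0/r1 and the result is whatever the asm leaves in r0. */
__attribute__((naked)) int add_naked(int a, int b)
{
	asm volatile(
	"add	r0, r0, r1	\n"	/* r0 = a + b; r0 is the return reg */
	"bx	lr"			/* explicit return */
	);
}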
arch/arm/lib/copypage-v3.S (deleted, 67 lines)
@@ -1,67 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv3 optimised copy_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_copy_user_page)
-	stmfd	sp!, {r4, lr}		@ 2
-	mov	r2, #PAGE_SZ/64		@ 1
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4+1
-1:	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4
-	subs	r2, r2, #1		@ 1
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}	@ 4
-	bne	1b			@ 1
-	ldmfd	sp!, {r4, pc}		@ 3
-
-	.align	5
-/*
- * ARMv3 optimised clear_user_page
- *
- * FIXME: do we need to handle cache stuff...
- */
-ENTRY(v3_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64		@ 1
-	mov	r2, #0			@ 1
-	mov	r3, #0			@ 1
-	mov	ip, #0			@ 1
-	mov	lr, #0			@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	subs	r1, r1, #1		@ 1
-	bne	1b			@ 1
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v3_user_fns, #object
-ENTRY(v3_user_fns)
-	.long	v3_clear_user_page
-	.long	v3_copy_user_page
-	.size	v3_user_fns, . - v3_user_fns
arch/arm/mm/copypage-v3.c (new file, 81 lines)
@@ -0,0 +1,81 @@
+/*
+ *  linux/arch/arm/mm/copypage-v3.c
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv3 optimised copy_user_highpage
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+static void __attribute__((naked))
+v3_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\n\
+	stmfd	sp!, {r4, lr}		@ 2\n\
+	mov	r2, %2			@ 1\n\
+	ldmia	%0!, {r3, r4, ip, lr}	@ 4+1\n\
+1:	stmia	%1!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}	@ 4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}	@ 4+1\n\
+	stmia	%1!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	%0!, {r3, r4, ip, lr}	@ 4\n\
+	subs	r2, r2, #1		@ 1\n\
+	stmia	%1!, {r3, r4, ip, lr}	@ 4\n\
+	ldmneia	%0!, {r3, r4, ip, lr}	@ 4\n\
+	bne	1b			@ 1\n\
+	ldmfd	sp!, {r4, pc}		@ 3"
+	:
+	: "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64));
+}
+
+void v3_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v3_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv3 optimised clear_user_page
+ *
+ * FIXME: do we need to handle cache stuff...
+ */
+void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\n\
+	mov	r1, %2			@ 1\n\
+	mov	r2, #0			@ 1\n\
+	mov	r3, #0			@ 1\n\
+	mov	ip, #0			@ 1\n\
+	mov	lr, #0			@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	subs	r1, r1, #1		@ 1\n\
+	bne	1b			@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v3_user_fns __initdata = {
+	.cpu_clear_user_highpage = v3_clear_user_highpage,
+	.cpu_copy_user_highpage = v3_copy_user_highpage,
+};
arch/arm/mm/copypage-v4mc.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
-#include <linux/mm.h>
+#include <linux/highmem.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -33,7 +33,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * ARMv4 mini-dcache optimised copy_user_page
+ * ARMv4 mini-dcache optimised copy_user_highpage
  *
  * We flush the destination cache lines just before we write the data into the
  * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock);
  *
  * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
  * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
+ * own copy_user_highpage that does the right thing.
  */
 static void __attribute__((naked))
 mc_copy_user_page(void *from, void *to)
@@ -68,50 +68,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64));
 }
 
-void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void v4_mc_copy_user_highpage(struct page *from, struct page *to,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(0xffff8000);
 
 	mc_copy_user_page((void *)0xffff8000, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * ARMv4 optimised clear_user_page
  */
-void __attribute__((naked))
-v4_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	asm volatile(
-	"str	lr, [sp, #-4]!\n\
-	mov	r1, %0			@ 1\n\
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2			@ 1\n\
 	mov	r2, #0			@ 1\n\
 	mov	r3, #0			@ 1\n\
 	mov	ip, #0			@ 1\n\
 	mov	lr, #0			@ 1\n\
-1:	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}	@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}	@ 4\n\
-	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line\n\
-	stmia	r0!, {r2, r3, ip, lr}	@ 4\n\
-	stmia	r0!, {r2, r3, ip, lr}	@ 4\n\
+1:	mcr	p15, 0, %0, c7, c6, 1	@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1	@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
 	subs	r1, r1, #1		@ 1\n\
-	bne	1b			@ 1\n\
-	ldr	pc, [sp], #4"
-	:
-	: "I" (PAGE_SIZE / 64));
+	bne	1b			@ 1"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= v4_mc_clear_user_page,
-	.cpu_copy_user_page	= v4_mc_copy_user_page,
+	.cpu_clear_user_highpage = v4_mc_clear_user_highpage,
+	.cpu_copy_user_highpage = v4_mc_copy_user_highpage,
 };
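v4_mc_copy_user_highpage() reads the source through a dedicated mini-cacheable window at 0xffff8000 so the copy avoids polluting the main D-cache; because there is only one window, the remap is serialised by minicache_lock. A self-contained sketch of that fixed-window idiom with the kernel helpers stubbed out (the stub names are mine, not from the tree):

#include <stdio.h>

#define WINDOW	0xffff8000UL		/* mirrors the minicache address */

static unsigned long window_pfn;	/* stub for the window's PTE */

static void map_window(unsigned long pfn) { window_pfn = pfn; }
static void flush_window_tlb(void) { /* drop the stale TLB entry */ }

/* One window shared by everyone -> callers must serialise, which is
 * what minicache_lock does in the real function. */
static void copy_via_window(unsigned long src_pfn, void *kto)
{
	map_window(src_pfn);
	flush_window_tlb();
	printf("copy pfn %#lx via %#lx into %p\n", window_pfn, WINDOW, kto);
}

int main(void)
{
	char dst[32];
	copy_via_window(0x1234, dst);
	return 0;
}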
arch/arm/lib/copypage-v4wb.S (deleted, 79 lines)
@@ -1,79 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * We flush the destination cache lines just before we write the data into the
- * corresponding address.  Since the Dcache is read-allocate, this removes the
- * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
- * and merged as appropriate.
- *
- * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
- * instruction.  If your processor does not supply this, you have to write your
- * own copy_user_page that does the right thing.
- */
-ENTRY(v4wb_copy_user_page)
-	stmfd	sp!, {r4, lr}		@ 2
-	mov	r2, #PAGE_SZ/64		@ 1
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4
-1:	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4
-	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4
-	subs	r2, r2, #1		@ 1
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}	@ 4
-	bne	1b			@ 1
-	mcr	p15, 0, r1, c7, c10, 4	@ 1   drain WB
-	ldmfd	sp!, {r4, pc}		@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wb_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64		@ 1
-	mov	r2, #0			@ 1
-	mov	r3, #0			@ 1
-	mov	ip, #0			@ 1
-	mov	lr, #0			@ 1
-1:	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	subs	r1, r1, #1		@ 1
-	bne	1b			@ 1
-	mcr	p15, 0, r1, c7, c10, 4	@ 1   drain WB
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wb_user_fns, #object
-ENTRY(v4wb_user_fns)
-	.long	v4wb_clear_user_page
-	.long	v4wb_copy_user_page
-	.size	v4wb_user_fns, . - v4wb_user_fns
arch/arm/mm/copypage-v4wb.c (new file, 94 lines)
@@ -0,0 +1,94 @@
+/*
+ *  linux/arch/arm/mm/copypage-v4wb.c
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * We flush the destination cache lines just before we write the data into the
+ * corresponding address.  Since the Dcache is read-allocate, this removes the
+ * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
+ * and merged as appropriate.
+ *
+ * Note: We rely on all ARMv4 processors implementing the "invalidate D line"
+ * instruction.  If your processor does not supply this, you have to write your
+ * own copy_user_highpage that does the right thing.
+ */
+static void __attribute__((naked))
+v4wb_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}		@ 2\n\
+	mov	r2, %0			@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4\n\
+1:	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4\n\
+	mcr	p15, 0, r0, c7, c6, 1	@ 1   invalidate D line\n\
+	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4\n\
+	subs	r2, r2, #1		@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}	@ 4\n\
+	bne	1b			@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4	@ 1   drain WB\n\
+	ldmfd	sp!, {r4, pc}		@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wb_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wb_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2			@ 1\n\
+	mov	r2, #0			@ 1\n\
+	mov	r3, #0			@ 1\n\
+	mov	ip, #0			@ 1\n\
+	mov	lr, #0			@ 1\n\
+1:	mcr	p15, 0, %0, c7, c6, 1	@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	mcr	p15, 0, %0, c7, c6, 1	@ 1   invalidate D line\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	subs	r1, r1, #1		@ 1\n\
+	bne	1b			@ 1\n\
+	mcr	p15, 0, r1, c7, c10, 4	@ 1   drain WB"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wb_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
+	.cpu_copy_user_highpage = v4wb_copy_user_highpage,
+};
arch/arm/lib/copypage-v4wt.S (deleted, 73 lines)
@@ -1,73 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-v4.S
- *
- *  Copyright (C) 1995-1999 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *  ASM optimised string functions
- *
- *  This is for CPUs with a writethrough cache and 'flush ID cache' is
- *  the only supported cache operation.
- */
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-	.text
-	.align	5
-/*
- * ARMv4 optimised copy_user_page
- *
- * Since we have writethrough caches, we don't have to worry about
- * dirty data in the cache.  However, we do have to ensure that
- * subsequent reads are up to date.
- */
-ENTRY(v4wt_copy_user_page)
-	stmfd	sp!, {r4, lr}		@ 2
-	mov	r2, #PAGE_SZ/64		@ 1
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4
-1:	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4+1
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmia	r1!, {r3, r4, ip, lr}	@ 4
-	subs	r2, r2, #1		@ 1
-	stmia	r0!, {r3, r4, ip, lr}	@ 4
-	ldmneia	r1!, {r3, r4, ip, lr}	@ 4
-	bne	1b			@ 1
-	mcr	p15, 0, r2, c7, c7, 0	@ flush ID cache
-	ldmfd	sp!, {r4, pc}		@ 3
-
-	.align	5
-/*
- * ARMv4 optimised clear_user_page
- *
- * Same story as above.
- */
-ENTRY(v4wt_clear_user_page)
-	str	lr, [sp, #-4]!
-	mov	r1, #PAGE_SZ/64		@ 1
-	mov	r2, #0			@ 1
-	mov	r3, #0			@ 1
-	mov	ip, #0			@ 1
-	mov	lr, #0			@ 1
-1:	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	stmia	r0!, {r2, r3, ip, lr}	@ 4
-	subs	r1, r1, #1		@ 1
-	bne	1b			@ 1
-	mcr	p15, 0, r2, c7, c7, 0	@ flush ID cache
-	ldr	pc, [sp], #4
-
-	__INITDATA
-
-	.type	v4wt_user_fns, #object
-ENTRY(v4wt_user_fns)
-	.long	v4wt_clear_user_page
-	.long	v4wt_copy_user_page
-	.size	v4wt_user_fns, . - v4wt_user_fns
arch/arm/mm/copypage-v4wt.c (new file, 88 lines)
@@ -0,0 +1,88 @@
+/*
+ *  linux/arch/arm/mm/copypage-v4wt.S
+ *
+ *  Copyright (C) 1995-1999 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ *  This is for CPUs with a writethrough cache and 'flush ID cache' is
+ *  the only supported cache operation.
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * ARMv4 optimised copy_user_highpage
+ *
+ * Since we have writethrough caches, we don't have to worry about
+ * dirty data in the cache.  However, we do have to ensure that
+ * subsequent reads are up to date.
+ */
+static void __attribute__((naked))
+v4wt_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, lr}		@ 2\n\
+	mov	r2, %0			@ 1\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4\n\
+1:	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4+1\n\
+	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4\n\
+	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmia	r1!, {r3, r4, ip, lr}	@ 4\n\
+	subs	r2, r2, #1		@ 1\n\
+	stmia	r0!, {r3, r4, ip, lr}	@ 4\n\
+	ldmneia	r1!, {r3, r4, ip, lr}	@ 4\n\
+	bne	1b			@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0	@ flush ID cache\n\
+	ldmfd	sp!, {r4, pc}		@ 3"
+	:
+	: "I" (PAGE_SIZE / 64));
+}
+
+void v4wt_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	v4wt_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * ARMv4 optimised clear_user_page
+ *
+ * Same story as above.
+ */
+void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile("\
+	mov	r1, %2			@ 1\n\
+	mov	r2, #0			@ 1\n\
+	mov	r3, #0			@ 1\n\
+	mov	ip, #0			@ 1\n\
+	mov	lr, #0			@ 1\n\
+1:	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	stmia	%0!, {r2, r3, ip, lr}	@ 4\n\
+	subs	r1, r1, #1		@ 1\n\
+	bne	1b			@ 1\n\
+	mcr	p15, 0, r2, c7, c7, 0	@ flush ID cache"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 64)
+	: "r1", "r2", "r3", "ip", "lr");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns v4wt_user_fns __initdata = {
+	.cpu_clear_user_highpage = v4wt_clear_user_highpage,
+	.cpu_copy_user_highpage = v4wt_copy_user_highpage,
+};
arch/arm/mm/copypage-v6.c
@@ -10,8 +10,8 @@
 #include <linux/init.h>
 #include <linux/spinlock.h>
-#include <linux/mm.h>
+#include <linux/highmem.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
@@ -33,41 +33,56 @@ static DEFINE_SPINLOCK(v6_lock);
  * Copy the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of these pages.
  */
-static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void v6_copy_user_highpage_nonaliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
 {
+	void *kto, *kfrom;
+
+	kfrom = kmap_atomic(from, KM_USER0);
+	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kfrom, KM_USER0);
 }
 
 /*
  * Clear the user page.  No aliasing to deal with so we can just
  * attack the kernel's existing mapping of this page.
  */
-static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
+	void *kaddr = kmap_atomic(page, KM_USER0);
 	clear_page(kaddr);
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 /*
- * Copy the page, taking account of the cache colour.
+ * Discard data in the kernel mapping for the new page.
+ * FIXME: needs this MCRR to be supported.
  */
-static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr)
+static void discard_old_kernel_data(void *kto)
 {
-	unsigned int offset = CACHE_COLOUR(vaddr);
-	unsigned long from, to;
-	struct page *page = virt_to_page(kfrom);
-
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
-
-	/*
-	 * Discard data in the kernel mapping for the new page.
-	 * FIXME: needs this MCRR to be supported.
-	 */
 	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
 	   :
 	   : "r" (kto),
 	     "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES)
 	   : "cc");
 }
 
+/*
+ * Copy the page, taking account of the cache colour.
+ */
+static void v6_copy_user_highpage_aliasing(struct page *to,
+	struct page *from, unsigned long vaddr)
+{
+	unsigned int offset = CACHE_COLOUR(vaddr);
+	unsigned long kfrom, kto;
+
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
+
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(to));
+
 	/*
 	 * Now copy the page using the same cache colour as the
@@ -75,16 +90,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0);
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0);
 
-	from = from_address + (offset << PAGE_SHIFT);
-	to = to_address + (offset << PAGE_SHIFT);
+	kfrom = from_address + (offset << PAGE_SHIFT);
+	kto = to_address + (offset << PAGE_SHIFT);
 
-	flush_tlb_kernel_page(from);
-	flush_tlb_kernel_page(to);
+	flush_tlb_kernel_page(kfrom);
+	flush_tlb_kernel_page(kto);
 
-	copy_page((void *)to, (void *)from);
+	copy_page((void *)kto, (void *)kfrom);
 
 	spin_unlock(&v6_lock);
 }
@@ -94,20 +109,13 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 * so remap the kernel page into the same cache colour as the user
 * page.
 */
-static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
+static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long to = to_address + (offset << PAGE_SHIFT);
 
-	/*
-	 * Discard data in the kernel mapping for the new page
-	 * FIXME: needs this MCRR to be supported.
-	 */
-	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
-	   :
-	   : "r" (kaddr),
-	     "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES)
-	   : "cc");
+	/* FIXME: not highmem safe */
+	discard_old_kernel_data(page_address(page));
 
 	/*
 	 * Now clear the page using the same cache colour as
@@ -115,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 	 */
 	spin_lock(&v6_lock);
 
-	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0);
+	set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0);
 	flush_tlb_kernel_page(to);
 	clear_page((void *)to);
 
@@ -123,15 +131,15 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr)
 }
 
 struct cpu_user_fns v6_user_fns __initdata = {
-	.cpu_clear_user_page	= v6_clear_user_page_nonaliasing,
-	.cpu_copy_user_page	= v6_copy_user_page_nonaliasing,
+	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
+	.cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing,
 };
 
 static int __init v6_userpage_init(void)
 {
 	if (cache_is_vipt_aliasing()) {
-		cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing;
-		cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing;
+		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
+		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
 	}
 
 	return 0;
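On an aliasing VIPT cache the kernel-side alias must land on the same cache colour as the user mapping, which is why both aliasing paths above index their reserved window by CACHE_COLOUR(vaddr). A sketch of the arithmetic for a four-colour case (the geometry is assumed; the real macro derives its mask from the cache layout):

#include <stdio.h>

#define PAGE_SHIFT	12

/* Four page colours, e.g. a 16K direct-mapped span over 4K pages. */
#define CACHE_COLOUR(vaddr)	(((vaddr) >> PAGE_SHIFT) & 3)

/* Choose the page inside a reserved multi-page kernel window whose
 * colour matches the user address, as the code does with
 * from_address/to_address. */
static unsigned long alias(unsigned long window, unsigned long user_vaddr)
{
	return window + (CACHE_COLOUR(user_vaddr) << PAGE_SHIFT);
}

int main(void)
{
	printf("%#lx\n", alias(0xffff8000UL, 0x401ff000UL));	/* colour 3 */
	printf("%#lx\n", alias(0xffff8000UL, 0x40000000UL));	/* colour 0 */
	return 0;
}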
arch/arm/lib/copypage-xsc3.S (deleted, 97 lines)
@@ -1,97 +0,0 @@
-/*
- *  linux/arch/arm/lib/copypage-xsc3.S
- *
- *  Copyright (C) 2004 Intel Corp.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Adapted for 3rd gen XScale core, no more mini-dcache
- * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
- */
-
-#include <linux/linkage.h>
-#include <linux/init.h>
-#include <asm/asm-offsets.h>
-
-/*
- * General note:
- *  We don't really want write-allocate cache behaviour for these functions
- *  since that will just eat through 8K of the cache.
- */
-
-	.text
-	.align	5
-/*
- * XSC3 optimised copy_user_page
- *  r0 = destination
- *  r1 = source
- *  r2 = virtual user address of ultimate destination page
- *
- * The source page may have some clean entries in the cache already, but we
- * can safely ignore them - break_cow() will flush them out of the cache
- * if we eventually end up using our copied page.
- *
- */
-ENTRY(xsc3_mc_copy_user_page)
-	stmfd	sp!, {r4, r5, lr}
-	mov	lr, #PAGE_SZ/64-1
-
-	pld	[r1, #0]
-	pld	[r1, #32]
-1:	pld	[r1, #64]
-	pld	[r1, #96]
-
-2:	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1	@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	ldrd	r2, [r1], #8
-	mov	ip, r0
-	ldrd	r4, [r1], #8
-	mcr	p15, 0, ip, c7, c6, 1	@ invalidate
-	strd	r2, [r0], #8
-	ldrd	r2, [r1], #8
-	subs	lr, lr, #1
-	strd	r4, [r0], #8
-	ldrd	r4, [r1], #8
-	strd	r2, [r0], #8
-	strd	r4, [r0], #8
-	bgt	1b
-	beq	2b
-
-	ldmfd	sp!, {r4, r5, pc}
-
-	.align	5
-/*
- * XScale optimised clear_user_page
- *  r0 = destination
- *  r1 = virtual user address of ultimate destination page
- */
-ENTRY(xsc3_mc_clear_user_page)
-	mov	r1, #PAGE_SZ/32
-	mov	r2, #0
-	mov	r3, #0
-1:	mcr	p15, 0, r0, c7, c6, 1	@ invalidate line
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	strd	r2, [r0], #8
-	subs	r1, r1, #1
-	bne	1b
-	mov	pc, lr
-
-	__INITDATA
-
-	.type	xsc3_mc_user_fns, #object
-ENTRY(xsc3_mc_user_fns)
-	.long	xsc3_mc_clear_user_page
-	.long	xsc3_mc_copy_user_page
-	.size	xsc3_mc_user_fns, . - xsc3_mc_user_fns
arch/arm/mm/copypage-xsc3.c (new file, 113 lines)
@@ -0,0 +1,113 @@
+/*
+ *  linux/arch/arm/mm/copypage-xsc3.S
+ *
+ *  Copyright (C) 2004 Intel Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Adapted for 3rd gen XScale core, no more mini-dcache
+ * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
+ */
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+/*
+ * General note:
+ *  We don't really want write-allocate cache behaviour for these functions
+ *  since that will just eat through 8K of the cache.
+ */
+
+/*
+ * XSC3 optimised copy_user_highpage
+ *  r0 = destination
+ *  r1 = source
+ *
+ * The source page may have some clean entries in the cache already, but we
+ * can safely ignore them - break_cow() will flush them out of the cache
+ * if we eventually end up using our copied page.
+ *
+ */
+static void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom)
+{
+	asm("\
+	stmfd	sp!, {r4, r5, lr}	\n\
+	mov	lr, %0			\n\
+					\n\
+	pld	[r1, #0]		\n\
+	pld	[r1, #32]		\n\
+1:	pld	[r1, #64]		\n\
+	pld	[r1, #96]		\n\
+					\n\
+2:	ldrd	r2, [r1], #8		\n\
+	mov	ip, r0			\n\
+	ldrd	r4, [r1], #8		\n\
+	mcr	p15, 0, ip, c7, c6, 1	@ invalidate\n\
+	strd	r2, [r0], #8		\n\
+	ldrd	r2, [r1], #8		\n\
+	strd	r4, [r0], #8		\n\
+	ldrd	r4, [r1], #8		\n\
+	strd	r2, [r0], #8		\n\
+	strd	r4, [r0], #8		\n\
+	ldrd	r2, [r1], #8		\n\
+	mov	ip, r0			\n\
+	ldrd	r4, [r1], #8		\n\
+	mcr	p15, 0, ip, c7, c6, 1	@ invalidate\n\
+	strd	r2, [r0], #8		\n\
+	ldrd	r2, [r1], #8		\n\
+	subs	lr, lr, #1		\n\
+	strd	r4, [r0], #8		\n\
+	ldrd	r4, [r1], #8		\n\
+	strd	r2, [r0], #8		\n\
+	strd	r4, [r0], #8		\n\
+	bgt	1b			\n\
+	beq	2b			\n\
+					\n\
+	ldmfd	sp!, {r4, r5, pc}"
+	:
+	: "I" (PAGE_SIZE / 64 - 1));
+}
+
+void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
+{
+	void *kto, *kfrom;
+
+	kto = kmap_atomic(to, KM_USER0);
+	kfrom = kmap_atomic(from, KM_USER1);
+	xsc3_mc_copy_user_page(kto, kfrom);
+	kunmap_atomic(kfrom, KM_USER1);
+	kunmap_atomic(kto, KM_USER0);
+}
+
+/*
+ * XScale optimised clear_user_page
+ *  r0 = destination
+ *  r1 = virtual user address of ultimate destination page
+ */
+void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	asm volatile ("\
+	mov	r1, %2			\n\
+	mov	r2, #0			\n\
+	mov	r3, #0			\n\
+1:	mcr	p15, 0, %0, c7, c6, 1	@ invalidate line\n\
+	strd	r2, [%0], #8		\n\
+	strd	r2, [%0], #8		\n\
+	strd	r2, [%0], #8		\n\
+	strd	r2, [%0], #8		\n\
+	subs	r1, r1, #1		\n\
+	bne	1b"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3");
+	kunmap_atomic(kaddr, KM_USER0);
+}
+
+struct cpu_user_fns xsc3_mc_user_fns __initdata = {
+	.cpu_clear_user_highpage = xsc3_mc_clear_user_highpage,
+	.cpu_copy_user_highpage = xsc3_mc_copy_user_highpage,
+};
arch/arm/mm/copypage-xscale.c
@@ -15,8 +15,8 @@
  */
 #include <linux/init.h>
-#include <linux/mm.h>
+#include <linux/highmem.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
@@ -35,7 +35,7 @@
 static DEFINE_SPINLOCK(minicache_lock);
 
 /*
- * XScale mini-dcache optimised copy_user_page
+ * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
@@ -90,48 +90,53 @@ mc_copy_user_page(void *from, void *to)
 	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
 }
 
-void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
+	unsigned long vaddr)
 {
-	struct page *page = virt_to_page(kfrom);
+	void *kto = kmap_atomic(to, KM_USER1);
 
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
-		__flush_dcache_page(page_mapping(page), page);
+	if (test_and_clear_bit(PG_dcache_dirty, &from->flags))
+		__flush_dcache_page(page_mapping(from), from);
 
 	spin_lock(&minicache_lock);
 
-	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
+	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0);
 	flush_tlb_kernel_page(COPYPAGE_MINICACHE);
 
 	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);
 
 	spin_unlock(&minicache_lock);
+
+	kunmap_atomic(kto, KM_USER1);
 }
 
 /*
  * XScale optimised clear_user_page
  */
-void __attribute__((naked))
-xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+void
+xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
+	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
 	asm volatile(
-	"mov	r1, %0			\n\
+	"mov	r1, %2			\n\
 	mov	r2, #0			\n\
 	mov	r3, #0			\n\
-1:	mov	ip, r0			\n\
-	strd	r2, [r0], #8		\n\
-	strd	r2, [r0], #8		\n\
-	strd	r2, [r0], #8		\n\
-	strd	r2, [r0], #8		\n\
+1:	mov	ip, %0			\n\
+	strd	r2, [%0], #8		\n\
+	strd	r2, [%0], #8		\n\
+	strd	r2, [%0], #8		\n\
+	strd	r2, [%0], #8		\n\
 	mcr	p15, 0, ip, c7, c10, 1	@ clean D line\n\
 	subs	r1, r1, #1		\n\
 	mcr	p15, 0, ip, c7, c6, 1	@ invalidate D line\n\
-	bne	1b			\n\
-	mov	pc, lr"
-	:
-	: "I" (PAGE_SIZE / 32));
+	bne	1b"
+	: "=r" (ptr)
+	: "0" (kaddr), "I" (PAGE_SIZE / 32)
+	: "r1", "r2", "r3", "ip");
+	kunmap_atomic(kaddr, KM_USER0);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
-	.cpu_clear_user_page	= xscale_mc_clear_user_page,
-	.cpu_copy_user_page	= xscale_mc_copy_user_page,
+	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
+	.cpu_copy_user_highpage = xscale_mc_copy_user_highpage,
 };
arch/arm/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
+#include <linux/page-flags.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -83,13 +84,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 			break;
 		}
 
-#ifndef CONFIG_HIGHMEM
 		/* We must not map this if we have highmem enabled */
+		if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
+			break;
+
 		pte = pte_offset_map(pmd, addr);
 		printk(", *pte=%08lx", pte_val(*pte));
 		printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
 		pte_unmap(pte);
-#endif
 	} while(0);
 
 	printk("\n");
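The show_pte() change swaps a compile-time exclusion for a run-time check: with highmem enabled, a page-table page may itself live in highmem where the low-memory linear mapping cannot reach it, so the dump simply stops instead of dereferencing an invalid address. A hedged sketch of the guard's shape, with the kernel helpers stubbed (boundary value is illustrative):

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT 12

/* Stand-in: in the kernel, PageHighMem() tests a zone property of the
 * struct page; here any pfn above an arbitrary cut-off counts. */
static bool page_is_highmem(unsigned long pfn)
{
	return pfn >= 0x30000;	/* illustrative boundary */
}

/* Bail out before touching a PTE table the linear map cannot reach. */
static void show_pte_tail(unsigned long pmd_val)
{
	unsigned long pfn = pmd_val >> PAGE_SHIFT;

	if (page_is_highmem(pfn)) {
		printf("pte table in highmem, not dumping\n");
		return;
	}
	printf("would map and dump PTEs for pfn %#lx\n", pfn);
}

int main(void)
{
	show_pte_tail(0x12345000UL);	/* lowmem: dumped */
	show_pte_tail(0x40000000UL);	/* highmem: skipped */
	return 0;
}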
arch/arm/mm/init.c
@@ -64,10 +64,11 @@ static int __init parse_tag_initrd2(const struct tag *tag)
 __tagtable(ATAG_INITRD2, parse_tag_initrd2);
 
 /*
- * This is used to pass memory configuration data from paging_init
- * to mem_init, and by show_mem() to skip holes in the memory map.
+ * This keeps memory configuration data used by a couple memory
+ * initialization functions, as well as show_mem() for the skipping
+ * of holes in the memory map.  It is populated by arm_add_memory().
 */
-static struct meminfo meminfo = { 0, };
+struct meminfo meminfo;
 
 void show_mem(void)
 {
@@ -331,13 +332,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 	free_area_init_node(node, zone_size, start_pfn, zhole_size);
 }
 
-void __init bootmem_init(struct meminfo *mi)
+void __init bootmem_init(void)
 {
+	struct meminfo *mi = &meminfo;
 	unsigned long memend_pfn = 0;
 	int node, initrd_node;
 
-	memcpy(&meminfo, mi, sizeof(meminfo));
-
 	/*
 	 * Locate which node contains the ramdisk image, if any.
 	 */
@@ -394,20 +394,22 @@ void __init bootmem_init(struct meminfo *mi)
 	max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
 }
 
-static inline void free_area(unsigned long addr, unsigned long end, char *s)
+static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 {
-	unsigned int size = (end - addr) >> 10;
+	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
 
-	for (; addr < end; addr += PAGE_SIZE) {
-		struct page *page = virt_to_page(addr);
+	for (; pfn < end; pfn++) {
+		struct page *page = pfn_to_page(pfn);
 		ClearPageReserved(page);
 		init_page_count(page);
-		free_page(addr);
-		totalram_pages++;
+		__free_page(page);
+		pages++;
 	}
 
 	if (size && s)
 		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
+
+	return pages;
 }
 
 static inline void
@@ -478,13 +480,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 */
 void __init mem_init(void)
 {
-	unsigned int codepages, datapages, initpages;
+	unsigned int codesize, datasize, initsize;
 	int i, node;
 
-	codepages = &_etext - &_text;
-	datapages = &_end - &__data_start;
-	initpages = &__init_end - &__init_begin;
-
 #ifndef CONFIG_DISCONTIGMEM
 	max_mapnr = virt_to_page(high_memory) - mem_map;
 #endif
@@ -501,7 +499,8 @@ void __init mem_init(void)
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
-	free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL);
+	totalram_pages += free_area(PHYS_PFN_OFFSET,
+				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
 #endif
 
 	/*
@@ -509,18 +508,21 @@ void __init mem_init(void)
 	 * real number of pages we have in this system
 	 */
 	printk(KERN_INFO "Memory:");
-
 	num_physpages = 0;
 	for (i = 0; i < meminfo.nr_banks; i++) {
 		num_physpages += bank_pfn_size(&meminfo.bank[i]);
 		printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20);
 	}
-
 	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
+
+	codesize = &_etext - &_text;
+	datasize = &_end - &__data_start;
+	initsize = &__init_end - &__init_begin;
+
 	printk(KERN_NOTICE "Memory: %luKB available (%dK code, "
 		"%dK data, %dK init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		codepages >> 10, datapages >> 10, initpages >> 10);
+		codesize >> 10, datasize >> 10, initsize >> 10);
 
 	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
 		extern int sysctl_overcommit_memory;
@@ -535,11 +537,10 @@ void __init mem_init(void)
 
 void free_initmem(void)
 {
-	if (!machine_is_integrator() && !machine_is_cintegrator()) {
-		free_area((unsigned long)(&__init_begin),
-			  (unsigned long)(&__init_end),
-			  "init");
-	}
+	if (!machine_is_integrator() && !machine_is_cintegrator())
+		totalram_pages += free_area(__phys_to_pfn(__pa(&__init_begin)),
+					    __phys_to_pfn(__pa(&__init_end)),
+					    "init");
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -549,7 +550,9 @@ static int keep_initrd;
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
 	if (!keep_initrd)
-		free_area(start, end, "initrd");
+		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+					    __phys_to_pfn(__pa(end)),
+					    "initrd");
 }
 
 static int __init keepinitrd_setup(char *__unused)
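free_area() now walks page frame numbers rather than virtual addresses (a highmem page has no permanent virtual address to iterate over) and returns the count so callers can batch the totalram_pages update. A hedged, userspace-flavoured sketch of the accounting change, with the per-page kernel helpers stubbed:

#include <stdio.h>

#define PAGE_SHIFT 12

/* Stand-ins for the kernel's per-page bookkeeping. */
static void clear_page_reserved(unsigned long pfn) { (void)pfn; }
static void free_one_page(unsigned long pfn) { (void)pfn; }

/* Returns the number of pages released; the caller accumulates the
 * total, mirroring "totalram_pages += free_area(...)" in the diff. */
static int free_area(unsigned long pfn, unsigned long end, const char *s)
{
	unsigned int pages = 0;
	unsigned int size = (end - pfn) << (PAGE_SHIFT - 10);	/* KiB */

	for (; pfn < end; pfn++) {
		clear_page_reserved(pfn);
		free_one_page(pfn);
		pages++;
	}

	if (size && s)
		printf("Freeing %s memory: %uK\n", s, size);

	return pages;
}

int main(void)
{
	unsigned long totalram_pages = 0;

	totalram_pages += free_area(0x100, 0x180, "init");	/* 128 pages */
	printf("totalram_pages = %lu\n", totalram_pages);
	return 0;
}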
arch/arm/mm/mm.h
@@ -32,7 +32,7 @@ struct meminfo;
 struct pglist_data;
 
 void __init create_mapping(struct map_desc *md);
-void __init bootmem_init(struct meminfo *mi);
+void __init bootmem_init(void);
 void reserve_node_zero(struct pglist_data *pgdat);
 
 extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end;
arch/arm/mm/mmu.c
@@ -646,61 +646,79 @@ static void __init early_vmalloc(char **arg)
 			"vmalloc area too small, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+
+	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
+		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
+		printk(KERN_WARNING
+			"vmalloc area is too big, limiting to %luMB\n",
+			vmalloc_reserve >> 20);
+	}
 }
 __early_param("vmalloc=", early_vmalloc);
 
 #define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
 
-static int __init check_membank_valid(struct membank *mb)
-{
-	/*
-	 * Check whether this memory region has non-zero size or
-	 * invalid node number.
-	 */
-	if (mb->size == 0 || mb->node >= MAX_NUMNODES)
-		return 0;
-
-	/*
-	 * Check whether this memory region would entirely overlap
-	 * the vmalloc area.
-	 */
-	if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
-		printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
-			"(vmalloc region overlap).\n",
-			mb->start, mb->start + mb->size - 1);
-		return 0;
-	}
-
-	/*
-	 * Check whether this memory region would partially overlap
-	 * the vmalloc area.
-	 */
-	if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
-	    phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
-		unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
-
-		printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
-			"to -%.8lx (vmalloc region overlap).\n",
-			mb->start, mb->start + mb->size - 1,
-			mb->start + newsize - 1);
-		mb->size = newsize;
-	}
-
-	return 1;
-}
-
-static void __init sanity_check_meminfo(struct meminfo *mi)
+static void __init sanity_check_meminfo(void)
 {
 	int i, j;
 
-	for (i = 0, j = 0; i < mi->nr_banks; i++) {
-		if (check_membank_valid(&mi->bank[i]))
-			mi->bank[j++] = mi->bank[i];
+	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
+		struct membank *bank = &meminfo.bank[j];
+		*bank = meminfo.bank[i];
+
+#ifdef CONFIG_HIGHMEM
+		/*
+		 * Split those memory banks which are partially overlapping
+		 * the vmalloc area greatly simplifying things later.
+		 */
+		if (__va(bank->start) < VMALLOC_MIN &&
+		    bank->size > VMALLOC_MIN - __va(bank->start)) {
+			if (meminfo.nr_banks >= NR_BANKS) {
+				printk(KERN_CRIT "NR_BANKS too low, "
+						 "ignoring high memory\n");
+			} else {
+				memmove(bank + 1, bank,
+					(meminfo.nr_banks - i) * sizeof(*bank));
+				meminfo.nr_banks++;
+				i++;
+				bank[1].size -= VMALLOC_MIN - __va(bank->start);
+				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+				j++;
+			}
+			bank->size = VMALLOC_MIN - __va(bank->start);
+		}
+#else
+		/*
+		 * Check whether this memory bank would entirely overlap
+		 * the vmalloc area.
+		 */
+		if (__va(bank->start) >= VMALLOC_MIN) {
+			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+			       "(vmalloc region overlap).\n",
+			       bank->start, bank->start + bank->size - 1);
+			continue;
+		}
+
+		/*
+		 * Check whether this memory bank would partially overlap
+		 * the vmalloc area.
+		 */
+		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+		    __va(bank->start + bank->size) < __va(bank->start)) {
+			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+			       "to -%.8lx (vmalloc region overlap).\n",
+			       bank->start, bank->start + bank->size - 1,
+			       bank->start + newsize - 1);
+			bank->size = newsize;
+		}
+#endif
+		j++;
 	}
-	mi->nr_banks = j;
+	meminfo.nr_banks = j;
 }
 
-static inline void prepare_page_table(struct meminfo *mi)
+static inline void prepare_page_table(void)
 {
 	unsigned long addr;
 
@@ -721,7 +739,7 @@ static inline void prepare_page_table(struct meminfo *mi)
 	 * Clear out all the kernel space mappings, except for the first
 	 * memory bank, up to the end of the vmalloc region.
 	 */
-	for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
+	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
 	     addr < VMALLOC_END; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
 }
@@ -880,14 +898,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init paging_init(struct machine_desc *mdesc)
 {
 	void *zero_page;
 
 	build_mem_type_table();
-	sanity_check_meminfo(mi);
-	prepare_page_table(mi);
-	bootmem_init(mi);
+	sanity_check_meminfo();
+	prepare_page_table();
+	bootmem_init();
 	devicemaps_init(mdesc);
 
 	top_pmd = pmd_off_k(0xffff0000);
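The CONFIG_HIGHMEM branch of sanity_check_meminfo() splits any memory bank straddling the lowmem/vmalloc boundary into a lowmem part and a highmem part instead of truncating it. A self-contained sketch of that split under simplified assumptions (physical addresses compared directly against a hypothetical LOWMEM_LIMIT, where the real code compares __va(start) against VMALLOC_MIN):

#include <stdio.h>

/* Simplified membank: physical start and size, in bytes. */
struct bank { unsigned long start, size; };

#define LOWMEM_LIMIT	0x30000000UL	/* illustrative boundary */

/* Split *b at the boundary, filling *hi and returning 1 when the bank
 * straddles it - the same shape as the CONFIG_HIGHMEM branch above. */
static int split_bank(struct bank *b, struct bank *hi)
{
	if (b->start >= LOWMEM_LIMIT || b->size <= LOWMEM_LIMIT - b->start)
		return 0;			/* entirely low or high */

	hi->start = LOWMEM_LIMIT;
	hi->size = b->size - (LOWMEM_LIMIT - b->start);
	b->size = LOWMEM_LIMIT - b->start;	/* truncate the low part */
	return 1;
}

int main(void)
{
	struct bank b = { 0x20000000UL, 0x20000000UL }, hi;

	if (split_bank(&b, &hi))
		printf("low: %#lx+%#lx, high: %#lx+%#lx\n",
		       b.start, b.size, hi.start, hi.size);
	return 0;
}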
arch/arm/mm/nommu.c
@@ -41,27 +41,13 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 			BOOTMEM_DEFAULT);
 }
 
-static void __init sanity_check_meminfo(struct meminfo *mi)
-{
-	int i, j;
-
-	for (i = 0, j = 0; i < mi->nr_banks; i++) {
-		struct membank *mb = &mi->bank[i];
-
-		if (mb->size != 0 && mb->node < MAX_NUMNODES)
-			mi->bank[j++] = mi->bank[i];
-	}
-	mi->nr_banks = j;
-}
-
 /*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
-void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init paging_init(struct machine_desc *mdesc)
 {
-	sanity_check_meminfo(mi);
-	bootmem_init(mi);
+	bootmem_init();
 }
 
 /*
arch/arm/mm/proc-syms.c
@@ -33,8 +33,8 @@ EXPORT_SYMBOL(cpu_cache);
 
 #ifdef CONFIG_MMU
 #ifndef MULTI_USER
-EXPORT_SYMBOL(__cpu_clear_user_page);
-EXPORT_SYMBOL(__cpu_copy_user_page);
+EXPORT_SYMBOL(__cpu_clear_user_highpage);
+EXPORT_SYMBOL(__cpu_copy_user_highpage);
 #else
 EXPORT_SYMBOL(cpu_user);
 #endif