Merge branch 'for-2.6.23' into merge
@@ -12,7 +12,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -9,7 +9,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -11,8 +11,7 @@ obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o
 hash-$(CONFIG_PPC_NATIVE)       := hash_native_64.o
 obj-$(CONFIG_PPC64)             += init_64.o pgtable_64.o mmu_context_64.o \
                                    hash_utils_64.o hash_low_64.o tlb_64.o \
-                                   slb_low.o slb.o stab.o mmap.o imalloc.o \
-                                   $(hash-y)
+                                   slb_low.o slb.o stab.o mmap.o $(hash-y)
 obj-$(CONFIG_PPC_STD_MMU_32)    += ppc_mmu_32.o hash_low_32.o tlb_32.o
 obj-$(CONFIG_40x)               += 4xx_mmu.o
 obj-$(CONFIG_44x)               += 44x_mmu.o
@@ -380,7 +380,7 @@ out_of_memory:
         }
         printk("VM: killing process %s\n", current->comm);
         if (user_mode(regs))
-                do_exit(SIGKILL);
+                do_group_exit(SIGKILL);
         return SIGKILL;
 
 do_sigbus:
@@ -14,7 +14,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -104,7 +104,7 @@ static inline void tlbie(unsigned long va, int psize, int local)
                 spin_unlock(&native_tlbie_lock);
 }
 
-static inline void native_lock_hpte(hpte_t *hptep)
+static inline void native_lock_hpte(struct hash_pte *hptep)
 {
         unsigned long *word = &hptep->v;
 
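The hpte_t / struct hash_pte churn in this and the following hunks is a pure rename. A sketch of the type before and after, inferred from the `hptep->v` and `hptep->r` accesses in these hunks (the header it lives in is not part of this diff, so treat the exact location as an assumption):

```c
/* Before: an anonymous struct hidden behind a typedef. */
typedef struct {
        unsigned long v;        /* valid/lock dword, cf. hptep->v */
        unsigned long r;        /* second dword, cf. hptep->r */
} hpte_t;

/* After: a named struct, which also allows bare forward
 * declarations such as the `struct hash_pte;` added further down. */
struct hash_pte {
        unsigned long v;
        unsigned long r;
};
```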
@@ -116,7 +116,7 @@ static inline void native_lock_hpte(hpte_t *hptep)
         }
 }
 
-static inline void native_unlock_hpte(hpte_t *hptep)
+static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
         unsigned long *word = &hptep->v;
 
@@ -128,7 +128,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
                         unsigned long pa, unsigned long rflags,
                         unsigned long vflags, int psize)
 {
-        hpte_t *hptep = htab_address + hpte_group;
+        struct hash_pte *hptep = htab_address + hpte_group;
         unsigned long hpte_v, hpte_r;
         int i;
 
@@ -163,7 +163,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 
         hptep->r = hpte_r;
         /* Guarantee the second dword is visible before the valid bit */
-        __asm__ __volatile__ ("eieio" : : : "memory");
+        eieio();
         /*
          * Now set the first dword including the valid bit
          * NOTE: this also unlocks the hpte
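Here the open-coded barrier becomes the eieio() helper. From memory of the powerpc barrier headers (an assumption, not shown in this diff), the helper expands to the same instruction and clobber, so the change is cosmetic:

```c
/* Assumed definition of the helper being substituted in: an
 * Enforce In-order Execution of I/O barrier plus a compiler
 * memory clobber, identical to the removed inline asm. */
#define eieio() __asm__ __volatile__ ("eieio" : : : "memory")
```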
@@ -177,7 +177,7 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
 
 static long native_hpte_remove(unsigned long hpte_group)
 {
-        hpte_t *hptep;
+        struct hash_pte *hptep;
         int i;
         int slot_offset;
         unsigned long hpte_v;
@@ -217,7 +217,7 @@ static long native_hpte_remove(unsigned long hpte_group)
 static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
                                  unsigned long va, int psize, int local)
 {
-        hpte_t *hptep = htab_address + slot;
+        struct hash_pte *hptep = htab_address + slot;
         unsigned long hpte_v, want_v;
         int ret = 0;
 
@@ -233,15 +233,14 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
         /* Even if we miss, we need to invalidate the TLB */
         if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
                 DBG_LOW(" -> miss\n");
-                native_unlock_hpte(hptep);
                 ret = -1;
         } else {
                 DBG_LOW(" -> hit\n");
                 /* Update the HPTE */
                 hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) |
                         (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C));
-                native_unlock_hpte(hptep);
         }
+        native_unlock_hpte(hptep);
 
         /* Ensure it is out of the tlb too. */
         tlbie(va, psize, local);
@@ -251,7 +250,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 
 static long native_hpte_find(unsigned long va, int psize)
 {
-        hpte_t *hptep;
+        struct hash_pte *hptep;
         unsigned long hash;
         unsigned long i, j;
         long slot;
@@ -294,7 +293,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 {
         unsigned long vsid, va;
         long slot;
-        hpte_t *hptep;
+        struct hash_pte *hptep;
 
         vsid = get_kernel_vsid(ea);
         va = (vsid << 28) | (ea & 0x0fffffff);
@@ -315,7 +314,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
 static void native_hpte_invalidate(unsigned long slot, unsigned long va,
                                    int psize, int local)
 {
-        hpte_t *hptep = htab_address + slot;
+        struct hash_pte *hptep = htab_address + slot;
         unsigned long hpte_v;
         unsigned long want_v;
         unsigned long flags;
@@ -345,7 +344,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 #define LP_BITS         8
 #define LP_MASK(i)      ((0xFF >> (i)) << LP_SHIFT)
 
-static void hpte_decode(hpte_t *hpte, unsigned long slot,
+static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
                         int *psize, unsigned long *va)
 {
         unsigned long hpte_r = hpte->r;
@@ -415,7 +414,7 @@ static void hpte_decode(hpte_t *hpte, unsigned long slot,
 static void native_hpte_clear(void)
 {
         unsigned long slot, slots, flags;
-        hpte_t *hptep = htab_address;
+        struct hash_pte *hptep = htab_address;
         unsigned long hpte_v, va;
         unsigned long pteg_count;
         int psize;
@@ -462,7 +461,7 @@ static void native_hpte_clear(void)
 static void native_flush_hash_range(unsigned long number, int local)
 {
         unsigned long va, hash, index, hidx, shift, slot;
-        hpte_t *hptep;
+        struct hash_pte *hptep;
         unsigned long hpte_v;
         unsigned long want_v;
         unsigned long flags;
@@ -87,7 +87,7 @@ extern unsigned long dart_tablebase;
 static unsigned long _SDR1;
 struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
 
-hpte_t *htab_address;
+struct hash_pte *htab_address;
 unsigned long htab_size_bytes;
 unsigned long htab_hash_mask;
 int mmu_linear_psize = MMU_PAGE_4K;
@@ -1,313 +0,0 @@
-/*
- * c 2001 PPC 64 Team, IBM Corp
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/pgtable.h>
-#include <linux/mutex.h>
-#include <asm/cacheflush.h>
-
-#include "mmu_decl.h"
-
-static DEFINE_MUTEX(imlist_mutex);
-struct vm_struct * imlist = NULL;
-
-static int get_free_im_addr(unsigned long size, unsigned long *im_addr)
-{
-        unsigned long addr;
-        struct vm_struct **p, *tmp;
-
-        addr = ioremap_bot;
-        for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
-                if (size + addr < (unsigned long) tmp->addr)
-                        break;
-                if ((unsigned long)tmp->addr >= ioremap_bot)
-                        addr = tmp->size + (unsigned long) tmp->addr;
-                if (addr >= IMALLOC_END-size)
-                        return 1;
-        }
-        *im_addr = addr;
-
-        return 0;
-}
-
-/* Return whether the region described by v_addr and size is a subset
- * of the region described by parent
- */
-static inline int im_region_is_subset(unsigned long v_addr, unsigned long size,
-                        struct vm_struct *parent)
-{
-        return (int) (v_addr >= (unsigned long) parent->addr &&
-                      v_addr < (unsigned long) parent->addr + parent->size &&
-                      size < parent->size);
-}
-
-/* Return whether the region described by v_addr and size is a superset
- * of the region described by child
- */
-static int im_region_is_superset(unsigned long v_addr, unsigned long size,
-                        struct vm_struct *child)
-{
-        struct vm_struct parent;
-
-        parent.addr = (void *) v_addr;
-        parent.size = size;
-
-        return im_region_is_subset((unsigned long) child->addr, child->size,
-                        &parent);
-}
-
-/* Return whether the region described by v_addr and size overlaps
- * the region described by vm.  Overlapping regions meet the
- * following conditions:
- * 1) The regions share some part of the address space
- * 2) The regions aren't identical
- * 3) Neither region is a subset of the other
- */
-static int im_region_overlaps(unsigned long v_addr, unsigned long size,
-                        struct vm_struct *vm)
-{
-        if (im_region_is_superset(v_addr, size, vm))
-                return 0;
-
-        return (v_addr + size > (unsigned long) vm->addr + vm->size &&
-                v_addr < (unsigned long) vm->addr + vm->size) ||
-               (v_addr < (unsigned long) vm->addr &&
-                v_addr + size > (unsigned long) vm->addr);
-}
-
-/* Determine imalloc status of region described by v_addr and size.
- * Can return one of the following:
- * IM_REGION_UNUSED   - Entire region is unallocated in imalloc space.
- * IM_REGION_SUBSET   - Region is a subset of a region that is already
- *                      allocated in imalloc space.
- *                      vm will be assigned to a ptr to the parent region.
- * IM_REGION_EXISTS   - Exact region already allocated in imalloc space.
- *                      vm will be assigned to a ptr to the existing imlist
- *                      member.
- * IM_REGION_OVERLAPS - Region overlaps an allocated region in imalloc space.
- * IM_REGION_SUPERSET - Region is a superset of a region that is already
- *                      allocated in imalloc space.
- */
-static int im_region_status(unsigned long v_addr, unsigned long size,
-                        struct vm_struct **vm)
-{
-        struct vm_struct *tmp;
-
-        for (tmp = imlist; tmp; tmp = tmp->next)
-                if (v_addr < (unsigned long) tmp->addr + tmp->size)
-                        break;
-
-        *vm = NULL;
-        if (tmp) {
-                if (im_region_overlaps(v_addr, size, tmp))
-                        return IM_REGION_OVERLAP;
-
-                *vm = tmp;
-                if (im_region_is_subset(v_addr, size, tmp)) {
-                        /* Return with tmp pointing to superset */
-                        return IM_REGION_SUBSET;
-                }
-                if (im_region_is_superset(v_addr, size, tmp)) {
-                        /* Return with tmp pointing to first subset */
-                        return IM_REGION_SUPERSET;
-                }
-                else if (v_addr == (unsigned long) tmp->addr &&
-                         size == tmp->size) {
-                        /* Return with tmp pointing to exact region */
-                        return IM_REGION_EXISTS;
-                }
-        }
-
-        return IM_REGION_UNUSED;
-}
-
-static struct vm_struct * split_im_region(unsigned long v_addr,
-                unsigned long size, struct vm_struct *parent)
-{
-        struct vm_struct *vm1 = NULL;
-        struct vm_struct *vm2 = NULL;
-        struct vm_struct *new_vm = NULL;
-
-        vm1 = kmalloc(sizeof(*vm1), GFP_KERNEL);
-        if (vm1 == NULL) {
-                printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-                return NULL;
-        }
-
-        if (v_addr == (unsigned long) parent->addr) {
-                /* Use existing parent vm_struct to represent child, allocate
-                 * new one for the remainder of parent range
-                 */
-                vm1->size = parent->size - size;
-                vm1->addr = (void *) (v_addr + size);
-                vm1->next = parent->next;
-
-                parent->size = size;
-                parent->next = vm1;
-                new_vm = parent;
-        } else if (v_addr + size == (unsigned long) parent->addr +
-                        parent->size) {
-                /* Allocate new vm_struct to represent child, use existing
-                 * parent one for remainder of parent range
-                 */
-                vm1->size = size;
-                vm1->addr = (void *) v_addr;
-                vm1->next = parent->next;
-                new_vm = vm1;
-
-                parent->size -= size;
-                parent->next = vm1;
-        } else {
-                /* Allocate two new vm_structs for the new child and
-                 * uppermost remainder, and use existing parent one for the
-                 * lower remainder of parent range
-                 */
-                vm2 = kmalloc(sizeof(*vm2), GFP_KERNEL);
-                if (vm2 == NULL) {
-                        printk(KERN_ERR "%s() out of memory\n", __FUNCTION__);
-                        kfree(vm1);
-                        return NULL;
-                }
-
-                vm1->size = size;
-                vm1->addr = (void *) v_addr;
-                vm1->next = vm2;
-                new_vm = vm1;
-
-                vm2->size = ((unsigned long) parent->addr + parent->size) -
-                        (v_addr + size);
-                vm2->addr = (void *) v_addr + size;
-                vm2->next = parent->next;
-
-                parent->size = v_addr - (unsigned long) parent->addr;
-                parent->next = vm1;
-        }
-
-        return new_vm;
-}
-
-static struct vm_struct * __add_new_im_area(unsigned long req_addr,
-                unsigned long size)
-{
-        struct vm_struct **p, *tmp, *area;
-
-        for (p = &imlist; (tmp = *p) ; p = &tmp->next) {
-                if (req_addr + size <= (unsigned long)tmp->addr)
-                        break;
-        }
-
-        area = kmalloc(sizeof(*area), GFP_KERNEL);
-        if (!area)
-                return NULL;
-        area->flags = 0;
-        area->addr = (void *)req_addr;
-        area->size = size;
-        area->next = *p;
-        *p = area;
-
-        return area;
-}
-
-static struct vm_struct * __im_get_area(unsigned long req_addr,
-                unsigned long size,
-                int criteria)
-{
-        struct vm_struct *tmp;
-        int status;
-
-        status = im_region_status(req_addr, size, &tmp);
-        if ((criteria & status) == 0) {
-                return NULL;
-        }
-
-        switch (status) {
-        case IM_REGION_UNUSED:
-                tmp = __add_new_im_area(req_addr, size);
-                break;
-        case IM_REGION_SUBSET:
-                tmp = split_im_region(req_addr, size, tmp);
-                break;
-        case IM_REGION_EXISTS:
-                /* Return requested region */
-                break;
-        case IM_REGION_SUPERSET:
-                /* Return first existing subset of requested region */
-                break;
-        default:
-                printk(KERN_ERR "%s() unexpected imalloc region status\n",
-                                __FUNCTION__);
-                tmp = NULL;
-        }
-
-        return tmp;
-}
-
-struct vm_struct * im_get_free_area(unsigned long size)
-{
-        struct vm_struct *area;
-        unsigned long addr;
-
-        mutex_lock(&imlist_mutex);
-        if (get_free_im_addr(size, &addr)) {
-                printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n",
-                                __FUNCTION__, size);
-                area = NULL;
-                goto next_im_done;
-        }
-
-        area = __im_get_area(addr, size, IM_REGION_UNUSED);
-        if (area == NULL) {
-                printk(KERN_ERR
-                       "%s() cannot obtain area for addr 0x%lx size 0x%lx\n",
-                        __FUNCTION__, addr, size);
-        }
-next_im_done:
-        mutex_unlock(&imlist_mutex);
-        return area;
-}
-
-struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-                int criteria)
-{
-        struct vm_struct *area;
-
-        mutex_lock(&imlist_mutex);
-        area = __im_get_area(v_addr, size, criteria);
-        mutex_unlock(&imlist_mutex);
-        return area;
-}
-
-void im_free(void * addr)
-{
-        struct vm_struct **p, *tmp;
-
-        if (!addr)
-                return;
-        if ((unsigned long) addr & ~PAGE_MASK) {
-                printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr);
-                return;
-        }
-        mutex_lock(&imlist_mutex);
-        for (p = &imlist ; (tmp = *p) ; p = &tmp->next) {
-                if (tmp->addr == addr) {
-                        *p = tmp->next;
-                        unmap_vm_area(tmp);
-                        kfree(tmp);
-                        mutex_unlock(&imlist_mutex);
-                        return;
-                }
-        }
-        mutex_unlock(&imlist_mutex);
-        printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__,
-                        addr);
-}
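The deleted imalloc.c tracked 64-bit ioremap regions in a hand-rolled linked list guarded by imlist_mutex. The pgtable_64.c hunks later in this merge hand that bookkeeping to the generic vmalloc area allocator; a minimal sketch of the replacement pattern, using only calls that appear below:

```c
/* Allocate a VA range in [ioremap_bot, IOREMAP_END) from the
 * generic vmalloc area allocator instead of a private imlist. */
struct vm_struct *area;

area = __get_vm_area(size, VM_IOREMAP, ioremap_bot, IOREMAP_END);
if (area == NULL)
        return NULL;
/* ... establish the page tables at area->addr ... */
vunmap(area->addr);     /* ... and tear down with vunmap() */
```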
@@ -5,7 +5,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
  *
  * Derived from "arch/i386/mm/init.c"
@@ -5,7 +5,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -5,7 +5,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
  *
  * Derived from "arch/i386/mm/init.c"
@@ -129,8 +128,6 @@ int __devinit arch_add_memory(int nid, u64 start, u64 size)
         zone = pgdata->node_zones;
 
         return __add_pages(zone, start_pfn, nr_pages);
-
-        return 0;
 }
 
 /*
@@ -11,7 +11,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -8,7 +8,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -40,8 +39,8 @@ extern int __map_without_bats;
 extern unsigned long ioremap_base;
 extern unsigned int rtas_data, rtas_size;
 
-struct _PTE;
-extern struct _PTE *Hash, *Hash_end;
+struct hash_pte;
+extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
 
 extern unsigned int num_tlbcam_entries;
@@ -90,16 +89,4 @@ static inline void flush_HPTE(unsigned context, unsigned long va,
         else
                 _tlbie(va);
 }
-#else /* CONFIG_PPC64 */
-/* imalloc region types */
-#define IM_REGION_UNUSED        0x1
-#define IM_REGION_SUBSET        0x2
-#define IM_REGION_EXISTS        0x4
-#define IM_REGION_OVERLAP       0x8
-#define IM_REGION_SUPERSET      0x10
-
-extern struct vm_struct * im_get_free_area(unsigned long size);
-extern struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size,
-                        int region_type);
-extern void im_free(void *addr);
 #endif
@@ -8,7 +8,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -37,7 +36,6 @@
 unsigned long ioremap_base;
 unsigned long ioremap_bot;
 EXPORT_SYMBOL(ioremap_bot);     /* aka VMALLOC_END */
-int io_bat_index;
 
 #if defined(CONFIG_6xx) || defined(CONFIG_POWER3)
 #define HAVE_BATS       1
@@ -300,51 +298,6 @@ void __init mapin_ram(void)
         }
 }
 
-/* is x a power of 4? */
-#define is_power_of_4(x)        is_power_of_2(x) && (ffs(x) & 1)
-
-/*
- * Set up a mapping for a block of I/O.
- * virt, phys, size must all be page-aligned.
- * This should only be called before ioremap is called.
- */
-void __init io_block_mapping(unsigned long virt, phys_addr_t phys,
-                      unsigned int size, int flags)
-{
-        int i;
-
-        if (virt > KERNELBASE && virt < ioremap_bot)
-                ioremap_bot = ioremap_base = virt;
-
-#ifdef HAVE_BATS
-        /*
-         * Use a BAT for this if possible...
-         */
-        if (io_bat_index < 2 && is_power_of_2(size)
-            && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
-                setbat(io_bat_index, virt, phys, size, flags);
-                ++io_bat_index;
-                return;
-        }
-#endif /* HAVE_BATS */
-
-#ifdef HAVE_TLBCAM
-        /*
-         * Use a CAM for this if possible...
-         */
-        if (tlbcam_index < num_tlbcam_entries && is_power_of_4(size)
-            && (virt & (size - 1)) == 0 && (phys & (size - 1)) == 0) {
-                settlbcam(tlbcam_index, virt, phys, size, flags, 0);
-                ++tlbcam_index;
-                return;
-        }
-#endif /* HAVE_TLBCAM */
-
-        /* No BATs available, put it in the page tables. */
-        for (i = 0; i < size; i += PAGE_SIZE)
-                map_page(virt + i, phys + i, flags);
-}
-
 /* Scan the real Linux page tables and return a PTE pointer for
  * a virtual address in a context.
  * Returns true (1) if PTE was found, zero otherwise.  The pointer to
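The removed io_block_mapping() gated TLB CAM entries on is_power_of_4(). A standalone check of that macro's logic (userspace sketch; ffs() here comes from <strings.h>, while the kernel has its own):

```c
#include <stdio.h>
#include <strings.h>    /* ffs(): 1-indexed position of lowest set bit */

static int is_power_of_2(unsigned int x) { return x && !(x & (x - 1)); }
/* Same shape as the removed macro: a power of two whose set bit
 * sits at an odd 1-indexed position is also a power of four. */
#define is_power_of_4(x) (is_power_of_2(x) && (ffs(x) & 1))

int main(void)
{
        printf("%d\n", is_power_of_4(16));      /* 1: ffs(16) == 5, odd  */
        printf("%d\n", is_power_of_4(8));       /* 0: ffs(8)  == 4, even */
        return 0;
}
```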
@@ -379,82 +332,6 @@ get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
         return(retval);
 }
 
-/* Find physical address for this virtual address.  Normally used by
- * I/O functions, but anyone can call it.
- */
-unsigned long iopa(unsigned long addr)
-{
-        unsigned long pa;
-
-        /* I don't know why this won't work on PMacs or CHRP.  It
-         * appears there is some bug, or there is some implicit
-         * mapping done not properly represented by BATs or in page
-         * tables.......I am actively working on resolving this, but
-         * can't hold up other stuff.  -- Dan
-         */
-        pte_t *pte;
-        struct mm_struct *mm;
-
-        /* Check the BATs */
-        pa = v_mapped_by_bats(addr);
-        if (pa)
-                return pa;
-
-        /* Allow mapping of user addresses (within the thread)
-         * for DMA if necessary.
-         */
-        if (addr < TASK_SIZE)
-                mm = current->mm;
-        else
-                mm = &init_mm;
-
-        pa = 0;
-        if (get_pteptr(mm, addr, &pte, NULL)) {
-                pa = (pte_val(*pte) & PAGE_MASK) | (addr & ~PAGE_MASK);
-                pte_unmap(pte);
-        }
-
-        return(pa);
-}
-
-/* This is will find the virtual address for a physical one....
- * Swiped from APUS, could be dangerous :-).
- * This is only a placeholder until I really find a way to make this
- * work.  -- Dan
- */
-unsigned long
-mm_ptov (unsigned long paddr)
-{
-        unsigned long ret;
-#if 0
-        if (paddr < 16*1024*1024)
-                ret = ZTWO_VADDR(paddr);
-        else {
-                int i;
-
-                for (i = 0; i < kmap_chunk_count;){
-                        unsigned long phys = kmap_chunks[i++];
-                        unsigned long size = kmap_chunks[i++];
-                        unsigned long virt = kmap_chunks[i++];
-                        if (paddr >= phys
-                            && paddr < (phys + size)){
-                                ret = virt + paddr - phys;
-                                goto exit;
-                        }
-                }
-
-                ret = (unsigned long) __va(paddr);
-        }
-exit:
-#ifdef DEBUGPV
-        printk ("PTOV(%lx)=%lx\n", paddr, ret);
-#endif
-#else
-        ret = (unsigned long)paddr + KERNELBASE;
-#endif
-        return ret;
-}
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
 
 static int __change_page_attr(struct page *page, pgprot_t prot)
@@ -7,7 +7,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -34,41 +33,27 @@
 #include <linux/stddef.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/bootmem.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/nodemask.h>
-#include <linux/module.h>
 
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/prom.h>
-#include <asm/lmb.h>
-#include <asm/rtas.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
-#include <asm/uaccess.h>
 #include <asm/smp.h>
 #include <asm/machdep.h>
 #include <asm/tlb.h>
-#include <asm/eeh.h>
 #include <asm/processor.h>
-#include <asm/mmzone.h>
 #include <asm/cputable.h>
 #include <asm/sections.h>
 #include <asm/system.h>
-#include <asm/iommu.h>
 #include <asm/abs_addr.h>
-#include <asm/vdso.h>
 #include <asm/firmware.h>
 
 #include "mmu_decl.h"
 
-unsigned long ioremap_bot = IMALLOC_BASE;
-static unsigned long phbs_io_bot = PHBS_IO_BASE;
+unsigned long ioremap_bot = IOREMAP_BASE;
 
 /*
  * map_io_page currently only called by __ioremap
@@ -102,8 +87,8 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
                  * entry in the hardware page table.
                  *
                  */
-                if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
-                                      mmu_io_psize)) {
+                if (htab_bolt_mapping(ea, (unsigned long)ea + PAGE_SIZE,
+                                      pa, flags, mmu_io_psize)) {
                         printk(KERN_ERR "Failed to do bolted mapping IO "
                                "memory at %016lx !\n", pa);
                         return -ENOMEM;
@@ -113,8 +98,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
 }
 
 
-static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
-                            unsigned long ea, unsigned long size,
+/**
+ * __ioremap_at - Low level function to establish the page tables
+ *                for an IO mapping
+ */
+void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
                             unsigned long flags)
 {
         unsigned long i;
@@ -122,17 +110,35 @@ static void __iomem * __ioremap_com(phys_addr_t addr, unsigned long pa,
         if ((flags & _PAGE_PRESENT) == 0)
                 flags |= pgprot_val(PAGE_KERNEL);
 
+        WARN_ON(pa & ~PAGE_MASK);
+        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+        WARN_ON(size & ~PAGE_MASK);
+
         for (i = 0; i < size; i += PAGE_SIZE)
-                if (map_io_page(ea+i, pa+i, flags))
+                if (map_io_page((unsigned long)ea+i, pa+i, flags))
                         return NULL;
 
-        return (void __iomem *) (ea + (addr & ~PAGE_MASK));
+        return (void __iomem *)ea;
 }
 
+/**
+ * __iounmap_from - Low level function to tear down the page tables
+ *                  for an IO mapping. This is used for mappings that
+ *                  are manipulated manually, like partial unmapping of
+ *                  PCI IOs or ISA space.
+ */
+void __iounmap_at(void *ea, unsigned long size)
+{
+        WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
+        WARN_ON(size & ~PAGE_MASK);
+
+        unmap_kernel_range((unsigned long)ea, size);
+}
+
 void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
                          unsigned long flags)
 {
-        unsigned long pa, ea;
+        phys_addr_t paligned;
         void __iomem *ret;
 
         /*
@@ -144,27 +150,30 @@ void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
          * IMALLOC_END
          *
          */
-        pa = addr & PAGE_MASK;
-        size = PAGE_ALIGN(addr + size) - pa;
+        paligned = addr & PAGE_MASK;
+        size = PAGE_ALIGN(addr + size) - paligned;
 
-        if ((size == 0) || (pa == 0))
+        if ((size == 0) || (paligned == 0))
                 return NULL;
 
         if (mem_init_done) {
                 struct vm_struct *area;
-                area = im_get_free_area(size);
+
+                area = __get_vm_area(size, VM_IOREMAP,
+                                     ioremap_bot, IOREMAP_END);
                 if (area == NULL)
                         return NULL;
-                ea = (unsigned long)(area->addr);
-                ret = __ioremap_com(addr, pa, ea, size, flags);
+                ret = __ioremap_at(paligned, area->addr, size, flags);
                 if (!ret)
-                        im_free(area->addr);
+                        vunmap(area->addr);
         } else {
-                ea = ioremap_bot;
-                ret = __ioremap_com(addr, pa, ea, size, flags);
+                ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
                 if (ret)
                         ioremap_bot += size;
         }
 
+        if (ret)
+                ret += addr & ~PAGE_MASK;
         return ret;
 }
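In the rewritten __ioremap(), __ioremap_at() deals only in page-aligned quantities, so the sub-page offset is re-applied by the caller at the very end (`ret += addr & ~PAGE_MASK`). A worked, userspace example of the arithmetic with an assumed 4K page size and hypothetical values:

```c
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(a)   (((a) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        unsigned long addr = 0x80001234UL, size = 0x100UL;
        unsigned long paligned = addr & PAGE_MASK;                /* 0x80001000 */
        unsigned long span = PAGE_ALIGN(addr + size) - paligned;  /* 0x1000 */
        unsigned long off = addr & ~PAGE_MASK;                    /* 0x234 */

        printf("map %#lx for %#lx bytes, reader offset %#lx\n",
               paligned, span, off);
        return 0;
}
```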
@@ -187,62 +196,9 @@ void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size,
 }
 
 
-#define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK))
-
-int __ioremap_explicit(phys_addr_t pa, unsigned long ea,
-                       unsigned long size, unsigned long flags)
-{
-        struct vm_struct *area;
-        void __iomem *ret;
-
-        /* For now, require page-aligned values for pa, ea, and size */
-        if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) ||
-            !IS_PAGE_ALIGNED(size)) {
-                printk(KERN_ERR "unaligned value in %s\n", __FUNCTION__);
-                return 1;
-        }
-
-        if (!mem_init_done) {
-                /* Two things to consider in this case:
-                 * 1) No records will be kept (imalloc, etc) that the region
-                 *    has been remapped
-                 * 2) It won't be easy to iounmap() the region later (because
-                 *    of 1)
-                 */
-                ;
-        } else {
-                area = im_get_area(ea, size,
-                        IM_REGION_UNUSED|IM_REGION_SUBSET|IM_REGION_EXISTS);
-                if (area == NULL) {
-                        /* Expected when PHB-dlpar is in play */
-                        return 1;
-                }
-                if (ea != (unsigned long) area->addr) {
-                        printk(KERN_ERR "unexpected addr return from "
-                               "im_get_area\n");
-                        return 1;
-                }
-        }
-
-        ret = __ioremap_com(pa, pa, ea, size, flags);
-        if (ret == NULL) {
-                printk(KERN_ERR "ioremap_explicit() allocation failure !\n");
-                return 1;
-        }
-        if (ret != (void *) ea) {
-                printk(KERN_ERR "__ioremap_com() returned unexpected addr\n");
-                return 1;
-        }
-
-        return 0;
-}
-
 /*
  * Unmap an IO region and remove it from imalloc'd list.
  * Access to IO memory should be serialized by driver.
- * This code is modeled after vmalloc code - unmap_vm_area()
- *
- * XXX what about calls before mem_init_done (ie python_countermeasures())
  */
 void __iounmap(volatile void __iomem *token)
 {
@@ -251,9 +207,14 @@ void __iounmap(volatile void __iomem *token)
         if (!mem_init_done)
                 return;
 
-        addr = (void *) ((unsigned long __force) token & PAGE_MASK);
-
-        im_free(addr);
+        addr = (void *) ((unsigned long __force)
+                         PCI_FIX_ADDR(token) & PAGE_MASK);
+        if ((unsigned long)addr < ioremap_bot) {
+                printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
+                       " at 0x%p\n", addr);
+                return;
+        }
+        vunmap(addr);
 }
 
 void iounmap(volatile void __iomem *token)
@@ -264,77 +225,8 @@ void iounmap(volatile void __iomem *token)
         __iounmap(token);
 }
 
-static int iounmap_subset_regions(unsigned long addr, unsigned long size)
-{
-        struct vm_struct *area;
-
-        /* Check whether subsets of this region exist */
-        area = im_get_area(addr, size, IM_REGION_SUPERSET);
-        if (area == NULL)
-                return 1;
-
-        while (area) {
-                iounmap((void __iomem *) area->addr);
-                area = im_get_area(addr, size,
-                                IM_REGION_SUPERSET);
-        }
-
-        return 0;
-}
-
-int __iounmap_explicit(volatile void __iomem *start, unsigned long size)
-{
-        struct vm_struct *area;
-        unsigned long addr;
-        int rc;
-
-        addr = (unsigned long __force) start & PAGE_MASK;
-
-        /* Verify that the region either exists or is a subset of an existing
-         * region.  In the latter case, split the parent region to create
-         * the exact region
-         */
-        area = im_get_area(addr, size,
-                            IM_REGION_EXISTS | IM_REGION_SUBSET);
-        if (area == NULL) {
-                /* Determine whether subset regions exist.  If so, unmap */
-                rc = iounmap_subset_regions(addr, size);
-                if (rc) {
-                        printk(KERN_ERR
-                               "%s() cannot unmap nonexistent range 0x%lx\n",
-                               __FUNCTION__, addr);
-                        return 1;
-                }
-        } else {
-                iounmap((void __iomem *) area->addr);
-        }
-        /*
-         * FIXME! This can't be right:
-        iounmap(area->addr);
-         * Maybe it should be "iounmap(area);"
-         */
-        return 0;
-}
-
 EXPORT_SYMBOL(ioremap);
 EXPORT_SYMBOL(ioremap_flags);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 EXPORT_SYMBOL(__iounmap);
-
-static DEFINE_SPINLOCK(phb_io_lock);
-
-void __iomem * reserve_phb_iospace(unsigned long size)
-{
-        void __iomem *virt_addr;
-
-        if (phbs_io_bot >= IMALLOC_BASE)
-                panic("reserve_phb_iospace(): phb io space overflow\n");
-
-        spin_lock(&phb_io_lock);
-        virt_addr = (void __iomem *) phbs_io_bot;
-        phbs_io_bot += size;
-        spin_unlock(&phb_io_lock);
-
-        return virt_addr;
-}
@@ -11,7 +11,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -35,12 +34,12 @@
 
 #include "mmu_decl.h"
 
-PTE *Hash, *Hash_end;
+struct hash_pte *Hash, *Hash_end;
 unsigned long Hash_size, Hash_mask;
 unsigned long _SDR1;
 
 union ubat {                    /* BAT register values to be loaded */
-        BAT     bat;
+        struct ppc_bat bat;
         u32     word[2];
 } BATS[8][2];                   /* 8 pairs of IBAT, DBAT */
 
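The BAT / struct ppc_bat rename parallels the hash-PTE one: the union still lets the same storage be filled via named fields and loaded as two raw 32-bit words. Sketch of the assumed struct body (quoted from memory of the powerpc headers, not from this diff):

```c
/* Assumed layout: upper and lower BAT register words. */
struct ppc_bat {
        u32 batu;       /* upper BAT word (BEPI, block length, Vs/Vp) */
        u32 batl;       /* lower BAT word (BRPN, WIMG, PP bits) */
};
```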
@@ -245,7 +244,7 @@ void __init MMU_init_hw(void)
         cacheable_memzero(Hash, Hash_size);
         _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
-        Hash_end = (PTE *) ((unsigned long)Hash + Hash_size);
+        Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
 
         printk("Total memory = %ldMB; using %ldkB for hash table (at %p)\n",
                total_memory >> 20, Hash_size >> 10, Hash);
@@ -55,7 +55,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
         for (entry = 0; entry < 8; entry++, ste++) {
                 if (!(ste->esid_data & STE_ESID_V)) {
                         ste->vsid_data = vsid_data;
-                        asm volatile("eieio":::"memory");
+                        eieio();
                         ste->esid_data = esid_data;
                         return (global_entry | entry);
                 }
@@ -101,7 +101,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid)
         asm volatile("sync" : : : "memory");    /* Order update */
 
         castout_ste->vsid_data = vsid_data;
-        asm volatile("eieio" : : : "memory");   /* Order update */
+        eieio();                                /* Order update */
         castout_ste->esid_data = esid_data;
 
         asm volatile("slbie  %0" : : "r" (old_esid << SID_SHIFT));
@@ -11,7 +11,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -8,7 +8,6 @@
  * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
  * and Cort Dougan (PReP) (cort@cs.nmt.edu)
  * Copyright (C) 1996 Paul Mackerras
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
  *
  * Derived from "arch/i386/mm/init.c"
  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
@@ -239,3 +238,59 @@ void pte_free_finish(void)
                 pte_free_submit(*batchp);
         *batchp = NULL;
 }
+
+/**
+ * __flush_hash_table_range - Flush all HPTEs for a given address range
+ *                            from the hash table (and the TLB). But keeps
+ *                            the linux PTEs intact.
+ *
+ * @mm          : mm_struct of the target address space (generally init_mm)
+ * @start       : starting address
+ * @end         : ending address (not included in the flush)
+ *
+ * This function is mostly to be used by some IO hotplug code in order
+ * to remove all hash entries from a given address range used to map IO
+ * space on a removed PCI-PCI bidge without tearing down the full mapping
+ * since 64K pages may overlap with other bridges when using 64K pages
+ * with 4K HW pages on IO space.
+ *
+ * Because of that usage pattern, it's only available with CONFIG_HOTPLUG
+ * and is implemented for small size rather than speed.
+ */
+#ifdef CONFIG_HOTPLUG
+
+void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
+                              unsigned long end)
+{
+        unsigned long flags;
+
+        start = _ALIGN_DOWN(start, PAGE_SIZE);
+        end = _ALIGN_UP(end, PAGE_SIZE);
+
+        BUG_ON(!mm->pgd);
+
+        /* Note: Normally, we should only ever use a batch within a
+         * PTE locked section. This violates the rule, but will work
+         * since we don't actually modify the PTEs, we just flush the
+         * hash while leaving the PTEs intact (including their reference
+         * to being hashed). This is not the most performance oriented
+         * way to do things but is fine for our needs here.
+         */
+        local_irq_save(flags);
+        arch_enter_lazy_mmu_mode();
+        for (; start < end; start += PAGE_SIZE) {
+                pte_t *ptep = find_linux_pte(mm->pgd, start);
+                unsigned long pte;
+
+                if (ptep == NULL)
+                        continue;
+                pte = pte_val(*ptep);
+                if (!(pte & _PAGE_HASHPTE))
+                        continue;
+                hpte_need_flush(mm, start, ptep, pte, 0);
+        }
+        arch_leave_lazy_mmu_mode();
+        local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG */
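The new __flush_hash_table_range() is self-contained in the hunk above; a hypothetical caller of the kind its comment describes (all identifiers below except the function itself and init_mm are invented for illustration):

```c
/* Flush the hash entries covering an unplugged bridge window while
 * leaving the Linux PTEs intact; bridge_va/bridge_size are made up. */
unsigned long bridge_va = 0xd000080000000000UL;
unsigned long bridge_size = 0x10000UL;

__flush_hash_table_range(&init_mm, bridge_va,
                         bridge_va + bridge_size);
```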