Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc: (77 commits)
  [POWERPC] Abolish powerpc_flash_init()
  [POWERPC] Early serial debug support for PPC44x
  [POWERPC] Support for the Ebony 440GP reference board in arch/powerpc
  [POWERPC] Add device tree for Ebony
  [POWERPC] Add powerpc/platforms/44x, disable platforms/4xx for now
  [POWERPC] MPIC U3/U4 MSI backend
  [POWERPC] MPIC MSI allocator
  [POWERPC] Enable MSI mappings for MPIC
  [POWERPC] Tell Phyp we support MSI
  [POWERPC] RTAS MSI implementation
  [POWERPC] PowerPC MSI infrastructure
  [POWERPC] Rip out the existing powerpc msi stubs
  [POWERPC] Remove use of 4level-fixup.h for ppc32
  [POWERPC] Add powerpc PCI-E reset API implementation
  [POWERPC] Holly bootwrapper
  [POWERPC] Holly DTS
  [POWERPC] Holly defconfig
  [POWERPC] Add support for 750CL Holly board
  [POWERPC] Generalize tsi108 PCI setup
  [POWERPC] Generalize tsi108 PHY types
  ...

Fixed conflict in include/asm-powerpc/kdebug.h manually

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -24,73 +24,38 @@
  *
  */

 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
 #include <linux/stddef.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>

 #include <asm/pgalloc.h>
 #include <asm/prom.h>
 #include <asm/io.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/uaccess.h>
 #include <asm/smp.h>
 #include <asm/bootx.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
 #include <asm/system.h>
 #include <asm/page.h>

 #include "mmu_decl.h"

 extern char etext[], _stext[];

 /* Used by the 44x TLB replacement exception handler.
  * Just needed it declared someplace.
  */
-unsigned int tlb_44x_index = 0;
-unsigned int tlb_44x_hwater = 62;
+unsigned int tlb_44x_index; /* = 0 */
+unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;

 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
  */
-static void __init
-ppc44x_pin_tlb(int slot, unsigned int virt, unsigned int phys)
+static void __init ppc44x_pin_tlb(unsigned int virt, unsigned int phys)
 {
-	unsigned long attrib = 0;
-
-	__asm__ __volatile__("\
-	clrrwi	%2,%2,10\n\
-	ori	%2,%2,%4\n\
-	clrrwi	%1,%1,10\n\
-	li	%0,0\n\
-	ori	%0,%0,%5\n\
-	tlbwe	%2,%3,%6\n\
-	tlbwe	%1,%3,%7\n\
-	tlbwe	%0,%3,%8"
+	__asm__ __volatile__(
+		"tlbwe	%2,%3,%4\n"
+		"tlbwe	%1,%3,%5\n"
+		"tlbwe	%0,%3,%6\n"
 	:
-	: "r" (attrib), "r" (phys), "r" (virt), "r" (slot),
-	  "i" (PPC44x_TLB_VALID | PPC44x_TLB_256M),
-	  "i" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	: "r" (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G),
+	  "r" (phys),
+	  "r" (virt | PPC44x_TLB_VALID | PPC44x_TLB_256M),
+	  "r" (tlb_44x_hwater--), /* slot for this TLB entry */
 	  "i" (PPC44x_TLB_PAGEID),
 	  "i" (PPC44x_TLB_XLAT),
 	  "i" (PPC44x_TLB_ATTRIB));
 }

 /*
  * MMU_init_hw does the chip-specific initialization of the MMU hardware.
  */
 void __init MMU_init_hw(void)
 {
 	flush_instruction_cache();
@@ -98,22 +63,13 @@ void __init MMU_init_hw(void)

 unsigned long __init mmu_mapin_ram(void)
 {
-	unsigned int pinned_tlbs = 1;
-	int i;
+	unsigned long addr;

-	/* Determine number of entries necessary to cover lowmem */
-	pinned_tlbs = (unsigned int)
-		(_ALIGN(total_lowmem, PPC_PIN_SIZE) >> PPC44x_PIN_SHIFT);
-
-	/* Write upper watermark to save location */
-	tlb_44x_hwater = PPC44x_LOW_SLOT - pinned_tlbs;
-
-	/* If necessary, set additional pinned TLBs */
-	if (pinned_tlbs > 1)
-		for (i = (PPC44x_LOW_SLOT-(pinned_tlbs-1)); i < PPC44x_LOW_SLOT; i++) {
-			unsigned int phys_addr = (PPC44x_LOW_SLOT-i) * PPC_PIN_SIZE;
-			ppc44x_pin_tlb(i, phys_addr+PAGE_OFFSET, phys_addr);
-		}
+	/* Pin in enough TLBs to cover any lowmem not covered by the
+	 * initial 256M mapping established in head_44x.S */
+	for (addr = PPC_PIN_SIZE; addr < total_lowmem;
+	     addr += PPC_PIN_SIZE)
+		ppc44x_pin_tlb(addr + PAGE_OFFSET, addr);

 	return total_lowmem;
 }
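The mmu_mapin_ram() rewrite above replaces slot bookkeeping with a simple stride over lowmem: head_44x.S already pins the first 256MB, and each further PPC_PIN_SIZE step pins one more entry while ppc44x_pin_tlb() decrements tlb_44x_hwater to pick the slot. A minimal user-space sketch of that arithmetic (the constants mirror the kernel's, and the 768MB lowmem figure is an illustrative assumption, not from this diff):

/* Hypothetical model of the new pinning loop: counts how many 256MB
 * TLB entries mmu_mapin_ram() would pin for a given lowmem size and
 * where the hwater mark ends up (PPC44x_TLB_SIZE = 64, one early TLB
 * already used by head_44x.S).
 */
#include <stdio.h>

#define PIN_SIZE	(1UL << 28)	/* 256MB, like PPC_PIN_SIZE */
#define TLB_SIZE	64		/* PPC44x_TLB_SIZE */
#define EARLY_TLBS	1		/* PPC44x_EARLY_TLBS */

int main(void)
{
	unsigned long total_lowmem = 768UL << 20;	/* example: 768MB */
	unsigned int hwater = TLB_SIZE - 1 - EARLY_TLBS;
	unsigned long addr;

	/* mirrors: for (addr = PPC_PIN_SIZE; addr < total_lowmem; ...) */
	for (addr = PIN_SIZE; addr < total_lowmem; addr += PIN_SIZE) {
		printf("pin slot %u: phys 0x%08lx\n", hwater, addr);
		hwater--;	/* tlb_44x_hwater-- inside ppc44x_pin_tlb() */
	}
	printf("final hwater = %u\n", hwater);
	return 0;
}

For 768MB of lowmem this pins two extra entries (slots 62 and 61), leaving the watermark at 60 for the TLB-miss handler to allocate below.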
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -39,37 +39,26 @@
 #include <asm/tlbflush.h>
 #include <asm/siginfo.h>

-
 #ifdef CONFIG_KPROBES
-ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
+	int ret = 0;

-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
-}
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 11))
+			ret = 1;
+		preempt_enable();
+	}

-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
-{
-	struct die_args args = {
-		.regs = regs,
-		.str = str,
-		.err = err,
-		.trapnr = trap,
-		.signr = sig
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+	return ret;
 }
 #else
-static inline int notify_page_fault(enum die_val val, const char *str,
-			struct pt_regs *regs, long err, int trap, int sig)
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	return NOTIFY_DONE;
+	return 0;
 }
 #endif
@@ -175,8 +164,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 	is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */

-	if (notify_page_fault(DIE_PAGE_FAULT, "page_fault", regs, error_code,
-				11, SIGSEGV) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
 		return 0;

 	if (trap == 0x300) {
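The fault.c change drops the DIE_PAGE_FAULT notifier chain entirely: instead of broadcasting through atomic_notifier_call_chain(), the fault path now asks kprobes directly, with preemption disabled because kprobe_running() relies on smp_processor_id(). A user-space model of the resulting control flow (the struct, the flag, and the handler below are stand-ins, not the kernel's):

/* Illustrative model of the direct-call shape replacing the chain. */
#include <stdbool.h>
#include <stdio.h>

struct pt_regs { unsigned long nip; };	/* stand-in for the kernel struct */

static bool kprobe_active;		/* models kprobe_running() */

static bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	/* the real handler would fix up single-step state here */
	printf("kprobe claimed trap %d at 0x%lx\n", trapnr, regs->nip);
	return true;
}

static int notify_page_fault(struct pt_regs *regs, bool user)
{
	int ret = 0;

	/* the kernel brackets this with preempt_disable()/preempt_enable()
	 * because kprobe_running() reads per-CPU state */
	if (!user && kprobe_active && kprobe_fault_handler(regs, 11))
		ret = 1;

	return ret;
}

int main(void)
{
	struct pt_regs regs = { .nip = 0xc0000000UL };

	kprobe_active = true;
	printf("handled: %d\n", notify_page_fault(&regs, false));
	return 0;
}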
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -26,6 +26,7 @@
 #include <asm/tlb.h>
 #include <asm/cputable.h>
 #include <asm/udbg.h>
+#include <asm/kexec.h>

 #ifdef DEBUG_LOW
 #define DBG_LOW(fmt...) udbg_printf(fmt)
@@ -340,31 +341,70 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va,
 	local_irq_restore(flags);
 }

-/*
- * XXX This need fixing based on page size. It's only used by
- * native_hpte_clear() for now which needs fixing too so they
- * make a good pair...
- */
-static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
+#define LP_SHIFT	12
+#define LP_BITS		8
+#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)
+
+static void hpte_decode(hpte_t *hpte, unsigned long slot,
+			int *psize, unsigned long *va)
 {
-	unsigned long avpn = HPTE_V_AVPN_VAL(hpte_v);
-	unsigned long va;
+	unsigned long hpte_r = hpte->r;
+	unsigned long hpte_v = hpte->v;
+	unsigned long avpn;
+	int i, size, shift, penc, avpnm_bits;

-	va = avpn << 23;
+	if (!(hpte_v & HPTE_V_LARGE))
+		size = MMU_PAGE_4K;
+	else {
+		for (i = 0; i < LP_BITS; i++) {
+			if ((hpte_r & LP_MASK(i+1)) == LP_MASK(i+1))
+				break;
+		}
+		penc = LP_MASK(i+1) >> LP_SHIFT;
+		for (size = 0; size < MMU_PAGE_COUNT; size++) {

-	if (! (hpte_v & HPTE_V_LARGE)) {
-		unsigned long vpi, pteg;
+			/* 4K pages are not represented by LP */
+			if (size == MMU_PAGE_4K)
+				continue;

-		pteg = slot / HPTES_PER_GROUP;
-		if (hpte_v & HPTE_V_SECONDARY)
-			pteg = ~pteg;
+			/* valid entries have a shift value */
+			if (!mmu_psize_defs[size].shift)
+				continue;

-		vpi = ((va >> 28) ^ pteg) & htab_hash_mask;
-
-		va |= vpi << PAGE_SHIFT;
+			if (penc == mmu_psize_defs[size].penc)
+				break;
+		}
 	}

-	return va;
+	/*
+	 * FIXME, the code below works for 16M, 64K, and 4K pages as these
+	 * fall under the p<=23 rules for calculating the virtual address.
+	 * In the case of 16M pages, an extra bit is stolen from the AVPN
+	 * field to achieve the requisite 24 bits.
+	 *
+	 * Does not work for 16G pages or 1 TB segments.
+	 */
+	shift = mmu_psize_defs[size].shift;
+	if (mmu_psize_defs[size].avpnm)
+		avpnm_bits = __ilog2_u64(mmu_psize_defs[size].avpnm) + 1;
+	else
+		avpnm_bits = 0;
+	if (shift - avpnm_bits <= 23) {
+		avpn = HPTE_V_AVPN_VAL(hpte_v) << 23;
+
+		if (shift < 23) {
+			unsigned long vpi, pteg;
+
+			pteg = slot / HPTES_PER_GROUP;
+			if (hpte_v & HPTE_V_SECONDARY)
+				pteg = ~pteg;
+			vpi = ((avpn >> 28) ^ pteg) & htab_hash_mask;
+			avpn |= (vpi << mmu_psize_defs[size].shift);
+		}
+	}
+
+	*va = avpn;
+	*psize = size;
 }

 /*
@@ -374,15 +414,14 @@ static unsigned long slot2va(unsigned long hpte_v, unsigned long slot)
  *
  * TODO: add batching support when enabled. remember, no dynamic memory here,
  * athough there is the control page available...
- *
- * XXX FIXME: 4k only for now !
  */
 static void native_hpte_clear(void)
 {
 	unsigned long slot, slots, flags;
 	hpte_t *hptep = htab_address;
-	unsigned long hpte_v;
+	unsigned long hpte_v, va;
 	unsigned long pteg_count;
+	int psize;

 	pteg_count = htab_hash_mask + 1;

@@ -408,8 +447,9 @@ static void native_hpte_clear(void)
 		 * already hold the native_tlbie_lock.
 		 */
 		if (hpte_v & HPTE_V_VALID) {
+			hpte_decode(hptep, slot, &psize, &va);
 			hptep->v = 0;
-			__tlbie(slot2va(hpte_v, slot), MMU_PAGE_4K);
+			__tlbie(va, psize);
 		}
 	}
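In hpte_decode() above, the LP field in the low bits of the HPTE's second doubleword selects the large-page size: the loop finds the first LP_MASK(i+1) pattern that is fully set in hpte_r, and the resulting penc is matched against mmu_psize_defs[]. A standalone sketch of just that bit manipulation (the hpte_r value is made up for illustration):

/* Illustration of the LP_MASK() patterns used by hpte_decode(). */
#include <stdio.h>

#define LP_SHIFT 12
#define LP_BITS  8
#define LP_MASK(i) ((0xFFUL >> (i)) << LP_SHIFT)

int main(void)
{
	unsigned long hpte_r = 0x7f000UL;	/* made-up example LP bits */
	int i, penc;

	for (i = 0; i < LP_BITS; i++) {
		printf("LP_MASK(%d) = 0x%06lx\n", i + 1, LP_MASK(i + 1));
		if ((hpte_r & LP_MASK(i + 1)) == LP_MASK(i + 1))
			break;
	}
	penc = LP_MASK(i + 1) >> LP_SHIFT;
	printf("matched at i=%d, penc=0x%x\n", i, penc);
	return 0;
}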
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -103,7 +103,7 @@ int mmu_ci_restrictions;
 #ifdef CONFIG_DEBUG_PAGEALLOC
 static u8 *linear_map_hash_slots;
 static unsigned long linear_map_hash_count;
-static spinlock_t linear_map_hash_lock;
+static DEFINE_SPINLOCK(linear_map_hash_lock);
 #endif /* CONFIG_DEBUG_PAGEALLOC */

 /* There are definitions of page sizes arrays to be used when none
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -80,7 +80,6 @@ int page_is_ram(unsigned long pfn)
 	return 0;
 #endif
 }
-EXPORT_SYMBOL(page_is_ram);

 pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 			      unsigned long size, pgprot_t vma_prot)
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -40,7 +40,8 @@ extern int __map_without_bats;
 extern unsigned long ioremap_base;
 extern unsigned int rtas_data, rtas_size;

-extern PTE *Hash, *Hash_end;
+struct _PTE;
+extern struct _PTE *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;

 extern unsigned int num_tlbcam_entries;
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -261,7 +261,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
 	int err = -ENOMEM;

 	/* Use upper 10 bits of VA to index the first level map */
-	pd = pmd_offset(pgd_offset_k(va), va);
+	pd = pmd_offset(pud_offset(pgd_offset_k(va), va), va);
 	/* Use middle 10 bits of VA to index the second-level map */
 	pg = pte_alloc_kernel(pd, va);
 	if (pg != 0) {
@@ -354,23 +354,27 @@ int
 get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep, pmd_t **pmdp)
 {
 	pgd_t	*pgd;
+	pud_t	*pud;
 	pmd_t	*pmd;
 	pte_t	*pte;
 	int	retval = 0;

 	pgd = pgd_offset(mm, addr & PAGE_MASK);
 	if (pgd) {
-		pmd = pmd_offset(pgd, addr & PAGE_MASK);
-		if (pmd_present(*pmd)) {
-			pte = pte_offset_map(pmd, addr & PAGE_MASK);
-			if (pte) {
-				retval = 1;
-				*ptep = pte;
-				if (pmdp)
-					*pmdp = pmd;
-				/* XXX caller needs to do pte_unmap, yuck */
-			}
-		}
+		pud = pud_offset(pgd, addr & PAGE_MASK);
+		if (pud && pud_present(*pud)) {
+			pmd = pmd_offset(pud, addr & PAGE_MASK);
+			if (pmd_present(*pmd)) {
+				pte = pte_offset_map(pmd, addr & PAGE_MASK);
+				if (pte) {
+					retval = 1;
+					*ptep = pte;
+					if (pmdp)
+						*pmdp = pmd;
+					/* XXX caller needs to do pte_unmap, yuck */
+				}
+			}
+		}
 	}
 	return(retval);
 }
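The two pgtable_32.c hunks follow from "[POWERPC] Remove use of 4level-fixup.h for ppc32" in the shortlog: walks now spell out the pud level explicitly, and on 32-bit that level is folded away, so the extra pud_offset() call costs nothing at runtime. A sketch of why the folded level is free (the typedefs below are illustrative stand-ins, not the kernel's definitions):

/* Model of a folded pud: pud_offset() just passes the pgd slot through,
 * as the generic no-pud header does, so no extra memory access happens. */
#include <stdio.h>

typedef unsigned long pgd_t;
typedef pgd_t pud_t;	/* folded level: a pud *is* the pgd entry */

static pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
{
	(void)addr;
	return (pud_t *)pgd;
}

int main(void)
{
	pgd_t slot = 0;

	printf("pgd entry at %p, pud entry at %p (same, no extra load)\n",
	       (void *)&slot, (void *)pud_offset(&slot, 0));
	return 0;
}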
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -227,7 +227,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm)
  * the first (bolted) segment, so that do_stab_bolted won't get a
  * recursive segment miss on the segment table itself.
  */
-void stabs_alloc(void)
+void __init stabs_alloc(void)
 {
 	int cpu;
