Merge branch 'linus' into perfcounters/core
Conflicts:
	arch/x86/include/asm/kmap_types.h
	include/linux/mm.h
	include/asm-generic/kmap_types.h

Merge reason: We crossed changes with kmap_types.h cleanups in mainline.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
@@ -36,7 +36,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
firmware.o nvram_64.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
obj-$(CONFIG_PPC_CLOCK) += clock.o
@@ -82,6 +82,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o

pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o
obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
@@ -111,6 +112,7 @@ obj-y += ppc_save_regs.o
endif

extra-$(CONFIG_PPC_FPU) += fpu.o
extra-$(CONFIG_ALTIVEC) += vector.o
extra-$(CONFIG_PPC64) += entry_64.o

extra-y += systbl_chk.i
@@ -123,6 +125,7 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)

ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
$(obj)/built-in.o: prom_init_check

quiet_cmd_prom_init_check = CALL $<
@@ -131,5 +134,6 @@ quiet_cmd_prom_init_check = CALL $<
PHONY += prom_init_check
prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
$(call cmd,prom_init_check)
endif

clean-files := vmlinux.lds
@@ -24,6 +24,7 @@
#include <asm/system.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/emulated_ops.h>

struct aligninfo {
unsigned char len;
@@ -730,8 +731,10 @@ int fix_alignment(struct pt_regs *regs)
areg = dsisr & 0x1f; /* register to update */

#ifdef CONFIG_SPE
if ((instr >> 26) == 0x4)
if ((instr >> 26) == 0x4) {
PPC_WARN_EMULATED(spe);
return emulate_spe(regs, reg, instr);
}
#endif

instr = (dsisr >> 10) & 0x7f;
@@ -783,23 +786,28 @@ int fix_alignment(struct pt_regs *regs)
flags |= SPLT;
nb = 8;
}
PPC_WARN_EMULATED(vsx);
return emulate_vsx(addr, reg, areg, regs, flags, nb);
}
#endif
/* A size of 0 indicates an instruction we don't support, with
 * the exception of DCBZ which is handled as a special case here
 */
if (instr == DCBZ)
if (instr == DCBZ) {
PPC_WARN_EMULATED(dcbz);
return emulate_dcbz(regs, addr);
}
if (unlikely(nb == 0))
return 0;

/* Load/Store Multiple instructions are handled in their own
 * function
 */
if (flags & M)
if (flags & M) {
PPC_WARN_EMULATED(multiple);
return emulate_multiple(regs, addr, reg, nb,
flags, instr, swiz);
}

/* Verify the address of the operand */
if (unlikely(user_mode(regs) &&
@@ -816,8 +824,12 @@ int fix_alignment(struct pt_regs *regs)
}

/* Special case for 16-byte FP loads and stores */
if (nb == 16)
if (nb == 16) {
PPC_WARN_EMULATED(fp_pair);
return emulate_fp_pair(addr, reg, flags);
}

PPC_WARN_EMULATED(unaligned);

/* If we are loading, get the data from user space, else
 * get it from register values
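
Note: the PPC_WARN_EMULATED() calls added above give each emulated-instruction path a counter bump before the emulation routine runs. A minimal sketch of the shape such a macro can take, assuming a stats-collection config option; the struct layout and field names here are illustrative, not the exact definitions in this tree:

    /* sketch: one counter per emulated instruction class */
    struct ppc_emulated_entry {
        const char *name;
        atomic_t val;
    };

    #define PPC_WARN_EMULATED(type) \
        atomic_inc(&ppc_emulated.type.val)   /* assumed layout */

With that shape, PPC_WARN_EMULATED(dcbz) compiles down to a single atomic increment, so the alignment-fixup fast path stays cheap while still recording how often each class of instruction gets emulated.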
@@ -122,8 +122,6 @@ int main(void)
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
@@ -132,35 +130,30 @@ int main(void)
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
context.low_slices_psize));
DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
context.high_slices_psize));
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
#endif /* CONFIG_PPC_MM_SLICES */
#ifdef CONFIG_PPC_STD_MMU_64
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));

#endif /* CONFIG_PPC_MM_SLICES */
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));

DEFINE(SLBSHADOW_STACKVSID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
DEFINE(SLBSHADOW_STACKESID,
@@ -170,6 +163,15 @@ int main(void)
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
#endif /* CONFIG_PPC_STD_MMU_64 */
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#endif /* CONFIG_PPC64 */

/* RTAS */
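
Note: asm-offsets.c is never linked into the kernel; it is compiled only so the build can harvest structure offsets for use from assembly. The DEFINE() idiom works roughly like the sketch below (this is the standard kbuild pattern, stated here as an assumption about this tree):

    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

The build scans the generated assembly for the "->" markers and turns each one into a #define in asm-offsets.h. That is why entries such as PACASTABREAL can simply move inside #ifdef CONFIG_PPC_STD_MMU_64 above: stab/SLB offsets are then emitted only for 64-bit hash-MMU builds, and assembly that references them must be conditional the same way.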
@@ -427,7 +427,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
.mmu_features = MMU_FTR_HPTE_TABLE,
.mmu_features = MMU_FTR_HPTE_TABLE |
MMU_FTR_TLBIE_206,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -441,7 +442,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
.mmu_features = MMU_FTR_HPTE_TABLE,
.mmu_features = MMU_FTR_HPTE_TABLE |
MMU_FTR_TLBIE_206,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,

arch/powerpc/kernel/dma-swiotlb.c (new file, 163 lines)

@@ -0,0 +1,163 @@
/*
 * Contains routines needed to support swiotlb for ppc.
 *
 * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/dma-mapping.h>
#include <linux/pfn.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pci.h>

#include <asm/machdep.h>
#include <asm/swiotlb.h>
#include <asm/dma.h>
#include <asm/abs_addr.h>

int swiotlb __read_mostly;
unsigned int ppc_swiotlb_enable;

void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
{
unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
void *pageaddr = page_address(pfn_to_page(pfn));

if (pageaddr != NULL)
return pageaddr + (addr % PAGE_SIZE);
return NULL;
}

dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
{
return paddr + get_dma_direct_offset(hwdev);
}

phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
{
return baddr - get_dma_direct_offset(hwdev);
}

/*
 * Determine if an address needs bounce buffering via swiotlb.
 * Going forward I expect the swiotlb code to generalize on using
 * a dma_ops->addr_needs_map, and this function will move from here to the
 * generic swiotlb code.
 */
int
swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
size_t size)
{
struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);

BUG_ON(!dma_ops);
return dma_ops->addr_needs_map(hwdev, addr, size);
}

/*
 * Determine if an address is reachable by a pci device, or if we must bounce.
 */
static int
swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
u64 mask = dma_get_mask(hwdev);
dma_addr_t max;
struct pci_controller *hose;
struct pci_dev *pdev = to_pci_dev(hwdev);

hose = pci_bus_to_host(pdev->bus);
max = hose->dma_window_base_cur + hose->dma_window_size;

/* check that we're within mapped pci window space */
if ((addr + size > max) | (addr < hose->dma_window_base_cur))
return 1;

return !is_buffer_dma_capable(mask, addr, size);
}

static int
swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
{
return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
}


/*
 * At the moment, all platforms that use this code only require
 * swiotlb to be used if we're operating on HIGHMEM. Since
 * we don't ever call anything other than map_sg, unmap_sg,
 * map_page, and unmap_page on highmem, use normal dma_ops
 * for everything else.
 */
struct dma_mapping_ops swiotlb_dma_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.addr_needs_map = swiotlb_addr_needs_map,
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device
};

struct dma_mapping_ops swiotlb_pci_dma_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_sg = swiotlb_map_sg_attrs,
.unmap_sg = swiotlb_unmap_sg_attrs,
.dma_supported = swiotlb_dma_supported,
.map_page = swiotlb_map_page,
.unmap_page = swiotlb_unmap_page,
.addr_needs_map = swiotlb_pci_addr_needs_map,
.sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
.sync_single_range_for_device = swiotlb_sync_single_range_for_device,
.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
.sync_sg_for_device = swiotlb_sync_sg_for_device
};

static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
unsigned long action, void *data)
{
struct device *dev = data;

/* We are only interested in device addition */
if (action != BUS_NOTIFY_ADD_DEVICE)
return 0;

/* May need to bounce if the device can't address all of DRAM */
if (dma_get_mask(dev) < lmb_end_of_DRAM())
set_dma_ops(dev, &swiotlb_dma_ops);

return NOTIFY_DONE;
}

static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
.notifier_call = ppc_swiotlb_bus_notify,
.priority = 0,
};

static struct notifier_block ppc_swiotlb_of_bus_notifier = {
.notifier_call = ppc_swiotlb_bus_notify,
.priority = 0,
};

int __init swiotlb_setup_bus_notifier(void)
{
bus_register_notifier(&platform_bus_type,
&ppc_swiotlb_plat_bus_notifier);
bus_register_notifier(&of_platform_bus_type,
&ppc_swiotlb_of_bus_notifier);

return 0;
}
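
Note: the bus notifier above decides per device whether bounce buffering is needed: if the device's DMA mask cannot reach the top of DRAM, its dma_ops are switched to the swiotlb set. A hedged illustration of the decision (the device values below are made up for the example):

    /* e.g. 4 GB of RAM but a device that can only address 2 GB */
    u64 ram_top = lmb_end_of_DRAM();   /* say 0x100000000 */
    u64 mask    = dma_get_mask(dev);   /* say 0x7fffffff  */

    if (mask < ram_top)                /* true here -> bounce via swiotlb */
        set_dma_ops(dev, &swiotlb_dma_ops);

One small style observation on the file: `(addr + size > max) | (addr < hose->dma_window_base_cur)` in swiotlb_pci_addr_needs_map() combines two boolean results with a bitwise OR, which is correct (each operand is 0 or 1) though `||` would be the more idiomatic spelling.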
@@ -19,7 +19,7 @@
 * default the offset is PCI_DRAM_OFFSET.
 */

static unsigned long get_dma_direct_offset(struct device *dev)
unsigned long get_dma_direct_offset(struct device *dev)
{
if (dev)
return (unsigned long)dev->archdata.dma_data;
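
Note: this hunk only drops the `static`, so the new dma-swiotlb.c can reuse get_dma_direct_offset() for its bus/phys conversions. The offset is a simple linear shift; a worked example with an assumed offset of 0x80000000:

    unsigned long off = get_dma_direct_offset(dev);  /* say 0x80000000 */

    dma_addr_t  bus = paddr + off;  /* phys 0x1000 -> bus 0x80001000 */
    phys_addr_t phy = bus - off;    /* and back again */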

arch/powerpc/kernel/exceptions-64s.S (new file, 978 lines)

@@ -0,0 +1,978 @@
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */

/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */


/*
 * SPRG Usage
 *
 * Register Definition
 *
 * SPRG0 reserved for hypervisor
 * SPRG1 temp - used to save gpr
 * SPRG2 temp - used to save gpr
 * SPRG3 virt addr of paca
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
. = 0x100
.globl __start_interrupts
__start_interrupts:

STD_EXCEPTION_PSERIES(0x100, system_reset)

. = 0x200
_machine_check_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

. = 0x300
.globl data_access_pSeries
data_access_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13
BEGIN_FTR_SECTION
mtspr SPRN_SPRG2,r12
mfspr r13,SPRN_DAR
mfspr r12,SPRN_DSISR
srdi r13,r13,60
rlwimi r13,r12,16,0x20
mfcr r12
cmpwi r13,0x2c
beq do_stab_bolted_pSeries
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

. = 0x380
.globl data_access_slb_pSeries
data_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_DAR
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
mfspr r10,SPRN_SPRG1
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
/*
 * We can't just use a direct branch to .slb_miss_realmode
 * because the distance from here to there depends on where
 * the kernel ends up being put.
 */
mfctr r11
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10, .slb_miss_realmode)
mtctr r10
bctr
#endif

STD_EXCEPTION_PSERIES(0x400, instruction_access)

. = 0x480
.globl instruction_access_slb_pSeries
instruction_access_slb_pSeries:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13
mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
std r3,PACA_EXSLB+EX_R3(r13)
mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
mfcr r9
#ifdef __DISABLED__
/* Keep that around for when we re-implement dynamic VSIDs */
cmpdi r3,0
bge slb_miss_user_pseries
#endif /* __DISABLED__ */
std r10,PACA_EXSLB+EX_R10(r13)
std r11,PACA_EXSLB+EX_R11(r13)
std r12,PACA_EXSLB+EX_R12(r13)
mfspr r10,SPRN_SPRG1
std r10,PACA_EXSLB+EX_R13(r13)
mfspr r12,SPRN_SRR1 /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
b .slb_miss_realmode
#else
mfctr r11
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10, .slb_miss_realmode)
mtctr r10
bctr
#endif

MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
STD_EXCEPTION_PSERIES(0x600, alignment)
STD_EXCEPTION_PSERIES(0x700, program_check)
STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
STD_EXCEPTION_PSERIES(0xa00, trap_0a)
STD_EXCEPTION_PSERIES(0xb00, trap_0b)

. = 0xc00
.globl system_call_pSeries
system_call_pSeries:
HMT_MEDIUM
BEGIN_FTR_SECTION
cmpdi r0,0x1ebe
beq- 1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
mr r9,r13
mfspr r13,SPRN_SPRG3
mfspr r11,SPRN_SRR0
ld r12,PACAKBASE(r13)
ld r10,PACAKMSR(r13)
LOAD_HANDLER(r12, system_call_entry)
mtspr SPRN_SRR0,r12
mfspr r12,SPRN_SRR1
mtspr SPRN_SRR1,r10
rfid
b . /* prevent speculative execution */

/* Fast LE/BE switch system call */
1: mfspr r12,SPRN_SRR1
xori r12,r12,MSR_LE
mtspr SPRN_SRR1,r12
rfid /* return to userspace */
b .

STD_EXCEPTION_PSERIES(0xd00, single_step)
STD_EXCEPTION_PSERIES(0xe00, trap_0e)

/* We need to deal with the Altivec unavailable exception
 * here which is at 0xf20, thus in the middle of the
 * prolog code of the PerformanceMonitor one. A little
 * trickery is thus necessary
 */
. = 0xf00
b performance_monitor_pSeries

. = 0xf20
b altivec_unavailable_pSeries

. = 0xf40
b vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

. = 0x3000

/*** pSeries interrupt support ***/

/* moved from 0xf00 */
STD_EXCEPTION_PSERIES(., performance_monitor)
STD_EXCEPTION_PSERIES(., altivec_unavailable)
STD_EXCEPTION_PSERIES(., vsx_unavailable)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
stb r10,PACAHARDIRQEN(r13)
mtcrf 0x80,r9
ld r9,PACA_EXGEN+EX_R9(r13)
mfspr r10,SPRN_SRR1
rldicl r10,r10,48,1 /* clear MSR_EE */
rotldi r10,r10,16
mtspr SPRN_SRR1,r10
ld r10,PACA_EXGEN+EX_R10(r13)
mfspr r13,SPRN_SPRG1
rfid
b .

.align 7
do_stab_bolted_pSeries:
mtcrf 0x80,r12
mfspr r12,SPRN_SPRG2
EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option. Share common code.
 */
.globl system_reset_fwnmi
.align 7
system_reset_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

.globl machine_check_fwnmi
.align 7
machine_check_fwnmi:
HMT_MEDIUM
mtspr SPRN_SPRG1,r13 /* save r13 */
EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 */
slb_miss_user_pseries:
std r10,PACA_EXGEN+EX_R10(r13)
std r11,PACA_EXGEN+EX_R11(r13)
std r12,PACA_EXGEN+EX_R12(r13)
mfspr r10,SPRG1
ld r11,PACA_EXSLB+EX_R9(r13)
ld r12,PACA_EXSLB+EX_R3(r13)
std r10,PACA_EXGEN+EX_R13(r13)
std r11,PACA_EXGEN+EX_R9(r13)
std r12,PACA_EXGEN+EX_R3(r13)
clrrdi r12,r13,32
mfmsr r10
mfspr r11,SRR0 /* save SRR0 */
ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
ori r10,r10,MSR_IR|MSR_DR|MSR_RI
mtspr SRR0,r12
mfspr r12,SRR1 /* and SRR1 */
mtspr SRR1,r10
rfid
b . /* prevent spec. execution */
#endif /* __DISABLED__ */

.align 7
.globl __end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above. Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

/*** Common interrupt handlers ***/

STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

/*
 * Machine check is different because we use a different
 * save area: PACA_EXMC instead of PACA_EXGEN.
 */
.align 7
.globl machine_check_common
machine_check_common:
EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
FINISH_NAP
DISABLE_INTS
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
bl .machine_check_exception
b .ret_from_except

STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

.align 7
system_call_entry:
b system_call_common

/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
ld r1,PACAEMERGSP(r13)
subi r1,r1,64+INT_FRAME_SIZE
std r9,_CCR(r1)
std r10,GPR1(r1)
std r11,_NIP(r1)
std r12,_MSR(r1)
mfspr r11,SPRN_DAR
mfspr r12,SPRN_DSISR
std r11,_DAR(r1)
std r12,_DSISR(r1)
mflr r10
mfctr r11
mfxer r12
std r10,_LINK(r1)
std r11,_CTR(r1)
std r12,_XER(r1)
SAVE_GPR(0,r1)
SAVE_GPR(2,r1)
SAVE_4GPRS(3,r1)
SAVE_2GPRS(7,r1)
SAVE_10GPRS(12,r1)
SAVE_10GPRS(22,r1)
lhz r12,PACA_TRAP_SAVE(r13)
std r12,_TRAP(r1)
addi r11,r1,INT_FRAME_SIZE
std r11,0(r1)
li r12,0
std r12,0(r11)
ld r2,PACATOC(r13)
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .kernel_bad_stack
b 1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
.align 7
.globl data_access_common
data_access_common:
mfspr r10,SPRN_DAR
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,SPRN_DSISR
stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
li r5,0x300
b .do_hash_page /* Try to handle as hpte fault */

.align 7
.globl instruction_access_common
instruction_access_common:
EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
ld r3,_NIP(r1)
andis. r4,r12,0x5820
li r5,0x400
b .do_hash_page /* Try to handle as hpte fault */

/*
 * Here is the common SLB miss user that is used when going to virtual
 * mode for SLB misses, that is currently not used
 */
#ifdef __DISABLED__
.align 7
.globl slb_miss_user_common
slb_miss_user_common:
mflr r10
std r3,PACA_EXGEN+EX_DAR(r13)
stw r9,PACA_EXGEN+EX_CCR(r13)
std r10,PACA_EXGEN+EX_LR(r13)
std r11,PACA_EXGEN+EX_SRR0(r13)
bl .slb_allocate_user

ld r10,PACA_EXGEN+EX_LR(r13)
ld r3,PACA_EXGEN+EX_R3(r13)
lwz r9,PACA_EXGEN+EX_CCR(r13)
ld r11,PACA_EXGEN+EX_SRR0(r13)
mtlr r10
beq- slb_miss_fault

andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- unrecov_user_slb
mfmsr r10

.machine push
.machine "power4"
mtcrf 0x80,r9
.machine pop

clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
mtmsrd r10,1

mtspr SRR0,r11
mtspr SRR1,r12

ld r9,PACA_EXGEN+EX_R9(r13)
ld r10,PACA_EXGEN+EX_R10(r13)
ld r11,PACA_EXGEN+EX_R11(r13)
ld r12,PACA_EXGEN+EX_R12(r13)
ld r13,PACA_EXGEN+EX_R13(r13)
rfid
b .

slb_miss_fault:
EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
ld r4,PACA_EXGEN+EX_DAR(r13)
li r5,0
std r4,_DAR(r1)
std r5,_DSISR(r1)
b handle_page_fault

unrecov_user_slb:
EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b

#endif /* __DISABLED__ */

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contain the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
#endif

stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */

bl .slb_allocate_realmode

/* All done -- return from exception. */

ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
ld r11,PACALPPACAPTR(r13)
ld r11,LPPACASRR0(r11) /* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

mtlr r10

andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f

.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */

2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
b unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .

unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b

.align 7
.globl hardware_interrupt_common
.globl hardware_interrupt_entry
hardware_interrupt_common:
EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
FINISH_NAP
hardware_interrupt_entry:
DISABLE_INTS
BEGIN_FTR_SECTION
bl .ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_IRQ
b .ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif

.align 7
.globl alignment_common
alignment_common:
mfspr r10,SPRN_DAR
std r10,PACA_EXGEN+EX_DAR(r13)
mfspr r10,SPRN_DSISR
stw r10,PACA_EXGEN+EX_DSISR(r13)
EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
ld r3,PACA_EXGEN+EX_DAR(r13)
lwz r4,PACA_EXGEN+EX_DSISR(r13)
std r3,_DAR(r1)
std r4,_DSISR(r1)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .alignment_exception
b .ret_from_except

.align 7
.globl program_check_common
program_check_common:
EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .program_check_exception
b .ret_from_except

.align 7
.globl fp_unavailable_common
fp_unavailable_common:
EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
bne 1f /* if from user, just load it up */
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .kernel_fp_unavailable_exception
BUG_OPCODE
1: bl .load_up_fpu
b fast_exception_return

.align 7
.globl altivec_unavailable_common
altivec_unavailable_common:
EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
beq 1f
bl .load_up_altivec
b fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .altivec_unavailable_exception
b .ret_from_except

.align 7
.globl vsx_unavailable_common
vsx_unavailable_common:
EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
bne .load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
bl .save_nvgprs
addi r3,r1,STACK_FRAME_OVERHEAD
ENABLE_INTS
bl .vsx_unavailable_exception
b .ret_from_except

.align 7
.globl __end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq: /* restores irq state too */
ld r3,SOFTE(r1)
TRACE_AND_RESTORE_IRQ(r3);
ld r12,_MSR(r1)
rldicl r4,r12,49,63 /* get MSR_EE to LSB */
stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
b 1f

.globl fast_exception_return
fast_exception_return:
ld r12,_MSR(r1)
1: ld r11,_NIP(r1)
andi. r3,r12,MSR_RI /* check if RI is set */
beq- unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
andi. r3,r12,MSR_PR
beq 2f
ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

ld r3,_CCR(r1)
ld r4,_LINK(r1)
ld r5,_CTR(r1)
ld r6,_XER(r1)
mtcr r3
mtlr r4
mtctr r5
mtxer r6
REST_GPR(0, r1)
REST_8GPRS(2, r1)

mfmsr r10
rldicl r10,r10,48,1 /* clear EE */
rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
mtmsrd r10,1

mtspr SPRN_SRR1,r12
mtspr SPRN_SRR0,r11
REST_4GPRS(10, r1)
ld r1,GPR1(r1)
rfid
b . /* prevent speculative execution */

unrecov_fer:
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b


/*
 * Hash table stuff
 */
.align 7
_STATIC(do_hash_page)
std r3,_DAR(r1)
std r4,_DSISR(r1)

andis. r0,r4,0xa450 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
andis. r0,r4,0x0020 /* Is it a segment table fault? */
bne- do_ste_alloc /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

/*
 * On iSeries, we soft-disable interrupts here, then
 * hard-enable interrupts so that the hash_page code can spin on
 * the hash_table_lock without problems on a shared processor.
 */
DISABLE_INTS

/*
 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
 * and will clobber volatile registers when irq tracing is enabled
 * so we need to reload them. It may be possible to be smarter here
 * and move the irq tracing elsewhere but let's keep it simple for
 * now
 */
#ifdef CONFIG_TRACE_IRQFLAGS
ld r3,_DAR(r1)
ld r4,_DSISR(r1)
ld r5,_TRAP(r1)
ld r12,_MSR(r1)
clrrdi r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
/*
 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
 * accessing a userspace segment (even from the kernel). We assume
 * kernel addresses always have the high bit set.
 */
rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
orc r0,r12,r0 /* MSR_PR | ~high_bit */
rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
ori r4,r4,1 /* add _PAGE_PRESENT */
rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */

/*
 * r3 contains the faulting address
 * r4 contains the required access permissions
 * r5 contains the trap number
 *
 * at return r3 = 0 for success
 */
bl .hash_page /* build HPTE if possible */
cmpdi r3,0 /* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
/*
 * If we had interrupts soft-enabled at the point where the
 * DSI/ISI occurred, and an interrupt came in during hash_page,
 * handle it now.
 * We jump to ret_from_except_lite rather than fast_exception_return
 * because ret_from_except_lite will check for and handle pending
 * interrupts if necessary.
 */
beq 13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
/*
 * Here we have interrupts hard-disabled, so it is sufficient
 * to restore paca->{soft,hard}_enable and get out.
 */
beq fast_exc_return_irq /* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

/* For a hash failure, we don't bother re-enabling interrupts */
ble- 12f

/*
 * hash_page couldn't handle it, set soft interrupt enable back
 * to what it was before the trap. Note that .raw_local_irq_restore
 * handles any interrupts pending at this point.
 */
ld r3,SOFTE(r1)
TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
bl .raw_local_irq_restore
b 11f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
ENABLE_INTS
11: ld r4,_DAR(r1)
ld r5,_DSISR(r1)
addi r3,r1,STACK_FRAME_OVERHEAD
bl .do_page_fault
cmpdi r3,0
beq+ 13f
bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
lwz r4,_DAR(r1)
bl .bad_page_fault
b .ret_from_except

13: b .ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12: bl .save_nvgprs
mr r5,r3
addi r3,r1,STACK_FRAME_OVERHEAD
ld r4,_DAR(r1)
bl .low_hash_fault
b .ret_from_except

/* here we have a segment miss */
do_ste_alloc:
bl .ste_allocate /* try to insert stab entry */
cmpdi r3,0
bne- handle_page_fault
b fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
.align 7
_GLOBAL(do_stab_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */

/* Hash to the primary group */
ld r10,PACASTABVIRT(r13)
mfspr r11,SPRN_DAR
srdi r11,r11,28
rldimi r10,r11,7,52 /* r10 = first ste of the group */

/* Calculate VSID */
/* This is a kernel address, so protovsid = ESID */
ASM_VSID_SCRAMBLE(r11, r9, 256M)
rldic r9,r11,12,16 /* r9 = vsid << 12 */

/* Search the primary group for a free entry */
1: ld r11,0(r10) /* Test valid bit of the current ste */
andi. r11,r11,0x80
beq 2f
addi r10,r10,16
andi. r11,r10,0x70
bne 1b

/* Stick for only searching the primary group for now. */
/* At least for now, we use a very simple random castout scheme */
/* Use the TB as a random number ; OR in 1 to avoid entry 0 */
mftb r11
rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
ori r11,r11,0x10

/* r10 currently points to an ste one past the group of interest */
/* make it point to the randomly selected entry */
subi r10,r10,128
or r10,r10,r11 /* r10 is the entry to invalidate */

isync /* mark the entry invalid */
ld r11,0(r10)
rldicl r11,r11,56,1 /* clear the valid bit */
rotldi r11,r11,8
std r11,0(r10)
sync

clrrdi r11,r11,28 /* Get the esid part of the ste */
slbie r11

2: std r9,8(r10) /* Store the vsid part of the ste */
eieio

mfspr r11,SPRN_DAR /* Get the new esid */
clrrdi r11,r11,28 /* Permits a full 32b of ESID */
ori r11,r11,0x90 /* Turn on valid and kp */
std r11,0(r10) /* Put new entry back into the stab */

sync

/* All done -- return from exception. */
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */

andi. r10,r12,MSR_RI
beq- unrecov_slb

mtcrf 0x80,r9 /* restore CR */

mfmsr r10
clrrdi r10,r10,2
mtmsrd r10,1

mtspr SPRN_SRR0,r11
mtspr SPRN_SRR1,r12
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on). The address is given to the hv
 * as a page number (see xLparMap below), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
. = STAB0_OFFSET /* 0x6000 */
.globl initial_stab
initial_stab:
.space 4096

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
. = 0x7000
.globl fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

/* iSeries does not use the FWNMI stuff, so it is safe to put
 * this here, even if we later allow kernels that will boot on
 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
. = LPARMAP_PHYS
.globl xLparMap
xLparMap:
.quad HvEsidsToMap /* xNumberEsids */
.quad HvRangesToMap /* xNumberRanges */
.quad STAB0_PAGE /* xSegmentTableOffs */
.zero 40 /* xRsvd */
/* xEsids (HvEsidsToMap entries of 2 quads) */
.quad PAGE_OFFSET_ESID /* xKernelEsid */
.quad PAGE_OFFSET_VSID /* xKernelVsid */
.quad VMALLOC_START_ESID /* xKernelEsid */
.quad VMALLOC_START_VSID /* xKernelVsid */
/* xRanges (HvRangesToMap entries of 3 quads) */
.quad HvPagesToMap /* xPages */
.quad 0 /* xOffset */
.quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
. = 0x8000
#endif /* CONFIG_PPC_PSERIES */
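
Note: this whole file is a code move out of head_64.S, so the masked_interrupt path above still implements the usual powerpc64 lazy-disable scheme, using the PACASOFTIRQEN/PACAHARDIRQEN paca fields defined earlier in this diff. In C terms the idea is roughly the following (a sketch, not the kernel's actual types):

    struct soft_mask_state {     /* illustrative only */
        u8 soft_enabled;   /* kernel says irqs are allowed */
        u8 hard_enabled;   /* MSR_EE is actually on */
    };

    /* masked_interrupt: an irq arrived while soft-disabled, so
     * remember that EE is now off and return immediately... */
    state->hard_enabled = 0;   /* the stb r10,PACAHARDIRQEN(r13) above */
    /* ...rfid with MSR_EE cleared; the interrupt is replayed later,
     * when raw_local_irq_restore() soft-enables again (see the irq.c
     * hunks further down). */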
@@ -23,25 +23,14 @@
#include <asm/code-patching.h>
#include <asm/ftrace.h>

#ifdef CONFIG_PPC32
# define GET_ADDR(addr) addr
#else
/* PowerPC64's functions are data that points to the functions */
# define GET_ADDR(addr) (*(unsigned long *)addr)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int ftrace_nop_replace(void)
{
return PPC_INST_NOP;
}

static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
unsigned int op;

addr = GET_ADDR(addr);
addr = ppc_function_entry((void *)addr);

/* if (link) set op to 'bl' else 'b' */
op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
@@ -49,14 +38,6 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
return op;
}

#ifdef CONFIG_PPC64
# define _ASM_ALIGN " .align 3 "
# define _ASM_PTR " .llong "
#else
# define _ASM_ALIGN " .align 2 "
# define _ASM_PTR " .long "
#endif

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
@@ -197,7 +178,7 @@ __ftrace_make_nop(struct module *mod,
ptr = ((unsigned long)jmp[0] << 32) + jmp[1];

/* This should match what was called */
if (ptr != GET_ADDR(addr)) {
if (ptr != ppc_function_entry((void *)addr)) {
printk(KERN_ERR "addr does not match %lx\n", ptr);
return -EINVAL;
}
@@ -328,7 +309,7 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
new = ftrace_nop_replace();
new = PPC_INST_NOP;
return ftrace_modify_code(ip, old, new);
}

@@ -466,7 +447,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 */
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_nop_replace();
old = PPC_INST_NOP;
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
}
@@ -570,7 +551,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return_hooker = (unsigned long)&mod_return_to_handler;
#endif

return_hooker = GET_ADDR(return_hooker);
return_hooker = ppc_function_entry((void *)return_hooker);

/*
 * Protect against fault, even if it shouldn't
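
Note: the ftrace hunks replace the file-local GET_ADDR() macro with the shared ppc_function_entry() helper. On 64-bit (ELFv1 ABI) a function symbol refers to a function descriptor rather than to code, so the real entry point must be loaded from the descriptor's first word; on 32-bit the pointer already is the entry. A sketch of what such a helper does (the descriptor field names are assumptions for illustration):

    typedef struct {
        unsigned long entry;   /* actual code address */
        unsigned long toc;     /* TOC pointer for the callee */
        unsigned long env;
    } func_descr_t;

    static inline unsigned long ppc_function_entry(void *func)
    {
    #ifdef CONFIG_PPC64
        return ((func_descr_t *)func)->entry;  /* dereference descriptor */
    #else
        return (unsigned long)func;            /* pointer is the entry */
    #endif
    }

Centralising this also lets the now-unused _ASM_ALIGN/_ASM_PTR definitions be deleted from this file, as the second hunk shows.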
@@ -733,9 +733,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
AltiVecUnavailable:
EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
bne load_up_altivec /* if from user, just load it up */
beq 1f
bl load_up_altivec /* if from user, just load it up */
b fast_exception_return
#endif /* CONFIG_ALTIVEC */
addi r3,r1,STACK_FRAME_OVERHEAD
1: addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)

PerformanceMonitor:
@@ -743,101 +745,6 @@ PerformanceMonitor:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0xf00, performance_monitor_exception)

#ifdef CONFIG_ALTIVEC
/* Note that the AltiVec support is closely modeled after the FP
 * support. Changes to one are likely to be applicable to the
 * other! */
load_up_altivec:
/*
 * Disable AltiVec for the task which had AltiVec previously,
 * and save its AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 * On SMP we know the AltiVec units are free, since we give it up every
 * switch. -- Kumar
 */
mfmsr r5
oris r5,r5,MSR_VEC@h
MTMSRD(r5) /* enable use of AltiVec now */
isync
/*
 * For SMP, we don't do lazy AltiVec switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another. Instead we call giveup_altivec in switch_to.
 */
#ifndef CONFIG_SMP
tophys(r6,0)
addis r3,r6,last_task_used_altivec@ha
lwz r4,last_task_used_altivec@l(r3)
cmpwi 0,r4,0
beq 1f
add r4,r4,r6
addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
SAVE_32VRS(0,r10,r4)
mfvscr vr0
li r10,THREAD_VSCR
stvx vr0,r10,r4
lwz r5,PT_REGS(r4)
add r5,r5,r6
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r10,MSR_VEC@h
andc r4,r4,r10 /* disable altivec for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of AltiVec after return */
oris r9,r9,MSR_VEC@h
mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
li r4,1
li r10,THREAD_VSCR
stw r4,THREAD_USED_VR(r5)
lvx vr0,r10,r5
mtvscr vr0
REST_32VRS(0,r10,r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
sub r4,r4,r6
stw r4,last_task_used_altivec@l(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
b fast_exception_return

/*
 * giveup_altivec(tsk)
 * Disable AltiVec for the task given as the argument,
 * and save the AltiVec registers in its thread_struct.
 * Enables AltiVec for use in the kernel on return.
 */

.globl giveup_altivec
giveup_altivec:
mfmsr r5
oris r5,r5,MSR_VEC@h
SYNC
MTMSRD(r5) /* enable use of AltiVec now */
isync
cmpwi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpwi 0,r5,0
SAVE_32VRS(0, r4, r3)
mfvscr vr0
li r4,THREAD_VSCR
stvx vr0,r4,r3
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
lis r3,MSR_VEC@h
andc r4,r4,r3 /* disable AltiVec for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
lis r4,last_task_used_altivec@ha
stw r5,last_task_used_altivec@l(r4)
#endif /* CONFIG_SMP */
blr
#endif /* CONFIG_ALTIVEC */

/*
 * This code is jumped to from the startup code to copy
(changes for one file not shown: too large to display)
@@ -256,7 +256,7 @@ label:
 * off DE in the DSRR1 value and clearing the debug status. \
 */ \
mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
andis. r10,r10,DBSR_IC@h; \
andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
lis r10,KERNELBASE@h; /* check if exception in vectors */ \
@@ -271,7 +271,7 @@ label:
\
/* here it looks like we got an inappropriate debug exception. */ \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CDRR1 value */ \
lis r10,DBSR_IC@h; /* clear the IC event */ \
lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \
mtspr SPRN_DBSR,r10; \
/* restore state and get out */ \
lwz r10,_CCR(r11); \
@@ -309,7 +309,7 @@ label:
 * off DE in the CSRR1 value and clearing the debug status. \
 */ \
mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
andis. r10,r10,DBSR_IC@h; \
andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
lis r10,KERNELBASE@h; /* check if exception in vectors */ \
@@ -317,14 +317,14 @@ label:
cmplw r12,r10; \
blt+ 2f; /* addr below exception vectors */ \
\
lis r10,DebugCrit@h; \
ori r10,r10,DebugCrit@l; \
cmplw r12,r10; \
bgt+ 2f; /* addr above exception vectors */ \
\
/* here it looks like we got an inappropriate debug exception. */ \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \
lis r10,DBSR_IC@h; /* clear the IC event */ \
lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \
mtspr SPRN_DBSR,r10; \
/* restore state and get out */ \
lwz r10,_CCR(r11); \
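
Note: the Book-E debug fix above widens the single-step test from DBSR_IC alone to DBSR_IC|DBSR_BT, so a branch-taken debug event is recognised and cleared the same way as an instruction-completion one. Because both the `andis.` test and the `lis` clear operate on the high halfword, the two status bits must live in the upper 16 bits of DBSR; with illustrative (assumed) bit values the logic is:

    #define DBSR_IC 0x08000000   /* instruction completion (assumed value) */
    #define DBSR_BT 0x04000000   /* branch taken (assumed value) */

    unsigned int dbsr = read_dbsr();            /* pseudo-accessor */
    if (dbsr & (DBSR_IC | DBSR_BT))             /* the andis. ...@h test */
        write_dbsr(DBSR_IC | DBSR_BT);          /* DBSR is write-1-to-clear */

Without the BT bit, a branch-taken event raised inside the exception vectors would be left pending and could retrigger.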
@@ -9,10 +9,6 @@

static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);

EXPORT_SYMBOL(init_mm);

/*
 * Initial thread structure.
 *
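
Note: the init_mm removal here is the mainline side of the merge: during this merge window init_mm was consolidated into a single generic definition, so the per-arch copy and its EXPORT_SYMBOL go away. The surviving definition (believed to live in mm/init-mm.c after the consolidation) is essentially just:

    /* roughly what the consolidated generic file provides */
    struct mm_struct init_mm = INIT_MM(init_mm);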
@@ -118,6 +118,7 @@ notrace void raw_local_irq_restore(unsigned long en)
|
||||
if (!en)
|
||||
return;
|
||||
|
||||
#ifdef CONFIG_PPC_STD_MMU_64
|
||||
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
|
||||
/*
|
||||
* Do we need to disable preemption here? Not really: in the
|
||||
@@ -135,6 +136,7 @@ notrace void raw_local_irq_restore(unsigned long en)
|
||||
if (local_paca->lppaca_ptr->int_dword.any_int)
|
||||
iseries_handle_interrupts();
|
||||
}
|
||||
#endif /* CONFIG_PPC_STD_MMU_64 */
|
||||
|
||||
if (test_perf_counter_pending()) {
|
||||
clear_perf_counter_pending();
|
||||
@@ -254,77 +256,84 @@ void fixup_irqs(cpumask_t map)
}
#endif

#ifdef CONFIG_IRQSTACKS
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		generic_handle_irq(irq);
		return;
	}

	desc = irq_desc + irq;
	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
		(curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;
#ifdef CONFIG_IRQSTACKS
	struct thread_info *curtp, *irqtp;
#endif

	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: is there less than 2KB free? */
	{
		long sp;
	check_stack_overflow();

		sp = __get_SP() & (THREAD_SIZE-1);

		if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
			printk("do_IRQ: stack overflow: %ld\n",
				sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif

	/*
	 * Every platform is required to implement ppc_md.get_irq.
	 * This function will either return an irq number or NO_IRQ to
	 * indicate there are no more pending.
	 * The value NO_IRQ_IGNORE is for buggy hardware and means that this
	 * IRQ has already been handled. -- Tom
	 */
	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
		/* Switch to the irq stack to handle this */
		curtp = current_thread_info();
		irqtp = hardirq_ctx[smp_processor_id()];
		if (curtp != irqtp) {
			struct irq_desc *desc = irq_desc + irq;
			void *handler = desc->handle_irq;
			unsigned long saved_sp_limit = current->thread.ksp_limit;
			if (handler == NULL)
				handler = &__do_IRQ;
			irqtp->task = curtp->task;
			irqtp->flags = 0;

			/* Copy the softirq bits in preempt_count so that the
			 * softirq checks work in the hardirq context.
			 */
			irqtp->preempt_count =
				(irqtp->preempt_count & ~SOFTIRQ_MASK) |
				(curtp->preempt_count & SOFTIRQ_MASK);

			current->thread.ksp_limit = (unsigned long)irqtp +
				_ALIGN_UP(sizeof(struct thread_info), 16);
			call_handle_irq(irq, desc, irqtp, handler);
			current->thread.ksp_limit = saved_sp_limit;
			irqtp->task = NULL;

			/* Set any flag that may have been set on the
			 * alternate stack
			 */
			if (irqtp->flags)
				set_bits(irqtp->flags, &curtp->flags);
		} else
#endif
			generic_handle_irq(irq);
	} else if (irq != NO_IRQ_IGNORE)
	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		/* That's not SMP safe ... but who cares ? */
		ppc_spurious_interrupts++;
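The refactored check works because kernel stacks are THREAD_SIZE-aligned and THREAD_SIZE is a power of two, so masking the stack pointer with THREAD_SIZE-1 yields its offset from the base of the stack, where struct thread_info lives; the free space is that offset minus the thread_info size. A minimal userspace model of the arithmetic (the THREAD_SIZE and thread_info sizes below are illustrative assumptions, not the kernel's values):

#include <stdio.h>

#define THREAD_SIZE      16384UL /* assumption: 16KB kernel stacks */
#define THREAD_INFO_SIZE 128UL   /* hypothetical sizeof(struct thread_info) */

/* sp_reg is an arbitrary stack-pointer value inside a THREAD_SIZE-aligned
 * stack; the low bits give the distance from the stack base. */
static void check_stack_overflow_model(unsigned long sp_reg)
{
    long sp = sp_reg & (THREAD_SIZE - 1); /* bytes above the stack base */

    /* less than 2KB left above thread_info? */
    if (sp < (long)(THREAD_INFO_SIZE + 2048))
        printf("stack overflow: %ld bytes free\n",
               sp - (long)THREAD_INFO_SIZE);
}

int main(void)
{
    /* offset 0x800 from the base: only 1920 bytes above thread_info */
    check_stack_overflow_model(0xf0000800UL);
    return 0;
}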
@@ -169,6 +169,9 @@ struct hvcall_ppp_data {
	u8 unallocated_weight;
	u16 active_procs_in_pool;
	u16 active_system_procs;
	u16 phys_platform_procs;
	u32 max_proc_cap_avail;
	u32 entitled_proc_cap_avail;
};

/*
@@ -190,13 +193,18 @@ struct hvcall_ppp_data {
 * XX - Unallocated Variable Processor Capacity Weight.
 * XXXX - Active processors in Physical Processor Pool.
 * XXXX - Processors active on platform.
 * R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
 * XXXX - Physical platform procs allocated to virtualization.
 * XXXXXX - Max procs capacity % available to the partitions pool.
 * XXXXXX - Entitled procs capacity % available to the
 * partitions pool.
 */
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
	unsigned long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall(H_GET_PPP, retbuf);
	rc = plpar_hcall9(H_GET_PPP, retbuf);

	ppp_data->entitlement = retbuf[0];
	ppp_data->unallocated_entitlement = retbuf[1];
@@ -210,6 +218,10 @@ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
	ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
	ppp_data->active_system_procs = retbuf[3] & 0xffff;

	ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
	ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
	ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;

	return rc;
}

@@ -234,6 +246,8 @@ static unsigned h_pic(unsigned long *pool_idle_time,
static void parse_ppp_data(struct seq_file *m)
{
	struct hvcall_ppp_data ppp_data;
	struct device_node *root;
	const int *perf_level;
	int rc;

	rc = h_get_ppp(&ppp_data);
@@ -267,6 +281,28 @@ static void parse_ppp_data(struct seq_file *m)
	seq_printf(m, "capped=%d\n", ppp_data.capped);
	seq_printf(m, "unallocated_capacity=%lld\n",
		   ppp_data.unallocated_entitlement);

	/* The last bits of information returned from h_get_ppp are only
	 * valid if the ibm,partition-performance-parameters-level
	 * property is >= 1.
	 */
	root = of_find_node_by_path("/");
	if (root) {
		perf_level = of_get_property(root,
				"ibm,partition-performance-parameters-level",
				NULL);
		if (perf_level && (*perf_level >= 1)) {
			seq_printf(m,
			    "physical_procs_allocated_to_virtualization=%d\n",
				   ppp_data.phys_platform_procs);
			seq_printf(m, "max_proc_capacity_available=%d\n",
				   ppp_data.max_proc_cap_avail);
			seq_printf(m, "entitled_proc_capacity_available=%d\n",
				   ppp_data.entitled_proc_cap_avail);
		}

		of_node_put(root);
	}
}

/**
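The new H_GET_PPP fields are unpacked from the enlarged 9-register return buffer by byte-shifting and masking: the top two bytes of R8 hold the physical processor count, and the two 24-bit fields hold the capacity percentages. The same unpacking as a standalone sketch, with a made-up register value:

#include <stdio.h>

int main(void)
{
    /* Hypothetical R8 laid out as QQQQRRRRRRSSSSSS (hex digits):
     * QQQQ = 0x0010 physical procs, RRRRRR = 0x000400 max capacity %,
     * SSSSSS = 0x000200 entitled capacity %. */
    unsigned long long r8 = 0x0010000400000200ULL;

    unsigned int phys = r8 >> 6 * 8;               /* top 16 bits  */
    unsigned int max  = (r8 >> 3 * 8) & 0xffffff;  /* middle 24 bits */
    unsigned int ent  = r8 & 0xffffff;             /* low 24 bits  */

    printf("phys=%u max=%u entitled=%u\n", phys, max, ent);
    return 0;
}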
@@ -457,98 +457,6 @@ _GLOBAL(disable_kernel_fp)
	isync
	blr

#ifdef CONFIG_ALTIVEC

#if 0 /* this has no callers for now */
/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3			/* disable use of VMX now */
	isync
	blr
#endif /* 0 */

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable FP for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */

/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
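The giveup routines being moved out of misc_64.S implement lazy register switching: the unit is enabled for the kernel, the previous owner's registers are saved into its thread_struct, and MSR_VEC/MSR_VSX is cleared in the owner's saved MSR so that its next use of the unit traps and reloads state. A rough C model of that ownership hand-off (the types and names are illustrative only, not kernel structures):

#include <stdio.h>

#define MSR_VEC 0x02000000ul

struct task {
    unsigned long saved_msr; /* MSR image restored on return to the task */
    int vr_saved;            /* stand-in for the thread_struct VR save area */
};

static struct task *last_task_used_altivec; /* UP-only lazy owner */

/* Model of giveup_altivec(): save the old owner's state and make its
 * next AltiVec use fault so the load-up path runs again. */
static void giveup_altivec_model(struct task *tsk)
{
    if (!tsk)
        return;                  /* no previous owner, done */
    tsk->vr_saved = 1;           /* SAVE_32VRS + VSCR in the real code */
    tsk->saved_msr &= ~MSR_VEC;  /* unit disabled when tsk resumes */
    last_task_used_altivec = NULL;
}

int main(void)
{
    struct task a = { .saved_msr = MSR_VEC };
    last_task_used_altivec = &a;
    giveup_altivec_model(&a);
    printf("saved=%d msr_vec=%lu\n", a.vr_saved, a.saved_msr & MSR_VEC);
    return 0;
}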
@@ -18,6 +18,8 @@
 * field correctly */
extern unsigned long __toc_start;

#ifdef CONFIG_PPC_BOOK3S

/*
 * The structure which the hypervisor knows about - this structure
 * should not cross a page boundary. The vpa_init/register_vpa call
@@ -41,6 +43,10 @@ struct lppaca lppaca[] = {
	},
};

#endif /* CONFIG_PPC_BOOK3S */

#ifdef CONFIG_PPC_STD_MMU_64

/*
 * 3 persistent SLBs are registered here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
@@ -52,6 +58,8 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = {
	},
};

#endif /* CONFIG_PPC_STD_MMU_64 */

/* The Paca is an array with one entry per processor. Each contains an
 * lppaca, which contains the information shared between the
 * hypervisor and Linux.
@@ -77,15 +85,19 @@ void __init initialise_pacas(void)
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		struct paca_struct *new_paca = &paca[cpu];

#ifdef CONFIG_PPC_BOOK3S
		new_paca->lppaca_ptr = &lppaca[cpu];
#endif
		new_paca->lock_token = 0x8000;
		new_paca->paca_index = cpu;
		new_paca->kernel_toc = kernel_toc;
		new_paca->kernelbase = (unsigned long) _stext;
		new_paca->kernel_msr = MSR_KERNEL;
		new_paca->hw_cpu_id = 0xffff;
		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
		new_paca->__current = &init_task;
#ifdef CONFIG_PPC_STD_MMU_64
		new_paca->slb_shadow_ptr = &slb_shadow[cpu];
#endif /* CONFIG_PPC_STD_MMU_64 */

	}
}
@@ -1505,7 +1505,7 @@ void __init pcibios_resource_survey(void)
 * rest of the code later, for now, keep it as-is as our main
 * resource allocation function doesn't deal with sub-trees yet.
 */
void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;
@@ -1533,7 +1533,6 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);


/* pcibios_finish_adding_to_bus
@@ -33,7 +33,6 @@ int pcibios_assign_bus_offset = 1;

void pcibios_make_OF_bus_map(void);

static void fixup_broken_pcnet32(struct pci_dev* dev);
static void fixup_cpc710_pci64(struct pci_dev* dev);
#ifdef CONFIG_PPC_OF
static u8* pci_to_OF_bus_map;
@@ -71,16 +70,6 @@ fixup_hide_host_resource_fsl(struct pci_dev *dev)
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);

static void
fixup_broken_pcnet32(struct pci_dev* dev)
{
	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

static void
fixup_cpc710_pci64(struct pci_dev* dev)
{
@@ -447,14 +436,6 @@ static int __init pcibios_init(void)

subsys_initcall(pcibios_init);

/* the next one is stolen from the alpha port... */
void __init
pcibios_update_irq(struct pci_dev *dev, int irq)
{
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
	/* XXX FIXME - update OF device tree node interrupt property */
}

static struct pci_controller*
pci_bus_to_hose(int bus)
{
@@ -43,16 +43,6 @@ unsigned long pci_probe_only = 1;
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);

static void fixup_broken_pcnet32(struct pci_dev* dev)
{
	if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);


static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
	const u32 *prop;
@@ -430,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
	 * so flushing the hash table is the only sane way to make sure
	 * that no hash entries are covering that removed bridge area
	 * while still allowing other busses overlapping those pages
	 *
	 * Note: If we ever support P2P hotplug on Book3E, we'll have
	 * to do an appropriate TLB flush here too
	 */
	if (bus->self) {
		struct resource *res = bus->resource[0];
@@ -437,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
		pr_debug("IO unmapping for PCI-PCI bridge %s\n",
			 pci_name(bus->self));

#ifdef CONFIG_PPC_STD_MMU_64
		__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
					 res->end + _IO_BASE + 1);
#endif
		return 0;
	}

@@ -511,7 +506,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
	pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
	pr_debug("  phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
		 hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
	pr_debug("  size=0x%016lx (alloc=0x%016lx)\n",
	pr_debug("  size=0x%016llx (alloc=0x%016lx)\n",
		 hose->pci_io_size, size_page);

	/* Establish the mapping */
@@ -27,7 +27,6 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/pSeries_reconfig.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>

@@ -35,7 +34,7 @@
 * Traverse_func that inits the PCI fields of the device node.
 * NOTE: this *must* be done before read/write config to the device.
 */
static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
{
	struct pci_controller *phb = data;
	const int *type =
@@ -184,29 +183,6 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev)
}
EXPORT_SYMBOL(fetch_dev_dn);

static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
{
	struct device_node *np = node;
	struct pci_dn *pci = NULL;
	int err = NOTIFY_OK;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		pci = np->parent->data;
		if (pci)
			update_dn_pci_info(np, pci->phb);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pci_dn_reconfig_nb = {
	.notifier_call = pci_dn_reconfig_notifier,
};

/**
 * pci_devs_phb_init - Initialize phbs and pci devs under them.
 *
@@ -223,6 +199,4 @@ void __init pci_devs_phb_init(void)
	/* This must be done first so the device nodes have valid pci info! */
	list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
		pci_devs_phb_init_dynamic(phb);

	pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
}
@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB)) {
		unsigned long sp_vsid;
		unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node)
		      ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}

#ifdef CONFIG_PPC64
#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
	u32 *slb_size_ptr;
@@ -44,10 +44,7 @@
#include <asm/sections.h>
#include <asm/machdep.h>

#ifdef CONFIG_LOGO_LINUX_CLUT224
#include <linux/linux_logo.h>
extern const struct linux_logo logo_linux_clut224;
#endif

/*
 * Properties whose value is longer than this get excluded from our
@@ -704,15 +704,34 @@ void user_enable_single_step(struct task_struct *task)

	if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
		task->thread.dbcr0 &= ~DBCR0_BT;
		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_enable_block_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
		task->thread.dbcr0 &= ~DBCR0_IC;
		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_SE;
		regs->msr |= MSR_BE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}

void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;
@@ -726,10 +745,10 @@ void user_disable_single_step(struct task_struct *task)

	if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
		task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
		task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
		regs->msr &= ~MSR_DE;
#else
		regs->msr &= ~MSR_SE;
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
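On 4xx/BookE the stepping modes are driven by DBCR0 (DBCR0_IC for single-step, DBCR0_BT for branch-taken) gated by MSR_DE, and enabling one mode must clear the other's bit so the two requests don't stack; classic/server CPUs use MSR_SE and MSR_BE directly. A small model of the BookE side (the bit values below are placeholders, not the real SPR layout):

#include <stdio.h>

/* Hypothetical bit values, for illustration only */
#define DBCR0_IC  0x08000000u /* instruction completion (single step) */
#define DBCR0_BT  0x04000000u /* branch taken */
#define DBCR0_IDM 0x40000000u /* internal debug mode */

static unsigned int dbcr0;

static void enable_single_step(void)
{
    dbcr0 &= ~DBCR0_BT;           /* single-step excludes block-step */
    dbcr0 |= DBCR0_IDM | DBCR0_IC;
}

static void enable_block_step(void)
{
    dbcr0 &= ~DBCR0_IC;           /* block-step excludes single-step */
    dbcr0 |= DBCR0_IDM | DBCR0_BT;
}

static void disable_stepping(void)
{
    dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
}

int main(void)
{
    enable_block_step();
    enable_single_step(); /* flips BT off, IC on */
    printf("IC=%d BT=%d\n", !!(dbcr0 & DBCR0_IC), !!(dbcr0 & DBCR0_BT));
    disable_stepping();
    return 0;
}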
@@ -93,10 +93,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
{
	struct device_node *busdn, *dn;

	if (bus->self)
		busdn = pci_device_to_OF_node(bus->self);
	else
		busdn = bus->sysdata;	/* must be a phb */
	busdn = pci_bus_to_OF_node(bus);

	/* Search only direct children of the bus */
	for (dn = busdn->child; dn; dn = dn->sibling) {
@@ -140,10 +137,7 @@ static int rtas_pci_write_config(struct pci_bus *bus,
{
	struct device_node *busdn, *dn;

	if (bus->self)
		busdn = pci_device_to_OF_node(bus->self);
	else
		busdn = bus->sysdata;	/* must be a phb */
	busdn = pci_bus_to_OF_node(bus);

	/* Search only direct children of the bus */
	for (dn = busdn->child; dn; dn = dn->sibling) {
@@ -39,6 +39,7 @@
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/mmu_context.h>
#include <asm/swiotlb.h>

#include "setup.h"

@@ -332,6 +333,11 @@ void __init setup_arch(char **cmdline_p)
		ppc_md.setup_arch();
	if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);

#ifdef CONFIG_SWIOTLB
	if (ppc_swiotlb_enable)
		swiotlb_init();
#endif

	paging_init();

	/* Initialize the MMU context management stuff */
@@ -61,6 +61,7 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/swiotlb.h>

#include "setup.h"

@@ -417,12 +418,14 @@ void __init setup_system(void)
	if (ppc64_caches.iline_size != 0x80)
		printk("ppc64_caches.icache_line_size = 0x%x\n",
		       ppc64_caches.iline_size);
#ifdef CONFIG_PPC_STD_MMU_64
	if (htab_address)
		printk("htab_address                  = 0x%p\n", htab_address);
	printk("htab_hash_mask                = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
	if (PHYSICAL_START > 0)
		printk("physical_start                = 0x%lx\n",
		       PHYSICAL_START);
		printk("physical_start                = 0x%llx\n",
		       (unsigned long long)PHYSICAL_START);
	printk("-----------------------------------------------------\n");

	DBG(" <- setup_system()\n");
@@ -511,8 +514,9 @@ void __init setup_arch(char **cmdline_p)
	irqstack_early_init();
	emergency_stack_init();

#ifdef CONFIG_PPC_STD_MMU_64
	stabs_alloc();

#endif
	/* set up the bootmem stuff with available memory */
	do_init_bootmem();
	sparse_init();
@@ -524,6 +528,11 @@ void __init setup_arch(char **cmdline_p)
	if (ppc_md.setup_arch)
		ppc_md.setup_arch();

#ifdef CONFIG_SWIOTLB
	if (ppc_swiotlb_enable)
		swiotlb_init();
#endif

	paging_init();
	ppc64_boot_msg(0x15, "Setup Done");
}
@@ -52,6 +52,7 @@
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/processor.h>
@@ -109,7 +110,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
static struct clock_event_device decrementer_clockevent = {
	.name           = "decrementer",
	.rating         = 200,
	.shift          = 16,
	.shift          = 0,	/* To be filled in */
	.mult           = 0,	/* To be filled in */
	.irq            = 0,
	.set_next_event = decrementer_set_next_event,
@@ -843,6 +844,22 @@ static void decrementer_set_mode(enum clock_event_mode mode,
		decrementer_set_next_event(DECREMENTER_MAX, dev);
}

static void __init setup_clockevent_multiplier(unsigned long hz)
{
	u64 mult, shift = 32;

	while (1) {
		mult = div_sc(hz, NSEC_PER_SEC, shift);
		if (mult && (mult >> 32UL) == 0UL)
			break;

		shift--;
	}

	decrementer_clockevent.shift = shift;
	decrementer_clockevent.mult = mult;
}

static void register_decrementer_clockevent(int cpu)
{
	struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
@@ -860,8 +877,7 @@ static void __init init_decrementer_clockevent(void)
{
	int cpu = smp_processor_id();

	decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
					     decrementer_clockevent.shift);
	setup_clockevent_multiplier(ppc_tb_freq);
	decrementer_clockevent.max_delta_ns =
		clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
	decrementer_clockevent.min_delta_ns =
@@ -1128,6 +1144,15 @@ void div128_by_32(u64 dividend_high, u64 dividend_low,

}

/* We don't need to calibrate delay, we use the CPU timebase for that */
void calibrate_delay(void)
{
	/* Some generic code (such as spinlock debug) use loops_per_jiffy
	 * as the number of __delay(1) in a jiffy, so make it so
	 */
	loops_per_jiffy = tb_ticks_per_jiffy;
}

static int __init rtc_init(void)
{
	struct platform_device *pdev;
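clockevents converts a nanosecond delta to device ticks as ticks = (ns * mult) >> shift, so setup_clockevent_multiplier() searches for the largest shift whose mult still fits in 32 bits, keeping maximum precision without overflowing the multiply. The same loop as a standalone program (the 512MHz timebase is an arbitrary example value):

#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Same computation as the kernel's div_sc(): scaled conversion factor */
static unsigned long long div_sc(unsigned long long from,
                                 unsigned long long to, unsigned int shift)
{
    return (from << shift) / to;
}

int main(void)
{
    unsigned long hz = 512000000; /* hypothetical 512MHz timebase */
    unsigned long long mult;
    unsigned int shift = 32;

    /* Largest shift whose mult still fits in 32 bits */
    while (1) {
        mult = div_sc(hz, NSEC_PER_SEC, shift);
        if (mult && (mult >> 32) == 0)
            break;
        shift--;
    }

    printf("shift=%u mult=%llu\n", shift, mult);
    /* 1000ns * mult >> shift is roughly 512 timebase ticks */
    printf("1000ns -> %llu ticks\n", (1000 * mult) >> shift);
    return 0;
}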
@@ -33,7 +33,9 @@
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
#include <linux/debugfs.h>

#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -757,36 +759,44 @@ static int emulate_instruction(struct pt_regs *regs)

	/* Emulate the mfspr rD, PVR. */
	if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
		PPC_WARN_EMULATED(mfpvr);
		rd = (instword >> 21) & 0x1f;
		regs->gpr[rd] = mfspr(SPRN_PVR);
		return 0;
	}

	/* Emulating the dcba insn is just a no-op. */
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA)
	if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
		PPC_WARN_EMULATED(dcba);
		return 0;
	}

	/* Emulate the mcrxr insn. */
	if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
		int shift = (instword >> 21) & 0x1c;
		unsigned long msk = 0xf0000000UL >> shift;

		PPC_WARN_EMULATED(mcrxr);
		regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
		regs->xer &= ~0xf0000000UL;
		return 0;
	}

	/* Emulate load/store string insn. */
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING)
	if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
		PPC_WARN_EMULATED(string);
		return emulate_string_inst(regs, instword);
	}

	/* Emulate the popcntb (Population Count Bytes) instruction. */
	if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
		PPC_WARN_EMULATED(popcntb);
		return emulate_popcntb_inst(regs, instword);
	}

	/* Emulate isel (Integer Select) instruction */
	if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
		PPC_WARN_EMULATED(isel);
		return emulate_isel(regs, instword);
	}

@@ -984,6 +994,8 @@ void SoftwareEmulation(struct pt_regs *regs)

#ifdef CONFIG_MATH_EMULATION
	errcode = do_mathemu(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(math);

	switch (errcode) {
	case 0:
@@ -1005,6 +1017,9 @@ void SoftwareEmulation(struct pt_regs *regs)

#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	errcode = Soft_emulate_8xx(regs);
	if (errcode >= 0)
		PPC_WARN_EMULATED(8xx);

	switch (errcode) {
	case 0:
		emulate_single_step(regs);
@@ -1026,7 +1041,34 @@ void SoftwareEmulation(struct pt_regs *regs)

void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
	if (debug_status & DBSR_IC) {	/* instruction completion */
	/* Hack alert: On BookE, Branch Taken stops on the branch itself, while
	 * on server, it stops on the target of the branch. In order to simulate
	 * the server behaviour, we thus restart right away with a single step
	 * instead of stopping here when hitting a BT
	 */
	if (debug_status & DBSR_BT) {
		regs->msr &= ~MSR_DE;

		/* Disable BT */
		mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
		/* Clear the BT event */
		mtspr(SPRN_DBSR, DBSR_BT);

		/* Do the single step trick only when coming from userspace */
		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_BT;
			current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
			regs->msr |= MSR_DE;
			return;
		}

		if (notify_die(DIE_SSTEP, "block_step", regs, 5,
			       5, SIGTRAP) == NOTIFY_STOP) {
			return;
		}
		if (debugger_sstep(regs))
			return;
	} else if (debug_status & DBSR_IC) {	/* Instruction complete */
		regs->msr &= ~MSR_DE;

		/* Disable instruction completion */
@@ -1042,9 +1084,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
		if (debugger_sstep(regs))
			return;

		if (user_mode(regs)) {
			current->thread.dbcr0 &= ~DBCR0_IC;
		}
		if (user_mode(regs))
			current->thread.dbcr0 &= ~(DBCR0_IC);

		_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
	} else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
@@ -1088,6 +1129,7 @@ void altivec_assist_exception(struct pt_regs *regs)

	flush_altivec_to_thread(current);

	PPC_WARN_EMULATED(altivec);
	err = emulate_altivec(regs);
	if (err == 0) {
		regs->nip += 4;		/* skip emulated instruction */
@@ -1286,3 +1328,79 @@ void kernel_bad_stack(struct pt_regs *regs)
void __init trap_init(void)
{
}


#ifdef CONFIG_PPC_EMULATED_STATS

#define WARN_EMULATED_SETUP(type)	.type = { .name = #type }

struct ppc_emulated ppc_emulated = {
#ifdef CONFIG_ALTIVEC
	WARN_EMULATED_SETUP(altivec),
#endif
	WARN_EMULATED_SETUP(dcba),
	WARN_EMULATED_SETUP(dcbz),
	WARN_EMULATED_SETUP(fp_pair),
	WARN_EMULATED_SETUP(isel),
	WARN_EMULATED_SETUP(mcrxr),
	WARN_EMULATED_SETUP(mfpvr),
	WARN_EMULATED_SETUP(multiple),
	WARN_EMULATED_SETUP(popcntb),
	WARN_EMULATED_SETUP(spe),
	WARN_EMULATED_SETUP(string),
	WARN_EMULATED_SETUP(unaligned),
#ifdef CONFIG_MATH_EMULATION
	WARN_EMULATED_SETUP(math),
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
	WARN_EMULATED_SETUP(8xx),
#endif
#ifdef CONFIG_VSX
	WARN_EMULATED_SETUP(vsx),
#endif
};

u32 ppc_warn_emulated;

void ppc_warn_emulated_print(const char *type)
{
	if (printk_ratelimit())
		pr_warning("%s used emulated %s instruction\n", current->comm,
			   type);
}

static int __init ppc_warn_emulated_init(void)
{
	struct dentry *dir, *d;
	unsigned int i;
	struct ppc_emulated_entry *entries = (void *)&ppc_emulated;

	if (!powerpc_debugfs_root)
		return -ENODEV;

	dir = debugfs_create_dir("emulated_instructions",
				 powerpc_debugfs_root);
	if (!dir)
		return -ENOMEM;

	d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
			       &ppc_warn_emulated);
	if (!d)
		goto fail;

	for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
		d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
				       (u32 *)&entries[i].val.counter);
		if (!d)
			goto fail;
	}

	return 0;

fail:
	debugfs_remove_recursive(dir);
	return -ENOMEM;
}

device_initcall(ppc_warn_emulated_init);

#endif /* CONFIG_PPC_EMULATED_STATS */
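The counters written by the PPC_WARN_EMULATED() calls above end up under debugfs in emulated_instructions/, with the do_warn knob gating the rate-limited printk. A plausible shape for the macro itself (an assumption for illustration; the real definition lives in asm/emulated_ops.h and is not shown in this diff):

/* Assumed expansion: bump the per-instruction atomic counter, and warn
 * through the rate-limited helper when the debugfs knob is set. */
#define PPC_WARN_EMULATED(type)                                          \
	do {                                                             \
		atomic_inc(&ppc_emulated.type.val);                      \
		if (ppc_warn_emulated)                                   \
			ppc_warn_emulated_print(ppc_emulated.type.name); \
	} while (0)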
@@ -1,5 +1,215 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/page.h>

/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 */
_GLOBAL(load_up_altivec)
	mfmsr	r5			/* grab the current MSR */
	oris	r5,r5,MSR_VEC@h
	MTMSRD(r5)			/* enable use of AltiVec now */
	isync

	/*
	 * For SMP, we don't do lazy VMX switching because it just gets too
	 * horrendously complex, especially when a task switches from one CPU
	 * to another. Instead we call giveup_altivec in switch_to.
	 * VRSAVE isn't dealt with here, that is done in the normal context
	 * switch code. Note that we could rely on vrsave value to eventually
	 * avoid saving all of the VREGs here...
	 */
#ifndef CONFIG_SMP
	LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
	toreal(r3)
	PPC_LL	r4,ADDROFF(last_task_used_altivec)(r3)
	PPC_LCMPI	0,r4,0
	beq	1f

	/* Save VMX state to last_task_used_altivec's THREAD struct */
	toreal(r4)
	addi	r4,r4,THREAD
	SAVE_32VRS(0,r5,r4)
	mfvscr	vr0
	li	r10,THREAD_VSCR
	stvx	vr0,r10,r4
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r10,MSR_VEC@h
	andc	r4,r4,r10
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */

	/* Hack: if we get an altivec unavailable trap with VRSAVE
	 * set to all zeros, we assume this is a broken application
	 * that fails to set it properly, and thus we switch it to
	 * all 1's
	 */
	mfspr	r4,SPRN_VRSAVE
	cmpdi	0,r4,0
	bne+	1f
	li	r4,-1
	mtspr	SPRN_VRSAVE,r4
1:
	/* enable use of VMX after return */
#ifdef CONFIG_PPC32
	mfspr	r5,SPRN_SPRG3		/* current task's THREAD (phys) */
	oris	r9,r9,MSR_VEC@h
#else
	ld	r4,PACACURRENT(r13)
	addi	r5,r4,THREAD		/* Get THREAD */
	oris	r12,r12,MSR_VEC@h
	std	r12,_MSR(r1)
#endif
	li	r4,1
	li	r10,THREAD_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r5
	mtvscr	vr0
	REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	subi	r4,r5,THREAD		/* Back to 'current' */
	fromreal(r4)
	PPC_STL	r4,ADDROFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
	/* restore registers and return */
	blr

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	SYNC
	MTMSRD(r5)			/* enable use of VMX now */
	isync
	PPC_LCMPI	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	PPC_LL	r5,PT_REGS(r3)
	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	lis	r3,(MSR_VEC|MSR_VSX)@h
FTR_SECTION_ELSE
	lis	r3,MSR_VEC@h
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#else
	lis	r3,MSR_VEC@h
#endif
	andc	r4,r4,r3		/* disable FP for previous task */
	PPC_STL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
	PPC_STL	r5,ADDROFF(last_task_used_altivec)(r4)
#endif /* CONFIG_SMP */
	blr

#ifdef CONFIG_VSX

#ifdef CONFIG_PPC32
#error This asm code isn't ready for 32-bit kernels
#endif

/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 */
_GLOBAL(load_up_vsx)
	/* Load FP and VSX registers if they haven't been done yet */
	andi.	r5,r12,MSR_FP
	beql+	load_up_fpu		/* skip if already loaded */
	andis.	r5,r12,MSR_VEC@h
	beql+	load_up_altivec		/* skip if already loaded */

#ifndef CONFIG_SMP
	ld	r3,last_task_used_vsx@got(r2)
	ld	r4,0(r3)
	cmpdi	0,r4,0
	beq	1f
	/* Disable VSX for last_task_used_vsx */
	addi	r4,r4,THREAD
	ld	r5,PT_REGS(r4)
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r6,MSR_VSX@h
	andc	r6,r4,r6
	std	r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
	ld	r4,PACACURRENT(r13)
	addi	r4,r4,THREAD		/* Get THREAD */
	li	r6,1
	stw	r6,THREAD_USED_VSR(r4)	/* ... also set thread used vsr */
	/* enable use of VSX after return */
	oris	r12,r12,MSR_VSX@h
	std	r12,_MSR(r1)
#ifndef CONFIG_SMP
	/* Update last_task_used_math to 'current' */
	ld	r4,PACACURRENT(r13)
	std	r4,0(r3)
#endif /* CONFIG_SMP */
	b	fast_exception_return

/*
 * __giveup_vsx(tsk)
 * Disable VSX for the task given as the argument.
 * Does NOT save vsx registers.
 * Enables the VSX for use in the kernel on return.
 */
_GLOBAL(__giveup_vsx)
	mfmsr	r5
	oris	r5,r5,MSR_VSX@h
	mtmsrd	r5			/* enable use of VSX now */
	isync

	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VSX@h
	andc	r4,r4,r3		/* disable VSX for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_vsx@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_VSX */


/*
 * The routines below are in assembler so we can closely control the