Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:

	arch/x86/kernel/io_apic.c
Author: Rusty Russell
Date:   2008-12-31 23:05:57 +10:30
2084 changed files with 144487 additions and 50597 deletions

arch/sparc/kernel/.gitignore (new file)

@@ -0,0 +1 @@
vmlinux.lds

arch/sparc/kernel/Makefile

@@ -2,25 +2,98 @@
# Makefile for the linux kernel.
#
extra-y := head.o init_task.o vmlinux.lds
asflags-y := -ansi
ccflags-y := -Werror
EXTRA_AFLAGS := -ansi
extra-y := head_$(BITS).o
extra-y += init_task.o
extra-y += vmlinux.lds
IRQ_OBJS := irq.o sun4m_irq.o sun4c_irq.o sun4d_irq.o
obj-y := entry.o wof.o wuf.o etrap.o rtrap.o traps.o $(IRQ_OBJS) \
process.o signal.o ioport.o setup.o idprom.o \
sys_sparc.o systbls.o \
time.o windows.o cpu.o devices.o \
tadpole.o tick14.o ptrace.o \
unaligned.o una_asm.o muldiv.o \
prom.o of_device.o devres.o dma.o
obj-$(CONFIG_SPARC32) += entry.o wof.o wuf.o
obj-$(CONFIG_SPARC32) += etrap_32.o
obj-$(CONFIG_SPARC32) += rtrap_32.o
obj-y += traps_$(BITS).o
devres-y = ../../../kernel/irq/devres.o
# IRQ
obj-y += irq_$(BITS).o
obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4c_irq.o sun4d_irq.o
obj-$(CONFIG_PCI) += pcic.o
obj-$(CONFIG_SMP) += trampoline.o smp.o sun4m_smp.o sun4d_smp.o
obj-$(CONFIG_SUN_AUXIO) += auxio.o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
obj-$(CONFIG_MODULES) += module.o sparc_ksyms.o
obj-$(CONFIG_SPARC_LED) += led.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-y += process_$(BITS).o
obj-y += signal_$(BITS).o
obj-$(CONFIG_SPARC32) += ioport.o
obj-y += setup_$(BITS).o
obj-y += idprom.o
obj-y += sys_sparc_$(BITS).o
obj-$(CONFIG_SPARC32) += systbls_32.o
obj-y += time_$(BITS).o
obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o
obj-$(CONFIG_SPARC32) += devices.o
obj-$(CONFIG_SPARC32) += tadpole.o
obj-$(CONFIG_SPARC32) += tick14.o
obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o
obj-y += una_asm_$(BITS).o
obj-$(CONFIG_SPARC32) += muldiv.o
obj-y += prom_common.o
obj-y += prom_$(BITS).o
obj-y += of_device_$(BITS).o
obj-$(CONFIG_SPARC64) += prom_irqtrans.o
obj-$(CONFIG_SPARC64) += reboot.o
obj-$(CONFIG_SPARC64) += sysfs.o
obj-$(CONFIG_SPARC64) += iommu.o
obj-$(CONFIG_SPARC64) += central.o
obj-$(CONFIG_SPARC64) += starfire.o
obj-$(CONFIG_SPARC64) += power.o
obj-$(CONFIG_SPARC64) += sbus.o
obj-$(CONFIG_SPARC64) += ebus.o
obj-$(CONFIG_SPARC64) += visemul.o
obj-$(CONFIG_SPARC64) += hvapi.o
obj-$(CONFIG_SPARC64) += sstate.o
obj-$(CONFIG_SPARC64) += mdesc.o
# sparc32 does not use GENERIC_HARDIRQS but does use the generic devres implementation
obj-$(CONFIG_SPARC32) += devres.o
devres-y := ../../../kernel/irq/devres.o
obj-$(CONFIG_SPARC32) += dma.o
obj-$(CONFIG_SPARC32_PCI) += pcic.o
obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o
obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o
obj-$(CONFIG_SPARC64_SMP) += hvtramp.o
obj-y += auxio_$(BITS).o
obj-$(CONFIG_SUN_PM) += apc.o pmc.o
obj-$(CONFIG_MODULES) += module.o
obj-$(CONFIG_MODULES) += sparc_ksyms_$(BITS).o
obj-$(CONFIG_SPARC_LED) += led.o
obj-$(CONFIG_KGDB) += kgdb_$(BITS).o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
CFLAGS_REMOVE_ftrace.o := -pg
obj-$(CONFIG_STACKTRACE) += stacktrace.o
# sparc64 PCI
obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o
obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o
obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o
obj-$(CONFIG_PCI_MSI) += pci_msi.o
obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o
# sparc64 cpufreq
obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o
obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o
obj-$(CONFIG_US3_MC) += chmc.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_SUN_LDOMS) += ldc.o vio.o viohs.o ds.o
obj-$(CONFIG_AUDIT) += audit.o
audit--$(CONFIG_AUDIT) := compat_audit.o
obj-$(CONFIG_COMPAT) += $(audit--y)
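# Editor's note: "audit--$(CONFIG_AUDIT)" is a conditional indirection;
# $(audit--y) expands to compat_audit.o only when CONFIG_AUDIT=y, so
# compat_audit.o is built only when both CONFIG_COMPAT and CONFIG_AUDIT
# are enabled.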

arch/sparc/kernel/asm-offsets.c

@@ -14,15 +14,28 @@
// #include <linux/mm.h>
#include <linux/kbuild.h>
int foo(void)
#ifdef CONFIG_SPARC32
int sparc32_foo(void)
{
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
BLANK();
DEFINE(AOFF_thread_fork_kpsr,
offsetof(struct thread_struct, fork_kpsr));
return 0;
}
#else
int sparc64_foo(void)
{
return 0;
}
#endif
int foo(void)
{
BLANK();
DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread));
BLANK();
DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context));
/* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */
return 0;
}

arch/sparc/kernel/audit.c (new file)

@@ -0,0 +1,83 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/audit.h>
#include <asm/unistd.h>
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
static unsigned read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
static unsigned chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
static unsigned signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
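/* Editor's sketch (not part of the commit): each asm-generic header
 * above expands to a comma-separated list of syscall numbers, so a
 * class array ends up looking roughly like this hypothetical example,
 * with ~0U as the terminating sentinel the audit core scans for:
 *
 *	static unsigned example_class[] = {
 *		__NR_mkdir, __NR_rename, __NR_unlink,
 *		~0U
 *	};
 */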
int audit_classify_arch(int arch)
{
#ifdef CONFIG_COMPAT
if (arch == AUDIT_ARCH_SPARC)
return 1;
#endif
return 0;
}
int audit_classify_syscall(int abi, unsigned syscall)
{
#ifdef CONFIG_COMPAT
extern int sparc32_classify_syscall(unsigned);
if (abi == AUDIT_ARCH_SPARC)
return sparc32_classify_syscall(syscall);
#endif
switch(syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_socketcall:
return 4;
case __NR_execve:
return 5;
default:
return 0;
}
}
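/* Editor's note: these return values appear to follow the generic
 * audit core's syscall class numbering (0 = no special class,
 * 1 = compat, 2 = open-like, 3 = openat-like, 4 = socketcall
 * multiplexer, 5 = execve), which the filter layer uses to locate
 * pathname arguments. */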
static int __init audit_classes_init(void)
{
#ifdef CONFIG_COMPAT
extern __u32 sparc32_dir_class[];
extern __u32 sparc32_write_class[];
extern __u32 sparc32_read_class[];
extern __u32 sparc32_chattr_class[];
extern __u32 sparc32_signal_class[];
audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class);
audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class);
audit_register_class(AUDIT_CLASS_CHATTR_32, sparc32_chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL_32, sparc32_signal_class);
#endif
audit_register_class(AUDIT_CLASS_WRITE, write_class);
audit_register_class(AUDIT_CLASS_READ, read_class);
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
audit_register_class(AUDIT_CLASS_SIGNAL, signal_class);
return 0;
}
__initcall(audit_classes_init);

arch/sparc/kernel/auxio_64.c (new file)

@@ -0,0 +1,149 @@
/* auxio.c: Probing for the Sparc AUXIO register at boot time.
*
* Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
*
* Refactoring for unified NCR/PCIO support 2002 Eric Brower (ebrower@usa.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/auxio.h>
void __iomem *auxio_register = NULL;
EXPORT_SYMBOL(auxio_register);
enum auxio_type {
AUXIO_TYPE_NODEV,
AUXIO_TYPE_SBUS,
AUXIO_TYPE_EBUS
};
static enum auxio_type auxio_devtype = AUXIO_TYPE_NODEV;
static DEFINE_SPINLOCK(auxio_lock);
static void __auxio_rmw(u8 bits_on, u8 bits_off, int ebus)
{
if (auxio_register) {
unsigned long flags;
u8 regval, newval;
spin_lock_irqsave(&auxio_lock, flags);
regval = (ebus ?
(u8) readl(auxio_register) :
sbus_readb(auxio_register));
newval = regval | bits_on;
newval &= ~bits_off;
if (!ebus)
newval &= ~AUXIO_AUX1_MASK;
if (ebus)
writel((u32) newval, auxio_register);
else
sbus_writeb(newval, auxio_register);
spin_unlock_irqrestore(&auxio_lock, flags);
}
}
static void __auxio_set_bit(u8 bit, int on, int ebus)
{
u8 bits_on = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
u8 bits_off = 0;
if (!on) {
u8 tmp = bits_off;
bits_off = bits_on;
bits_on = tmp;
}
__auxio_rmw(bits_on, bits_off, ebus);
}
void auxio_set_led(int on)
{
int ebus = auxio_devtype == AUXIO_TYPE_EBUS;
u8 bit;
bit = (ebus ? AUXIO_PCIO_LED : AUXIO_AUX1_LED);
__auxio_set_bit(bit, on, ebus);
}
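/* Editor's usage sketch: callers simply do auxio_set_led(AUXIO_LED_ON)
 * or auxio_set_led(AUXIO_LED_OFF); __auxio_rmw() picks 32-bit readl()/
 * writel() on EBUS and byte-wide sbus_readb()/sbus_writeb() on SBUS,
 * serialized by auxio_lock. */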
static void __auxio_sbus_set_lte(int on)
{
__auxio_set_bit(AUXIO_AUX1_LTE, on, 0);
}
void auxio_set_lte(int on)
{
switch(auxio_devtype) {
case AUXIO_TYPE_SBUS:
__auxio_sbus_set_lte(on);
break;
case AUXIO_TYPE_EBUS:
/* FALL-THROUGH */
default:
break;
}
}
static struct of_device_id __initdata auxio_match[] = {
{
.name = "auxio",
},
{},
};
MODULE_DEVICE_TABLE(of, auxio_match);
static int __devinit auxio_probe(struct of_device *dev, const struct of_device_id *match)
{
struct device_node *dp = dev->node;
unsigned long size;
if (!strcmp(dp->parent->name, "ebus")) {
auxio_devtype = AUXIO_TYPE_EBUS;
size = sizeof(u32);
} else if (!strcmp(dp->parent->name, "sbus")) {
auxio_devtype = AUXIO_TYPE_SBUS;
size = 1;
} else {
printk("auxio: Unknown parent bus type [%s]\n",
dp->parent->name);
return -ENODEV;
}
auxio_register = of_ioremap(&dev->resource[0], 0, size, "auxio");
if (!auxio_register)
return -ENODEV;
printk(KERN_INFO "AUXIO: Found device at %s\n",
dp->full_name);
if (auxio_devtype == AUXIO_TYPE_EBUS)
auxio_set_led(AUXIO_LED_ON);
return 0;
}
static struct of_platform_driver auxio_driver = {
.match_table = auxio_match,
.probe = auxio_probe,
.driver = {
.name = "auxio",
},
};
static int __init auxio_init(void)
{
return of_register_driver(&auxio_driver, &of_platform_bus_type);
}
/* Must be after subsys_initcall() so that busses are probed. Must
* be before device_initcall() because things like the floppy driver
* need to use the AUXIO register.
*/
fs_initcall(auxio_init);

arch/sparc/kernel/central.c (new file)

@@ -0,0 +1,268 @@
/* central.c: Central FHC driver for Sunfire/Starfire/Wildfire.
*
* Copyright (C) 1997, 1999, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <asm/fhc.h>
#include <asm/upa.h>
struct clock_board {
void __iomem *clock_freq_regs;
void __iomem *clock_regs;
void __iomem *clock_ver_reg;
int num_slots;
struct resource leds_resource;
struct platform_device leds_pdev;
};
struct fhc {
void __iomem *pregs;
bool central;
bool jtag_master;
int board_num;
struct resource leds_resource;
struct platform_device leds_pdev;
};
static int __devinit clock_board_calc_nslots(struct clock_board *p)
{
u8 reg = upa_readb(p->clock_regs + CLOCK_STAT1) & 0xc0;
switch (reg) {
case 0x40:
return 16;
case 0xc0:
return 8;
case 0x80:
reg = 0;
if (p->clock_ver_reg)
reg = upa_readb(p->clock_ver_reg);
if (reg) {
if (reg & 0x80)
return 4;
else
return 5;
}
/* Fallthrough */
default:
return 4;
}
}
static int __devinit clock_board_probe(struct of_device *op,
const struct of_device_id *match)
{
struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
if (!p) {
printk(KERN_ERR "clock_board: Cannot allocate struct clock_board\n");
goto out;
}
p->clock_freq_regs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]),
"clock_board_freq");
if (!p->clock_freq_regs) {
printk(KERN_ERR "clock_board: Cannot map clock_freq_regs\n");
goto out_free;
}
p->clock_regs = of_ioremap(&op->resource[1], 0,
resource_size(&op->resource[1]),
"clock_board_regs");
if (!p->clock_regs) {
printk(KERN_ERR "clock_board: Cannot map clock_regs\n");
goto out_unmap_clock_freq_regs;
}
if (op->resource[2].flags) {
p->clock_ver_reg = of_ioremap(&op->resource[2], 0,
resource_size(&op->resource[2]),
"clock_ver_reg");
if (!p->clock_ver_reg) {
printk(KERN_ERR "clock_board: Cannot map clock_ver_reg\n");
goto out_unmap_clock_regs;
}
}
p->num_slots = clock_board_calc_nslots(p);
p->leds_resource.start = (unsigned long)
(p->clock_regs + CLOCK_CTRL);
p->leds_resource.end = p->leds_resource.start;
p->leds_resource.name = "leds";
p->leds_pdev.name = "sunfire-clockboard-leds";
p->leds_pdev.resource = &p->leds_resource;
p->leds_pdev.num_resources = 1;
p->leds_pdev.dev.parent = &op->dev;
err = platform_device_register(&p->leds_pdev);
if (err) {
printk(KERN_ERR "clock_board: Could not register LEDS "
"platform device\n");
goto out_unmap_clock_ver_reg;
}
printk(KERN_INFO "clock_board: Detected %d slot Enterprise system.\n",
p->num_slots);
err = 0;
out:
return err;
out_unmap_clock_ver_reg:
if (p->clock_ver_reg)
of_iounmap(&op->resource[2], p->clock_ver_reg,
resource_size(&op->resource[2]));
out_unmap_clock_regs:
of_iounmap(&op->resource[1], p->clock_regs,
resource_size(&op->resource[1]));
out_unmap_clock_freq_regs:
of_iounmap(&op->resource[0], p->clock_freq_regs,
resource_size(&op->resource[0]));
out_free:
kfree(p);
goto out;
}
static struct of_device_id __initdata clock_board_match[] = {
{
.name = "clock-board",
},
{},
};
static struct of_platform_driver clock_board_driver = {
.match_table = clock_board_match,
.probe = clock_board_probe,
.driver = {
.name = "clock_board",
},
};
static int __devinit fhc_probe(struct of_device *op,
const struct of_device_id *match)
{
struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL);
int err = -ENOMEM;
u32 reg;
if (!p) {
printk(KERN_ERR "fhc: Cannot allocate struct fhc\n");
goto out;
}
if (!strcmp(op->node->parent->name, "central"))
p->central = true;
p->pregs = of_ioremap(&op->resource[0], 0,
resource_size(&op->resource[0]),
"fhc_pregs");
if (!p->pregs) {
printk(KERN_ERR "fhc: Cannot map pregs\n");
goto out_free;
}
if (p->central) {
reg = upa_readl(p->pregs + FHC_PREGS_BSR);
p->board_num = ((reg >> 16) & 1) | ((reg >> 12) & 0x0e);
} else {
p->board_num = of_getintprop_default(op->node, "board#", -1);
if (p->board_num == -1) {
printk(KERN_ERR "fhc: No board# property\n");
goto out_unmap_pregs;
}
if (upa_readl(p->pregs + FHC_PREGS_JCTRL) & FHC_JTAG_CTRL_MENAB)
p->jtag_master = true;
}
if (!p->central) {
p->leds_resource.start = (unsigned long)
(p->pregs + FHC_PREGS_CTRL);
p->leds_resource.end = p->leds_resource.start;
p->leds_resource.name = "leds";
p->leds_pdev.name = "sunfire-fhc-leds";
p->leds_pdev.resource = &p->leds_resource;
p->leds_pdev.num_resources = 1;
p->leds_pdev.dev.parent = &op->dev;
err = platform_device_register(&p->leds_pdev);
if (err) {
printk(KERN_ERR "fhc: Could not register LEDS "
"platform device\n");
goto out_unmap_pregs;
}
}
reg = upa_readl(p->pregs + FHC_PREGS_CTRL);
if (!p->central)
reg |= FHC_CONTROL_IXIST;
reg &= ~(FHC_CONTROL_AOFF |
FHC_CONTROL_BOFF |
FHC_CONTROL_SLINE);
upa_writel(reg, p->pregs + FHC_PREGS_CTRL);
upa_readl(p->pregs + FHC_PREGS_CTRL);
reg = upa_readl(p->pregs + FHC_PREGS_ID);
printk(KERN_INFO "fhc: Board #%d, Version[%x] PartID[%x] Manuf[%x] %s\n",
p->board_num,
(reg & FHC_ID_VERS) >> 28,
(reg & FHC_ID_PARTID) >> 12,
(reg & FHC_ID_MANUF) >> 1,
(p->jtag_master ?
"(JTAG Master)" :
(p->central ? "(Central)" : "")));
err = 0;
out:
return err;
out_unmap_pregs:
of_iounmap(&op->resource[0], p->pregs, resource_size(&op->resource[0]));
out_free:
kfree(p);
goto out;
}
static struct of_device_id __initdata fhc_match[] = {
{
.name = "fhc",
},
{},
};
static struct of_platform_driver fhc_driver = {
.match_table = fhc_match,
.probe = fhc_probe,
.driver = {
.name = "fhc",
},
};
static int __init sunfire_init(void)
{
(void) of_register_driver(&fhc_driver, &of_platform_bus_type);
(void) of_register_driver(&clock_board_driver, &of_platform_bus_type);
return 0;
}
subsys_initcall(sunfire_init);

arch/sparc/kernel/cherrs.S (new file)

@@ -0,0 +1,579 @@
/* These get patched into the trap table at boot time
* once we know we have a cheetah processor.
*/
.globl cheetah_fecc_trap_vector
.type cheetah_fecc_trap_vector,#function
cheetah_fecc_trap_vector:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_DC | DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_fast_ecc), %g2
jmpl %g2 + %lo(cheetah_fast_ecc), %g0
mov 0, %g1
.size cheetah_fecc_trap_vector,.-cheetah_fecc_trap_vector
.globl cheetah_fecc_trap_vector_tl1
.type cheetah_fecc_trap_vector_tl1,#function
cheetah_fecc_trap_vector_tl1:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_DC | DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_fast_ecc), %g2
jmpl %g2 + %lo(cheetah_fast_ecc), %g0
mov 1, %g1
.size cheetah_fecc_trap_vector_tl1,.-cheetah_fecc_trap_vector_tl1
.globl cheetah_cee_trap_vector
.type cheetah_cee_trap_vector,#function
cheetah_cee_trap_vector:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_cee), %g2
jmpl %g2 + %lo(cheetah_cee), %g0
mov 0, %g1
.size cheetah_cee_trap_vector,.-cheetah_cee_trap_vector
.globl cheetah_cee_trap_vector_tl1
.type cheetah_cee_trap_vector_tl1,#function
cheetah_cee_trap_vector_tl1:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
andn %g1, DCU_IC, %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
sethi %hi(cheetah_cee), %g2
jmpl %g2 + %lo(cheetah_cee), %g0
mov 1, %g1
.size cheetah_cee_trap_vector_tl1,.-cheetah_cee_trap_vector_tl1
.globl cheetah_deferred_trap_vector
.type cheetah_deferred_trap_vector,#function
cheetah_deferred_trap_vector:
membar #Sync
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1;
andn %g1, DCU_DC | DCU_IC, %g1;
stxa %g1, [%g0] ASI_DCU_CONTROL_REG;
membar #Sync;
sethi %hi(cheetah_deferred_trap), %g2
jmpl %g2 + %lo(cheetah_deferred_trap), %g0
mov 0, %g1
.size cheetah_deferred_trap_vector,.-cheetah_deferred_trap_vector
.globl cheetah_deferred_trap_vector_tl1
.type cheetah_deferred_trap_vector_tl1,#function
cheetah_deferred_trap_vector_tl1:
membar #Sync;
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1;
andn %g1, DCU_DC | DCU_IC, %g1;
stxa %g1, [%g0] ASI_DCU_CONTROL_REG;
membar #Sync;
sethi %hi(cheetah_deferred_trap), %g2
jmpl %g2 + %lo(cheetah_deferred_trap), %g0
mov 1, %g1
.size cheetah_deferred_trap_vector_tl1,.-cheetah_deferred_trap_vector_tl1
/* Cheetah+ specific traps. These are for the new I/D cache parity
* error traps. The first argument to cheetah_plus_parity_handler
* is encoded as follows:
*
* Bit0: 0=dcache,1=icache
* Bit1: 0=recoverable,1=unrecoverable
*/
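/* Editor's sketch (not part of the commit): in C the handler argument
 * decodes as
 *	icache        = arg & 0x1;
 *	unrecoverable = arg & 0x2;
 * which is why the stubs below pass 0x0, 0x1, 0x2 and 0x3 for
 * recoverable D-cache, recoverable I-cache, fatal D-cache and fatal
 * I-cache parity errors respectively. */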
.globl cheetah_plus_dcpe_trap_vector
.type cheetah_plus_dcpe_trap_vector,#function
cheetah_plus_dcpe_trap_vector:
membar #Sync
sethi %hi(do_cheetah_plus_data_parity), %g7
jmpl %g7 + %lo(do_cheetah_plus_data_parity), %g0
nop
nop
nop
nop
nop
.size cheetah_plus_dcpe_trap_vector,.-cheetah_plus_dcpe_trap_vector
.type do_cheetah_plus_data_parity,#function
do_cheetah_plus_data_parity:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov 0x0, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,a,pt %xcc, rtrap_irq
.size do_cheetah_plus_data_parity,.-do_cheetah_plus_data_parity
.globl cheetah_plus_dcpe_trap_vector_tl1
.type cheetah_plus_dcpe_trap_vector_tl1,#function
cheetah_plus_dcpe_trap_vector_tl1:
membar #Sync
wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
sethi %hi(do_dcpe_tl1), %g3
jmpl %g3 + %lo(do_dcpe_tl1), %g0
nop
nop
nop
nop
.size cheetah_plus_dcpe_trap_vector_tl1,.-cheetah_plus_dcpe_trap_vector_tl1
.globl cheetah_plus_icpe_trap_vector
.type cheetah_plus_icpe_trap_vector,#function
cheetah_plus_icpe_trap_vector:
membar #Sync
sethi %hi(do_cheetah_plus_insn_parity), %g7
jmpl %g7 + %lo(do_cheetah_plus_insn_parity), %g0
nop
nop
nop
nop
nop
.size cheetah_plus_icpe_trap_vector,.-cheetah_plus_icpe_trap_vector
.type do_cheetah_plus_insn_parity,#function
do_cheetah_plus_insn_parity:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov 0x1, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,a,pt %xcc, rtrap_irq
.size do_cheetah_plus_insn_parity,.-do_cheetah_plus_insn_parity
.globl cheetah_plus_icpe_trap_vector_tl1
.type cheetah_plus_icpe_trap_vector_tl1,#function
cheetah_plus_icpe_trap_vector_tl1:
membar #Sync
wrpr PSTATE_IG | PSTATE_PEF | PSTATE_PRIV, %pstate
sethi %hi(do_icpe_tl1), %g3
jmpl %g3 + %lo(do_icpe_tl1), %g0
nop
nop
nop
nop
.size cheetah_plus_icpe_trap_vector_tl1,.-cheetah_plus_icpe_trap_vector_tl1
/* If we take one of these traps when tl >= 1, then we
* jump to interrupt globals. If some trap level above us
* was also using interrupt globals, we cannot recover.
* We may use all interrupt global registers except %g6.
*/
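/* Editor's illustration of the scan below, in C terms:
 *
 *	for (tl = 1; tl <= cur_tl; tl++)
 *		if (tstate_at(tl) & TSTATE_IG)
 *			goto fatal;
 *
 * tstate_at() is hypothetical shorthand for "read %tstate with %tl set
 * to that level"; if any active level already runs on the interrupt
 * globals, the trap is unrecoverable. */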
.globl do_dcpe_tl1
.type do_dcpe_tl1,#function
do_dcpe_tl1:
rdpr %tl, %g1 ! Save original trap level
mov 1, %g2 ! Setup TSTATE checking loop
sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
1: wrpr %g2, %tl ! Set trap level to check
rdpr %tstate, %g4 ! Read TSTATE for this level
andcc %g4, %g3, %g0 ! Interrupt globals in use?
bne,a,pn %xcc, do_dcpe_tl1_fatal ! Yep, irrecoverable
wrpr %g1, %tl ! Restore original trap level
add %g2, 1, %g2 ! Next trap level
cmp %g2, %g1 ! Hit them all yet?
ble,pt %icc, 1b ! Not yet
nop
wrpr %g1, %tl ! Restore original trap level
do_dcpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
sethi %hi(dcache_parity_tl1_occurred), %g2
lduw [%g2 + %lo(dcache_parity_tl1_occurred)], %g1
add %g1, 1, %g1
stw %g1, [%g2 + %lo(dcache_parity_tl1_occurred)]
/* Reset D-cache parity */
sethi %hi(1 << 16), %g1 ! D-cache size
mov (1 << 5), %g2 ! D-cache line size
sub %g1, %g2, %g1 ! Move down 1 cacheline
1: srl %g1, 14, %g3 ! Compute UTAG
membar #Sync
stxa %g3, [%g1] ASI_DCACHE_UTAG
membar #Sync
sub %g2, 8, %g3 ! 64-bit data word within line
2: membar #Sync
stxa %g0, [%g1 + %g3] ASI_DCACHE_DATA
membar #Sync
subcc %g3, 8, %g3 ! Next 64-bit data word
bge,pt %icc, 2b
nop
subcc %g1, %g2, %g1 ! Next cacheline
bge,pt %icc, 1b
nop
ba,pt %xcc, dcpe_icpe_tl1_common
nop
do_dcpe_tl1_fatal:
sethi %hi(1f), %g7
ba,pt %xcc, etraptl1
1: or %g7, %lo(1b), %g7
mov 0x2, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,pt %xcc, rtrap
nop
.size do_dcpe_tl1,.-do_dcpe_tl1
.globl do_icpe_tl1
.type do_icpe_tl1,#function
do_icpe_tl1:
rdpr %tl, %g1 ! Save original trap level
mov 1, %g2 ! Setup TSTATE checking loop
sethi %hi(TSTATE_IG), %g3 ! TSTATE mask bit
1: wrpr %g2, %tl ! Set trap level to check
rdpr %tstate, %g4 ! Read TSTATE for this level
andcc %g4, %g3, %g0 ! Interrupt globals in use?
bne,a,pn %xcc, do_icpe_tl1_fatal ! Yep, irrecoverable
wrpr %g1, %tl ! Restore original trap level
add %g2, 1, %g2 ! Next trap level
cmp %g2, %g1 ! Hit them all yet?
ble,pt %icc, 1b ! Not yet
nop
wrpr %g1, %tl ! Restore original trap level
do_icpe_tl1_nonfatal: /* Ok we may use interrupt globals safely. */
sethi %hi(icache_parity_tl1_occurred), %g2
lduw [%g2 + %lo(icache_parity_tl1_occurred)], %g1
add %g1, 1, %g1
stw %g1, [%g2 + %lo(icache_parity_tl1_occurred)]
/* Flush I-cache */
sethi %hi(1 << 15), %g1 ! I-cache size
mov (1 << 5), %g2 ! I-cache line size
sub %g1, %g2, %g1
1: or %g1, (2 << 3), %g3
stxa %g0, [%g3] ASI_IC_TAG
membar #Sync
subcc %g1, %g2, %g1
bge,pt %icc, 1b
nop
ba,pt %xcc, dcpe_icpe_tl1_common
nop
do_icpe_tl1_fatal:
sethi %hi(1f), %g7
ba,pt %xcc, etraptl1
1: or %g7, %lo(1b), %g7
mov 0x3, %o0
call cheetah_plus_parity_error
add %sp, PTREGS_OFF, %o1
ba,pt %xcc, rtrap
nop
.size do_icpe_tl1,.-do_icpe_tl1
.type dcpe_icpe_tl1_common,#function
dcpe_icpe_tl1_common:
/* Flush D-cache, re-enable D/I caches in DCU and finally
* retry the trapping instruction.
*/
sethi %hi(1 << 16), %g1 ! D-cache size
mov (1 << 5), %g2 ! D-cache line size
sub %g1, %g2, %g1
1: stxa %g0, [%g1] ASI_DCACHE_TAG
membar #Sync
subcc %g1, %g2, %g1
bge,pt %icc, 1b
nop
ldxa [%g0] ASI_DCU_CONTROL_REG, %g1
or %g1, (DCU_DC | DCU_IC), %g1
stxa %g1, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
retry
.size dcpe_icpe_tl1_common,.-dcpe_icpe_tl1_common
/* Capture I/D/E-cache state into per-cpu error scoreboard.
*
* %g1: (TL>=0) ? 1 : 0
* %g2: scratch
* %g3: scratch
* %g4: AFSR
* %g5: AFAR
* %g6: unused, will have current thread ptr after etrap
* %g7: scratch
*/
.type __cheetah_log_error,#function
__cheetah_log_error:
/* Put "TL1" software bit into AFSR. */
and %g1, 0x1, %g1
sllx %g1, 63, %g2
or %g4, %g2, %g4
/* Get log entry pointer for this cpu at this trap level. */
BRANCH_IF_JALAPENO(g2,g3,50f)
ldxa [%g0] ASI_SAFARI_CONFIG, %g2
srlx %g2, 17, %g2
ba,pt %xcc, 60f
and %g2, 0x3ff, %g2
50: ldxa [%g0] ASI_JBUS_CONFIG, %g2
srlx %g2, 17, %g2
and %g2, 0x1f, %g2
60: sllx %g2, 9, %g2
sethi %hi(cheetah_error_log), %g3
ldx [%g3 + %lo(cheetah_error_log)], %g3
brz,pn %g3, 80f
nop
add %g3, %g2, %g3
sllx %g1, 8, %g1
add %g3, %g1, %g1
/* %g1 holds pointer to the top of the logging scoreboard */
ldx [%g1 + 0x0], %g7
cmp %g7, -1
bne,pn %xcc, 80f
nop
stx %g4, [%g1 + 0x0]
stx %g5, [%g1 + 0x8]
add %g1, 0x10, %g1
/* %g1 now points to D-cache logging area */
set 0x3ff8, %g2 /* DC_addr mask */
and %g5, %g2, %g2 /* DC_addr bits of AFAR */
srlx %g5, 12, %g3
or %g3, 1, %g3 /* PHYS tag + valid */
10: ldxa [%g2] ASI_DCACHE_TAG, %g7
cmp %g3, %g7 /* TAG match? */
bne,pt %xcc, 13f
nop
/* Yep, what we want, capture state. */
stx %g2, [%g1 + 0x20]
stx %g7, [%g1 + 0x28]
/* A membar Sync is required before and after utag access. */
membar #Sync
ldxa [%g2] ASI_DCACHE_UTAG, %g7
membar #Sync
stx %g7, [%g1 + 0x30]
ldxa [%g2] ASI_DCACHE_SNOOP_TAG, %g7
stx %g7, [%g1 + 0x38]
clr %g3
12: ldxa [%g2 + %g3] ASI_DCACHE_DATA, %g7
stx %g7, [%g1]
add %g3, (1 << 5), %g3
cmp %g3, (4 << 5)
bl,pt %xcc, 12b
add %g1, 0x8, %g1
ba,pt %xcc, 20f
add %g1, 0x20, %g1
13: sethi %hi(1 << 14), %g7
add %g2, %g7, %g2
srlx %g2, 14, %g7
cmp %g7, 4
bl,pt %xcc, 10b
nop
add %g1, 0x40, %g1
/* %g1 now points to I-cache logging area */
20: set 0x1fe0, %g2 /* IC_addr mask */
and %g5, %g2, %g2 /* IC_addr bits of AFAR */
sllx %g2, 1, %g2 /* IC_addr[13:6]==VA[12:5] */
srlx %g5, (13 - 8), %g3 /* Make PTAG */
andn %g3, 0xff, %g3 /* Mask off undefined bits */
21: ldxa [%g2] ASI_IC_TAG, %g7
andn %g7, 0xff, %g7
cmp %g3, %g7
bne,pt %xcc, 23f
nop
/* Yep, what we want, capture state. */
stx %g2, [%g1 + 0x40]
stx %g7, [%g1 + 0x48]
add %g2, (1 << 3), %g2
ldxa [%g2] ASI_IC_TAG, %g7
add %g2, (1 << 3), %g2
stx %g7, [%g1 + 0x50]
ldxa [%g2] ASI_IC_TAG, %g7
add %g2, (1 << 3), %g2
stx %g7, [%g1 + 0x60]
ldxa [%g2] ASI_IC_TAG, %g7
stx %g7, [%g1 + 0x68]
sub %g2, (3 << 3), %g2
ldxa [%g2] ASI_IC_STAG, %g7
stx %g7, [%g1 + 0x58]
clr %g3
srlx %g2, 2, %g2
22: ldxa [%g2 + %g3] ASI_IC_INSTR, %g7
stx %g7, [%g1]
add %g3, (1 << 3), %g3
cmp %g3, (8 << 3)
bl,pt %xcc, 22b
add %g1, 0x8, %g1
ba,pt %xcc, 30f
add %g1, 0x30, %g1
23: sethi %hi(1 << 14), %g7
add %g2, %g7, %g2
srlx %g2, 14, %g7
cmp %g7, 4
bl,pt %xcc, 21b
nop
add %g1, 0x70, %g1
/* %g1 now points to E-cache logging area */
30: andn %g5, (32 - 1), %g2
stx %g2, [%g1 + 0x20]
ldxa [%g2] ASI_EC_TAG_DATA, %g7
stx %g7, [%g1 + 0x28]
ldxa [%g2] ASI_EC_R, %g0
clr %g3
31: ldxa [%g3] ASI_EC_DATA, %g7
stx %g7, [%g1 + %g3]
add %g3, 0x8, %g3
cmp %g3, 0x20
bl,pt %xcc, 31b
nop
80:
rdpr %tt, %g2
cmp %g2, 0x70
be c_fast_ecc
cmp %g2, 0x63
be c_cee
nop
ba,pt %xcc, c_deferred
.size __cheetah_log_error,.-__cheetah_log_error
/* Cheetah FECC trap handling, we get here from tl{0,1}_fecc
* in the trap table. That code has done a memory barrier
* and has disabled both the I-cache and D-cache in the DCU
* control register. The I-cache is disabled so that we may
* capture the corrupted cache line, and the D-cache is disabled
* because corrupt data may have been placed there and we don't
* want to reference it.
*
* %g1 is one if this trap occurred at %tl >= 1.
*
* Next, we turn off error reporting so that we don't recurse.
*/
.globl cheetah_fast_ecc
.type cheetah_fast_ecc,#function
cheetah_fast_ecc:
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Fetch and clear AFSR/AFAR */
ldxa [%g0] ASI_AFSR, %g4
ldxa [%g0] ASI_AFAR, %g5
stxa %g4, [%g0] ASI_AFSR
membar #Sync
ba,pt %xcc, __cheetah_log_error
nop
.size cheetah_fast_ecc,.-cheetah_fast_ecc
.type c_fast_ecc,#function
c_fast_ecc:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call cheetah_fecc_handler
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap_irq
.size c_fast_ecc,.-c_fast_ecc
/* Our caller has disabled I-cache and performed membar Sync. */
.globl cheetah_cee
.type cheetah_cee,#function
cheetah_cee:
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
andn %g2, ESTATE_ERROR_CEEN, %g2
stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Fetch and clear AFSR/AFAR */
ldxa [%g0] ASI_AFSR, %g4
ldxa [%g0] ASI_AFAR, %g5
stxa %g4, [%g0] ASI_AFSR
membar #Sync
ba,pt %xcc, __cheetah_log_error
nop
.size cheetah_cee,.-cheetah_cee
.type c_cee,#function
c_cee:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call cheetah_cee_handler
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap_irq
.size c_cee,.-c_cee
/* Our caller has disabled I-cache+D-cache and performed membar Sync. */
.globl cheetah_deferred_trap
.type cheetah_deferred_trap,#function
cheetah_deferred_trap:
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g2
andn %g2, ESTATE_ERROR_NCEEN | ESTATE_ERROR_CEEN, %g2
stxa %g2, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Fetch and clear AFSR/AFAR */
ldxa [%g0] ASI_AFSR, %g4
ldxa [%g0] ASI_AFAR, %g5
stxa %g4, [%g0] ASI_AFSR
membar #Sync
ba,pt %xcc, __cheetah_log_error
nop
.size cheetah_deferred_trap,.-cheetah_deferred_trap
.type c_deferred,#function
c_deferred:
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call cheetah_deferred_handler
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap_irq
.size c_deferred,.-c_deferred

arch/sparc/kernel/chmc.c (new file)

@@ -0,0 +1,863 @@
/* chmc.c: Driver for UltraSPARC-III memory controller.
*
* Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/spitfire.h>
#include <asm/chmctrl.h>
#include <asm/cpudata.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/head.h>
#include <asm/io.h>
#include <asm/memctrl.h>
#define DRV_MODULE_NAME "chmc"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "0.2"
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("UltraSPARC-III memory controller driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int mc_type;
#define MC_TYPE_SAFARI 1
#define MC_TYPE_JBUS 2
static dimm_printer_t us3mc_dimm_printer;
#define CHMCTRL_NDGRPS 2
#define CHMCTRL_NDIMMS 4
#define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS)
/* OBP memory-layout property format. */
struct chmc_obp_map {
unsigned char dimm_map[144];
unsigned char pin_map[576];
};
#define DIMM_LABEL_SZ 8
struct chmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct chmc_obp_map map[2];
};
#define CHMCTRL_NBANKS 4
struct chmc_bank_info {
struct chmc *p;
int bank_id;
u64 raw_reg;
int valid;
int uk;
int um;
int lk;
int lm;
int interleave;
unsigned long base;
unsigned long size;
};
struct chmc {
struct list_head list;
int portid;
struct chmc_obp_mem_layout layout_prop;
int layout_size;
void __iomem *regs;
u64 timing_control1;
u64 timing_control2;
u64 timing_control3;
u64 timing_control4;
u64 memaddr_control;
struct chmc_bank_info logical_banks[CHMCTRL_NBANKS];
};
#define JBUSMC_REGS_SIZE 8
#define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL
#define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL
#define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL
#define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL
#define JB_MC_REG1_XOR 0x0000010000000000UL
#define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL
#define JB_MC_REG1_ADDR_GEN_2_SHIFT 37
#define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL
#define JB_MC_REG1_ADDR_GEN_1_SHIFT 34
#define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL
#define JB_MC_REG1_INTERLEAVE_SHIFT 23
#define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL
#define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21
#define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL
#define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20
#define PART_TYPE_X8 0
#define PART_TYPE_X4 1
#define INTERLEAVE_NONE 0
#define INTERLEAVE_SAME 1
#define INTERLEAVE_INTERNAL 2
#define INTERLEAVE_BOTH 3
#define ADDR_GEN_128MB 0
#define ADDR_GEN_256MB 1
#define ADDR_GEN_512MB 2
#define ADDR_GEN_1GB 3
#define JB_NUM_DIMM_GROUPS 2
#define JB_NUM_DIMMS_PER_GROUP 2
#define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP)
struct jbusmc_obp_map {
unsigned char dimm_map[18];
unsigned char pin_map[144];
};
struct jbusmc_obp_mem_layout {
/* One max 8-byte string label per DIMM. Usually
* this matches the label on the motherboard where
* that DIMM resides.
*/
char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ];
/* If symmetric use map[0], else it is
* asymmetric and map[1] should be used.
*/
char symmetric;
struct jbusmc_obp_map map;
char _pad;
};
struct jbusmc_dimm_group {
struct jbusmc *controller;
int index;
u64 base_addr;
u64 size;
};
struct jbusmc {
void __iomem *regs;
u64 mc_reg_1;
u32 portid;
struct jbusmc_obp_mem_layout layout;
int layout_len;
int num_dimm_groups;
struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS];
struct list_head list;
};
static DEFINE_SPINLOCK(mctrl_list_lock);
static LIST_HEAD(mctrl_list);
static void mc_list_add(struct list_head *list)
{
spin_lock(&mctrl_list_lock);
list_add(list, &mctrl_list);
spin_unlock(&mctrl_list_lock);
}
static void mc_list_del(struct list_head *list)
{
spin_lock(&mctrl_list_lock);
list_del_init(list);
spin_unlock(&mctrl_list_lock);
}
#define SYNDROME_MIN -1
#define SYNDROME_MAX 144
/* Convert syndrome code into the way the bits are positioned
* on the bus.
*/
static int syndrome_to_qword_code(int syndrome_code)
{
if (syndrome_code < 128)
syndrome_code += 16;
else if (syndrome_code < 128 + 9)
syndrome_code -= (128 - 7);
else if (syndrome_code < (128 + 9 + 3))
syndrome_code -= (128 + 9 - 4);
else
syndrome_code -= (128 + 9 + 3);
return syndrome_code;
}
/* All this magic has to do with how a cache line comes over the wire
* on Safari and JBUS. A 64-byte line comes over in one or more quadword
* cycles, each of which transmits ECC/MTAG info as well as the actual
* data.
*/
#define L2_LINE_SIZE 64
#define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1)
#define QW_PER_LINE 4
#define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE)
#define QW_BITS 144
#define SAFARI_LAST_BIT (576 - 1)
#define JBUS_LAST_BIT (144 - 1)
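/* Editor's worked example from the constants above: a 64-byte L2 line
 * moves as QW_PER_LINE = 4 quadwords of QW_BYTES = 16 data bytes, and
 * each quadword carries QW_BITS = 144 wire bits (data plus ECC/MTAG
 * check bits). Safari numbers bit positions across the whole line,
 * 4 * 144 = 576 (SAFARI_LAST_BIT = 575), while JBUS numbers them per
 * quadword (JBUS_LAST_BIT = 143). */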
static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr,
int *pin_p, char **dimm_str_p, void *_prop,
int base_dimm_offset)
{
int qword_code = syndrome_to_qword_code(syndrome_code);
int cache_line_offset;
int offset_inverse;
int dimm_map_index;
int map_val;
if (mc_type == MC_TYPE_JBUS) {
struct jbusmc_obp_mem_layout *p = _prop;
/* JBUS */
cache_line_offset = qword_code;
offset_inverse = (JBUS_LAST_BIT - cache_line_offset);
dimm_map_index = offset_inverse / 8;
map_val = p->map.dimm_map[dimm_map_index];
map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1);
*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
*pin_p = p->map.pin_map[cache_line_offset];
} else {
struct chmc_obp_mem_layout *p = _prop;
struct chmc_obp_map *mp;
int qword;
/* Safari */
if (p->symmetric)
mp = &p->map[0];
else
mp = &p->map[1];
qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES;
cache_line_offset = ((3 - qword) * QW_BITS) + qword_code;
offset_inverse = (SAFARI_LAST_BIT - cache_line_offset);
dimm_map_index = offset_inverse >> 2;
map_val = mp->dimm_map[dimm_map_index];
map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3);
*dimm_str_p = p->dimm_labels[base_dimm_offset + map_val];
*pin_p = mp->pin_map[cache_line_offset];
}
}
static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr)
{
struct jbusmc *p;
list_for_each_entry(p, &mctrl_list, list) {
int i;
for (i = 0; i < p->num_dimm_groups; i++) {
struct jbusmc_dimm_group *dp = &p->dimm_groups[i];
if (phys_addr < dp->base_addr ||
(dp->base_addr + dp->size) <= phys_addr)
continue;
return dp;
}
}
return NULL;
}
static int jbusmc_print_dimm(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
struct jbusmc_obp_mem_layout *prop;
struct jbusmc_dimm_group *dp;
struct jbusmc *p;
int first_dimm;
dp = jbusmc_find_dimm_group(phys_addr);
if (dp == NULL ||
syndrome_code < SYNDROME_MIN ||
syndrome_code > SYNDROME_MAX) {
buf[0] = '?';
buf[1] = '?';
buf[2] = '?';
buf[3] = '\0';
return 0;
}
p = dp->controller;
prop = &p->layout;
first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP;
if (syndrome_code != SYNDROME_MIN) {
char *dimm_str;
int pin;
get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
&dimm_str, prop, first_dimm);
sprintf(buf, "%s, pin %3d", dimm_str, pin);
} else {
int dimm;
/* Multi-bit error, we just dump out all the
* dimm labels associated with this dimm group.
*/
for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) {
sprintf(buf, "%s ",
prop->dimm_labels[first_dimm + dimm]);
buf += strlen(buf);
}
}
return 0;
}
static u64 __devinit jbusmc_dimm_group_size(u64 base,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
u64 max = base + (8UL * 1024 * 1024 * 1024);
u64 max_seen = base;
int i;
for (i = 0; i < num_mem_regs; i++) {
const struct linux_prom64_registers *ent;
u64 this_base;
u64 this_end;
ent = &mem_regs[i];
this_base = ent->phys_addr;
this_end = this_base + ent->reg_size;
if (base < this_base || base >= this_end)
continue;
if (this_end > max)
this_end = max;
if (this_end > max_seen)
max_seen = this_end;
}
return max_seen - base;
}
static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p,
unsigned long index,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
struct jbusmc_dimm_group *dp = &p->dimm_groups[index];
dp->controller = p;
dp->index = index;
dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024));
dp->base_addr += (index * (8UL * 1024 * 1024 * 1024));
dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs);
}
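/* Editor's worked example of the address decode above: portid 2,
 * group index 1 gives base = 2 * 64GB + 1 * 8GB = 0x2200000000
 * (136GB). */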
static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p,
const struct linux_prom64_registers *mem_regs,
int num_mem_regs)
{
if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) {
jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs);
p->num_dimm_groups++;
}
if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) {
jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs);
p->num_dimm_groups++;
}
}
static int __devinit jbusmc_probe(struct of_device *op,
const struct of_device_id *match)
{
const struct linux_prom64_registers *mem_regs;
struct device_node *mem_node;
int err, len, num_mem_regs;
struct jbusmc *p;
const u32 *prop;
const void *ml;
err = -ENODEV;
mem_node = of_find_node_by_path("/memory");
if (!mem_node) {
printk(KERN_ERR PFX "Cannot find /memory node.\n");
goto out;
}
mem_regs = of_get_property(mem_node, "reg", &len);
if (!mem_regs) {
printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n");
goto out;
}
num_mem_regs = len / sizeof(*mem_regs);
err = -ENOMEM;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n");
goto out;
}
INIT_LIST_HEAD(&p->list);
err = -ENODEV;
prop = of_get_property(op->node, "portid", &len);
if (!prop || len != 4) {
printk(KERN_ERR PFX "Cannot find portid.\n");
goto out_free;
}
p->portid = *prop;
prop = of_get_property(op->node, "memory-control-register-1", &len);
if (!prop || len != 8) {
printk(KERN_ERR PFX "Cannot get memory control register 1.\n");
goto out_free;
}
p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1];
err = -ENOMEM;
p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc");
if (!p->regs) {
printk(KERN_ERR PFX "Cannot map jbusmc regs.\n");
goto out_free;
}
err = -ENODEV;
ml = of_get_property(op->node, "memory-layout", &p->layout_len);
if (!ml) {
printk(KERN_ERR PFX "Cannot get memory layout property.\n");
goto out_iounmap;
}
if (p->layout_len > sizeof(p->layout)) {
printk(KERN_ERR PFX "Unexpected memory-layout size %d\n",
p->layout_len);
goto out_iounmap;
}
memcpy(&p->layout, ml, p->layout_len);
jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs);
mc_list_add(&p->list);
printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n",
op->node->full_name);
dev_set_drvdata(&op->dev, p);
err = 0;
out:
return err;
out_iounmap:
of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
out_free:
kfree(p);
goto out;
}
/* Does BANK decode PHYS_ADDR? */
static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr)
{
unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT;
unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT;
/* Bank must be enabled to match. */
if (bp->valid == 0)
return 0;
/* Would BANK match upper bits? */
upper_bits ^= bp->um; /* What bits are different? */
upper_bits = ~upper_bits; /* Invert. */
upper_bits |= bp->uk; /* What bits don't matter for matching? */
upper_bits = ~upper_bits; /* Invert. */
if (upper_bits)
return 0;
/* Would BANK match lower bits? */
lower_bits ^= bp->lm; /* What bits are different? */
lower_bits = ~lower_bits; /* Invert. */
lower_bits |= bp->lk; /* What bits don't matter for matching? */
lower_bits = ~lower_bits; /* Invert. */
if (lower_bits)
return 0;
/* I always knew you'd be the one. */
return 1;
}
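/* Editor's note: the invert/or/invert sequence above reduces to the
 * single expression
 *
 *	match = ((addr_bits ^ m) & ~k) == 0;
 *
 * i.e. the address must agree with the match value M on every bit that
 * the mask K does not flag as "don't care". */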
/* Given PHYS_ADDR, search memory controller banks for a match. */
static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr)
{
struct chmc *p;
list_for_each_entry(p, &mctrl_list, list) {
int bank_no;
for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) {
struct chmc_bank_info *bp;
bp = &p->logical_banks[bank_no];
if (chmc_bank_match(bp, phys_addr))
return bp;
}
}
return NULL;
}
/* This is the main purpose of this driver. */
static int chmc_print_dimm(int syndrome_code,
unsigned long phys_addr,
char *buf, int buflen)
{
struct chmc_bank_info *bp;
struct chmc_obp_mem_layout *prop;
int bank_in_controller, first_dimm;
bp = chmc_find_bank(phys_addr);
if (bp == NULL ||
syndrome_code < SYNDROME_MIN ||
syndrome_code > SYNDROME_MAX) {
buf[0] = '?';
buf[1] = '?';
buf[2] = '?';
buf[3] = '\0';
return 0;
}
prop = &bp->p->layout_prop;
bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1);
first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1));
first_dimm *= CHMCTRL_NDIMMS;
if (syndrome_code != SYNDROME_MIN) {
char *dimm_str;
int pin;
get_pin_and_dimm_str(syndrome_code, phys_addr, &pin,
&dimm_str, prop, first_dimm);
sprintf(buf, "%s, pin %3d", dimm_str, pin);
} else {
int dimm;
/* Multi-bit error, we just dump out all the
* dimm labels associated with this bank.
*/
for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) {
sprintf(buf, "%s ",
prop->dimm_labels[first_dimm + dimm]);
buf += strlen(buf);
}
}
return 0;
}
/* Accessing the registers is slightly complicated. To reach the
 * memory controller on the same processor the code is executing on,
 * you must use the special ASI load/store; otherwise you go through
 * the global physical mapping.
 */
static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset)
{
unsigned long ret, this_cpu;
preempt_disable();
this_cpu = real_hard_smp_processor_id();
if (p->portid == this_cpu) {
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (offset), "i" (ASI_MCU_CTRL_REG));
} else {
__asm__ __volatile__("ldxa [%1] %2, %0"
: "=r" (ret)
: "r" (p->regs + offset),
"i" (ASI_PHYS_BYPASS_EC_E));
}
preempt_enable();
return ret;
}
#if 0 /* currently unused */
static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val)
{
if (p->portid == smp_processor_id()) {
__asm__ __volatile__("stxa %0, [%1] %2"
: : "r" (val),
"r" (offset), "i" (ASI_MCU_CTRL_REG));
} else {
__asm__ __volatile__("stxa %0, [%1] %2"
: : "r" (val),
"r" (p->regs + offset),
"i" (ASI_PHYS_BYPASS_EC_E));
}
}
#endif
static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val)
{
struct chmc_bank_info *bp = &p->logical_banks[which_bank];
bp->p = p;
bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank;
bp->raw_reg = val;
bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT;
bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT;
bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT;
bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT;
bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT;
bp->base = (bp->um);
bp->base &= ~(bp->uk);
bp->base <<= PA_UPPER_BITS_SHIFT;
switch(bp->lk) {
case 0xf:
default:
bp->interleave = 1;
break;
case 0xe:
bp->interleave = 2;
break;
case 0xc:
bp->interleave = 4;
break;
case 0x8:
bp->interleave = 8;
break;
case 0x0:
bp->interleave = 16;
break;
};
/* UK[10] is reserved, and UK[11] is not set for the SDRAM
* bank size definition.
*/
bp->size = (((unsigned long)bp->uk &
((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT;
bp->size /= bp->interleave;
}
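/* Editor's note: lk doubles as the interleave encoding decoded in the
 * switch above; each cleared low match bit doubles the number of banks
 * sharing the range (0xf = none, 0xe = 2-way, 0xc = 4-way, 0x8 = 8-way,
 * 0x0 = 16-way), so the span computed from uk is divided by the
 * interleave factor to get this bank's share. */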
static void chmc_fetch_decode_regs(struct chmc *p)
{
if (p->layout_size == 0)
return;
chmc_interpret_one_decode_reg(p, 0,
chmc_read_mcreg(p, CHMCTRL_DECODE1));
chmc_interpret_one_decode_reg(p, 1,
chmc_read_mcreg(p, CHMCTRL_DECODE2));
chmc_interpret_one_decode_reg(p, 2,
chmc_read_mcreg(p, CHMCTRL_DECODE3));
chmc_interpret_one_decode_reg(p, 3,
chmc_read_mcreg(p, CHMCTRL_DECODE4));
}
static int __devinit chmc_probe(struct of_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->node;
unsigned long ver;
const void *pval;
int len, portid;
struct chmc *p;
int err;
err = -ENODEV;
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID)
goto out;
portid = of_getintprop_default(dp, "portid", -1);
if (portid == -1)
goto out;
pval = of_get_property(dp, "memory-layout", &len);
if (pval && len > sizeof(p->layout_prop)) {
printk(KERN_ERR PFX "Unexpected memory-layout property "
"size %d.\n", len);
goto out;
}
err = -ENOMEM;
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
printk(KERN_ERR PFX "Could not allocate struct chmc.\n");
goto out;
}
p->portid = portid;
p->layout_size = len;
if (!pval)
p->layout_size = 0;
else
memcpy(&p->layout_prop, pval, len);
p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc");
if (!p->regs) {
printk(KERN_ERR PFX "Could not map registers.\n");
goto out_free;
}
if (p->layout_size != 0UL) {
p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1);
p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2);
p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3);
p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4);
p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL);
}
chmc_fetch_decode_regs(p);
mc_list_add(&p->list);
printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n",
dp->full_name,
(p->layout_size ? "ACTIVE" : "INACTIVE"));
dev_set_drvdata(&op->dev, p);
err = 0;
out:
return err;
out_free:
kfree(p);
goto out;
}
static int __devinit us3mc_probe(struct of_device *op,
const struct of_device_id *match)
{
if (mc_type == MC_TYPE_SAFARI)
return chmc_probe(op, match);
else if (mc_type == MC_TYPE_JBUS)
return jbusmc_probe(op, match);
return -ENODEV;
}
static void __devexit chmc_destroy(struct of_device *op, struct chmc *p)
{
list_del(&p->list);
of_iounmap(&op->resource[0], p->regs, 0x48);
kfree(p);
}
static void __devexit jbusmc_destroy(struct of_device *op, struct jbusmc *p)
{
mc_list_del(&p->list);
of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE);
kfree(p);
}
static int __devexit us3mc_remove(struct of_device *op)
{
void *p = dev_get_drvdata(&op->dev);
if (p) {
if (mc_type == MC_TYPE_SAFARI)
chmc_destroy(op, p);
else if (mc_type == MC_TYPE_JBUS)
jbusmc_destroy(op, p);
}
return 0;
}
static const struct of_device_id us3mc_match[] = {
{
.name = "memory-controller",
},
{},
};
MODULE_DEVICE_TABLE(of, us3mc_match);
static struct of_platform_driver us3mc_driver = {
.name = "us3mc",
.match_table = us3mc_match,
.probe = us3mc_probe,
.remove = __devexit_p(us3mc_remove),
};
static inline bool us3mc_platform(void)
{
if (tlb_type == cheetah || tlb_type == cheetah_plus)
return true;
return false;
}
static int __init us3mc_init(void)
{
unsigned long ver;
int ret;
if (!us3mc_platform())
return -ENODEV;
__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
if ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID) {
mc_type = MC_TYPE_JBUS;
us3mc_dimm_printer = jbusmc_print_dimm;
} else {
mc_type = MC_TYPE_SAFARI;
us3mc_dimm_printer = chmc_print_dimm;
}
ret = register_dimm_printer(us3mc_dimm_printer);
if (!ret) {
ret = of_register_driver(&us3mc_driver, &of_bus_type);
if (ret)
unregister_dimm_printer(us3mc_dimm_printer);
}
return ret;
}
static void __exit us3mc_cleanup(void)
{
if (us3mc_platform()) {
unregister_dimm_printer(us3mc_dimm_printer);
of_unregister_driver(&us3mc_driver);
}
}
module_init(us3mc_init);
module_exit(us3mc_cleanup);

arch/sparc/kernel/compat_audit.c (new file)

@@ -0,0 +1,43 @@
#define __32bit_syscall_numbers__
#include <asm/unistd.h>
unsigned sparc32_dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};
unsigned sparc32_chattr_class[] = {
#include <asm-generic/audit_change_attr.h>
~0U
};
unsigned sparc32_write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};
unsigned sparc32_read_class[] = {
#include <asm-generic/audit_read.h>
~0U
};
unsigned sparc32_signal_class[] = {
#include <asm-generic/audit_signal.h>
~0U
};
int sparc32_classify_syscall(unsigned syscall)
{
switch(syscall) {
case __NR_open:
return 2;
case __NR_openat:
return 3;
case __NR_socketcall:
return 4;
case __NR_execve:
return 5;
default:
return 1;
}
}
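/* Editor's note: unlike audit_classify_syscall() in audit.c, the
 * default here is 1 (the compat class) rather than 0, since every
 * syscall classified through this path is already known to be a
 * 32-bit one. */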

arch/sparc/kernel/cpu.c

@@ -8,6 +8,8 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/threads.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/head.h>
@@ -15,153 +17,322 @@
#include <asm/mbus.h>
#include <asm/cpudata.h>
#include "kernel.h"
DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
struct cpu_iu_info {
int psr_impl;
int psr_vers;
char* cpu_name; /* should be enough I hope... */
struct cpu_info {
int psr_vers;
const char *name;
};
struct cpu_fp_info {
int psr_impl;
int fp_vers;
char* fp_name;
struct fpu_info {
int fp_vers;
const char *name;
};
#define NOCPU 8
#define NOFPU 8
struct manufacturer_info {
int psr_impl;
struct cpu_info cpu_info[NOCPU];
struct fpu_info fpu_info[NOFPU];
};
#define CPU(ver, _name) \
{ .psr_vers = ver, .name = _name }
#define FPU(ver, _name) \
{ .fp_vers = ver, .name = _name }
static const struct manufacturer_info __initconst manufacturer_info[] = {
{
0,
/* Sun4/100, 4/200, SLC */
.cpu_info = {
CPU(0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"),
/* borned STP1012PGA */
CPU(4, "Fujitsu MB86904"),
CPU(5, "Fujitsu TurboSparc MB86907"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0, "Fujitsu MB86910 or Weitek WTL1164/5"),
FPU(1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"),
FPU(2, "LSI Logic L64802 or Texas Instruments ACT8847"),
/* SparcStation SLC, SparcStation1 */
FPU(3, "Weitek WTL3170/2"),
/* SPARCstation-5 */
FPU(4, "Lsi Logic/Meiko L64804 or compatible"),
FPU(-1, NULL)
}
},{
1,
.cpu_info = {
/* SparcStation2, SparcServer 490 & 690 */
CPU(0, "LSI Logic Corporation - L64811"),
/* SparcStation2 */
CPU(1, "Cypress/ROSS CY7C601"),
/* Embedded controller */
CPU(3, "Cypress/ROSS CY7C611"),
/* Ross Technologies HyperSparc */
CPU(0xf, "ROSS HyperSparc RT620"),
CPU(0xe, "ROSS HyperSparc RT625 or RT626"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0, "ROSS HyperSparc combined IU/FPU"),
FPU(1, "Lsi Logic L64814"),
FPU(2, "Texas Instruments TMS390-C602A"),
FPU(3, "Cypress CY7C602 FPU"),
FPU(-1, NULL)
}
},{
2,
.cpu_info = {
/* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
/* Someone please write the code to support this beast! ;) */
CPU(0, "Bipolar Integrated Technology - B5010"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
3,
.cpu_info = {
CPU(0, "LSI Logic Corporation - unknown-type"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
4,
.cpu_info = {
CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
/* SparcClassic -- borned STP1010TAB-50*/
CPU(1, "Texas Instruments, Inc. - MicroSparc"),
CPU(2, "Texas Instruments, Inc. - MicroSparc II"),
CPU(3, "Texas Instruments, Inc. - SuperSparc 51"),
CPU(4, "Texas Instruments, Inc. - SuperSparc 61"),
CPU(5, "Texas Instruments, Inc. - unknown"),
CPU(-1, NULL)
},
.fpu_info = {
/* SuperSparc 50 module */
FPU(0, "SuperSparc on-chip FPU"),
/* SparcClassic */
FPU(4, "TI MicroSparc on chip FPU"),
FPU(-1, NULL)
}
},{
5,
.cpu_info = {
CPU(0, "Matsushita - MN10501"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0, "Matsushita MN10501"),
FPU(-1, NULL)
}
},{
6,
.cpu_info = {
CPU(0, "Philips Corporation - unknown"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
7,
.cpu_info = {
CPU(0, "Harvest VLSI Design Center, Inc. - unknown"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
8,
.cpu_info = {
CPU(0, "Systems and Processes Engineering Corporation (SPEC)"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(-1, NULL)
}
},{
9,
.cpu_info = {
/* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
CPU(0, "Fujitsu or Weitek Power-UP"),
CPU(1, "Fujitsu or Weitek Power-UP"),
CPU(2, "Fujitsu or Weitek Power-UP"),
CPU(3, "Fujitsu or Weitek Power-UP"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(3, "Fujitsu or Weitek on-chip FPU"),
FPU(-1, NULL)
}
},{
0x17,
.cpu_info = {
CPU(0x10, "TI UltraSparc I (SpitFire)"),
CPU(0x11, "TI UltraSparc II (BlackBird)"),
CPU(0x12, "TI UltraSparc IIi (Sabre)"),
CPU(0x13, "TI UltraSparc IIe (Hummingbird)"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0x10, "UltraSparc I integrated FPU"),
FPU(0x11, "UltraSparc II integrated FPU"),
FPU(0x12, "UltraSparc IIi integrated FPU"),
FPU(0x13, "UltraSparc IIe integrated FPU"),
FPU(-1, NULL)
}
},{
0x22,
.cpu_info = {
CPU(0x10, "TI UltraSparc I (SpitFire)"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0x10, "UltraSparc I integrated FPU"),
FPU(-1, NULL)
}
},{
0x3e,
.cpu_info = {
CPU(0x14, "TI UltraSparc III (Cheetah)"),
CPU(0x15, "TI UltraSparc III+ (Cheetah+)"),
CPU(0x16, "TI UltraSparc IIIi (Jalapeno)"),
CPU(0x18, "TI UltraSparc IV (Jaguar)"),
CPU(0x19, "TI UltraSparc IV+ (Panther)"),
CPU(0x22, "TI UltraSparc IIIi+ (Serrano)"),
CPU(-1, NULL)
},
.fpu_info = {
FPU(0x14, "UltraSparc III integrated FPU"),
FPU(0x15, "UltraSparc III+ integrated FPU"),
FPU(0x16, "UltraSparc IIIi integrated FPU"),
FPU(0x18, "UltraSparc IV integrated FPU"),
FPU(0x19, "UltraSparc IV+ integrated FPU"),
FPU(0x22, "UltraSparc IIIi+ integrated FPU"),
FPU(-1, NULL)
}
}};
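/* Editor's sketch of how this table is consumed (see set_cpu_and_fpu()
 * below): select the manufacturer_info entry for psr_impl, then walk
 * its cpu_info[]/fpu_info[] arrays until the -1 sentinel, matching
 * psr_vers or fp_vers to pick the name strings. */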
/* In order to get the fpu type correct, you need to take the IDPROM's
* machine type value into consideration too. I will fix this.
*/
static struct cpu_fp_info linux_sparc_fpu[] = {
{ 0, 0, "Fujitsu MB86910 or Weitek WTL1164/5"},
{ 0, 1, "Fujitsu MB86911 or Weitek WTL1164/5 or LSI L64831"},
{ 0, 2, "LSI Logic L64802 or Texas Instruments ACT8847"},
/* SparcStation SLC, SparcStation1 */
{ 0, 3, "Weitek WTL3170/2"},
/* SPARCstation-5 */
{ 0, 4, "Lsi Logic/Meiko L64804 or compatible"},
{ 0, 5, "reserved"},
{ 0, 6, "reserved"},
{ 0, 7, "No FPU"},
{ 1, 0, "ROSS HyperSparc combined IU/FPU"},
{ 1, 1, "Lsi Logic L64814"},
{ 1, 2, "Texas Instruments TMS390-C602A"},
{ 1, 3, "Cypress CY7C602 FPU"},
{ 1, 4, "reserved"},
{ 1, 5, "reserved"},
{ 1, 6, "reserved"},
{ 1, 7, "No FPU"},
{ 2, 0, "BIT B5010 or B5110/20 or B5210"},
{ 2, 1, "reserved"},
{ 2, 2, "reserved"},
{ 2, 3, "reserved"},
{ 2, 4, "reserved"},
{ 2, 5, "reserved"},
{ 2, 6, "reserved"},
{ 2, 7, "No FPU"},
/* SuperSparc 50 module */
{ 4, 0, "SuperSparc on-chip FPU"},
/* SparcClassic */
{ 4, 4, "TI MicroSparc on chip FPU"},
{ 5, 0, "Matsushita MN10501"},
{ 5, 1, "reserved"},
{ 5, 2, "reserved"},
{ 5, 3, "reserved"},
{ 5, 4, "reserved"},
{ 5, 5, "reserved"},
{ 5, 6, "reserved"},
{ 5, 7, "No FPU"},
{ 9, 3, "Fujitsu or Weitek on-chip FPU"},
};
#define NSPARCFPU ARRAY_SIZE(linux_sparc_fpu)
static struct cpu_iu_info linux_sparc_chips[] = {
/* Sun4/100, 4/200, SLC */
{ 0, 0, "Fujitsu MB86900/1A or LSI L64831 SparcKIT-40"},
/* borned STP1012PGA */
{ 0, 4, "Fujitsu MB86904"},
{ 0, 5, "Fujitsu TurboSparc MB86907"},
/* SparcStation2, SparcServer 490 & 690 */
{ 1, 0, "LSI Logic Corporation - L64811"},
/* SparcStation2 */
{ 1, 1, "Cypress/ROSS CY7C601"},
/* Embedded controller */
{ 1, 3, "Cypress/ROSS CY7C611"},
/* Ross Technologies HyperSparc */
{ 1, 0xf, "ROSS HyperSparc RT620"},
{ 1, 0xe, "ROSS HyperSparc RT625 or RT626"},
/* ECL Implementation, CRAY S-MP Supercomputer... AIEEE! */
/* Someone please write the code to support this beast! ;) */
{ 2, 0, "Bipolar Integrated Technology - B5010"},
{ 3, 0, "LSI Logic Corporation - unknown-type"},
{ 4, 0, "Texas Instruments, Inc. - SuperSparc-(II)"},
/* SparcClassic -- borned STP1010TAB-50*/
{ 4, 1, "Texas Instruments, Inc. - MicroSparc"},
{ 4, 2, "Texas Instruments, Inc. - MicroSparc II"},
{ 4, 3, "Texas Instruments, Inc. - SuperSparc 51"},
{ 4, 4, "Texas Instruments, Inc. - SuperSparc 61"},
{ 4, 5, "Texas Instruments, Inc. - unknown"},
{ 5, 0, "Matsushita - MN10501"},
{ 6, 0, "Philips Corporation - unknown"},
{ 7, 0, "Harvest VLSI Design Center, Inc. - unknown"},
/* Gallium arsenide 200MHz, BOOOOGOOOOMIPS!!! */
{ 8, 0, "Systems and Processes Engineering Corporation (SPEC)"},
{ 9, 0, "Fujitsu or Weitek Power-UP"},
{ 9, 1, "Fujitsu or Weitek Power-UP"},
{ 9, 2, "Fujitsu or Weitek Power-UP"},
{ 9, 3, "Fujitsu or Weitek Power-UP"},
{ 0xa, 0, "UNKNOWN CPU-VENDOR/TYPE"},
{ 0xb, 0, "UNKNOWN CPU-VENDOR/TYPE"},
{ 0xc, 0, "UNKNOWN CPU-VENDOR/TYPE"},
{ 0xd, 0, "UNKNOWN CPU-VENDOR/TYPE"},
{ 0xe, 0, "UNKNOWN CPU-VENDOR/TYPE"},
{ 0xf, 0, "UNKNOWN CPU-VENDOR/TYPE"},
};
#define NSPARCCHIPS ARRAY_SIZE(linux_sparc_chips)
const char *sparc_cpu_type;
const char *sparc_fpu_type;
unsigned int fsr_storage;
static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers)
{
sparc_cpu_type = NULL;
sparc_fpu_type = NULL;
if (psr_impl < ARRAY_SIZE(manufacturer_info))
{
const struct cpu_info *cpu;
const struct fpu_info *fpu;
cpu = &manufacturer_info[psr_impl].cpu_info[0];
while (cpu->psr_vers != -1)
{
if (cpu->psr_vers == psr_vers) {
sparc_cpu_type = cpu->name;
sparc_fpu_type = "No FPU";
break;
}
cpu++;
}
fpu = &manufacturer_info[psr_impl].fpu_info[0];
while (fpu->fp_vers != -1)
{
if (fpu->fp_vers == fpu_vers) {
sparc_fpu_type = fpu->name;
break;
}
fpu++;
}
}
if (sparc_cpu_type == NULL)
{
printk(KERN_ERR "CPU: Unknown chip, impl[0x%x] vers[0x%x]\n",
psr_impl, psr_vers);
sparc_cpu_type = "Unknown CPU";
}
if (sparc_fpu_type == NULL)
{
printk(KERN_ERR "FPU: Unknown chip, impl[0x%x] vers[0x%x]\n",
psr_impl, fpu_vers);
sparc_fpu_type = "Unknown FPU";
}
}
#ifdef CONFIG_SPARC32
void __cpuinit cpu_probe(void)
{
int psr_impl, psr_vers, fpu_vers;
int psr;
psr_impl = ((get_psr() >> 28) & 0xf);
psr_vers = ((get_psr() >> 24) & 0xf);
psr = get_psr();
put_psr(psr | PSR_EF);
fpu_vers = ((get_fsr() >> 17) & 0x7);
put_psr(psr);
set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers);
}
#else
static void __init sun4v_cpu_probe(void)
{
switch (sun4v_chip_type) {
case SUN4V_CHIP_NIAGARA1:
sparc_cpu_type = "UltraSparc T1 (Niagara)";
sparc_fpu_type = "UltraSparc T1 integrated FPU";
break;
case SUN4V_CHIP_NIAGARA2:
sparc_cpu_type = "UltraSparc T2 (Niagara2)";
sparc_fpu_type = "UltraSparc T2 integrated FPU";
break;
default:
printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n",
prom_cpu_compatible);
sparc_cpu_type = "Unknown SUN4V CPU";
sparc_fpu_type = "Unknown SUN4V FPU";
break;
}
}
static int __init cpu_type_probe(void)
{
if (tlb_type == hypervisor) {
sun4v_cpu_probe();
} else {
unsigned long ver;
int manuf, impl;
__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
manuf = ((ver >> 48) & 0xffff);
impl = ((ver >> 32) & 0xffff);
set_cpu_and_fpu(manuf, impl, impl);
}
return 0;
}
arch_initcall(cpu_type_probe);
#endif


@@ -133,14 +133,12 @@ void __init device_scan(void)
#endif /* !CONFIG_SMP */
cpu_probe();
#ifdef CONFIG_SUN_AUXIO
{
extern void auxio_probe(void);
extern void auxio_power_probe(void);
auxio_probe();
auxio_power_probe();
}
#endif
clock_stop_probe();
if (ARCH_SUN4C)

1244
arch/sparc/kernel/ds.c Normal file

File diff suppressed because it is too large


@@ -0,0 +1,39 @@
/* DTLB ** ICACHE line 1: Context 0 check and TSB load */
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer
ldxa [%g0] ASI_DMMU, %g6 ! Get TAG TARGET
srlx %g6, 48, %g5 ! Get context
sllx %g6, 22, %g6 ! Zero out context
brz,pn %g5, kvmap_dtlb ! Context 0 processing
srlx %g6, 22, %g6 ! Delay slot
TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry
cmp %g4, %g6 ! Compare TAG
/* DTLB ** ICACHE line 2: TSB compare and TLB load */
bne,pn %xcc, tsb_miss_dtlb ! Miss
mov FAULT_CODE_DTLB, %g3
stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Load TLB
retry ! Trap done
nop
nop
nop
nop
/* DTLB ** ICACHE line 3: */
nop
nop
nop
nop
nop
nop
nop
nop
/* DTLB ** ICACHE line 4: */
nop
nop
nop
nop
nop
nop
nop
nop


@@ -0,0 +1,54 @@
/*
* dtlb_prot.S: DTLB protection trap strategy.
* This is included directly into the trap table.
*
* Copyright (C) 1996,1998 David S. Miller (davem@redhat.com)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
/* Ways we can get here:
*
* [TL == 0] 1) User stores to readonly pages.
* [TL == 0] 2) Nucleus stores to user readonly pages.
* [TL > 0] 3) Nucleus stores to user readonly stack frame.
*/
/* PROT ** ICACHE line 1: User DTLB protection trap */
mov TLB_SFSR, %g1
stxa %g0, [%g1] ASI_DMMU ! Clear FaultValid bit
membar #Sync ! Synchronize stores
rdpr %pstate, %g5 ! Move into alt-globals
wrpr %g5, PSTATE_AG|PSTATE_MG, %pstate
rdpr %tl, %g1 ! Need a winfixup?
cmp %g1, 1 ! Trap level >1?
mov TLB_TAG_ACCESS, %g4 ! For reload of vaddr
/* PROT ** ICACHE line 2: More real fault processing */
bgu,pn %xcc, winfix_trampoline ! Yes, perform winfixup
ldxa [%g4] ASI_DMMU, %g5 ! Put tagaccess in %g5
ba,pt %xcc, sparc64_realfault_common ! Nope, normal fault
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
nop
nop
nop
nop
/* PROT ** ICACHE line 3: Unused... */
nop
nop
nop
nop
nop
nop
nop
nop
/* PROT ** ICACHE line 4: Unused... */
nop
nop
nop
nop
nop
nop
nop
nop

257
arch/sparc/kernel/ebus.c Normal file

@@ -0,0 +1,257 @@
/* ebus.c: EBUS DMA library code.
*
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/ebus_dma.h>
#include <asm/io.h>
#define EBDMA_CSR 0x00UL /* Control/Status */
#define EBDMA_ADDR 0x04UL /* DMA Address */
#define EBDMA_COUNT 0x08UL /* DMA Count */
#define EBDMA_CSR_INT_PEND 0x00000001
#define EBDMA_CSR_ERR_PEND 0x00000002
#define EBDMA_CSR_DRAIN 0x00000004
#define EBDMA_CSR_INT_EN 0x00000010
#define EBDMA_CSR_RESET 0x00000080
#define EBDMA_CSR_WRITE 0x00000100
#define EBDMA_CSR_EN_DMA 0x00000200
#define EBDMA_CSR_CYC_PEND 0x00000400
#define EBDMA_CSR_DIAG_RD_DONE 0x00000800
#define EBDMA_CSR_DIAG_WR_DONE 0x00001000
#define EBDMA_CSR_EN_CNT 0x00002000
#define EBDMA_CSR_TC 0x00004000
#define EBDMA_CSR_DIS_CSR_DRN 0x00010000
#define EBDMA_CSR_BURST_SZ_MASK 0x000c0000
#define EBDMA_CSR_BURST_SZ_1 0x00080000
#define EBDMA_CSR_BURST_SZ_4 0x00000000
#define EBDMA_CSR_BURST_SZ_8 0x00040000
#define EBDMA_CSR_BURST_SZ_16 0x000c0000
#define EBDMA_CSR_DIAG_EN 0x00100000
#define EBDMA_CSR_DIS_ERR_PEND 0x00400000
#define EBDMA_CSR_TCI_DIS 0x00800000
#define EBDMA_CSR_EN_NEXT 0x01000000
#define EBDMA_CSR_DMA_ON 0x02000000
#define EBDMA_CSR_A_LOADED 0x04000000
#define EBDMA_CSR_NA_LOADED 0x08000000
#define EBDMA_CSR_DEV_ID_MASK 0xf0000000
#define EBUS_DMA_RESET_TIMEOUT 10000
static void __ebus_dma_reset(struct ebus_dma_info *p, int no_drain)
{
int i;
u32 val = 0;
writel(EBDMA_CSR_RESET, p->regs + EBDMA_CSR);
udelay(1);
if (no_drain)
return;
for (i = EBUS_DMA_RESET_TIMEOUT; i > 0; i--) {
val = readl(p->regs + EBDMA_CSR);
if (!(val & (EBDMA_CSR_DRAIN | EBDMA_CSR_CYC_PEND)))
break;
udelay(10);
}
}
static irqreturn_t ebus_dma_irq(int irq, void *dev_id)
{
struct ebus_dma_info *p = dev_id;
unsigned long flags;
u32 csr = 0;
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
if (csr & EBDMA_CSR_ERR_PEND) {
printk(KERN_CRIT "ebus_dma(%s): DMA error!\n", p->name);
p->callback(p, EBUS_DMA_EVENT_ERROR, p->client_cookie);
return IRQ_HANDLED;
} else if (csr & EBDMA_CSR_INT_PEND) {
p->callback(p,
(csr & EBDMA_CSR_TC) ?
EBUS_DMA_EVENT_DMA : EBUS_DMA_EVENT_DEVICE,
p->client_cookie);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
int ebus_dma_register(struct ebus_dma_info *p)
{
u32 csr;
if (!p->regs)
return -EINVAL;
if (p->flags & ~(EBUS_DMA_FLAG_USE_EBDMA_HANDLER |
EBUS_DMA_FLAG_TCI_DISABLE))
return -EINVAL;
if ((p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) && !p->callback)
return -EINVAL;
if (!strlen(p->name))
return -EINVAL;
__ebus_dma_reset(p, 1);
csr = EBDMA_CSR_BURST_SZ_16 | EBDMA_CSR_EN_CNT;
if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
csr |= EBDMA_CSR_TCI_DIS;
writel(csr, p->regs + EBDMA_CSR);
return 0;
}
EXPORT_SYMBOL(ebus_dma_register);
int ebus_dma_irq_enable(struct ebus_dma_info *p, int on)
{
unsigned long flags;
u32 csr;
if (on) {
if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
if (request_irq(p->irq, ebus_dma_irq, IRQF_SHARED, p->name, p))
return -EBUSY;
}
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
csr |= EBDMA_CSR_INT_EN;
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
} else {
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
csr &= ~EBDMA_CSR_INT_EN;
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
if (p->flags & EBUS_DMA_FLAG_USE_EBDMA_HANDLER) {
free_irq(p->irq, p);
}
}
return 0;
}
EXPORT_SYMBOL(ebus_dma_irq_enable);
void ebus_dma_unregister(struct ebus_dma_info *p)
{
unsigned long flags;
u32 csr;
int irq_on = 0;
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
if (csr & EBDMA_CSR_INT_EN) {
csr &= ~EBDMA_CSR_INT_EN;
writel(csr, p->regs + EBDMA_CSR);
irq_on = 1;
}
spin_unlock_irqrestore(&p->lock, flags);
if (irq_on)
free_irq(p->irq, p);
}
EXPORT_SYMBOL(ebus_dma_unregister);
int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr, size_t len)
{
unsigned long flags;
u32 csr;
int err;
if (len >= (1 << 24))
return -EINVAL;
spin_lock_irqsave(&p->lock, flags);
csr = readl(p->regs + EBDMA_CSR);
err = -EINVAL;
if (!(csr & EBDMA_CSR_EN_DMA))
goto out;
err = -EBUSY;
if (csr & EBDMA_CSR_NA_LOADED)
goto out;
writel(len, p->regs + EBDMA_COUNT);
writel(bus_addr, p->regs + EBDMA_ADDR);
err = 0;
out:
spin_unlock_irqrestore(&p->lock, flags);
return err;
}
EXPORT_SYMBOL(ebus_dma_request);
void ebus_dma_prepare(struct ebus_dma_info *p, int write)
{
unsigned long flags;
u32 csr;
spin_lock_irqsave(&p->lock, flags);
__ebus_dma_reset(p, 0);
csr = (EBDMA_CSR_INT_EN |
EBDMA_CSR_EN_CNT |
EBDMA_CSR_BURST_SZ_16 |
EBDMA_CSR_EN_NEXT);
if (write)
csr |= EBDMA_CSR_WRITE;
if (p->flags & EBUS_DMA_FLAG_TCI_DISABLE)
csr |= EBDMA_CSR_TCI_DIS;
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_prepare);
unsigned int ebus_dma_residue(struct ebus_dma_info *p)
{
return readl(p->regs + EBDMA_COUNT);
}
EXPORT_SYMBOL(ebus_dma_residue);
unsigned int ebus_dma_addr(struct ebus_dma_info *p)
{
return readl(p->regs + EBDMA_ADDR);
}
EXPORT_SYMBOL(ebus_dma_addr);
void ebus_dma_enable(struct ebus_dma_info *p, int on)
{
unsigned long flags;
u32 orig_csr, csr;
spin_lock_irqsave(&p->lock, flags);
orig_csr = csr = readl(p->regs + EBDMA_CSR);
if (on)
csr |= EBDMA_CSR_EN_DMA;
else
csr &= ~EBDMA_CSR_EN_DMA;
if ((orig_csr & EBDMA_CSR_EN_DMA) !=
(csr & EBDMA_CSR_EN_DMA))
writel(csr, p->regs + EBDMA_CSR);
spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(ebus_dma_enable);
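Taken together, the exported entry points above form a small client API: register, enable the interrupt, prepare a transfer, turn the engine on, then load address and count (ebus_dma_request() refuses to load unless EBDMA_CSR_EN_DMA is already set). The following is a minimal usage sketch, not code from this commit — the device name and the my_dma_done() callback are assumptions, and p->regs, p->irq and p->lock are presumed already initialized by the caller.

/* Hypothetical client of the EBUS DMA helpers above. */
static void my_dma_done(struct ebus_dma_info *p, int event, void *cookie)
{
	if (event == EBUS_DMA_EVENT_ERROR)
		printk(KERN_ERR "%s: DMA error\n", p->name);
	/* EBUS_DMA_EVENT_DMA / _DEVICE: transfer done or device irq */
}

static int my_start_write(struct ebus_dma_info *p, dma_addr_t buf, size_t len)
{
	int err;

	p->flags = EBUS_DMA_FLAG_USE_EBDMA_HANDLER;
	p->callback = my_dma_done;
	strcpy(p->name, "mydev");		/* illustrative name */

	err = ebus_dma_register(p);		/* program CSR defaults */
	if (err)
		return err;
	err = ebus_dma_irq_enable(p, 1);	/* request IRQ, unmask */
	if (err)
		return err;

	ebus_dma_prepare(p, 1);			/* reset, direction = write */
	ebus_dma_enable(p, 1);			/* set EN_DMA */
	return ebus_dma_request(p, buf, len);	/* load address and count */
}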

229
arch/sparc/kernel/entry.h Normal file

@@ -0,0 +1,229 @@
#ifndef _ENTRY_H
#define _ENTRY_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
/* irq */
extern void handler_irq(int irq, struct pt_regs *regs);
#ifdef CONFIG_SPARC32
/* traps */
extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
extern void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
unsigned long npc,
unsigned long psr);
extern void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void handle_reg_access(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
extern void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
/* entry.S */
extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
void *fpqueue, unsigned long *fpqdepth);
extern void fpload(unsigned long *fpregs, unsigned long *fsr);
#else /* CONFIG_SPARC32 */
extern void __init per_cpu_patch(void);
extern void __init sun4v_patch(void);
extern void __init boot_cpu_id_too_large(int cpu);
extern unsigned int dcache_parity_tl1_occurred;
extern unsigned int icache_parity_tl1_occurred;
extern asmlinkage void update_perfctrs(void);
extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
extern void timer_interrupt(int irq, struct pt_regs *regs);
extern void do_notify_resume(struct pt_regs *regs,
unsigned long orig_i0,
unsigned long thread_info_flags);
extern asmlinkage int syscall_trace_enter(struct pt_regs *regs);
extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
extern void do_fpe_common(struct pt_regs *regs);
extern void do_fpieee(struct pt_regs *regs);
extern void do_fpother(struct pt_regs *regs);
extern void do_tof(struct pt_regs *regs);
extern void do_div0(struct pt_regs *regs);
extern void do_illegal_instruction(struct pt_regs *regs);
extern void mem_address_unaligned(struct pt_regs *regs,
unsigned long sfar,
unsigned long sfsr);
extern void sun4v_do_mna(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
extern void do_privop(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void do_cee(struct pt_regs *regs);
extern void do_cee_tl1(struct pt_regs *regs);
extern void do_dae_tl1(struct pt_regs *regs);
extern void do_iae_tl1(struct pt_regs *regs);
extern void do_div0_tl1(struct pt_regs *regs);
extern void do_fpdis_tl1(struct pt_regs *regs);
extern void do_fpieee_tl1(struct pt_regs *regs);
extern void do_fpother_tl1(struct pt_regs *regs);
extern void do_ill_tl1(struct pt_regs *regs);
extern void do_irq_tl1(struct pt_regs *regs);
extern void do_lddfmna_tl1(struct pt_regs *regs);
extern void do_stdfmna_tl1(struct pt_regs *regs);
extern void do_paw(struct pt_regs *regs);
extern void do_paw_tl1(struct pt_regs *regs);
extern void do_vaw(struct pt_regs *regs);
extern void do_vaw_tl1(struct pt_regs *regs);
extern void do_tof_tl1(struct pt_regs *regs);
extern void do_getpsr(struct pt_regs *regs);
extern void spitfire_insn_access_exception(struct pt_regs *regs,
unsigned long sfsr,
unsigned long sfar);
extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
unsigned long sfsr,
unsigned long sfar);
extern void spitfire_data_access_exception(struct pt_regs *regs,
unsigned long sfsr,
unsigned long sfar);
extern void spitfire_data_access_exception_tl1(struct pt_regs *regs,
unsigned long sfsr,
unsigned long sfar);
extern void spitfire_access_error(struct pt_regs *regs,
unsigned long status_encoded,
unsigned long afar);
extern void cheetah_fecc_handler(struct pt_regs *regs,
unsigned long afsr,
unsigned long afar);
extern void cheetah_cee_handler(struct pt_regs *regs,
unsigned long afsr,
unsigned long afar);
extern void cheetah_deferred_handler(struct pt_regs *regs,
unsigned long afsr,
unsigned long afar);
extern void cheetah_plus_parity_error(int type, struct pt_regs *regs);
extern void sun4v_insn_access_exception(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
extern void sun4v_data_access_exception(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
extern void sun4v_data_access_exception_tl1(struct pt_regs *regs,
unsigned long addr,
unsigned long type_ctx);
extern void sun4v_resum_error(struct pt_regs *regs,
unsigned long offset);
extern void sun4v_resum_overflow(struct pt_regs *regs);
extern void sun4v_nonresum_error(struct pt_regs *regs,
unsigned long offset);
extern void sun4v_nonresum_overflow(struct pt_regs *regs);
extern unsigned long sun4v_err_itlb_vaddr;
extern unsigned long sun4v_err_itlb_ctx;
extern unsigned long sun4v_err_itlb_pte;
extern unsigned long sun4v_err_itlb_error;
extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
extern unsigned long sun4v_err_dtlb_vaddr;
extern unsigned long sun4v_err_dtlb_ctx;
extern unsigned long sun4v_err_dtlb_pte;
extern unsigned long sun4v_err_dtlb_error;
extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
extern void hypervisor_tlbop_error(unsigned long err,
unsigned long op);
extern void hypervisor_tlbop_error_xcall(unsigned long err,
unsigned long op);
/* WARNING: The error trap handlers in assembly know the precise
* layout of the following structure.
*
* C-level handlers in traps.c use this information to log the
* error and then determine how to recover (if possible).
*/
struct cheetah_err_info {
/*0x00*/u64 afsr;
/*0x08*/u64 afar;
/* D-cache state */
/*0x10*/u64 dcache_data[4]; /* The actual data */
/*0x30*/u64 dcache_index; /* D-cache index */
/*0x38*/u64 dcache_tag; /* D-cache tag/valid */
/*0x40*/u64 dcache_utag; /* D-cache microtag */
/*0x48*/u64 dcache_stag; /* D-cache snooptag */
/* I-cache state */
/*0x50*/u64 icache_data[8]; /* The actual insns + predecode */
/*0x90*/u64 icache_index; /* I-cache index */
/*0x98*/u64 icache_tag; /* I-cache phys tag */
/*0xa0*/u64 icache_utag; /* I-cache microtag */
/*0xa8*/u64 icache_stag; /* I-cache snooptag */
/*0xb0*/u64 icache_upper; /* I-cache upper-tag */
/*0xb8*/u64 icache_lower; /* I-cache lower-tag */
/* E-cache state */
/*0xc0*/u64 ecache_data[4]; /* 32 bytes from staging registers */
/*0xe0*/u64 ecache_index; /* E-cache index */
/*0xe8*/u64 ecache_tag; /* E-cache tag/state */
/*0xf0*/u64 __pad[32 - 30];
};
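Given the warning above, one way to keep the C and assembly views honest with each other is a compile-time layout check. This is only a sketch of the idea, not something this commit adds; the offsets are the ones annotated in the struct (BUILD_BUG_ON comes from <linux/kernel.h>, offsetof from <linux/stddef.h>).

/* Illustrative compile-time check of the offsets the assembly
 * error handlers depend on.
 */
static inline void cheetah_err_info_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct cheetah_err_info, afsr)        != 0x00);
	BUILD_BUG_ON(offsetof(struct cheetah_err_info, afar)        != 0x08);
	BUILD_BUG_ON(offsetof(struct cheetah_err_info, dcache_data) != 0x10);
	BUILD_BUG_ON(offsetof(struct cheetah_err_info, icache_data) != 0x50);
	BUILD_BUG_ON(offsetof(struct cheetah_err_info, ecache_data) != 0xc0);
	BUILD_BUG_ON(sizeof(struct cheetah_err_info)                != 0x100);
}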
#define CHAFSR_INVALID ((u64)-1L)
/* This is allocated at boot time based upon the largest hardware
* cpu ID in the system. We allocate two entries per cpu, one for
* TL==0 logging and one for TL >= 1 logging.
*/
extern struct cheetah_err_info *cheetah_error_log;
/* UPA nodes send interrupt packet to UltraSparc with first data reg
* value low 5 (7 on Starfire) bits holding the IRQ identifier being
* delivered. We must translate this into a non-vector IRQ so we can
* set the softint on this cpu.
*
* To make processing these packets efficient and race free we use
* an array of irq buckets below. The interrupt vector handler in
* entry.S feeds incoming packets into per-cpu pil-indexed lists.
*
* If you make changes to ino_bucket, please update hand coded assembler
* of the vectored interrupt trap handler(s) in entry.S and sun4v_ivec.S
*/
struct ino_bucket {
/*0x00*/unsigned long __irq_chain_pa;
/* Virtual interrupt number assigned to this INO. */
/*0x08*/unsigned int __virt_irq;
/*0x0c*/unsigned int __pad;
};
extern struct ino_bucket *ivector_table;
extern unsigned long ivector_table_pa;
extern void init_irqwork_curcpu(void);
extern void __cpuinit sun4v_register_mondo_queues(int this_cpu);
#endif /* CONFIG_SPARC32 */
#endif /* _ENTRY_H */


@@ -0,0 +1,236 @@
/*
* etrap.S: Preparing for entry into the kernel on Sparc V9.
*
* Copyright (C) 1996, 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#define TASK_REGOFF (THREAD_SIZE-TRACEREG_SZ-STACKFRAME_SZ)
#define ETRAP_PSTATE1 (PSTATE_TSO | PSTATE_PRIV)
#define ETRAP_PSTATE2 \
(PSTATE_TSO | PSTATE_PEF | PSTATE_PRIV | PSTATE_IE)
/*
* On entry, %g7 is return address - 0x4.
* %g4 and %g5 will be preserved in %l4 and %l5, respectively.
*/
.text
.align 64
.globl etrap_syscall, etrap, etrap_irq, etraptl1
etrap: rdpr %pil, %g2
etrap_irq: clr %g3
etrap_syscall: TRAP_LOAD_THREAD_REG(%g6, %g1)
rdpr %tstate, %g1
or %g1, %g3, %g1
sllx %g2, 20, %g3
andcc %g1, TSTATE_PRIV, %g0
or %g1, %g3, %g1
bne,pn %xcc, 1f
sub %sp, STACKFRAME_SZ+TRACEREG_SZ-STACK_BIAS, %g2
wrpr %g0, 7, %cleanwin
sethi %hi(TASK_REGOFF), %g2
sethi %hi(TSTATE_PEF), %g3
or %g2, %lo(TASK_REGOFF), %g2
and %g1, %g3, %g3
brnz,pn %g3, 1f
add %g6, %g2, %g2
wr %g0, 0, %fprs
1: rdpr %tpc, %g3
stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TSTATE]
rdpr %tnpc, %g1
stx %g3, [%g2 + STACKFRAME_SZ + PT_V9_TPC]
rd %y, %g3
stx %g1, [%g2 + STACKFRAME_SZ + PT_V9_TNPC]
rdpr %tt, %g1
st %g3, [%g2 + STACKFRAME_SZ + PT_V9_Y]
sethi %hi(PT_REGS_MAGIC), %g3
or %g3, %g1, %g1
st %g1, [%g2 + STACKFRAME_SZ + PT_V9_MAGIC]
rdpr %cansave, %g1
brnz,pt %g1, etrap_save
nop
rdpr %cwp, %g1
add %g1, 2, %g1
wrpr %g1, %cwp
be,pt %xcc, etrap_user_spill
mov ASI_AIUP, %g3
rdpr %otherwin, %g3
brz %g3, etrap_kernel_spill
mov ASI_AIUS, %g3
etrap_user_spill:
wr %g3, 0x0, %asi
ldx [%g6 + TI_FLAGS], %g3
and %g3, _TIF_32BIT, %g3
brnz,pt %g3, etrap_user_spill_32bit
nop
ba,a,pt %xcc, etrap_user_spill_64bit
etrap_save: save %g2, -STACK_BIAS, %sp
mov %g6, %l6
bne,pn %xcc, 3f
mov PRIMARY_CONTEXT, %l4
rdpr %canrestore, %g3
rdpr %wstate, %g2
wrpr %g0, 0, %canrestore
sll %g2, 3, %g2
mov 1, %l5
stb %l5, [%l6 + TI_FPDEPTH]
wrpr %g3, 0, %otherwin
wrpr %g2, 0, %wstate
sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g3
661: stxa %g3, [%l4] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g3, [%l4] ASI_MMU
.previous
sethi %hi(KERNBASE), %l4
flush %l4
mov ASI_AIUS, %l7
2: mov %g4, %l4
mov %g5, %l5
add %g7, 4, %l2
/* Go to trap time globals so we can save them. */
661: wrpr %g0, ETRAP_PSTATE1, %pstate
.section .sun4v_1insn_patch, "ax"
.word 661b
SET_GL(0)
.previous
stx %g1, [%sp + PTREGS_OFF + PT_V9_G1]
stx %g2, [%sp + PTREGS_OFF + PT_V9_G2]
sllx %l7, 24, %l7
stx %g3, [%sp + PTREGS_OFF + PT_V9_G3]
rdpr %cwp, %l0
stx %g4, [%sp + PTREGS_OFF + PT_V9_G4]
stx %g5, [%sp + PTREGS_OFF + PT_V9_G5]
stx %g6, [%sp + PTREGS_OFF + PT_V9_G6]
stx %g7, [%sp + PTREGS_OFF + PT_V9_G7]
or %l7, %l0, %l7
sethi %hi(TSTATE_TSO | TSTATE_PEF), %l0
or %l7, %l0, %l7
wrpr %l2, %tnpc
wrpr %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
stx %i0, [%sp + PTREGS_OFF + PT_V9_I0]
stx %i1, [%sp + PTREGS_OFF + PT_V9_I1]
stx %i2, [%sp + PTREGS_OFF + PT_V9_I2]
stx %i3, [%sp + PTREGS_OFF + PT_V9_I3]
stx %i4, [%sp + PTREGS_OFF + PT_V9_I4]
stx %i5, [%sp + PTREGS_OFF + PT_V9_I5]
stx %i6, [%sp + PTREGS_OFF + PT_V9_I6]
mov %l6, %g6
stx %i7, [%sp + PTREGS_OFF + PT_V9_I7]
LOAD_PER_CPU_BASE(%g5, %g6, %g4, %g3, %l1)
ldx [%g6 + TI_TASK], %g4
done
3: mov ASI_P, %l7
ldub [%l6 + TI_FPDEPTH], %l5
add %l6, TI_FPSAVED + 1, %l4
srl %l5, 1, %l3
add %l5, 2, %l5
stb %l5, [%l6 + TI_FPDEPTH]
ba,pt %xcc, 2b
stb %g0, [%l4 + %l3]
nop
etraptl1: /* Save tstate/tpc/tnpc of TL 1-->4 and the tl register itself.
* We place this right after pt_regs on the trap stack.
* The layout is:
* 0x00 TL1's TSTATE
* 0x08 TL1's TPC
* 0x10 TL1's TNPC
* 0x18 TL1's TT
* ...
* 0x78 TL4's TT
* 0x80 TL
*/
TRAP_LOAD_THREAD_REG(%g6, %g1)
sub %sp, ((4 * 8) * 4) + 8, %g2
rdpr %tl, %g1
wrpr %g0, 1, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x00]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x08]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x10]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x18]
wrpr %g0, 2, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x20]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x28]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x30]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x38]
sethi %hi(is_sun4v), %g3
lduw [%g3 + %lo(is_sun4v)], %g3
brnz,pn %g3, finish_tl1_capture
nop
wrpr %g0, 3, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x40]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x48]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x50]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x58]
wrpr %g0, 4, %tl
rdpr %tstate, %g3
stx %g3, [%g2 + STACK_BIAS + 0x60]
rdpr %tpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x68]
rdpr %tnpc, %g3
stx %g3, [%g2 + STACK_BIAS + 0x70]
rdpr %tt, %g3
stx %g3, [%g2 + STACK_BIAS + 0x78]
stx %g1, [%g2 + STACK_BIAS + 0x80]
finish_tl1_capture:
wrpr %g0, 1, %tl
661: nop
.section .sun4v_1insn_patch, "ax"
.word 661b
SET_GL(1)
.previous
rdpr %tstate, %g1
sub %g2, STACKFRAME_SZ + TRACEREG_SZ - STACK_BIAS, %g2
ba,pt %xcc, 1b
andcc %g1, TSTATE_PRIV, %g0
#undef TASK_REGOFF
#undef ETRAP_PSTATE1
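For reference, the TL1 capture area that etraptl1 builds above is easy to picture as a C structure. This mirror is purely illustrative — the kernel accesses the area by the raw offsets shown in the assembly, and u64 comes from <linux/types.h>.

/* Hypothetical C mirror of the etraptl1 save area: one record per
 * trap level 1..4, then the saved %tl, placed right after pt_regs
 * on the trap stack.
 */
struct tl1_capture {
	struct {
		u64 tstate;	/* 0x00 / 0x20 / 0x40 / 0x60 */
		u64 tpc;	/* 0x08 / 0x28 / 0x48 / 0x68 */
		u64 tnpc;	/* 0x10 / 0x30 / 0x50 / 0x70 */
		u64 tt;		/* 0x18 / 0x38 / 0x58 / 0x78 */
	} level[4];
	u64 tl;			/* 0x80 */
};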


@@ -0,0 +1,384 @@
/* This is trivial with the new code... */
.globl do_fpdis
.type do_fpdis,#function
do_fpdis:
sethi %hi(TSTATE_PEF), %g4
rdpr %tstate, %g5
andcc %g5, %g4, %g0
be,pt %xcc, 1f
nop
rd %fprs, %g5
andcc %g5, FPRS_FEF, %g0
be,pt %xcc, 1f
nop
/* Legal state when DCR_IFPOE is set in Cheetah %dcr. */
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
add %g0, %g0, %g0
ba,a,pt %xcc, rtrap
1: TRAP_LOAD_THREAD_REG(%g6, %g1)
ldub [%g6 + TI_FPSAVED], %g5
wr %g0, FPRS_FEF, %fprs
andcc %g5, FPRS_FEF, %g0
be,a,pt %icc, 1f
clr %g7
ldx [%g6 + TI_GSR], %g7
1: andcc %g5, FPRS_DL, %g0
bne,pn %icc, 2f
fzero %f0
andcc %g5, FPRS_DU, %g0
bne,pn %icc, 1f
fzero %f2
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
fmuld %f0, %f2, %f18
faddd %f0, %f2, %f20
fmuld %f0, %f2, %f22
faddd %f0, %f2, %f24
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
faddd %f0, %f2, %f32
fmuld %f0, %f2, %f34
faddd %f0, %f2, %f36
fmuld %f0, %f2, %f38
faddd %f0, %f2, %f40
fmuld %f0, %f2, %f42
faddd %f0, %f2, %f44
fmuld %f0, %f2, %f46
faddd %f0, %f2, %f48
fmuld %f0, %f2, %f50
faddd %f0, %f2, %f52
fmuld %f0, %f2, %f54
faddd %f0, %f2, %f56
fmuld %f0, %f2, %f58
b,pt %xcc, fpdis_exit2
faddd %f0, %f2, %f60
1: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS + 0x80, %g1
faddd %f0, %f2, %f4
fmuld %f0, %f2, %f6
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
add %g6, TI_FPREGS + 0xc0, %g2
faddd %f0, %f2, %f8
fmuld %f0, %f2, %f10
membar #Sync
ldda [%g1] ASI_BLK_S, %f32
ldda [%g2] ASI_BLK_S, %f48
membar #Sync
faddd %f0, %f2, %f12
fmuld %f0, %f2, %f14
faddd %f0, %f2, %f16
fmuld %f0, %f2, %f18
faddd %f0, %f2, %f20
fmuld %f0, %f2, %f22
faddd %f0, %f2, %f24
fmuld %f0, %f2, %f26
faddd %f0, %f2, %f28
fmuld %f0, %f2, %f30
b,pt %xcc, fpdis_exit
nop
2: andcc %g5, FPRS_DU, %g0
bne,pt %icc, 3f
fzero %f32
mov SECONDARY_CONTEXT, %g3
fzero %f34
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
add %g6, TI_FPREGS, %g1
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
add %g6, TI_FPREGS + 0x40, %g2
faddd %f32, %f34, %f36
fmuld %f32, %f34, %f38
membar #Sync
ldda [%g1] ASI_BLK_S, %f0
ldda [%g2] ASI_BLK_S, %f16
membar #Sync
faddd %f32, %f34, %f40
fmuld %f32, %f34, %f42
faddd %f32, %f34, %f44
fmuld %f32, %f34, %f46
faddd %f32, %f34, %f48
fmuld %f32, %f34, %f50
faddd %f32, %f34, %f52
fmuld %f32, %f34, %f54
faddd %f32, %f34, %f56
fmuld %f32, %f34, %f58
faddd %f32, %f34, %f60
fmuld %f32, %f34, %f62
ba,pt %xcc, fpdis_exit
nop
3: mov SECONDARY_CONTEXT, %g3
add %g6, TI_FPREGS, %g1
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
mov 0x40, %g2
membar #Sync
ldda [%g1] ASI_BLK_S, %f0
ldda [%g1 + %g2] ASI_BLK_S, %f16
add %g1, 0x80, %g1
ldda [%g1] ASI_BLK_S, %f32
ldda [%g1 + %g2] ASI_BLK_S, %f48
membar #Sync
fpdis_exit:
661: stxa %g5, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g5, [%g3] ASI_MMU
.previous
membar #Sync
fpdis_exit2:
wr %g7, 0, %gsr
ldx [%g6 + TI_XFSR], %fsr
rdpr %tstate, %g3
or %g3, %g4, %g3 ! anal...
wrpr %g3, %tstate
wr %g0, FPRS_FEF, %fprs ! clean DU/DL bits
retry
.size do_fpdis,.-do_fpdis
.align 32
.type fp_other_bounce,#function
fp_other_bounce:
call do_fpother
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size fp_other_bounce,.-fp_other_bounce
.align 32
.globl do_fpother_check_fitos
.type do_fpother_check_fitos,#function
do_fpother_check_fitos:
TRAP_LOAD_THREAD_REG(%g6, %g1)
sethi %hi(fp_other_bounce - 4), %g7
or %g7, %lo(fp_other_bounce - 4), %g7
/* NOTE: Need to preserve %g7 until we fully commit
* to the fitos fixup.
*/
stx %fsr, [%g6 + TI_XFSR]
rdpr %tstate, %g3
andcc %g3, TSTATE_PRIV, %g0
bne,pn %xcc, do_fptrap_after_fsr
nop
ldx [%g6 + TI_XFSR], %g3
srlx %g3, 14, %g1
and %g1, 7, %g1
cmp %g1, 2 ! Unfinished FP-OP
bne,pn %xcc, do_fptrap_after_fsr
sethi %hi(1 << 23), %g1 ! Inexact
andcc %g3, %g1, %g0
bne,pn %xcc, do_fptrap_after_fsr
rdpr %tpc, %g1
lduwa [%g1] ASI_AIUP, %g3 ! This cannot ever fail
#define FITOS_MASK 0xc1f83fe0
#define FITOS_COMPARE 0x81a01880
sethi %hi(FITOS_MASK), %g1
or %g1, %lo(FITOS_MASK), %g1
and %g3, %g1, %g1
sethi %hi(FITOS_COMPARE), %g2
or %g2, %lo(FITOS_COMPARE), %g2
cmp %g1, %g2
bne,pn %xcc, do_fptrap_after_fsr
nop
std %f62, [%g6 + TI_FPREGS + (62 * 4)]
sethi %hi(fitos_table_1), %g1
and %g3, 0x1f, %g2
or %g1, %lo(fitos_table_1), %g1
sllx %g2, 2, %g2
jmpl %g1 + %g2, %g0
ba,pt %xcc, fitos_emul_continue
fitos_table_1:
fitod %f0, %f62
fitod %f1, %f62
fitod %f2, %f62
fitod %f3, %f62
fitod %f4, %f62
fitod %f5, %f62
fitod %f6, %f62
fitod %f7, %f62
fitod %f8, %f62
fitod %f9, %f62
fitod %f10, %f62
fitod %f11, %f62
fitod %f12, %f62
fitod %f13, %f62
fitod %f14, %f62
fitod %f15, %f62
fitod %f16, %f62
fitod %f17, %f62
fitod %f18, %f62
fitod %f19, %f62
fitod %f20, %f62
fitod %f21, %f62
fitod %f22, %f62
fitod %f23, %f62
fitod %f24, %f62
fitod %f25, %f62
fitod %f26, %f62
fitod %f27, %f62
fitod %f28, %f62
fitod %f29, %f62
fitod %f30, %f62
fitod %f31, %f62
fitos_emul_continue:
sethi %hi(fitos_table_2), %g1
srl %g3, 25, %g2
or %g1, %lo(fitos_table_2), %g1
and %g2, 0x1f, %g2
sllx %g2, 2, %g2
jmpl %g1 + %g2, %g0
ba,pt %xcc, fitos_emul_fini
fitos_table_2:
fdtos %f62, %f0
fdtos %f62, %f1
fdtos %f62, %f2
fdtos %f62, %f3
fdtos %f62, %f4
fdtos %f62, %f5
fdtos %f62, %f6
fdtos %f62, %f7
fdtos %f62, %f8
fdtos %f62, %f9
fdtos %f62, %f10
fdtos %f62, %f11
fdtos %f62, %f12
fdtos %f62, %f13
fdtos %f62, %f14
fdtos %f62, %f15
fdtos %f62, %f16
fdtos %f62, %f17
fdtos %f62, %f18
fdtos %f62, %f19
fdtos %f62, %f20
fdtos %f62, %f21
fdtos %f62, %f22
fdtos %f62, %f23
fdtos %f62, %f24
fdtos %f62, %f25
fdtos %f62, %f26
fdtos %f62, %f27
fdtos %f62, %f28
fdtos %f62, %f29
fdtos %f62, %f30
fdtos %f62, %f31
fitos_emul_fini:
ldd [%g6 + TI_FPREGS + (62 * 4)], %f62
done
.size do_fpother_check_fitos,.-do_fpother_check_fitos
.align 32
.globl do_fptrap
.type do_fptrap,#function
do_fptrap:
TRAP_LOAD_THREAD_REG(%g6, %g1)
stx %fsr, [%g6 + TI_XFSR]
do_fptrap_after_fsr:
ldub [%g6 + TI_FPSAVED], %g3
rd %fprs, %g1
or %g3, %g1, %g3
stb %g3, [%g6 + TI_FPSAVED]
rd %gsr, %g3
stx %g3, [%g6 + TI_GSR]
mov SECONDARY_CONTEXT, %g3
661: ldxa [%g3] ASI_DMMU, %g5
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%g3] ASI_MMU, %g5
.previous
sethi %hi(sparc64_kern_sec_context), %g2
ldx [%g2 + %lo(sparc64_kern_sec_context)], %g2
661: stxa %g2, [%g3] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g3] ASI_MMU
.previous
membar #Sync
add %g6, TI_FPREGS, %g2
andcc %g1, FPRS_DL, %g0
be,pn %icc, 4f
mov 0x40, %g3
stda %f0, [%g2] ASI_BLK_S
stda %f16, [%g2 + %g3] ASI_BLK_S
andcc %g1, FPRS_DU, %g0
be,pn %icc, 5f
4: add %g2, 128, %g2
stda %f32, [%g2] ASI_BLK_S
stda %f48, [%g2 + %g3] ASI_BLK_S
5: mov SECONDARY_CONTEXT, %g1
membar #Sync
661: stxa %g5, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g5, [%g1] ASI_MMU
.previous
membar #Sync
ba,pt %xcc, etrap
wr %g0, 0, %fprs
.size do_fptrap,.-do_fptrap


@@ -0,0 +1,76 @@
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>
#include <asm/ftrace.h>
static const u32 ftrace_nop = 0x01000000;
unsigned char *ftrace_nop_replace(void)
{
return (unsigned char *)&ftrace_nop;
}
unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
static u32 call;
s32 off;
off = ((s32)addr - (s32)ip);
call = 0x40000000 | ((u32)off >> 2);
return (unsigned char *) &call;
}
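The line above is just the SPARC call-instruction encoding: the top two bits are 01 and the remaining 30 bits hold the signed word displacement from the patched instruction. A small standalone illustration (the addresses are made up):

#include <stdio.h>
#include <stdint.h>

/* Worked example of the encoding in ftrace_call_replace(). */
int main(void)
{
	uint32_t ip   = 0x00401000;	/* hypothetical mcount call site */
	uint32_t addr = 0x00402000;	/* hypothetical tracer entry */
	int32_t  off  = (int32_t)addr - (int32_t)ip;
	uint32_t insn = 0x40000000u | ((uint32_t)off >> 2);

	/* 0x1000 bytes = 0x400 words, so this prints 0x40000400 */
	printf("call %#x from %#x -> insn %#010x\n", addr, ip, insn);
	return 0;
}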
int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
unsigned char *new_code)
{
u32 old = *(u32 *)old_code;
u32 new = *(u32 *)new_code;
u32 replaced;
int faulted;
__asm__ __volatile__(
"1: cas [%[ip]], %[old], %[new]\n"
" flush %[ip]\n"
" mov 0, %[faulted]\n"
"2:\n"
" .section .fixup,#alloc,#execinstr\n"
" .align 4\n"
"3: sethi %%hi(2b), %[faulted]\n"
" jmpl %[faulted] + %%lo(2b), %%g0\n"
" mov 1, %[faulted]\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
" .align 4\n"
" .word 1b, 3b\n"
" .previous\n"
: "=r" (replaced), [faulted] "=r" (faulted)
: [new] "0" (new), [old] "r" (old), [ip] "r" (ip)
: "memory");
if (replaced != old && replaced != new)
faulted = 2;
return faulted;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
unsigned long ip = (unsigned long)(&ftrace_call);
unsigned char old[MCOUNT_INSN_SIZE], *new;
memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
return ftrace_modify_code(ip, old, new);
}
int __init ftrace_dyn_arch_init(void *data)
{
ftrace_mcount_set(data);
return 0;
}


@@ -0,0 +1,24 @@
.globl getcc
.type getcc,#function
getcc:
ldx [%o0 + PT_V9_TSTATE], %o1
srlx %o1, 32, %o1
and %o1, 0xf, %o1
retl
stx %o1, [%o0 + PT_V9_G1]
.size getcc,.-getcc
.globl setcc
.type setcc,#function
setcc:
ldx [%o0 + PT_V9_TSTATE], %o1
ldx [%o0 + PT_V9_G1], %o2
or %g0, %ulo(TSTATE_ICC), %o3
sllx %o3, 32, %o3
andn %o1, %o3, %o1
sllx %o2, 32, %o2
and %o2, %o3, %o2
or %o1, %o2, %o1
retl
stx %o1, [%o0 + PT_V9_TSTATE]
.size setcc,.-setcc


@@ -990,7 +990,7 @@ sun4c_continue_boot:
/* Zero out our BSS section. */
set __bss_start , %o0 ! First address of BSS
set _end , %o1 ! Last address of BSS
add %o0, 0x1, %o0
1:
stb %g0, [%o0]

900
arch/sparc/kernel/head_64.S Normal file

@@ -0,0 +1,900 @@
/* head.S: Initial boot code for the Sparc64 port of Linux.
*
* Copyright (C) 1996, 1997, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 David Sitsky (David.Sitsky@anu.edu.au)
* Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 Miguel de Icaza (miguel@nuclecu.unam.mx)
*/
#include <linux/version.h>
#include <linux/errno.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/errno.h>
#include <asm/signal.h>
#include <asm/processor.h>
#include <asm/lsu.h>
#include <asm/dcr.h>
#include <asm/dcu.h>
#include <asm/head.h>
#include <asm/ttable.h>
#include <asm/mmu.h>
#include <asm/cpudata.h>
#include <asm/pil.h>
#include <asm/estate.h>
#include <asm/sfafsr.h>
#include <asm/unistd.h>
/* This section, from _start to sparc64_boot_end, should fit into
* 0x0000000000404000 to 0x0000000000408000.
*/
.text
.globl start, _start, stext, _stext
_start:
start:
_stext:
stext:
! 0x0000000000404000
b sparc64_boot
flushw /* Flush register file. */
/* This stuff has to be in sync with SILO and other potential boot loaders
* Fields should be kept upward compatible and whenever any change is made,
* the HdrS version should be incremented.
*/
.global root_flags, ram_flags, root_dev
.global sparc_ramdisk_image, sparc_ramdisk_size
.global sparc_ramdisk_image64
.ascii "HdrS"
.word LINUX_VERSION_CODE
/* History:
*
* 0x0300 : Supports being located at other than 0x4000
* 0x0202 : Supports kernel params string
* 0x0201 : Supports reboot_command
*/
.half 0x0301 /* HdrS version */
root_flags:
.half 1
root_dev:
.half 0
ram_flags:
.half 0
sparc_ramdisk_image:
.word 0
sparc_ramdisk_size:
.word 0
.xword reboot_command
.xword bootstr_info
sparc_ramdisk_image64:
.xword 0
.word _end
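Since the directives above define a fixed record that SILO parses, it can help to see the same layout as a C struct. This is only a sketch inferred from the assembler directives — the struct name and typing are not from the kernel; fields are big-endian on SPARC and the record begins a couple of instructions into the image.

#include <stdint.h>

/* Hypothetical boot-loader view of the "HdrS" record laid out above. */
struct silo_hdr {
	char     magic[4];		/* .ascii "HdrS" */
	uint32_t linux_version_code;	/* .word LINUX_VERSION_CODE */
	uint16_t hdrs_version;		/* .half 0x0301 */
	uint16_t root_flags;		/* .half 1 */
	uint16_t root_dev;		/* .half 0 */
	uint16_t ram_flags;		/* .half 0 */
	uint32_t ramdisk_image;		/* .word 0 */
	uint32_t ramdisk_size;		/* .word 0 */
	uint64_t reboot_command;	/* .xword reboot_command */
	uint64_t bootstr_info;		/* .xword bootstr_info */
	uint64_t ramdisk_image64;	/* .xword 0 */
	uint32_t end_addr;		/* .word _end */
} __attribute__((packed));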
/* PROM cif handler code address is in %o4. */
sparc64_boot:
mov %o4, %l7
/* We need to remap the kernel. Use position independent
* code to remap us to KERNBASE.
*
* SILO can invoke us with 32-bit address masking enabled,
* so make sure that's clear.
*/
rdpr %pstate, %g1
andn %g1, PSTATE_AM, %g1
wrpr %g1, 0x0, %pstate
ba,a,pt %xcc, 1f
.globl prom_finddev_name, prom_chosen_path, prom_root_node
.globl prom_getprop_name, prom_mmu_name, prom_peer_name
.globl prom_callmethod_name, prom_translate_name, prom_root_compatible
.globl prom_map_name, prom_unmap_name, prom_mmu_ihandle_cache
.globl prom_boot_mapped_pc, prom_boot_mapping_mode
.globl prom_boot_mapping_phys_high, prom_boot_mapping_phys_low
.globl prom_compatible_name, prom_cpu_path, prom_cpu_compatible
.globl is_sun4v, sun4v_chip_type, prom_set_trap_table_name
prom_peer_name:
.asciz "peer"
prom_compatible_name:
.asciz "compatible"
prom_finddev_name:
.asciz "finddevice"
prom_chosen_path:
.asciz "/chosen"
prom_cpu_path:
.asciz "/cpu"
prom_getprop_name:
.asciz "getprop"
prom_mmu_name:
.asciz "mmu"
prom_callmethod_name:
.asciz "call-method"
prom_translate_name:
.asciz "translate"
prom_map_name:
.asciz "map"
prom_unmap_name:
.asciz "unmap"
prom_set_trap_table_name:
.asciz "SUNW,set-trap-table"
prom_sun4v_name:
.asciz "sun4v"
prom_niagara_prefix:
.asciz "SUNW,UltraSPARC-T"
.align 4
prom_root_compatible:
.skip 64
prom_cpu_compatible:
.skip 64
prom_root_node:
.word 0
prom_mmu_ihandle_cache:
.word 0
prom_boot_mapped_pc:
.word 0
prom_boot_mapping_mode:
.word 0
.align 8
prom_boot_mapping_phys_high:
.xword 0
prom_boot_mapping_phys_low:
.xword 0
is_sun4v:
.word 0
sun4v_chip_type:
.word SUN4V_CHIP_INVALID
1:
rd %pc, %l0
mov (1b - prom_peer_name), %l1
sub %l0, %l1, %l1
mov 0, %l2
/* prom_root_node = prom_peer(0) */
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "peer"
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, 0
stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x20], %l4 ! prom root node
mov (1b - prom_root_node), %l1
sub %l0, %l1, %l1
stw %l4, [%l1]
mov (1b - prom_getprop_name), %l1
mov (1b - prom_compatible_name), %l2
mov (1b - prom_root_compatible), %l5
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l5, %l5
/* prom_getproperty(prom_root_node, "compatible",
* &prom_root_compatible, 64)
*/
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, prom_root_node
stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible"
stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_root_compatible
mov 64, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size
stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
mov (1b - prom_finddev_name), %l1
mov (1b - prom_chosen_path), %l2
mov (1b - prom_boot_mapped_pc), %l3
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l3, %l3
stw %l0, [%l3]
sub %sp, (192 + 128), %sp
/* chosen_node = prom_finddevice("/chosen") */
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/chosen"
stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x20], %l4 ! chosen device node
mov (1b - prom_getprop_name), %l1
mov (1b - prom_mmu_name), %l2
mov (1b - prom_mmu_ihandle_cache), %l5
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l5, %l5
/* prom_mmu_ihandle_cache = prom_getint(chosen_node, "mmu") */
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, chosen_node
stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "mmu"
stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_mmu_ihandle_cache
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, sizeof(arg3)
stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
mov (1b - prom_callmethod_name), %l1
mov (1b - prom_translate_name), %l2
sub %l0, %l1, %l1
sub %l0, %l2, %l2
lduw [%l5], %l5 ! prom_mmu_ihandle_cache
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "call-method"
mov 3, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 3
mov 5, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 5
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1: "translate"
stx %l5, [%sp + 2047 + 128 + 0x20] ! arg2: prom_mmu_ihandle_cache
/* PAGE align */
srlx %l0, 13, %l3
sllx %l3, 13, %l3
stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: vaddr, our PC
stx %g0, [%sp + 2047 + 128 + 0x30] ! res1
stx %g0, [%sp + 2047 + 128 + 0x38] ! res2
stx %g0, [%sp + 2047 + 128 + 0x40] ! res3
stx %g0, [%sp + 2047 + 128 + 0x48] ! res4
stx %g0, [%sp + 2047 + 128 + 0x50] ! res5
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x40], %l1 ! translation mode
mov (1b - prom_boot_mapping_mode), %l4
sub %l0, %l4, %l4
stw %l1, [%l4]
mov (1b - prom_boot_mapping_phys_high), %l4
sub %l0, %l4, %l4
ldx [%sp + 2047 + 128 + 0x48], %l2 ! physaddr high
stx %l2, [%l4 + 0x0]
ldx [%sp + 2047 + 128 + 0x50], %l3 ! physaddr low
/* 4MB align */
srlx %l3, 22, %l3
sllx %l3, 22, %l3
stx %l3, [%l4 + 0x8]
/* Leave service as-is, "call-method" */
mov 7, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 7
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
mov (1b - prom_map_name), %l3
sub %l0, %l3, %l3
stx %l3, [%sp + 2047 + 128 + 0x18] ! arg1: "map"
/* Leave arg2 as-is, prom_mmu_ihandle_cache */
mov -1, %l3
stx %l3, [%sp + 2047 + 128 + 0x28] ! arg3: mode (-1 default)
/* 4MB align the kernel image size. */
set (_end - KERNBASE), %l3
set ((4 * 1024 * 1024) - 1), %l4
add %l3, %l4, %l3
andn %l3, %l4, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4: roundup(ksize, 4MB)
sethi %hi(KERNBASE), %l3
stx %l3, [%sp + 2047 + 128 + 0x38] ! arg5: vaddr (KERNBASE)
stx %g0, [%sp + 2047 + 128 + 0x40] ! arg6: empty
mov (1b - prom_boot_mapping_phys_low), %l3
sub %l0, %l3, %l3
ldx [%l3], %l3
stx %l3, [%sp + 2047 + 128 + 0x48] ! arg7: phys addr
call %l7
add %sp, (2047 + 128), %o0 ! argument array
add %sp, (192 + 128), %sp
sethi %hi(prom_root_compatible), %g1
or %g1, %lo(prom_root_compatible), %g1
sethi %hi(prom_sun4v_name), %g7
or %g7, %lo(prom_sun4v_name), %g7
mov 5, %g3
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 80f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 90b
add %g1, 1, %g1
sethi %hi(is_sun4v), %g1
or %g1, %lo(is_sun4v), %g1
mov 1, %g7
stw %g7, [%g1]
/* cpu_node = prom_finddevice("/cpu") */
mov (1b - prom_finddev_name), %l1
mov (1b - prom_cpu_path), %l2
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %sp, (192 + 128), %sp
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "finddevice"
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 1
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l2, [%sp + 2047 + 128 + 0x18] ! arg1, "/cpu"
stx %g0, [%sp + 2047 + 128 + 0x20] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
ldx [%sp + 2047 + 128 + 0x20], %l4 ! cpu device node
mov (1b - prom_getprop_name), %l1
mov (1b - prom_compatible_name), %l2
mov (1b - prom_cpu_compatible), %l5
sub %l0, %l1, %l1
sub %l0, %l2, %l2
sub %l0, %l5, %l5
/* prom_getproperty(cpu_node, "compatible",
* &prom_cpu_compatible, 64)
*/
stx %l1, [%sp + 2047 + 128 + 0x00] ! service, "getprop"
mov 4, %l3
stx %l3, [%sp + 2047 + 128 + 0x08] ! num_args, 4
mov 1, %l3
stx %l3, [%sp + 2047 + 128 + 0x10] ! num_rets, 1
stx %l4, [%sp + 2047 + 128 + 0x18] ! arg1, cpu_node
stx %l2, [%sp + 2047 + 128 + 0x20] ! arg2, "compatible"
stx %l5, [%sp + 2047 + 128 + 0x28] ! arg3, &prom_cpu_compatible
mov 64, %l3
stx %l3, [%sp + 2047 + 128 + 0x30] ! arg4, size
stx %g0, [%sp + 2047 + 128 + 0x38] ! ret1
call %l7
add %sp, (2047 + 128), %o0 ! argument array
add %sp, (192 + 128), %sp
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
sethi %hi(prom_niagara_prefix), %g7
or %g7, %lo(prom_niagara_prefix), %g7
mov 17, %g3
90: ldub [%g7], %g2
ldub [%g1], %g4
cmp %g2, %g4
bne,pn %icc, 4f
add %g7, 1, %g7
subcc %g3, 1, %g3
bne,pt %xcc, 90b
add %g1, 1, %g1
sethi %hi(prom_cpu_compatible), %g1
or %g1, %lo(prom_cpu_compatible), %g1
ldub [%g1 + 17], %g2
cmp %g2, '1'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA1, %g4
cmp %g2, '2'
be,pt %xcc, 5f
mov SUN4V_CHIP_NIAGARA2, %g4
4:
mov SUN4V_CHIP_UNKNOWN, %g4
5: sethi %hi(sun4v_chip_type), %g2
or %g2, %lo(sun4v_chip_type), %g2
stw %g4, [%g2]
80:
BRANCH_IF_SUN4V(g1, jump_to_sun4u_init)
BRANCH_IF_CHEETAH_BASE(g1,g7,cheetah_boot)
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,cheetah_plus_boot)
ba,pt %xcc, spitfire_boot
nop
cheetah_plus_boot:
/* Preserve OBP chosen DCU and DCR register settings. */
ba,pt %xcc, cheetah_generic_boot
nop
cheetah_boot:
mov DCR_BPE | DCR_RPE | DCR_SI | DCR_IFPOE | DCR_MS, %g1
wr %g1, %asr18
sethi %uhi(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
or %g7, %ulo(DCU_ME|DCU_RE|DCU_HPE|DCU_SPE|DCU_SL|DCU_WE), %g7
sllx %g7, 32, %g7
or %g7, DCU_DM | DCU_IM | DCU_DC | DCU_IC, %g7
stxa %g7, [%g0] ASI_DCU_CONTROL_REG
membar #Sync
cheetah_generic_boot:
mov TSB_EXTENSION_P, %g3
stxa %g0, [%g3] ASI_DMMU
stxa %g0, [%g3] ASI_IMMU
membar #Sync
mov TSB_EXTENSION_S, %g3
stxa %g0, [%g3] ASI_DMMU
membar #Sync
mov TSB_EXTENSION_N, %g3
stxa %g0, [%g3] ASI_DMMU
stxa %g0, [%g3] ASI_IMMU
membar #Sync
ba,a,pt %xcc, jump_to_sun4u_init
spitfire_boot:
/* Typically PROM has already enabled both MMU's and both on-chip
* caches, but we do it here anyway just to be paranoid.
*/
mov (LSU_CONTROL_IC|LSU_CONTROL_DC|LSU_CONTROL_IM|LSU_CONTROL_DM), %g1
stxa %g1, [%g0] ASI_LSU_CONTROL
membar #Sync
jump_to_sun4u_init:
/*
* Make sure we are in privileged mode, have address masking,
* using the ordinary globals and have enabled floating
* point.
*
* Again, typically PROM has left %pil at 13 or similar, and
* (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE) in %pstate.
*/
wrpr %g0, (PSTATE_PRIV|PSTATE_PEF|PSTATE_IE), %pstate
wr %g0, 0, %fprs
set sun4u_init, %g2
jmpl %g2 + %g0, %g0
nop
.section .text.init.refok
sun4u_init:
BRANCH_IF_SUN4V(g1, sun4v_init)
/* Set ctx 0 */
mov PRIMARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_DMMU
membar #Sync
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_DMMU
membar #Sync
ba,pt %xcc, sun4u_continue
nop
sun4v_init:
/* Set ctx 0 */
mov PRIMARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
ba,pt %xcc, niagara_tlb_fixup
nop
sun4u_continue:
BRANCH_IF_ANY_CHEETAH(g1, g7, cheetah_tlb_fixup)
ba,pt %xcc, spitfire_tlb_fixup
nop
niagara_tlb_fixup:
mov 3, %g2 /* Set TLB type to hypervisor. */
sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
/* Patch copy/clear ops. */
sethi %hi(sun4v_chip_type), %g1
lduw [%g1 + %lo(sun4v_chip_type)], %g1
cmp %g1, SUN4V_CHIP_NIAGARA1
be,pt %xcc, niagara_patch
cmp %g1, SUN4V_CHIP_NIAGARA2
be,pt %xcc, niagara2_patch
nop
call generic_patch_copyops
nop
call generic_patch_bzero
nop
call generic_patch_pageops
nop
ba,a,pt %xcc, 80f
niagara2_patch:
call niagara2_patch_copyops
nop
call niagara_patch_bzero
nop
call niagara2_patch_pageops
nop
ba,a,pt %xcc, 80f
niagara_patch:
call niagara_patch_copyops
nop
call niagara_patch_bzero
nop
call niagara_patch_pageops
nop
80:
/* Patch TLB/cache ops. */
call hypervisor_patch_cachetlbops
nop
ba,pt %xcc, tlb_fixup_done
nop
cheetah_tlb_fixup:
mov 2, %g2 /* Set TLB type to cheetah+. */
BRANCH_IF_CHEETAH_PLUS_OR_FOLLOWON(g1,g7,1f)
mov 1, %g2 /* Set TLB type to cheetah. */
1: sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
/* Patch copy/page operations to cheetah optimized versions. */
call cheetah_patch_copyops
nop
call cheetah_patch_copy_page
nop
call cheetah_patch_cachetlbops
nop
ba,pt %xcc, tlb_fixup_done
nop
spitfire_tlb_fixup:
/* Set TLB type to spitfire. */
mov 0, %g2
sethi %hi(tlb_type), %g1
stw %g2, [%g1 + %lo(tlb_type)]
tlb_fixup_done:
sethi %hi(init_thread_union), %g6
or %g6, %lo(init_thread_union), %g6
ldx [%g6 + TI_TASK], %g4
mov %sp, %l6
wr %g0, ASI_P, %asi
mov 1, %g1
sllx %g1, THREAD_SHIFT, %g1
sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
add %g6, %g1, %sp
mov 0, %fp
/* Set the per-cpu pointer initially to zero; this makes
* the boot-cpu use the in-kernel-image per-cpu areas
* before setup_per_cpu_area() is invoked.
*/
clr %g5
wrpr %g0, 0, %wstate
wrpr %g0, 0x0, %tl
/* Clear the bss */
sethi %hi(__bss_start), %o0
or %o0, %lo(__bss_start), %o0
sethi %hi(_end), %o1
or %o1, %lo(_end), %o1
call __bzero
sub %o1, %o0, %o1
#ifdef CONFIG_LOCKDEP
/* We have to call this super early, as even prom_init can grab
* spinlocks and thus call into the lockdep code.
*/
call lockdep_init
nop
#endif
mov %l6, %o1 ! OpenPROM stack
call prom_init
mov %l7, %o0 ! OpenPROM cif handler
/* Initialize current_thread_info()->cpu as early as possible.
* In order to do that accurately we have to patch up the get_cpuid()
* assembler sequences. And that, in turn, requires that we know
* if we are on a Starfire box or not. While we're here, patch up
* the sun4v sequences as well.
*/
call check_if_starfire
nop
call per_cpu_patch
nop
call sun4v_patch
nop
#ifdef CONFIG_SMP
call hard_smp_processor_id
nop
cmp %o0, NR_CPUS
blu,pt %xcc, 1f
nop
call boot_cpu_id_too_large
nop
/* Not reached... */
1:
/* If we boot on a non-zero cpu, all of the per-cpu
* variable references we make before setting up the
* per-cpu areas will use a bogus offset. Put a
* compensating factor into __per_cpu_base to handle
* this cleanly.
*
* What the per-cpu code calculates is:
*
* __per_cpu_base + (cpu << __per_cpu_shift)
*
* These two variables are zero initially, so to
* make it all cancel out to zero we need to put
* "0 - (cpu << 0)" into __per_cpu_base so that the
* above formula evaluates to zero.
*
* We cannot even perform a printk() until this stuff
* is setup as that calls cpu_clock() which uses
* per-cpu variables.
*/
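/* Worked example of the compensation (illustrative): booting on
 * cpu 3 with __per_cpu_base = __per_cpu_shift = 0, a per-cpu access
 * would wrongly add (3 << 0) = 3.  Storing 0 - 3 = -3 into
 * __per_cpu_base makes -3 + (3 << 0) = 0, so early references land
 * in the in-kernel-image per-cpu area as intended.
 */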
sub %g0, %o0, %o1
sethi %hi(__per_cpu_base), %o2
stx %o1, [%o2 + %lo(__per_cpu_base)]
#else
mov 0, %o0
#endif
sth %o0, [%g6 + TI_CPU]
call prom_init_report
nop
/* Off we go.... */
call start_kernel
nop
/* Not reached... */
.previous
/* This is meant to allow the sharing of this code between
* boot processor invocation (via setup_tba() below) and
* secondary processor startup (via trampoline.S). The
* former does use this code; the latter does not yet, due
* to some complexities. That should be fixed up at some
* point.
*
* There used to be enormous complexity wrt. transferring
* over from the firmware's trap table to the Linux kernel's.
* For example, there was a chicken & egg problem wrt. building
* the OBP page tables, yet needing to be on the Linux kernel
* trap table (to translate PAGE_OFFSET addresses) in order to
* do that.
*
* We now handle OBP tlb misses differently, via linear lookups
* into the prom_trans[] array. So that specific problem no
* longer exists. Yet, unfortunately there are still some issues
* preventing trampoline.S from using this code... ho hum.
*/
.globl setup_trap_table
setup_trap_table:
save %sp, -192, %sp
/* Force interrupts to be disabled. */
rdpr %pstate, %l0
andn %l0, PSTATE_IE, %o1
wrpr %o1, 0x0, %pstate
rdpr %pil, %l1
wrpr %g0, PIL_NORMAL_MAX, %pil
/* Make the firmware call to jump over to the Linux trap table. */
sethi %hi(is_sun4v), %o0
lduw [%o0 + %lo(is_sun4v)], %o0
brz,pt %o0, 1f
nop
TRAP_LOAD_TRAP_BLOCK(%g2, %g3)
add %g2, TRAP_PER_CPU_FAULT_INFO, %g2
stxa %g2, [%g0] ASI_SCRATCHPAD
/* Compute physical address:
*
* paddr = kern_base + (mmfsa_vaddr - KERNBASE)
*/
sethi %hi(KERNBASE), %g3
sub %g2, %g3, %g2
sethi %hi(kern_base), %g3
ldx [%g3 + %lo(kern_base)], %g3
add %g2, %g3, %o1
sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 2, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 0, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
stx %o0, [%sp + 2047 + 128 + 0x18]
stx %o1, [%sp + 2047 + 128 + 0x20]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
ba,pt %xcc, 2f
nop
1: sethi %hi(sparc64_ttable_tl0), %o0
set prom_set_trap_table_name, %g2
stx %g2, [%sp + 2047 + 128 + 0x00]
mov 1, %g2
stx %g2, [%sp + 2047 + 128 + 0x08]
mov 0, %g2
stx %g2, [%sp + 2047 + 128 + 0x10]
stx %o0, [%sp + 2047 + 128 + 0x18]
sethi %hi(p1275buf), %g2
or %g2, %lo(p1275buf), %g2
ldx [%g2 + 0x08], %o1
call %o1
add %sp, (2047 + 128), %o0
/* Start using proper page size encodings in ctx register. */
2: sethi %hi(sparc64_kern_pri_context), %g3
ldx [%g3 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
661: stxa %g2, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g1] ASI_MMU
.previous
membar #Sync
BRANCH_IF_SUN4V(o2, 1f)
/* Kill PROM timer */
sethi %hi(0x80000000), %o2
sllx %o2, 32, %o2
wr %o2, 0, %tick_cmpr
BRANCH_IF_ANY_CHEETAH(o2, o3, 1f)
ba,pt %xcc, 2f
nop
/* Disable STICK_INT interrupts. */
1:
sethi %hi(0x80000000), %o2
sllx %o2, 32, %o2
wr %o2, %asr25
2:
wrpr %g0, %g0, %wstate
call init_irqwork_curcpu
nop
/* Now we can restore interrupt state. */
wrpr %l0, 0, %pstate
wrpr %l1, 0x0, %pil
ret
restore
.globl setup_tba
setup_tba:
save %sp, -192, %sp
/* The boot processor is the only cpu which invokes this
* routine, the other cpus set things up via trampoline.S.
* So save the OBP trap table address here.
*/
rdpr %tba, %g7
sethi %hi(prom_tba), %o1
or %o1, %lo(prom_tba), %o1
stx %g7, [%o1]
call setup_trap_table
nop
ret
restore
sparc64_boot_end:
#include "etrap_64.S"
#include "rtrap_64.S"
#include "winfixup.S"
#include "fpu_traps.S"
#include "ivec.S"
#include "getsetcc.S"
#include "utrap.S"
#include "spiterrs.S"
#include "cherrs.S"
#include "misctrap.S"
#include "syscalls.S"
#include "helpers.S"
#include "hvcalls.S"
#include "sun4v_tlb_miss.S"
#include "sun4v_ivec.S"
#include "ktlb.S"
#include "tsb.S"
/*
* The following skip makes sure the trap table in ttable.S is aligned
* on a 32K boundary as required by the v9 specs for TBA register.
*
* We align to a 32K boundary, then we have the 32K kernel TSB,
* the 64K kernel 4MB TSB, and then the 32K aligned trap table.
*/
1:
.skip 0x4000 + _start - 1b
! 0x0000000000408000
.globl swapper_tsb
swapper_tsb:
.skip (32 * 1024)
.globl swapper_4m_tsb
swapper_4m_tsb:
.skip (64 * 1024)
! 0x0000000000420000
/* Some care needs to be exercised if you try to move the
* location of the trap table relative to other things. For
* one thing there are br* instructions in some of the
* trap table entries which branch back to code in ktlb.S
* Those instructions can only handle a signed 16-bit
* displacement.
*
* There is a binutils bug (bugzilla #4558) which causes
* the relocation overflow checks for such instructions to
* not be done correctly. So binutils will not notice the
* error and will instead write junk into the relocation and
* you'll have an unbootable kernel.
*/
#include "ttable.S"
! 0x0000000000428000
#include "systbls_64.S"
.data
.align 8
.globl prom_tba, tlb_type
prom_tba: .xword 0
tlb_type: .word 0 /* Must NOT end up in BSS */
.section ".fixup",#alloc,#execinstr
.globl __ret_efault, __retl_efault
__ret_efault:
ret
restore %g0, -EFAULT, %o0
__retl_efault:
retl
mov -EFAULT, %o0


@@ -0,0 +1,63 @@
.align 32
.globl __flushw_user
.type __flushw_user,#function
__flushw_user:
rdpr %otherwin, %g1
brz,pn %g1, 2f
clr %g2
1: save %sp, -128, %sp
rdpr %otherwin, %g1
brnz,pt %g1, 1b
add %g2, 1, %g2
1: sub %g2, 1, %g2
brnz,pt %g2, 1b
restore %g0, %g0, %g0
2: retl
nop
.size __flushw_user,.-__flushw_user
/* Flush %fp and %i7 to the stack for all register
* windows active inside of the cpu. This allows
* show_stack_trace() to avoid using an expensive
* 'flushw'.
*/
.globl stack_trace_flush
.type stack_trace_flush,#function
stack_trace_flush:
rdpr %pstate, %o0
wrpr %o0, PSTATE_IE, %pstate
rdpr %cwp, %g1
rdpr %canrestore, %g2
sub %g1, 1, %g3
1: brz,pn %g2, 2f
sub %g2, 1, %g2
wrpr %g3, %cwp
stx %fp, [%sp + STACK_BIAS + RW_V9_I6]
stx %i7, [%sp + STACK_BIAS + RW_V9_I7]
ba,pt %xcc, 1b
sub %g3, 1, %g3
2: wrpr %g1, %cwp
wrpr %o0, %pstate
retl
nop
.size stack_trace_flush,.-stack_trace_flush
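stack_trace_flush() leaves the flushed frames in memory rather than trapping through 'flushw'; a hedged C-side sketch of the intended caller (in the spirit of stacktrace.c), where kstack_valid() and record_pc() are illustrative stand-ins, not real kernel APIs:

	unsigned long ksp, fp;

	stack_trace_flush();
	__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
	fp = ksp + STACK_BIAS;
	while (kstack_valid(fp)) {		/* illustrative bounds check */
		struct sparc_stackf *sf = (struct sparc_stackf *) fp;

		record_pc(sf->callers_pc);	/* illustrative sink */
		fp = (unsigned long) sf->fp + STACK_BIAS;
	}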
#ifdef CONFIG_SMP
.globl hard_smp_processor_id
.type hard_smp_processor_id,#function
hard_smp_processor_id:
#endif
.globl real_hard_smp_processor_id
.type real_hard_smp_processor_id,#function
real_hard_smp_processor_id:
__GET_CPUID(%o0)
retl
nop
#ifdef CONFIG_SMP
.size hard_smp_processor_id,.-hard_smp_processor_id
#endif
.size real_hard_smp_processor_id,.-real_hard_smp_processor_id

193 arch/sparc/kernel/hvapi.c Normal file
@@ -0,0 +1,193 @@
/* hvapi.c: Hypervisor API management.
*
* Copyright (C) 2007 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/hypervisor.h>
#include <asm/oplib.h>
/* If the hypervisor indicates that the API setting
* calls are unsupported, by returning HV_EBADTRAP or
* HV_ENOTSUPPORTED, we assume that API groups with the
* PRE_API flag set are major 1 minor 0.
*/
struct api_info {
unsigned long group;
unsigned long major;
unsigned long minor;
unsigned int refcnt;
unsigned int flags;
#define FLAG_PRE_API 0x00000001
};
static struct api_info api_table[] = {
{ .group = HV_GRP_SUN4V, .flags = FLAG_PRE_API },
{ .group = HV_GRP_CORE, .flags = FLAG_PRE_API },
{ .group = HV_GRP_INTR, },
{ .group = HV_GRP_SOFT_STATE, },
{ .group = HV_GRP_PCI, .flags = FLAG_PRE_API },
{ .group = HV_GRP_LDOM, },
{ .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API },
{ .group = HV_GRP_NCS, .flags = FLAG_PRE_API },
{ .group = HV_GRP_RNG, },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
{ .group = HV_GRP_NIU, },
{ .group = HV_GRP_VF_CPU, },
{ .group = HV_GRP_DIAG, .flags = FLAG_PRE_API },
};
static DEFINE_SPINLOCK(hvapi_lock);
static struct api_info *__get_info(unsigned long group)
{
int i;
for (i = 0; i < ARRAY_SIZE(api_table); i++) {
if (api_table[i].group == group)
return &api_table[i];
}
return NULL;
}
static void __get_ref(struct api_info *p)
{
p->refcnt++;
}
static void __put_ref(struct api_info *p)
{
if (--p->refcnt == 0) {
unsigned long ignore;
sun4v_set_version(p->group, 0, 0, &ignore);
p->major = p->minor = 0;
}
}
/* Register a hypervisor API specification. It indicates the
* API group and desired major+minor.
*
 * If a registration for this group already exists, '0' (success)
 * will be returned if it is compatible with the one being
 * registered, and a negative error code otherwise.
 *
 * If no registration exists, an attempt will be made to negotiate
 * the requested API group/major/minor with the hypervisor, and an
 * error will be returned if that does not succeed.
*/
int sun4v_hvapi_register(unsigned long group, unsigned long major,
unsigned long *minor)
{
struct api_info *p;
unsigned long flags;
int ret;
spin_lock_irqsave(&hvapi_lock, flags);
p = __get_info(group);
ret = -EINVAL;
if (p) {
if (p->refcnt) {
ret = -EINVAL;
if (p->major == major) {
*minor = p->minor;
ret = 0;
}
} else {
unsigned long actual_minor;
unsigned long hv_ret;
hv_ret = sun4v_set_version(group, major, *minor,
&actual_minor);
ret = -EINVAL;
if (hv_ret == HV_EOK) {
*minor = actual_minor;
p->major = major;
p->minor = actual_minor;
ret = 0;
} else if (hv_ret == HV_EBADTRAP ||
hv_ret == HV_ENOTSUPPORTED) {
if (p->flags & FLAG_PRE_API) {
if (major == 1) {
p->major = 1;
p->minor = 0;
*minor = 0;
ret = 0;
}
}
}
}
if (ret == 0)
__get_ref(p);
}
spin_unlock_irqrestore(&hvapi_lock, flags);
return ret;
}
EXPORT_SYMBOL(sun4v_hvapi_register);
void sun4v_hvapi_unregister(unsigned long group)
{
struct api_info *p;
unsigned long flags;
spin_lock_irqsave(&hvapi_lock, flags);
p = __get_info(group);
if (p)
__put_ref(p);
spin_unlock_irqrestore(&hvapi_lock, flags);
}
EXPORT_SYMBOL(sun4v_hvapi_unregister);
int sun4v_hvapi_get(unsigned long group,
unsigned long *major,
unsigned long *minor)
{
struct api_info *p;
unsigned long flags;
int ret;
spin_lock_irqsave(&hvapi_lock, flags);
ret = -EINVAL;
p = __get_info(group);
if (p && p->refcnt) {
*major = p->major;
*minor = p->minor;
ret = 0;
}
spin_unlock_irqrestore(&hvapi_lock, flags);
return ret;
}
EXPORT_SYMBOL(sun4v_hvapi_get);
void __init sun4v_hvapi_init(void)
{
unsigned long group, major, minor;
group = HV_GRP_SUN4V;
major = 1;
minor = 0;
if (sun4v_hvapi_register(group, major, &minor))
goto bad;
group = HV_GRP_CORE;
major = 1;
minor = 1;
if (sun4v_hvapi_register(group, major, &minor))
goto bad;
return;
bad:
prom_printf("HVAPI: Cannot register API group "
"%lx with major(%u) minor(%u)\n",
group, major, minor);
prom_halt();
}
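A hedged usage sketch of the interface above: a driver negotiates an API group at probe time and drops it when done (the group and version numbers are illustrative):

	unsigned long minor = 1;	/* desired minor; may be lowered */
	int err;

	err = sun4v_hvapi_register(HV_GRP_RNG, 2, &minor);
	if (err)
		return err;		/* group absent or major mismatch */

	/* ... make HV_GRP_RNG hypervisor calls, honoring 'minor' ... */

	sun4v_hvapi_unregister(HV_GRP_RNG);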

800 arch/sparc/kernel/hvcalls.S Normal file
@@ -0,0 +1,800 @@
/* %o0: devhandle
* %o1: devino
*
* returns %o0: sysino
*/
ENTRY(sun4v_devino_to_sysino)
mov HV_FAST_INTR_DEVINO2SYSINO, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_devino_to_sysino)
/* %o0: sysino
*
* returns %o0: intr_enabled (HV_INTR_{DISABLED,ENABLED})
*/
ENTRY(sun4v_intr_getenabled)
mov HV_FAST_INTR_GETENABLED, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_intr_getenabled)
/* %o0: sysino
* %o1: intr_enabled (HV_INTR_{DISABLED,ENABLED})
*/
ENTRY(sun4v_intr_setenabled)
mov HV_FAST_INTR_SETENABLED, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_intr_setenabled)
/* %o0: sysino
*
* returns %o0: intr_state (HV_INTR_STATE_*)
*/
ENTRY(sun4v_intr_getstate)
mov HV_FAST_INTR_GETSTATE, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_intr_getstate)
/* %o0: sysino
* %o1: intr_state (HV_INTR_STATE_*)
*/
ENTRY(sun4v_intr_setstate)
mov HV_FAST_INTR_SETSTATE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_intr_setstate)
/* %o0: sysino
*
* returns %o0: cpuid
*/
ENTRY(sun4v_intr_gettarget)
mov HV_FAST_INTR_GETTARGET, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(sun4v_intr_gettarget)
/* %o0: sysino
* %o1: cpuid
*/
ENTRY(sun4v_intr_settarget)
mov HV_FAST_INTR_SETTARGET, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_intr_settarget)
/* %o0: cpuid
* %o1: pc
* %o2: rtba
* %o3: arg0
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_start)
mov HV_FAST_CPU_START, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_start)
/* %o0: cpuid
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_stop)
mov HV_FAST_CPU_STOP, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_stop)
/* returns %o0: status */
ENTRY(sun4v_cpu_yield)
mov HV_FAST_CPU_YIELD, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_yield)
/* %o0: type
* %o1: queue paddr
* %o2: num queue entries
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_qconf)
mov HV_FAST_CPU_QCONF, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_qconf)
/* %o0: num cpus in cpu list
* %o1: cpu list paddr
* %o2: mondo block paddr
*
* returns %o0: status
*/
ENTRY(sun4v_cpu_mondo_send)
mov HV_FAST_CPU_MONDO_SEND, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_cpu_mondo_send)
/* %o0: CPU ID
*
* returns %o0: -status if status non-zero, else
* %o0: cpu state as HV_CPU_STATE_*
*/
ENTRY(sun4v_cpu_state)
mov HV_FAST_CPU_STATE, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 1f
sub %g0, %o0, %o0
mov %o1, %o0
1: retl
nop
ENDPROC(sun4v_cpu_state)
/* %o0: virtual address
* %o1: must be zero
* %o2: TTE
* %o3: HV_MMU_* flags
*
* returns %o0: status
*/
ENTRY(sun4v_mmu_map_perm_addr)
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mmu_map_perm_addr)
/* %o0: number of TSB descriptions
* %o1: TSB descriptions real address
*
* returns %o0: status
*/
ENTRY(sun4v_mmu_tsb_ctx0)
mov HV_FAST_MMU_TSB_CTX0, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mmu_tsb_ctx0)
/* %o0: API group number
* %o1: pointer to unsigned long major number storage
* %o2: pointer to unsigned long minor number storage
*
* returns %o0: status
*/
ENTRY(sun4v_get_version)
mov HV_CORE_GET_VER, %o5
mov %o1, %o3
mov %o2, %o4
ta HV_CORE_TRAP
stx %o1, [%o3]
retl
stx %o2, [%o4]
ENDPROC(sun4v_get_version)
/* %o0: API group number
* %o1: desired major number
* %o2: desired minor number
* %o3: pointer to unsigned long actual minor number storage
*
* returns %o0: status
*/
ENTRY(sun4v_set_version)
mov HV_CORE_SET_VER, %o5
mov %o3, %o4
ta HV_CORE_TRAP
retl
stx %o1, [%o4]
ENDPROC(sun4v_set_version)
/* %o0: pointer to unsigned long time
*
* returns %o0: status
*/
ENTRY(sun4v_tod_get)
mov %o0, %o4
mov HV_FAST_TOD_GET, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_tod_get)
/* %o0: time
*
* returns %o0: status
*/
ENTRY(sun4v_tod_set)
mov HV_FAST_TOD_SET, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_tod_set)
/* %o0: pointer to unsigned long status
*
* returns %o0: signed character
*/
ENTRY(sun4v_con_getchar)
mov %o0, %o4
mov HV_FAST_CONS_GETCHAR, %o5
clr %o0
clr %o1
ta HV_FAST_TRAP
stx %o0, [%o4]
retl
sra %o1, 0, %o0
ENDPROC(sun4v_con_getchar)
/* %o0: signed long character
*
* returns %o0: status
*/
ENTRY(sun4v_con_putchar)
mov HV_FAST_CONS_PUTCHAR, %o5
ta HV_FAST_TRAP
retl
sra %o0, 0, %o0
ENDPROC(sun4v_con_putchar)
/* %o0: buffer real address
* %o1: buffer size
* %o2: pointer to unsigned long bytes_read
*
* returns %o0: status
*/
ENTRY(sun4v_con_read)
mov %o2, %o4
mov HV_FAST_CONS_READ, %o5
ta HV_FAST_TRAP
brnz %o0, 1f
cmp %o1, -1 /* break */
be,a,pn %icc, 1f
mov %o1, %o0
cmp %o1, -2 /* hup */
be,a,pn %icc, 1f
mov %o1, %o0
stx %o1, [%o4]
1: retl
nop
ENDPROC(sun4v_con_read)
/* %o0: buffer real address
* %o1: buffer size
* %o2: pointer to unsigned long bytes_written
*
* returns %o0: status
*/
ENTRY(sun4v_con_write)
mov %o2, %o4
mov HV_FAST_CONS_WRITE, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_con_write)
/* %o0: soft state
* %o1: address of description string
*
* returns %o0: status
*/
ENTRY(sun4v_mach_set_soft_state)
mov HV_FAST_MACH_SET_SOFT_STATE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mach_set_soft_state)
/* %o0: exit code
*
* Does not return.
*/
ENTRY(sun4v_mach_exit)
mov HV_FAST_MACH_EXIT, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mach_exit)
/* %o0: buffer real address
* %o1: buffer length
* %o2: pointer to unsigned long real_buf_len
*
* returns %o0: status
*/
ENTRY(sun4v_mach_desc)
mov %o2, %o4
mov HV_FAST_MACH_DESC, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mach_desc)
/* %o0: new timeout in milliseconds
* %o1: pointer to unsigned long orig_timeout
*
* returns %o0: status
*/
ENTRY(sun4v_mach_set_watchdog)
mov %o1, %o4
mov HV_FAST_MACH_SET_WATCHDOG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mach_set_watchdog)
/* No inputs and does not return. */
ENTRY(sun4v_mach_sir)
mov %o1, %o4
mov HV_FAST_MACH_SIR, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mach_sir)
/* %o0: channel
* %o1: ra
* %o2: num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_qconf)
mov HV_FAST_LDC_TX_QCONF, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_tx_qconf)
/* %o0: channel
* %o1: pointer to unsigned long ra
* %o2: pointer to unsigned long num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_qinfo)
mov %o1, %g1
mov %o2, %g2
mov HV_FAST_LDC_TX_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_tx_qinfo)
/* %o0: channel
* %o1: pointer to unsigned long head_off
* %o2: pointer to unsigned long tail_off
 * %o3: pointer to unsigned long chan_state
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_get_state)
mov %o1, %g1
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_LDC_TX_GET_STATE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
stx %o3, [%g3]
retl
nop
ENDPROC(sun4v_ldc_tx_get_state)
/* %o0: channel
* %o1: tail_off
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_tx_set_qtail)
mov HV_FAST_LDC_TX_SET_QTAIL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_tx_set_qtail)
/* %o0: channel
* %o1: ra
* %o2: num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_qconf)
mov HV_FAST_LDC_RX_QCONF, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_rx_qconf)
/* %o0: channel
* %o1: pointer to unsigned long ra
* %o2: pointer to unsigned long num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_qinfo)
mov %o1, %g1
mov %o2, %g2
mov HV_FAST_LDC_RX_QINFO, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_rx_qinfo)
/* %o0: channel
* %o1: pointer to unsigned long head_off
* %o2: pointer to unsigned long tail_off
 * %o3: pointer to unsigned long chan_state
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_get_state)
mov %o1, %g1
mov %o2, %g2
mov %o3, %g3
mov HV_FAST_LDC_RX_GET_STATE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
stx %o3, [%g3]
retl
nop
ENDPROC(sun4v_ldc_rx_get_state)
/* %o0: channel
* %o1: head_off
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_rx_set_qhead)
mov HV_FAST_LDC_RX_SET_QHEAD, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_rx_set_qhead)
/* %o0: channel
* %o1: ra
* %o2: num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_set_map_table)
mov HV_FAST_LDC_SET_MAP_TABLE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_set_map_table)
/* %o0: channel
* %o1: pointer to unsigned long ra
* %o2: pointer to unsigned long num_entries
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_get_map_table)
mov %o1, %g1
mov %o2, %g2
mov HV_FAST_LDC_GET_MAP_TABLE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_get_map_table)
/* %o0: channel
* %o1: dir_code
* %o2: tgt_raddr
* %o3: lcl_raddr
* %o4: len
* %o5: pointer to unsigned long actual_len
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_copy)
mov %o5, %g1
mov HV_FAST_LDC_COPY, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_ldc_copy)
/* %o0: channel
* %o1: cookie
* %o2: pointer to unsigned long ra
* %o3: pointer to unsigned long perm
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_mapin)
mov %o2, %g1
mov %o3, %g2
mov HV_FAST_LDC_MAPIN, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
stx %o2, [%g2]
retl
nop
ENDPROC(sun4v_ldc_mapin)
/* %o0: ra
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_unmap)
mov HV_FAST_LDC_UNMAP, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_unmap)
/* %o0: channel
* %o1: cookie
* %o2: mte_cookie
*
* returns %o0: status
*/
ENTRY(sun4v_ldc_revoke)
mov HV_FAST_LDC_REVOKE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ldc_revoke)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long cookie
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_cookie)
mov %o2, %g1
mov HV_FAST_VINTR_GET_COOKIE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_cookie)
/* %o0: device handle
* %o1: device INO
* %o2: cookie
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_cookie)
mov HV_FAST_VINTR_SET_COOKIE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_cookie)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long valid_state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_valid)
mov %o2, %g1
mov HV_FAST_VINTR_GET_VALID, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_valid)
/* %o0: device handle
* %o1: device INO
* %o2: valid_state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_valid)
mov HV_FAST_VINTR_SET_VALID, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_valid)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_state)
mov %o2, %g1
mov HV_FAST_VINTR_GET_STATE, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_state)
/* %o0: device handle
* %o1: device INO
* %o2: state
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_state)
mov HV_FAST_VINTR_SET_STATE, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_state)
/* %o0: device handle
* %o1: device INO
* %o2: pointer to unsigned long cpuid
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_get_target)
mov %o2, %g1
mov HV_FAST_VINTR_GET_TARGET, %o5
ta HV_FAST_TRAP
stx %o1, [%g1]
retl
nop
ENDPROC(sun4v_vintr_get_target)
/* %o0: device handle
* %o1: device INO
* %o2: cpuid
*
* returns %o0: status
*/
ENTRY(sun4v_vintr_set_target)
mov HV_FAST_VINTR_SET_TARGET, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_vintr_set_target)
/* %o0: NCS sub-function
* %o1: sub-function arg real-address
* %o2: sub-function arg size
*
* returns %o0: status
*/
ENTRY(sun4v_ncs_request)
mov HV_FAST_NCS_REQUEST, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_ncs_request)
ENTRY(sun4v_svc_send)
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
mov HV_FAST_SVC_SEND, %o5
ta HV_FAST_TRAP
stx %o1, [%i3]
ret
restore
ENDPROC(sun4v_svc_send)
ENTRY(sun4v_svc_recv)
save %sp, -192, %sp
mov %i0, %o0
mov %i1, %o1
mov %i2, %o2
mov HV_FAST_SVC_RECV, %o5
ta HV_FAST_TRAP
stx %o1, [%i3]
ret
restore
ENDPROC(sun4v_svc_recv)
ENTRY(sun4v_svc_getstatus)
mov HV_FAST_SVC_GETSTATUS, %o5
mov %o1, %o4
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_svc_getstatus)
ENTRY(sun4v_svc_setstatus)
mov HV_FAST_SVC_SETSTATUS, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_svc_setstatus)
ENTRY(sun4v_svc_clrstatus)
mov HV_FAST_SVC_CLRSTATUS, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_svc_clrstatus)
ENTRY(sun4v_mmustat_conf)
mov %o1, %o4
mov HV_FAST_MMUSTAT_CONF, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mmustat_conf)
ENTRY(sun4v_mmustat_info)
mov %o0, %o4
mov HV_FAST_MMUSTAT_INFO, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_mmustat_info)
ENTRY(sun4v_mmu_demap_all)
clr %o0
clr %o1
mov HV_MMU_ALL, %o2
mov HV_FAST_MMU_DEMAP_ALL, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_mmu_demap_all)
ENTRY(sun4v_niagara_getperf)
mov %o0, %o4
mov HV_FAST_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_niagara_getperf)
ENTRY(sun4v_niagara_setperf)
mov HV_FAST_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_niagara_setperf)
ENTRY(sun4v_niagara2_getperf)
mov %o0, %o4
mov HV_FAST_N2_GET_PERFREG, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
retl
nop
ENDPROC(sun4v_niagara2_getperf)
ENTRY(sun4v_niagara2_setperf)
mov HV_FAST_N2_SET_PERFREG, %o5
ta HV_FAST_TRAP
retl
nop
ENDPROC(sun4v_niagara2_setperf)
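Every stub above follows the same sun4v convention: arguments go in %o0-%o4, the fast-trap function number in %o5, 'ta HV_FAST_TRAP' enters the hypervisor, status comes back in %o0, and additional results in %o1 and up are stored through the pointer arguments. A hedged C-side sketch of wiring up a device interrupt with the intr group stubs (devhandle, devino and cpuid are assumed already known):

	unsigned long sysino, err;

	sysino = sun4v_devino_to_sysino(devhandle, devino);
	err = sun4v_intr_settarget(sysino, cpuid);
	if (!err)
		err = sun4v_intr_setstate(sysino, HV_INTR_STATE_IDLE);
	if (!err)
		err = sun4v_intr_setenabled(sysino, HV_INTR_ENABLED);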

140 arch/sparc/kernel/hvtramp.S Normal file
@@ -0,0 +1,140 @@
/* hvtramp.S: Hypervisor start-cpu trampoline code.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/init.h>
#include <asm/thread_info.h>
#include <asm/hypervisor.h>
#include <asm/scratchpad.h>
#include <asm/spitfire.h>
#include <asm/hvtramp.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/pil.h>
__CPUINIT
.align 8
.globl hv_cpu_startup, hv_cpu_startup_end
/* This code executes directly out of the hypervisor
* with physical addressing (va==pa). %o0 contains
* our client argument which for Linux points to
* a descriptor data structure which defines the
* MMU entries we need to load up.
*
* After we set things up we enable the MMU and call
* into the kernel.
*
 * First, set up basic privileged cpu state.
*/
hv_cpu_startup:
SET_GL(0)
wrpr %g0, PIL_NORMAL_MAX, %pil
wrpr %g0, 0, %canrestore
wrpr %g0, 0, %otherwin
wrpr %g0, 6, %cansave
wrpr %g0, 6, %cleanwin
wrpr %g0, 0, %cwp
wrpr %g0, 0, %wstate
wrpr %g0, 0, %tl
sethi %hi(sparc64_ttable_tl0), %g1
wrpr %g1, %tba
mov %o0, %l0
lduw [%l0 + HVTRAMP_DESCR_CPU], %g1
mov SCRATCHPAD_CPUID, %g2
stxa %g1, [%g2] ASI_SCRATCHPAD
ldx [%l0 + HVTRAMP_DESCR_FAULT_INFO_VA], %g2
stxa %g2, [%g0] ASI_SCRATCHPAD
mov 0, %l1
lduw [%l0 + HVTRAMP_DESCR_NUM_MAPPINGS], %l2
add %l0, HVTRAMP_DESCR_MAPS, %l3
1: ldx [%l3 + HVTRAMP_MAPPING_VADDR], %o0
clr %o1
ldx [%l3 + HVTRAMP_MAPPING_TTE], %o2
mov HV_MMU_IMMU | HV_MMU_DMMU, %o3
mov HV_FAST_MMU_MAP_PERM_ADDR, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 80f
nop
add %l1, 1, %l1
cmp %l1, %l2
blt,a,pt %xcc, 1b
add %l3, HVTRAMP_MAPPING_SIZE, %l3
ldx [%l0 + HVTRAMP_DESCR_FAULT_INFO_PA], %o0
mov HV_FAST_MMU_FAULT_AREA_CONF, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 80f
nop
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF), %pstate
ldx [%l0 + HVTRAMP_DESCR_THREAD_REG], %l6
mov 1, %o0
set 1f, %o1
mov HV_FAST_MMU_ENABLE, %o5
ta HV_FAST_TRAP
ba,pt %xcc, 80f
nop
1:
wr %g0, 0, %fprs
wr %g0, ASI_P, %asi
mov PRIMARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
mov SECONDARY_CONTEXT, %g7
stxa %g0, [%g7] ASI_MMU
membar #Sync
mov %l6, %g6
ldx [%g6 + TI_TASK], %g4
mov 1, %g5
sllx %g5, THREAD_SHIFT, %g5
sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
add %g6, %g5, %sp
mov 0, %fp
call init_irqwork_curcpu
nop
call hard_smp_processor_id
nop
call sun4v_register_mondo_queues
nop
call init_cur_cpu_trap
mov %g6, %o0
wrpr %g0, (PSTATE_PRIV | PSTATE_PEF | PSTATE_IE), %pstate
call smp_callin
nop
call cpu_idle
mov 0, %o0
call cpu_panic
nop
80: ba,pt %xcc, 80b
nop
.align 8
hv_cpu_startup_end:
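For reference, a hedged sketch of the descriptor this trampoline walks via the HVTRAMP_DESCR_* offsets; the authoritative layout lives in asm/hvtramp.h, so the field order and trailing array bound here are illustrative:

struct hvtramp_mapping {
	__u64			vaddr;
	__u64			tte;
};

struct hvtramp_descr {
	__u32			cpu;
	__u32			num_mappings;
	__u64			fault_info_va;
	__u64			fault_info_pa;
	__u64			thread_reg;
	struct hvtramp_mapping	maps[1];
};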

arch/sparc/kernel/idprom.c
@@ -11,35 +11,37 @@
#include <asm/oplib.h>
#include <asm/idprom.h>
#include <asm/machines.h> /* Fun with Sun released architectures. */
struct idprom *idprom;
static struct idprom idprom_buffer;
#ifdef CONFIG_SPARC32
#include <asm/machines.h> /* Fun with Sun released architectures. */
/* Here is the master table of Sun machines which use some implementation
* of the Sparc CPU and have a meaningful IDPROM machtype value that we
* know about. See asm-sparc/machines.h for empirical constants.
*/
static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = {
/* First, Sun4's */
{ "Sun 4/100 Series", (SM_SUN4 | SM_4_110) },
{ "Sun 4/200 Series", (SM_SUN4 | SM_4_260) },
{ "Sun 4/300 Series", (SM_SUN4 | SM_4_330) },
{ "Sun 4/400 Series", (SM_SUN4 | SM_4_470) },
{ .name = "Sun 4/100 Series", .id_machtype = (SM_SUN4 | SM_4_110) },
{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) },
{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) },
{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) },
/* Now, Sun4c's */
{ "Sun4c SparcStation 1", (SM_SUN4C | SM_4C_SS1) },
{ "Sun4c SparcStation IPC", (SM_SUN4C | SM_4C_IPC) },
{ "Sun4c SparcStation 1+", (SM_SUN4C | SM_4C_SS1PLUS) },
{ "Sun4c SparcStation SLC", (SM_SUN4C | SM_4C_SLC) },
{ "Sun4c SparcStation 2", (SM_SUN4C | SM_4C_SS2) },
{ "Sun4c SparcStation ELC", (SM_SUN4C | SM_4C_ELC) },
{ "Sun4c SparcStation IPX", (SM_SUN4C | SM_4C_IPX) },
{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) },
{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) },
{ .name = "Sun4c SparcStation 1+", .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) },
{ .name = "Sun4c SparcStation SLC", .id_machtype = (SM_SUN4C | SM_4C_SLC) },
{ .name = "Sun4c SparcStation 2", .id_machtype = (SM_SUN4C | SM_4C_SS2) },
{ .name = "Sun4c SparcStation ELC", .id_machtype = (SM_SUN4C | SM_4C_ELC) },
{ .name = "Sun4c SparcStation IPX", .id_machtype = (SM_SUN4C | SM_4C_IPX) },
/* Finally, early Sun4m's */
{ "Sun4m SparcSystem600", (SM_SUN4M | SM_4M_SS60) },
{ "Sun4m SparcStation10/20", (SM_SUN4M | SM_4M_SS50) },
{ "Sun4m SparcStation5", (SM_SUN4M | SM_4M_SS40) },
{ .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) },
{ .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) },
{ .name = "Sun4m SparcStation5", .id_machtype = (SM_SUN4M | SM_4M_SS40) },
/* One entry for the OBP arch's which are sun4d, sun4e, and newer sun4m's */
{ "Sun4M OBP based system", (SM_SUN4M_OBP | 0x0) } };
{ .name = "Sun4M OBP based system", .id_machtype = (SM_SUN4M_OBP | 0x0) } };
static void __init display_system_type(unsigned char machtype)
{
@@ -47,21 +49,25 @@ static void __init display_system_type(unsigned char machtype)
register int i;
for (i = 0; i < NUM_SUN_MACHINES; i++) {
if(Sun_Machines[i].id_machtype == machtype) {
if (Sun_Machines[i].id_machtype == machtype) {
if (machtype != (SM_SUN4M_OBP | 0x00) ||
prom_getproperty(prom_root_node, "banner-name",
sysname, sizeof(sysname)) <= 0)
printk("TYPE: %s\n", Sun_Machines[i].name);
printk(KERN_WARNING "TYPE: %s\n",
Sun_Machines[i].name);
else
printk("TYPE: %s\n", sysname);
printk(KERN_WARNING "TYPE: %s\n", sysname);
return;
}
}
prom_printf("IDPROM: Bogus id_machtype value, 0x%x\n", machtype);
prom_halt();
prom_printf("IDPROM: Warning, bogus id_machtype value, 0x%x\n", machtype);
}
#else
static void __init display_system_type(unsigned char machtype)
{
}
#endif
/* Calculate the IDPROM checksum (xor of the data bytes). */
static unsigned char __init calc_idprom_cksum(struct idprom *idprom)
{
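The hunk breaks off before the body; for reference, a hedged sketch of the XOR the comment describes (the 0x0E byte range is an assumption about the idprom header size):

	unsigned char cksum, i, *ptr = (unsigned char *) idprom;

	for (i = cksum = 0; i <= 0x0E; i++)
		cksum ^= *ptr++;
	return cksum;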
@@ -80,21 +86,14 @@ void __init idprom_init(void)
idprom = &idprom_buffer;
if (idprom->id_format != 0x01) {
prom_printf("IDPROM: Unknown format type!\n");
prom_halt();
}
if (idprom->id_format != 0x01)
prom_printf("IDPROM: Warning, unknown format type!\n");
if (idprom->id_cksum != calc_idprom_cksum(idprom)) {
prom_printf("IDPROM: Checksum failure (nvram=%x, calc=%x)!\n",
if (idprom->id_cksum != calc_idprom_cksum(idprom))
prom_printf("IDPROM: Warning, checksum failure (nvram=%x, calc=%x)!\n",
idprom->id_cksum, calc_idprom_cksum(idprom));
prom_halt();
}
display_system_type(idprom->id_machtype);
printk("Ethernet address: %x:%x:%x:%x:%x:%x\n",
idprom->id_ethaddr[0], idprom->id_ethaddr[1],
idprom->id_ethaddr[2], idprom->id_ethaddr[3],
idprom->id_ethaddr[4], idprom->id_ethaddr[5]);
printk(KERN_WARNING "Ethernet address: %pM\n", idprom->id_ethaddr);
}

arch/sparc/kernel/init_task.c
@@ -23,6 +23,5 @@ EXPORT_SYMBOL(init_task);
* in etrap.S which assumes it.
*/
union thread_union init_thread_union
__attribute__((section (".text\"\n\t#")))
__attribute__((aligned (THREAD_SIZE)))
__attribute__((section (".data.init_task")))
= { INIT_THREAD_INFO(init_task) };

866 arch/sparc/kernel/iommu.c Normal file
@@ -0,0 +1,866 @@
/* iommu.c: Generic sparc64 IOMMU support.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#include <asm/iommu.h>
#include "iommu_common.h"
#define STC_CTXMATCH_ADDR(STC, CTX) \
((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
#define iommu_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define iommu_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu *iommu)
{
if (iommu->iommu_flushinv) {
iommu_write(iommu->iommu_flushinv, ~(u64)0);
} else {
unsigned long tag;
int entry;
tag = iommu->iommu_tags;
for (entry = 0; entry < 16; entry++) {
iommu_write(tag, 0);
tag += 8;
}
/* Ensure completion of previous PIO writes. */
(void) iommu_read(iommu->write_complete_reg);
}
}
#define IOPTE_CONSISTENT(CTX) \
(IOPTE_VALID | IOPTE_CACHE | \
(((CTX) << 47) & IOPTE_CONTEXT))
#define IOPTE_STREAMING(CTX) \
(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
/* Existing mappings are never marked invalid, instead they
* are pointed to a dummy page.
*/
#define IOPTE_IS_DUMMY(iommu, iopte) \
((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
unsigned long val = iopte_val(*iopte);
val &= ~IOPTE_PAGE;
val |= iommu->dummy_page_pa;
iopte_val(*iopte) = val;
}
/* Based almost entirely upon the ppc64 iommu allocator. If you use the 'handle'
* facility it must all be done in one pass while under the iommu lock.
*
* On sun4u platforms, we only flush the IOMMU once every time we've passed
* over the entire page table doing allocations. Therefore we only ever advance
* the hint and cannot backtrack it.
*/
unsigned long iommu_range_alloc(struct device *dev,
struct iommu *iommu,
unsigned long npages,
unsigned long *handle)
{
unsigned long n, end, start, limit, boundary_size;
struct iommu_arena *arena = &iommu->arena;
int pass = 0;
/* This allocator was derived from x86_64's bit string search */
/* Sanity check */
if (unlikely(npages == 0)) {
if (printk_ratelimit())
WARN_ON(1);
return DMA_ERROR_CODE;
}
if (handle && *handle)
start = *handle;
else
start = arena->hint;
limit = arena->limit;
/* The case below can happen if we have a small segment appended
* to a large, or when the previous alloc was at the very end of
* the available space. If so, go back to the beginning and flush.
*/
if (start >= limit) {
start = 0;
if (iommu->flush_all)
iommu->flush_all(iommu);
}
again:
if (dev)
boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
1 << IO_PAGE_SHIFT);
else
boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
n = iommu_area_alloc(arena->map, limit, start, npages,
iommu->page_table_map_base >> IO_PAGE_SHIFT,
boundary_size >> IO_PAGE_SHIFT, 0);
if (n == -1) {
if (likely(pass < 1)) {
/* First failure, rescan from the beginning. */
start = 0;
if (iommu->flush_all)
iommu->flush_all(iommu);
pass++;
goto again;
} else {
/* Second failure, give up */
return DMA_ERROR_CODE;
}
}
end = n + npages;
arena->hint = end;
/* Update handle for SG allocations */
if (handle)
*handle = end;
return n;
}
void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
{
struct iommu_arena *arena = &iommu->arena;
unsigned long entry;
entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
iommu_area_free(arena->map, entry, npages);
}
int iommu_table_init(struct iommu *iommu, int tsbsize,
u32 dma_offset, u32 dma_addr_mask,
int numa_node)
{
unsigned long i, order, sz, num_tsb_entries;
struct page *page;
num_tsb_entries = tsbsize / sizeof(iopte_t);
/* Setup initial software IOMMU state. */
spin_lock_init(&iommu->lock);
iommu->ctx_lowest_free = 1;
iommu->page_table_map_base = dma_offset;
iommu->dma_addr_mask = dma_addr_mask;
/* Allocate and initialize the free area map. */
sz = num_tsb_entries / 8;
sz = (sz + 7UL) & ~7UL;
iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
if (!iommu->arena.map) {
printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
return -ENOMEM;
}
memset(iommu->arena.map, 0, sz);
iommu->arena.limit = num_tsb_entries;
if (tlb_type != hypervisor)
iommu->flush_all = iommu_flushall;
/* Allocate and initialize the dummy page which we
* set inactive IO PTEs to point to.
*/
page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
goto out_free_map;
}
iommu->dummy_page = (unsigned long) page_address(page);
memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
/* Now allocate and setup the IOMMU page table itself. */
order = get_order(tsbsize);
page = alloc_pages_node(numa_node, GFP_KERNEL, order);
if (!page) {
printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
goto out_free_dummy_page;
}
iommu->page_table = (iopte_t *)page_address(page);
for (i = 0; i < num_tsb_entries; i++)
iopte_make_dummy(iommu, &iommu->page_table[i]);
return 0;
out_free_dummy_page:
free_page(iommu->dummy_page);
iommu->dummy_page = 0UL;
out_free_map:
kfree(iommu->arena.map);
iommu->arena.map = NULL;
return -ENOMEM;
}
static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
unsigned long npages)
{
unsigned long entry;
entry = iommu_range_alloc(dev, iommu, npages, NULL);
if (unlikely(entry == DMA_ERROR_CODE))
return NULL;
return iommu->page_table + entry;
}
static int iommu_alloc_ctx(struct iommu *iommu)
{
int lowest = iommu->ctx_lowest_free;
int sz = IOMMU_NUM_CTXS - lowest;
int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);
if (unlikely(n == sz)) {
n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
if (unlikely(n == lowest)) {
printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
n = 0;
}
}
if (n)
__set_bit(n, iommu->ctx_bitmap);
return n;
}
static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
if (likely(ctx)) {
__clear_bit(ctx, iommu->ctx_bitmap);
if (ctx < iommu->ctx_lowest_free)
iommu->ctx_lowest_free = ctx;
}
}
static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp)
{
unsigned long flags, order, first_page;
struct iommu *iommu;
struct page *page;
int npages, nid;
iopte_t *iopte;
void *ret;
size = IO_PAGE_ALIGN(size);
order = get_order(size);
if (order >= 10)
return NULL;
nid = dev->archdata.numa_node;
page = alloc_pages_node(nid, gfp, order);
if (unlikely(!page))
return NULL;
first_page = (unsigned long) page_address(page);
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
spin_lock_irqsave(&iommu->lock, flags);
iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(iopte == NULL)) {
free_pages(first_page, order);
return NULL;
}
*dma_addrp = (iommu->page_table_map_base +
((iopte - iommu->page_table) << IO_PAGE_SHIFT));
ret = (void *) first_page;
npages = size >> IO_PAGE_SHIFT;
first_page = __pa(first_page);
while (npages--) {
iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
IOPTE_WRITE |
(first_page & IOPTE_PAGE));
iopte++;
first_page += IO_PAGE_SIZE;
}
return ret;
}
static void dma_4u_free_coherent(struct device *dev, size_t size,
void *cpu, dma_addr_t dvma)
{
struct iommu *iommu;
iopte_t *iopte;
unsigned long flags, order, npages;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
iopte = iommu->page_table +
((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
spin_lock_irqsave(&iommu->lock, flags);
iommu_range_free(iommu, dvma, npages);
spin_unlock_irqrestore(&iommu->lock, flags);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
}
static dma_addr_t dma_4u_map_single(struct device *dev, void *ptr, size_t sz,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr, ctx;
u32 bus_addr, ret;
unsigned long iopte_protection;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (unlikely(direction == DMA_NONE))
goto bad_no_ctx;
oaddr = (unsigned long)ptr;
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
spin_lock_irqsave(&iommu->lock, flags);
base = alloc_npages(dev, iommu, npages);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
spin_unlock_irqrestore(&iommu->lock, flags);
if (unlikely(!base))
goto bad;
bus_addr = (iommu->page_table_map_base +
((base - iommu->page_table) << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
if (strbuf->strbuf_enabled)
iopte_protection = IOPTE_STREAMING(ctx);
else
iopte_protection = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
iopte_protection |= IOPTE_WRITE;
for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
iopte_val(*base) = iopte_protection | base_paddr;
return ret;
bad:
iommu_free_ctx(iommu, ctx);
bad_no_ctx:
if (printk_ratelimit())
WARN_ON(1);
return DMA_ERROR_CODE;
}
static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
u32 vaddr, unsigned long ctx, unsigned long npages,
enum dma_data_direction direction)
{
int limit;
if (strbuf->strbuf_ctxflush &&
iommu->iommu_ctxflush) {
unsigned long matchreg, flushreg;
u64 val;
flushreg = strbuf->strbuf_ctxflush;
matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
iommu_write(flushreg, ctx);
val = iommu_read(matchreg);
val &= 0xffff;
if (!val)
goto do_flush_sync;
while (val) {
if (val & 0x1)
iommu_write(flushreg, ctx);
val >>= 1;
}
val = iommu_read(matchreg);
if (unlikely(val)) {
printk(KERN_WARNING "strbuf_flush: ctx flush "
"timeout matchreg[%lx] ctx[%lx]\n",
val, ctx);
goto do_page_flush;
}
} else {
unsigned long i;
do_page_flush:
for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
iommu_write(strbuf->strbuf_pflush, vaddr);
}
do_flush_sync:
/* If the device could not have possibly put dirty data into
* the streaming cache, no flush-flag synchronization needs
* to be performed.
*/
if (direction == DMA_TO_DEVICE)
return;
STC_FLUSHFLAG_INIT(strbuf);
iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
(void) iommu_read(iommu->write_complete_reg);
limit = 100000;
while (!STC_FLUSHFLAG_SET(strbuf)) {
limit--;
if (!limit)
break;
udelay(1);
rmb();
}
if (!limit)
printk(KERN_WARNING "strbuf_flush: flushflag timeout "
"vaddr[%08x] ctx[%lx] npages[%ld]\n",
vaddr, ctx, npages);
}
static void dma_4u_unmap_single(struct device *dev, dma_addr_t bus_addr,
size_t sz, enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
iopte_t *base;
unsigned long flags, npages, ctx, i;
if (unlikely(direction == DMA_NONE)) {
if (printk_ratelimit())
WARN_ON(1);
return;
}
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
bus_addr &= IO_PAGE_MASK;
spin_lock_irqsave(&iommu->lock, flags);
/* Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
/* Step 1: Kick data out of streaming buffers if necessary. */
if (strbuf->strbuf_enabled)
strbuf_flush(strbuf, iommu, bus_addr, ctx,
npages, direction);
/* Step 2: Clear out TSB entries. */
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
iommu_range_free(iommu, bus_addr, npages);
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
struct scatterlist *s, *outs, *segstart;
unsigned long flags, handle, prot, ctx;
dma_addr_t dma_next = 0, dma_addr;
unsigned int max_seg_size;
unsigned long seg_boundary_size;
int outcount, incount, i;
struct strbuf *strbuf;
struct iommu *iommu;
unsigned long base_shift;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (nelems == 0 || !iommu)
return 0;
spin_lock_irqsave(&iommu->lock, flags);
ctx = 0;
if (iommu->iommu_ctxflush)
ctx = iommu_alloc_ctx(iommu);
if (strbuf->strbuf_enabled)
prot = IOPTE_STREAMING(ctx);
else
prot = IOPTE_CONSISTENT(ctx);
if (direction != DMA_TO_DEVICE)
prot |= IOPTE_WRITE;
outs = s = segstart = &sglist[0];
outcount = 1;
incount = nelems;
handle = 0;
/* Init first segment length for backout at failure */
outs->dma_length = 0;
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
iopte_t *base;
slen = s->length;
/* Sanity check */
if (slen == 0) {
dma_next = 0;
continue;
}
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
entry = iommu_range_alloc(dev, iommu, npages, &handle);
/* Handle failure */
if (unlikely(entry == DMA_ERROR_CODE)) {
if (printk_ratelimit())
printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
" npages %lx\n", iommu, paddr, npages);
goto iommu_map_failed;
}
base = iommu->page_table + entry;
/* Convert entry to a dma_addr_t */
dma_addr = iommu->page_table_map_base +
(entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
iopte_val(*base) = prot | paddr;
base++;
paddr += IO_PAGE_SIZE;
}
/* If we are in an open segment, try merging */
if (segstart != s) {
/* We cannot merge if:
* - allocated dma_addr isn't contiguous to previous allocation
*/
if ((dma_addr != dma_next) ||
(outs->dma_length + s->length > max_seg_size) ||
(is_span_boundary(out_entry, base_shift,
seg_boundary_size, outs, s))) {
/* Can't merge: create a new segment */
segstart = s;
outcount++;
outs = sg_next(outs);
} else {
outs->dma_length += s->length;
}
}
if (segstart == s) {
/* This is a new segment, fill entries */
outs->dma_address = dma_addr;
outs->dma_length = slen;
out_entry = entry;
}
/* Calculate next page pointer for contiguous check */
dma_next = dma_addr + slen;
}
spin_unlock_irqrestore(&iommu->lock, flags);
if (outcount < incount) {
outs = sg_next(outs);
outs->dma_address = DMA_ERROR_CODE;
outs->dma_length = 0;
}
return outcount;
iommu_map_failed:
for_each_sg(sglist, s, nelems, i) {
if (s->dma_length != 0) {
unsigned long vaddr, npages, entry, j;
iopte_t *base;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
iommu_range_free(iommu, vaddr, npages);
entry = (vaddr - iommu->page_table_map_base)
>> IO_PAGE_SHIFT;
base = iommu->page_table + entry;
for (j = 0; j < npages; j++)
iopte_make_dummy(iommu, base + j);
s->dma_address = DMA_ERROR_CODE;
s->dma_length = 0;
}
if (s == outs)
break;
}
spin_unlock_irqrestore(&iommu->lock, flags);
return 0;
}
/* If contexts are being used, they are the same in all of the mappings
* we make for a particular SG.
*/
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
unsigned long ctx = 0;
if (iommu->iommu_ctxflush) {
iopte_t *base;
u32 bus_addr;
bus_addr = sg->dma_address & IO_PAGE_MASK;
base = iommu->page_table +
((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
}
return ctx;
}
static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction direction)
{
unsigned long flags, ctx;
struct scatterlist *sg;
struct strbuf *strbuf;
struct iommu *iommu;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
ctx = fetch_sg_ctx(iommu, sglist);
spin_lock_irqsave(&iommu->lock, flags);
sg = sglist;
while (nelems--) {
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages, entry;
iopte_t *base;
int i;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
iommu_range_free(iommu, dma_handle, npages);
entry = ((dma_handle - iommu->page_table_map_base)
>> IO_PAGE_SHIFT);
base = iommu->page_table + entry;
dma_handle &= IO_PAGE_MASK;
if (strbuf->strbuf_enabled)
strbuf_flush(strbuf, iommu, dma_handle, ctx,
npages, direction);
for (i = 0; i < npages; i++)
iopte_make_dummy(iommu, base + i);
sg = sg_next(sg);
}
iommu_free_ctx(iommu, ctx);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_single_for_cpu(struct device *dev,
dma_addr_t bus_addr, size_t sz,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
iopte = iommu->page_table +
((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static void dma_4u_sync_sg_for_cpu(struct device *dev,
struct scatterlist *sglist, int nelems,
enum dma_data_direction direction)
{
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long flags, ctx, npages, i;
struct scatterlist *sg, *sgprv;
u32 bus_addr;
iommu = dev->archdata.iommu;
strbuf = dev->archdata.stc;
if (!strbuf->strbuf_enabled)
return;
spin_lock_irqsave(&iommu->lock, flags);
/* Step 1: Record the context, if any. */
ctx = 0;
if (iommu->iommu_ctxflush &&
strbuf->strbuf_ctxflush) {
iopte_t *iopte;
iopte = iommu->page_table +
((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
}
/* Step 2: Kick data out of streaming buffers. */
bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
sgprv = NULL;
for_each_sg(sglist, sg, nelems, i) {
if (sg->dma_length == 0)
break;
sgprv = sg;
}
npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
- bus_addr) >> IO_PAGE_SHIFT;
strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
spin_unlock_irqrestore(&iommu->lock, flags);
}
static const struct dma_ops sun4u_dma_ops = {
.alloc_coherent = dma_4u_alloc_coherent,
.free_coherent = dma_4u_free_coherent,
.map_single = dma_4u_map_single,
.unmap_single = dma_4u_unmap_single,
.map_sg = dma_4u_map_sg,
.unmap_sg = dma_4u_unmap_sg,
.sync_single_for_cpu = dma_4u_sync_single_for_cpu,
.sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
};
const struct dma_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
int dma_supported(struct device *dev, u64 device_mask)
{
struct iommu *iommu = dev->archdata.iommu;
u64 dma_addr_mask = iommu->dma_addr_mask;
if (device_mask >= (1UL << 32UL))
return 0;
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
return pci_dma_supported(to_pci_dev(dev), device_mask);
#endif
return 0;
}
EXPORT_SYMBOL(dma_supported);
int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_PCI
if (dev->bus == &pci_bus_type)
return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
#endif
return -EINVAL;
}
EXPORT_SYMBOL(dma_set_mask);
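These ops sit behind the generic DMA API; a hedged usage sketch from a driver's point of view (dev, buf and len are illustrative):

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* iommu_range_alloc() failed */

	/* ... let the device DMA from 'buf' ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);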

59 arch/sparc/kernel/iommu_common.h Normal file
@@ -0,0 +1,59 @@
/* iommu_common.h: UltraSparc SBUS/PCI common iommu declarations.
*
* Copyright (C) 1999, 2008 David S. Miller (davem@davemloft.net)
*/
#ifndef _IOMMU_COMMON_H
#define _IOMMU_COMMON_H
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/iommu-helper.h>
#include <asm/iommu.h>
#include <asm/scatterlist.h>
/*
* These give mapping size of each iommu pte/tlb.
*/
#define IO_PAGE_SHIFT 13
#define IO_PAGE_SIZE (1UL << IO_PAGE_SHIFT)
#define IO_PAGE_MASK (~(IO_PAGE_SIZE-1))
#define IO_PAGE_ALIGN(addr) ALIGN(addr, IO_PAGE_SIZE)
#define IO_TSB_ENTRIES (128*1024)
#define IO_TSB_SIZE (IO_TSB_ENTRIES * 8)
/*
* This is the hardwired shift in the iotlb tag/data parts.
*/
#define IOMMU_PAGE_SHIFT 13
#define SG_ENT_PHYS_ADDRESS(SG) (__pa(sg_virt((SG))))
static inline int is_span_boundary(unsigned long entry,
unsigned long shift,
unsigned long boundary_size,
struct scatterlist *outs,
struct scatterlist *sg)
{
unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
int nr = iommu_num_pages(paddr, outs->dma_length + sg->length,
IO_PAGE_SIZE);
return iommu_is_span_boundary(entry, nr, shift, boundary_size);
}
extern unsigned long iommu_range_alloc(struct device *dev,
struct iommu *iommu,
unsigned long npages,
unsigned long *handle);
extern void iommu_range_free(struct iommu *iommu,
dma_addr_t dma_addr,
unsigned long npages);
#endif /* _IOMMU_COMMON_H */
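A hedged sketch of the page arithmetic these macros support, mirroring the mapping paths in iommu.c (oaddr and sz are illustrative):

	unsigned long npages;

	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;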

arch/sparc/kernel/ioport.c
@@ -552,8 +552,8 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
/* IIep is write-through, not flushing. */
for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg_page(sg)) == NULL);
sg->dvma_address = virt_to_phys(sg_virt(sg));
sg->dvma_length = sg->length;
sg->dma_address = virt_to_phys(sg_virt(sg));
sg->dma_length = sg->length;
}
return nents;
}

arch/sparc/kernel/irq_32.c
@@ -46,6 +46,7 @@
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include "kernel.h"
#include "irq.h"
#ifdef CONFIG_SMP
@@ -592,19 +593,19 @@ EXPORT_SYMBOL(request_irq);
void disable_irq_nosync(unsigned int irq)
{
return __disable_irq(irq);
__disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);
void disable_irq(unsigned int irq)
{
return __disable_irq(irq);
__disable_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
void enable_irq(unsigned int irq)
{
return __enable_irq(irq);
__enable_irq(irq);
}
EXPORT_SYMBOL(enable_irq);

1104 arch/sparc/kernel/irq_64.c Normal file
File diff suppressed because it is too large.

39 arch/sparc/kernel/itlb_miss.S Normal file
@@ -0,0 +1,39 @@
/* ITLB ** ICACHE line 1: Context 0 check and TSB load */
ldxa [%g0] ASI_IMMU_TSB_8KB_PTR, %g1 ! Get TSB 8K pointer
ldxa [%g0] ASI_IMMU, %g6 ! Get TAG TARGET
srlx %g6, 48, %g5 ! Get context
sllx %g6, 22, %g6 ! Zero out context
brz,pn %g5, kvmap_itlb ! Context 0 processing
srlx %g6, 22, %g6 ! Delay slot
TSB_LOAD_QUAD(%g1, %g4) ! Load TSB entry
cmp %g4, %g6 ! Compare TAG
/* ITLB ** ICACHE line 2: TSB compare and TLB load */
bne,pn %xcc, tsb_miss_itlb ! Miss
mov FAULT_CODE_ITLB, %g3
sethi %hi(_PAGE_EXEC_4U), %g4
andcc %g5, %g4, %g0 ! Executable?
be,pn %xcc, tsb_do_fault
nop ! Delay slot, fill me
stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load TLB
retry ! Trap done
/* ITLB ** ICACHE line 3: */
nop
nop
nop
nop
nop
nop
nop
nop
/* ITLB ** ICACHE line 4: */
nop
nop
nop
nop
nop
nop
nop
nop

51 arch/sparc/kernel/ivec.S Normal file
@@ -0,0 +1,51 @@
/* The registers for cross calls will be:
*
* DATA 0: [low 32-bits] Address of function to call, jmp to this
* [high 32-bits] MMU Context Argument 0, place in %g5
* DATA 1: Address Argument 1, place in %g1
* DATA 2: Address Argument 2, place in %g7
*
* With this method we can do most of the cross-call tlb/cache
* flushing very quickly.
*/
.align 32
.globl do_ivec
.type do_ivec,#function
do_ivec:
mov 0x40, %g3
ldxa [%g3 + %g0] ASI_INTR_R, %g3
sethi %hi(KERNBASE), %g4
cmp %g3, %g4
bgeu,pn %xcc, do_ivec_xcall
srlx %g3, 32, %g5
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
sethi %hi(ivector_table_pa), %g2
ldx [%g2 + %lo(ivector_table_pa)], %g2
sllx %g3, 4, %g3
add %g2, %g3, %g3
TRAP_LOAD_IRQ_WORK_PA(%g6, %g1)
ldx [%g6], %g5
stxa %g5, [%g3] ASI_PHYS_USE_EC
stx %g3, [%g6]
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
retry
do_ivec_xcall:
mov 0x50, %g1
ldxa [%g1 + %g0] ASI_INTR_R, %g1
srl %g3, 0, %g3
mov 0x60, %g7
ldxa [%g7 + %g0] ASI_INTR_R, %g7
stxa %g0, [%g0] ASI_INTR_RECEIVE
membar #Sync
ba,pt %xcc, 1f
nop
.align 32
1: jmpl %g3, %g0
nop
.size do_ivec,.-do_ivec
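A hedged sketch of the sender side implied by the comment at the top of do_ivec: the three mondo data words would be packed along these lines before dispatch (variable names are illustrative; the real packing lives in the SMP cross-call code):

	u64 data0, data1, data2;

	data0 = (((u64) ctx) << 32) | ((unsigned long) func & 0xffffffff);
	data1 = arg1;	/* do_ivec places this in %g1 */
	data2 = arg2;	/* do_ivec places this in %g7 */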

31 arch/sparc/kernel/kernel.h Normal file
@@ -0,0 +1,31 @@
#ifndef __SPARC_KERNEL_H
#define __SPARC_KERNEL_H
#include <linux/interrupt.h>
/* cpu.c */
extern const char *sparc_cpu_type;
extern const char *sparc_fpu_type;
extern unsigned int fsr_storage;
#ifdef CONFIG_SPARC32
/* cpu.c */
extern void cpu_probe(void);
/* traps_32.c */
extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
/* muldiv.c */
extern int do_user_muldiv (struct pt_regs *, unsigned long);
/* irq_32.c */
extern struct irqaction static_irqaction[];
extern int static_irq_count;
extern spinlock_t irq_action_lock;
extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
#else /* CONFIG_SPARC32 */
#endif /* CONFIG_SPARC32 */
#endif /* !(__SPARC_KERNEL_H) */

186 arch/sparc/kernel/kgdb_64.c Normal file
@@ -0,0 +1,186 @@
/* kgdb.c: KGDB support for 64-bit sparc.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window *win;
int i;
gdb_regs[GDB_G0] = 0;
for (i = 0; i < 15; i++)
gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i];
win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS);
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F62; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_PC] = regs->tpc;
gdb_regs[GDB_NPC] = regs->tnpc;
gdb_regs[GDB_STATE] = regs->tstate;
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_FPRS] = 0;
gdb_regs[GDB_Y] = regs->y;
}
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct thread_info *t = task_thread_info(p);
extern unsigned int switch_to_pc;
extern unsigned int ret_from_syscall;
struct reg_window *win;
unsigned long pc, cwp;
int i;
for (i = GDB_G0; i < GDB_G6; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_G6] = (unsigned long) t;
gdb_regs[GDB_G7] = (unsigned long) p;
for (i = GDB_O0; i < GDB_SP; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_SP] = t->ksp;
gdb_regs[GDB_O7] = 0;
win = (struct reg_window *) (t->ksp + STACK_BIAS);
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F62; i++)
gdb_regs[i] = 0;
if (t->new_child)
pc = (unsigned long) &ret_from_syscall;
else
pc = (unsigned long) &switch_to_pc;
gdb_regs[GDB_PC] = pc;
gdb_regs[GDB_NPC] = pc + 4;
cwp = __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP];
gdb_regs[GDB_STATE] = (TSTATE_PRIV | TSTATE_IE | cwp);
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_FPRS] = 0;
gdb_regs[GDB_Y] = 0;
}
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window *win;
int i;
for (i = 0; i < 15; i++)
regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i];
/* If the TSTATE register is changing, we have to preserve
* the CWP field, otherwise window save/restore explodes.
*/
if (regs->tstate != gdb_regs[GDB_STATE]) {
unsigned long cwp = regs->tstate & TSTATE_CWP;
regs->tstate = (gdb_regs[GDB_STATE] & ~TSTATE_CWP) | cwp;
}
regs->tpc = gdb_regs[GDB_PC];
regs->tnpc = gdb_regs[GDB_NPC];
regs->y = gdb_regs[GDB_Y];
win = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS);
for (i = 0; i < 8; i++)
win->locals[i] = gdb_regs[GDB_L0 + i];
for (i = 0; i < 8; i++)
win->ins[i] = gdb_regs[GDB_I0 + i];
}
#ifdef CONFIG_SMP
void smp_kgdb_capture_client(struct pt_regs *regs)
{
unsigned long flags;
__asm__ __volatile__("rdpr %%pstate, %0\n\t"
"wrpr %0, %1, %%pstate"
: "=r" (flags)
: "i" (PSTATE_IE));
flushw_all();
if (atomic_read(&kgdb_active) != -1)
kgdb_nmicallback(raw_smp_processor_id(), regs);
__asm__ __volatile__("wrpr %0, 0, %%pstate"
: : "r" (flags));
}
#endif
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *linux_regs)
{
unsigned long addr;
char *ptr;
switch (remcomInBuffer[0]) {
case 'c':
/* try to read optional parameter, pc unchanged if no parm */
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr)) {
linux_regs->tpc = addr;
linux_regs->tnpc = addr + 4;
}
/* fallthru */
case 'D':
case 'k':
if (linux_regs->tpc == (unsigned long) arch_kgdb_breakpoint) {
linux_regs->tpc = linux_regs->tnpc;
linux_regs->tnpc += 4;
}
return 0;
}
return -1;
}
asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs)
{
unsigned long flags;
if (user_mode(regs)) {
bad_trap(regs, trap_level);
return;
}
flushw_all();
local_irq_save(flags);
kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
int kgdb_arch_init(void)
{
return 0;
}
void kgdb_arch_exit(void)
{
}
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x72 */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
};

593 arch/sparc/kernel/kprobes.c Normal file
@@ -0,0 +1,593 @@
/* arch/sparc64/kernel/kprobes.c
*
* Copyright (C) 2004 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/signal.h>
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
/* We do not have hardware single-stepping on sparc64.
* So we implement software single-stepping with breakpoint
* traps. The top-level scheme is similar to that used
* in the x86 kprobes implementation.
*
* In the kprobe->ainsn.insn[] array we store the original
* instruction at index zero and a break instruction at
* index one.
*
* When we hit a kprobe we:
* - Run the pre-handler
* - Remember "regs->tnpc" and interrupt level stored in
* "regs->tstate" so we can restore them later
* - Disable PIL interrupts
* - Set regs->tpc to point to kprobe->ainsn.insn[0]
* - Set regs->tnpc to point to kprobe->ainsn.insn[1]
* - Mark that we are actively in a kprobe
*
* At this point we wait for the second breakpoint at
* kprobe->ainsn.insn[1] to hit. When it does we:
* - Run the post-handler
* - Set regs->tpc to "remembered" regs->tnpc stored above,
* restore the PIL interrupt level in "regs->tstate" as well
* - Make any adjustments necessary to regs->tnpc in order
* to handle relative branches correctly. See below.
* - Mark that we are no longer actively in a kprobe.
*/
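A hedged sketch of the consumer side of this machinery: registering a kprobe here looks the same as on any other architecture (the probed symbol and handler names are illustrative):

static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("pre-handler: tpc=%lx\n", regs->tpc);
	return 0;
}

static struct kprobe kp = {
	.symbol_name	= "do_fork",
	.pre_handler	= handler_pre,
};

/* register_kprobe(&kp) at init time, unregister_kprobe(&kp) on exit */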
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
p->ainsn.insn[0] = *p->addr;
flushi(&p->ainsn.insn[0]);
p->ainsn.insn[1] = BREAKPOINT_INSTRUCTION_2;
flushi(&p->ainsn.insn[1]);
p->opcode = *p->addr;
return 0;
}
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
*p->addr = BREAKPOINT_INSTRUCTION;
flushi(p->addr);
}
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
*p->addr = p->opcode;
flushi(p->addr);
}
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
kcb->prev_kprobe.kp = kprobe_running();
kcb->prev_kprobe.status = kcb->kprobe_status;
kcb->prev_kprobe.orig_tnpc = kcb->kprobe_orig_tnpc;
kcb->prev_kprobe.orig_tstate_pil = kcb->kprobe_orig_tstate_pil;
}
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->kprobe_orig_tnpc = kcb->prev_kprobe.orig_tnpc;
kcb->kprobe_orig_tstate_pil = kcb->prev_kprobe.orig_tstate_pil;
}
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
__get_cpu_var(current_kprobe) = p;
kcb->kprobe_orig_tnpc = regs->tnpc;
kcb->kprobe_orig_tstate_pil = (regs->tstate & TSTATE_PIL);
}
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
regs->tstate |= TSTATE_PIL;
/* Single-step inline if it is a breakpoint instruction. */
if (p->opcode == BREAKPOINT_INSTRUCTION) {
regs->tpc = (unsigned long) p->addr;
regs->tnpc = kcb->kprobe_orig_tnpc;
} else {
regs->tpc = (unsigned long) &p->ainsn.insn[0];
regs->tnpc = (unsigned long) &p->ainsn.insn[1];
}
}
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
struct kprobe *p;
void *addr = (void *) regs->tpc;
int ret = 0;
struct kprobe_ctlblk *kcb;
/*
* We don't want to be preempted for the entire
* duration of kprobe processing
*/
preempt_disable();
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
p = get_kprobe(addr);
if (p) {
if (kcb->kprobe_status == KPROBE_HIT_SS) {
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
kcb->kprobe_orig_tstate_pil);
goto no_kprobe;
}
/* We have reentered the kprobe_handler(), since
* another probe was hit while within the handler.
* Here we save the original kprobes variables and
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
save_previous_kprobe(kcb);
set_current_kprobe(p, regs, kcb);
kprobes_inc_nmissed_count(p);
kcb->kprobe_status = KPROBE_REENTER;
prepare_singlestep(p, regs, kcb);
return 1;
} else {
if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
*/
ret = 1;
goto no_kprobe;
}
p = __get_cpu_var(current_kprobe);
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
}
goto no_kprobe;
}
p = get_kprobe(addr);
if (!p) {
if (*(u32 *)addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
* either a probepoint or a debugger breakpoint
* at this address. In either case, no further
* handling of this interrupt is appropriate.
*/
ret = 1;
}
/* Not one of ours: let kernel handle it */
goto no_kprobe;
}
set_current_kprobe(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler && p->pre_handler(p, regs))
return 1;
ss_probe:
prepare_singlestep(p, regs, kcb);
kcb->kprobe_status = KPROBE_HIT_SS;
return 1;
no_kprobe:
preempt_enable_no_resched();
return ret;
}
/* If INSN is a relative control transfer instruction,
* return the corrected branch destination value.
*
* regs->tpc and regs->tnpc still hold the values of the
* program counters at the time of trap due to the execution
* of the BREAKPOINT_INSTRUCTION_2 at p->ainsn.insn[1]
*
*/
static unsigned long __kprobes relbranch_fixup(u32 insn, struct kprobe *p,
struct pt_regs *regs)
{
unsigned long real_pc = (unsigned long) p->addr;
/* Branch not taken, no mods necessary. */
if (regs->tnpc == regs->tpc + 0x4UL)
return real_pc + 0x8UL;
/* The three cases are call, branch w/prediction,
* and traditional branch.
*/
if ((insn & 0xc0000000) == 0x40000000 ||
(insn & 0xc1c00000) == 0x00400000 ||
(insn & 0xc1c00000) == 0x00800000) {
unsigned long ainsn_addr;
ainsn_addr = (unsigned long) &p->ainsn.insn[0];
/* The instruction did all the work for us
* already, just apply the offset to the correct
* instruction location.
*/
return (real_pc + (regs->tnpc - ainsn_addr));
}
/* It is jmpl or some other absolute PC modification instruction,
* leave NPC as-is.
*/
return regs->tnpc;
}
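
The three opcode masks above test the SPARC instruction-format fields (CALL: op = 01; BPcc: op = 00, op2 = 001; Bicc: op = 00, op2 = 010). A standalone sketch of the same classification, with illustrative sample encodings:

#include <stdio.h>

static int is_pc_relative_branch(unsigned int insn)
{
	return (insn & 0xc0000000) == 0x40000000 ||	/* CALL: op = 01 */
	       (insn & 0xc1c00000) == 0x00400000 ||	/* BPcc: op = 00, op2 = 001 */
	       (insn & 0xc1c00000) == 0x00800000;	/* Bicc: op = 00, op2 = 010 */
}

int main(void)
{
	printf("%d\n", is_pc_relative_branch(0x40000000)); /* call -> 1 */
	printf("%d\n", is_pc_relative_branch(0x81c3e008)); /* retl (jmpl) -> 0 */
	return 0;
}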
/* If INSN is an instruction which writes its PC location
* into a destination register, fix that up.
*/
static void __kprobes retpc_fixup(struct pt_regs *regs, u32 insn,
unsigned long real_pc)
{
unsigned long *slot = NULL;
/* Simplest case is 'call', which always uses %o7 */
if ((insn & 0xc0000000) == 0x40000000) {
slot = &regs->u_regs[UREG_I7];
}
/* 'jmpl' encodes the register inside of the opcode */
if ((insn & 0xc1f80000) == 0x81c00000) {
unsigned long rd = ((insn >> 25) & 0x1f);
if (rd <= 15) {
slot = &regs->u_regs[rd];
} else {
/* Hard case, it goes onto the stack. */
flushw_all();
rd -= 16;
slot = (unsigned long *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
slot += rd;
}
}
if (slot != NULL)
*slot = real_pc;
}
/*
* Called after single-stepping. p->addr is the address of the
* instruction which has been replaced by the breakpoint
* instruction. To avoid the SMP problems that can occur when we
* temporarily put back the original opcode to single-step, we
* single-stepped a copy of the instruction. The address of this
* copy is &p->ainsn.insn[0].
*
* This function prepares to return from the post-single-step
* breakpoint trap.
*/
static void __kprobes resume_execution(struct kprobe *p,
struct pt_regs *regs, struct kprobe_ctlblk *kcb)
{
u32 insn = p->ainsn.insn[0];
regs->tnpc = relbranch_fixup(insn, p, regs);
/* This assignment must occur after relbranch_fixup() */
regs->tpc = kcb->kprobe_orig_tnpc;
retpc_fixup(regs, insn, (unsigned long) p->addr);
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
kcb->kprobe_orig_tstate_pil);
}
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (!cur)
return 0;
if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
kcb->kprobe_status = KPROBE_HIT_SSDONE;
cur->post_handler(cur, regs, 0);
}
resume_execution(cur, regs, kcb);
/*Restore back the original saved kprobes variables and continue. */
if (kcb->kprobe_status == KPROBE_REENTER) {
restore_previous_kprobe(kcb);
goto out;
}
reset_current_kprobe();
out:
preempt_enable_no_resched();
return 1;
}
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
struct kprobe *cur = kprobe_running();
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
const struct exception_table_entry *entry;
switch(kcb->kprobe_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* We are here because the instruction being single
* stepped caused a page fault. We reset the current
* kprobe and the tpc points back to the probe address
* and allow the page fault handler to continue as a
* normal page fault.
*/
regs->tpc = (unsigned long)cur->addr;
regs->tnpc = kcb->kprobe_orig_tnpc;
regs->tstate = ((regs->tstate & ~TSTATE_PIL) |
kcb->kprobe_orig_tstate_pil);
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
reset_current_kprobe();
preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
* We increment the nmissed count for accounting;
* the npre/npostfault counts could also be used for
* accounting these specific fault cases.
*/
kprobes_inc_nmissed_count(cur);
/*
* We come here because instructions in the pre/post
* handler caused the page_fault, this could happen
* if handler tries to access user space by
* copy_from_user(), get_user() etc. Let the
* user-specified handler try to fix it first.
*/
if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
return 1;
/*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
entry = search_exception_tables(regs->tpc);
if (entry) {
regs->tpc = entry->fixup;
regs->tnpc = regs->tpc + 4;
return 1;
}
/*
* fixup_exception() could not handle it,
* Let do_page_fault() fix it.
*/
break;
default:
break;
}
return 0;
}
/*
* Wrapper routine for handling exceptions.
*/
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
unsigned long val, void *data)
{
struct die_args *args = (struct die_args *)data;
int ret = NOTIFY_DONE;
if (args->regs && user_mode(args->regs))
return ret;
switch (val) {
case DIE_DEBUG:
if (kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
case DIE_DEBUG_2:
if (post_kprobe_handler(args->regs))
ret = NOTIFY_STOP;
break;
default:
break;
}
return ret;
}
asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
struct pt_regs *regs)
{
BUG_ON(trap_level != 0x170 && trap_level != 0x171);
if (user_mode(regs)) {
local_irq_enable();
bad_trap(regs, trap_level);
return;
}
/* trap_level == 0x170 --> ta 0x70
* trap_level == 0x171 --> ta 0x71
*/
if (notify_die((trap_level == 0x170) ? DIE_DEBUG : DIE_DEBUG_2,
(trap_level == 0x170) ? "debug" : "debug_2",
regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP)
bad_trap(regs, trap_level);
}
/* Jprobes support. */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
memcpy(&(kcb->jprobe_saved_regs), regs, sizeof(*regs));
regs->tpc = (unsigned long) jp->entry;
regs->tnpc = ((unsigned long) jp->entry) + 0x4UL;
regs->tstate |= TSTATE_PIL;
return 1;
}
void __kprobes jprobe_return(void)
{
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
register unsigned long orig_fp asm("g1");
orig_fp = kcb->jprobe_saved_regs.u_regs[UREG_FP];
__asm__ __volatile__("\n"
"1: cmp %%sp, %0\n\t"
"blu,a,pt %%xcc, 1b\n\t"
" restore\n\t"
".globl jprobe_return_trap_instruction\n"
"jprobe_return_trap_instruction:\n\t"
"ta 0x70"
: /* no outputs */
: "r" (orig_fp));
}
extern void jprobe_return_trap_instruction(void);
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
u32 *addr = (u32 *) regs->tpc;
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
if (addr == (u32 *) jprobe_return_trap_instruction) {
memcpy(regs, &(kcb->jprobe_saved_regs), sizeof(*regs));
preempt_enable_no_resched();
return 1;
}
return 0;
}
/* The value stored in the return address register is actually 2
* instructions before where the callee will return to.
* Sequences usually look something like this
*
* call some_function <--- return register points here
* nop <--- call delay slot
* whatever <--- where callee returns to
*
* To keep trampoline_probe_handler logic simpler, we normalize the
* value kept in ri->ret_addr so we don't need to keep adjusting it
* back and forth.
*/
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs)
{
ri->ret_addr = (kprobe_opcode_t *)(regs->u_regs[UREG_RETPC] + 8);
/* Replace the return addr with trampoline addr */
regs->u_regs[UREG_RETPC] =
((unsigned long)kretprobe_trampoline) - 8;
}
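
A worked example of the +8/-8 arithmetic above, with made-up addresses: a call at A leaves A in %o7 and the callee resumes at A + 8 (call plus delay slot), so storing trampoline - 8 makes the return land exactly on the trampoline.

#include <stdio.h>

int main(void)
{
	unsigned long o7 = 0x4000;			/* value saved by 'call' */
	unsigned long real_return = o7 + 8;		/* where the callee really resumes */
	unsigned long trampoline = 0x10008000;		/* pretend kretprobe_trampoline */
	unsigned long patched_o7 = trampoline - 8;	/* so the return lands on the trampoline */

	printf("real return %#lx, patched %%o7 %#lx\n", real_return, patched_o7);
	return 0;
}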
/*
* Called when the probe at kretprobe trampoline is hit
*/
int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
struct hlist_node *node, *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
INIT_HLIST_HEAD(&empty_rp);
kretprobe_hash_lock(current, &head, &flags);
/*
* It is possible to have multiple instances associated with a given
* task either because multiple functions in the call path
* have a return probe installed on them, and/or more than
* one return probe was registered for a target function.
*
* We can handle this because:
* - instances are always inserted at the head of the list
* - when multiple return probes are registered for the same
* function, the first instance's ret_addr will point to the
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
orig_ret_address = (unsigned long)ri->ret_addr;
recycle_rp_inst(ri, &empty_rp);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
* instances associated with this task are for
* other calls deeper on the call stack
*/
break;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
regs->tpc = orig_ret_address;
regs->tnpc = orig_ret_address + 4;
reset_current_kprobe();
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
/*
* By returning a non-zero value, we are telling
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
return 1;
}
void kretprobe_trampoline_holder(void)
{
asm volatile(".global kretprobe_trampoline\n"
"kretprobe_trampoline:\n"
"\tnop\n"
"\tnop\n");
}
static struct kprobe trampoline_p = {
.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
.pre_handler = trampoline_probe_handler
};
int __init arch_init_kprobes(void)
{
return register_kprobe(&trampoline_p);
}
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
return 1;
return 0;
}

View File

@@ -0,0 +1,60 @@
#ifndef _KSTACK_H
#define _KSTACK_H
#include <linux/thread_info.h>
#include <linux/sched.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
/* SP must be STACK_BIAS adjusted already. */
static inline bool kstack_valid(struct thread_info *tp, unsigned long sp)
{
unsigned long base = (unsigned long) tp;
if (sp >= (base + sizeof(struct thread_info)) &&
sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
return true;
if (hardirq_stack[tp->cpu]) {
base = (unsigned long) hardirq_stack[tp->cpu];
if (sp >= base &&
sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
return true;
base = (unsigned long) softirq_stack[tp->cpu];
if (sp >= base &&
sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf)))
return true;
}
return false;
}
/* Does "regs" point to a valid pt_regs trap frame? */
static inline bool kstack_is_trap_frame(struct thread_info *tp, struct pt_regs *regs)
{
unsigned long base = (unsigned long) tp;
unsigned long addr = (unsigned long) regs;
if (addr >= base &&
addr <= (base + THREAD_SIZE - sizeof(*regs)))
goto check_magic;
if (hardirq_stack[tp->cpu]) {
base = (unsigned long) hardirq_stack[tp->cpu];
if (addr >= base &&
addr <= (base + THREAD_SIZE - sizeof(*regs)))
goto check_magic;
base = (unsigned long) softirq_stack[tp->cpu];
if (addr >= base &&
addr <= (base + THREAD_SIZE - sizeof(*regs)))
goto check_magic;
}
return false;
check_magic:
if ((regs->magic & ~0x1ff) == PT_REGS_MAGIC)
return true;
return false;
}
#endif /* _KSTACK_H */

304
arch/sparc/kernel/ktlb.S Normal file
View File

@@ -0,0 +1,304 @@
/* arch/sparc64/kernel/ktlb.S: Kernel mapping TLB miss handling.
*
* Copyright (C) 1995, 1997, 2005, 2008 David S. Miller <davem@davemloft.net>
* Copyright (C) 1996 Eddie C. Dost (ecd@brainaid.de)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1996,98,99 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <asm/head.h>
#include <asm/asi.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
.text
.align 32
kvmap_itlb:
/* g6: TAG TARGET */
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_IMMU, %g4
/* sun4v_itlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
kvmap_itlb_4v:
kvmap_itlb_nonlinear:
/* Catch kernel NULL pointer calls. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
bleu,pn %xcc, kvmap_dtlb_longpath
nop
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load)
kvmap_itlb_tsb_miss:
sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_itlb_vmalloc_addr
mov 0x1, %g5
sllx %g5, 32, %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_itlb_obp
nop
kvmap_itlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath)
KTSB_LOCK_TAG(%g1, %g2, %g7)
/* Load and check PTE. */
ldxa [%g5] ASI_PHYS_USE_EC, %g5
mov 1, %g7
sllx %g7, TSB_TAG_INVALID_BIT, %g7
brgez,a,pn %g5, kvmap_itlb_longpath
KTSB_STORE(%g1, %g7)
KTSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
kvmap_itlb_load:
661: stxa %g5, [%g0] ASI_ITLB_DATA_IN
retry
.section .sun4v_2insn_patch, "ax"
.word 661b
nop
nop
.previous
/* For sun4v the ASI_ITLB_DATA_IN store and the retry
* instruction get nop'd out and we get here to branch
* to the sun4v tlb load code. The registers are set up
* as follows:
*
* %g4: vaddr
* %g5: PTE
* %g6: TAG
*
* The sun4v TLB load wants the PTE in %g3 so we fix that
* up here.
*/
ba,pt %xcc, sun4v_itlb_load
mov %g5, %g3
kvmap_itlb_longpath:
661: rdpr %pstate, %g5
wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
.section .sun4v_2insn_patch, "ax"
.word 661b
SET_GL(1)
nop
.previous
rdpr %tpc, %g5
ba,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_ITLB, %g4
kvmap_itlb_obp:
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath)
KTSB_LOCK_TAG(%g1, %g2, %g7)
KTSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_itlb_load
nop
kvmap_dtlb_obp:
OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath)
KTSB_LOCK_TAG(%g1, %g2, %g7)
KTSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_dtlb_load
nop
.align 32
kvmap_dtlb_tsb4m_load:
KTSB_LOCK_TAG(%g1, %g2, %g7)
KTSB_WRITE(%g1, %g5, %g6)
ba,pt %xcc, kvmap_dtlb_load
nop
kvmap_dtlb:
/* %g6: TAG TARGET */
mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g4
/* sun4v_dtlb_miss branches here with the missing virtual
* address already loaded into %g4
*/
kvmap_dtlb_4v:
brgez,pn %g4, kvmap_dtlb_nonlinear
nop
#ifdef CONFIG_DEBUG_PAGEALLOC
/* Index through the base page size TSB even for linear
* mappings when using page allocation debugging.
*/
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#else
/* Correct TAG_TARGET is already in %g6, check 4mb TSB. */
KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
#endif
/* TSB entry address left in %g1, lookup linear PTE.
* Must preserve %g1 and %g6 (TAG).
*/
kvmap_dtlb_tsb4m_miss:
sethi %hi(kpte_linear_bitmap), %g2
or %g2, %lo(kpte_linear_bitmap), %g2
/* Clear the PAGE_OFFSET top virtual bits, then shift
* down to get a 256MB physical address index.
*/
sllx %g4, 21, %g5
mov 1, %g7
srlx %g5, 21 + 28, %g5
/* Don't try this at home kids... this depends upon srlx
* only taking the low 6 bits of the shift count in %g5.
*/
sllx %g7, %g5, %g7
/* Divide by 64 to get the offset into the bitmask. */
srlx %g5, 6, %g5
sllx %g5, 3, %g5
/* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */
ldx [%g2 + %g5], %g2
andcc %g2, %g7, %g0
sethi %hi(kern_linear_pte_xor), %g5
or %g5, %lo(kern_linear_pte_xor), %g5
bne,a,pt %xcc, 1f
add %g5, 8, %g5
1: ldx [%g5], %g2
.globl kvmap_linear_patch
kvmap_linear_patch:
ba,pt %xcc, kvmap_dtlb_tsb4m_load
xor %g2, %g4, %g5
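
A userspace model of the index arithmetic above (the PAGE_OFFSET value and the example address are hypothetical): shift out the top 21 virtual bits, then divide by 256MB to get the chunk number, from which the bit and word offsets follow.

#include <stdio.h>

int main(void)
{
	unsigned long vaddr = 0xfffff80000000000UL + 0x30000000UL; /* PAGE_OFFSET + 768MB */
	unsigned long chunk = (vaddr << 21) >> (21 + 28);	/* 256MB chunk index: 3 */
	unsigned long bit   = 1UL << (chunk & 63);		/* bit within a 64-bit word */
	unsigned long word  = (chunk >> 6) * 8;			/* byte offset of that word */

	printf("chunk=%lu bit=%#lx word-offset=%lu\n", chunk, bit, word);
	return 0;
}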
kvmap_dtlb_vmalloc_addr:
KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath)
KTSB_LOCK_TAG(%g1, %g2, %g7)
/* Load and check PTE. */
ldxa [%g5] ASI_PHYS_USE_EC, %g5
mov 1, %g7
sllx %g7, TSB_TAG_INVALID_BIT, %g7
brgez,a,pn %g5, kvmap_dtlb_longpath
KTSB_STORE(%g1, %g7)
KTSB_WRITE(%g1, %g5, %g6)
/* fallthrough to TLB load */
kvmap_dtlb_load:
661: stxa %g5, [%g0] ASI_DTLB_DATA_IN ! Reload TLB
retry
.section .sun4v_2insn_patch, "ax"
.word 661b
nop
nop
.previous
/* For sun4v the ASI_DTLB_DATA_IN store and the retry
* instruction get nop'd out and we get here to branch
* to the sun4v tlb load code. The registers are set up
* as follows:
*
* %g4: vaddr
* %g5: PTE
* %g6: TAG
*
* The sun4v TLB load wants the PTE in %g3 so we fix that
* up here.
*/
ba,pt %xcc, sun4v_dtlb_load
mov %g5, %g3
#ifdef CONFIG_SPARSEMEM_VMEMMAP
kvmap_vmemmap:
sub %g4, %g5, %g5
srlx %g5, 22, %g5
sethi %hi(vmemmap_table), %g1
sllx %g5, 3, %g5
or %g1, %lo(vmemmap_table), %g1
ba,pt %xcc, kvmap_dtlb_load
ldx [%g1 + %g5], %g5
#endif
kvmap_dtlb_nonlinear:
/* Catch kernel NULL pointer derefs. */
sethi %hi(PAGE_SIZE), %g5
cmp %g4, %g5
bleu,pn %xcc, kvmap_dtlb_longpath
nop
#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* Do not use the TSB for vmemmap. */
mov (VMEMMAP_BASE >> 24), %g5
sllx %g5, 24, %g5
cmp %g4,%g5
bgeu,pn %xcc, kvmap_vmemmap
nop
#endif
KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
kvmap_dtlb_tsbmiss:
sethi %hi(MODULES_VADDR), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_longpath
mov (VMALLOC_END >> 24), %g5
sllx %g5, 24, %g5
cmp %g4, %g5
bgeu,pn %xcc, kvmap_dtlb_longpath
nop
kvmap_check_obp:
sethi %hi(LOW_OBP_ADDRESS), %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_vmalloc_addr
mov 0x1, %g5
sllx %g5, 32, %g5
cmp %g4, %g5
blu,pn %xcc, kvmap_dtlb_obp
nop
ba,pt %xcc, kvmap_dtlb_vmalloc_addr
nop
kvmap_dtlb_longpath:
661: rdpr %pstate, %g5
wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate
.section .sun4v_2insn_patch, "ax"
.word 661b
SET_GL(1)
ldxa [%g0] ASI_SCRATCHPAD, %g5
.previous
rdpr %tl, %g3
cmp %g3, 1
661: mov TLB_TAG_ACCESS, %g4
ldxa [%g4] ASI_DMMU, %g5
.section .sun4v_2insn_patch, "ax"
.word 661b
ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
nop
.previous
be,pt %xcc, sparc64_realfault_common
mov FAULT_CODE_DTLB, %g4
ba,pt %xcc, winfix_trampoline
nop

2378
arch/sparc/kernel/ldc.c Normal file

File diff suppressed because it is too large

917
arch/sparc/kernel/mdesc.c Normal file
View File

@@ -0,0 +1,917 @@
/* mdesc.c: Sun4V machine description handling.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/lmb.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <asm/cpudata.h>
#include <asm/hypervisor.h>
#include <asm/mdesc.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/smp.h>
/* Unlike the OBP device tree, the machine description is a full-on
* DAG. An arbitrary number of ARCs are possible from one
* node to other nodes and thus we can't use the OBP device_node
* data structure to represent these nodes inside of the kernel.
*
* Actually, it isn't even a DAG, because there are back pointers
* which create cycles in the graph.
*
* mdesc_hdr and mdesc_elem describe the layout of the data structure
* we get from the Hypervisor.
*/
struct mdesc_hdr {
u32 version; /* Transport version */
u32 node_sz; /* node block size */
u32 name_sz; /* name block size */
u32 data_sz; /* data block size */
} __attribute__((aligned(16)));
struct mdesc_elem {
u8 tag;
#define MD_LIST_END 0x00
#define MD_NODE 0x4e
#define MD_NODE_END 0x45
#define MD_NOOP 0x20
#define MD_PROP_ARC 0x61
#define MD_PROP_VAL 0x76
#define MD_PROP_STR 0x73
#define MD_PROP_DATA 0x64
u8 name_len;
u16 resv;
u32 name_offset;
union {
struct {
u32 data_len;
u32 data_offset;
} data;
u64 val;
} d;
};
struct mdesc_mem_ops {
struct mdesc_handle *(*alloc)(unsigned int mdesc_size);
void (*free)(struct mdesc_handle *handle);
};
struct mdesc_handle {
struct list_head list;
struct mdesc_mem_ops *mops;
void *self_base;
atomic_t refcnt;
unsigned int handle_size;
struct mdesc_hdr mdesc;
};
static void mdesc_handle_init(struct mdesc_handle *hp,
unsigned int handle_size,
void *base)
{
BUG_ON(((unsigned long)&hp->mdesc) & (16UL - 1));
memset(hp, 0, handle_size);
INIT_LIST_HEAD(&hp->list);
hp->self_base = base;
atomic_set(&hp->refcnt, 1);
hp->handle_size = handle_size;
}
static struct mdesc_handle * __init mdesc_lmb_alloc(unsigned int mdesc_size)
{
unsigned int handle_size, alloc_size;
struct mdesc_handle *hp;
unsigned long paddr;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
alloc_size = PAGE_ALIGN(handle_size);
paddr = lmb_alloc(alloc_size, PAGE_SIZE);
hp = NULL;
if (paddr) {
hp = __va(paddr);
mdesc_handle_init(hp, handle_size, hp);
}
return hp;
}
static void mdesc_lmb_free(struct mdesc_handle *hp)
{
unsigned int alloc_size, handle_size = hp->handle_size;
unsigned long start, end;
BUG_ON(atomic_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(handle_size);
start = (unsigned long) hp;
end = start + alloc_size;
while (start < end) {
struct page *p;
p = virt_to_page(start);
ClearPageReserved(p);
__free_page(p);
start += PAGE_SIZE;
}
}
static struct mdesc_mem_ops lmb_mdesc_ops = {
.alloc = mdesc_lmb_alloc,
.free = mdesc_lmb_free,
};
static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
{
unsigned int handle_size;
void *base;
handle_size = (sizeof(struct mdesc_handle) -
sizeof(struct mdesc_hdr) +
mdesc_size);
base = kmalloc(handle_size + 15, GFP_KERNEL | __GFP_NOFAIL);
if (base) {
struct mdesc_handle *hp;
unsigned long addr;
addr = (unsigned long)base;
addr = (addr + 15UL) & ~15UL;
hp = (struct mdesc_handle *) addr;
mdesc_handle_init(hp, handle_size, base);
return hp;
}
return NULL;
}
static void mdesc_kfree(struct mdesc_handle *hp)
{
BUG_ON(atomic_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list));
kfree(hp->self_base);
}
static struct mdesc_mem_ops kmalloc_mdesc_memops = {
.alloc = mdesc_kmalloc,
.free = mdesc_kfree,
};
static struct mdesc_handle *mdesc_alloc(unsigned int mdesc_size,
struct mdesc_mem_ops *mops)
{
struct mdesc_handle *hp = mops->alloc(mdesc_size);
if (hp)
hp->mops = mops;
return hp;
}
static void mdesc_free(struct mdesc_handle *hp)
{
hp->mops->free(hp);
}
static struct mdesc_handle *cur_mdesc;
static LIST_HEAD(mdesc_zombie_list);
static DEFINE_SPINLOCK(mdesc_lock);
struct mdesc_handle *mdesc_grab(void)
{
struct mdesc_handle *hp;
unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags);
hp = cur_mdesc;
if (hp)
atomic_inc(&hp->refcnt);
spin_unlock_irqrestore(&mdesc_lock, flags);
return hp;
}
EXPORT_SYMBOL(mdesc_grab);
void mdesc_release(struct mdesc_handle *hp)
{
unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags);
if (atomic_dec_and_test(&hp->refcnt)) {
list_del_init(&hp->list);
hp->mops->free(hp);
}
spin_unlock_irqrestore(&mdesc_lock, flags);
}
EXPORT_SYMBOL(mdesc_release);
static DEFINE_MUTEX(mdesc_mutex);
static struct mdesc_notifier_client *client_list;
void mdesc_register_notifier(struct mdesc_notifier_client *client)
{
u64 node;
mutex_lock(&mdesc_mutex);
client->next = client_list;
client_list = client;
mdesc_for_each_node_by_name(cur_mdesc, node, client->node_name)
client->add(cur_mdesc, node);
mutex_unlock(&mdesc_mutex);
}
static const u64 *parent_cfg_handle(struct mdesc_handle *hp, u64 node)
{
const u64 *id;
u64 a;
id = NULL;
mdesc_for_each_arc(a, hp, node, MDESC_ARC_TYPE_BACK) {
u64 target;
target = mdesc_arc_target(hp, a);
id = mdesc_get_property(hp, target,
"cfg-handle", NULL);
if (id)
break;
}
return id;
}
/* Run 'func' on nodes which are in A but not in B. */
static void invoke_on_missing(const char *name,
struct mdesc_handle *a,
struct mdesc_handle *b,
void (*func)(struct mdesc_handle *, u64))
{
u64 node;
mdesc_for_each_node_by_name(a, node, name) {
int found = 0, is_vdc_port = 0;
const char *name_prop;
const u64 *id;
u64 fnode;
name_prop = mdesc_get_property(a, node, "name", NULL);
if (name_prop && !strcmp(name_prop, "vdc-port")) {
is_vdc_port = 1;
id = parent_cfg_handle(a, node);
} else
id = mdesc_get_property(a, node, "id", NULL);
if (!id) {
printk(KERN_ERR "MD: Cannot find ID for %s node.\n",
(name_prop ? name_prop : name));
continue;
}
mdesc_for_each_node_by_name(b, fnode, name) {
const u64 *fid;
if (is_vdc_port) {
name_prop = mdesc_get_property(b, fnode,
"name", NULL);
if (!name_prop ||
strcmp(name_prop, "vdc-port"))
continue;
fid = parent_cfg_handle(b, fnode);
if (!fid) {
printk(KERN_ERR "MD: Cannot find ID "
"for vdc-port node.\n");
continue;
}
} else
fid = mdesc_get_property(b, fnode,
"id", NULL);
if (*id == *fid) {
found = 1;
break;
}
}
if (!found)
func(a, node);
}
}
static void notify_one(struct mdesc_notifier_client *p,
struct mdesc_handle *old_hp,
struct mdesc_handle *new_hp)
{
invoke_on_missing(p->node_name, old_hp, new_hp, p->remove);
invoke_on_missing(p->node_name, new_hp, old_hp, p->add);
}
static void mdesc_notify_clients(struct mdesc_handle *old_hp,
struct mdesc_handle *new_hp)
{
struct mdesc_notifier_client *p = client_list;
while (p) {
notify_one(p, old_hp, new_hp);
p = p->next;
}
}
void mdesc_update(void)
{
unsigned long len, real_len, status;
struct mdesc_handle *hp, *orig_hp;
unsigned long flags;
mutex_lock(&mdesc_mutex);
(void) sun4v_mach_desc(0UL, 0UL, &len);
hp = mdesc_alloc(len, &kmalloc_mdesc_memops);
if (!hp) {
printk(KERN_ERR "MD: mdesc alloc fails\n");
goto out;
}
status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
if (status != HV_EOK || real_len > len) {
printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
status);
atomic_dec(&hp->refcnt);
mdesc_free(hp);
goto out;
}
spin_lock_irqsave(&mdesc_lock, flags);
orig_hp = cur_mdesc;
cur_mdesc = hp;
spin_unlock_irqrestore(&mdesc_lock, flags);
mdesc_notify_clients(orig_hp, hp);
spin_lock_irqsave(&mdesc_lock, flags);
if (atomic_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp);
else
list_add(&orig_hp->list, &mdesc_zombie_list);
spin_unlock_irqrestore(&mdesc_lock, flags);
out:
mutex_unlock(&mdesc_mutex);
}
static struct mdesc_elem *node_block(struct mdesc_hdr *mdesc)
{
return (struct mdesc_elem *) (mdesc + 1);
}
static void *name_block(struct mdesc_hdr *mdesc)
{
return ((void *) node_block(mdesc)) + mdesc->node_sz;
}
static void *data_block(struct mdesc_hdr *mdesc)
{
return ((void *) name_block(mdesc)) + mdesc->name_sz;
}
u64 mdesc_node_by_name(struct mdesc_handle *hp,
u64 from_node, const char *name)
{
struct mdesc_elem *ep = node_block(&hp->mdesc);
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
u64 ret;
if (from_node == MDESC_NODE_NULL) {
ret = from_node = 0;
} else if (from_node >= last_node) {
return MDESC_NODE_NULL;
} else {
ret = ep[from_node].d.val;
}
while (ret < last_node) {
if (ep[ret].tag != MD_NODE)
return MDESC_NODE_NULL;
if (!strcmp(names + ep[ret].name_offset, name))
break;
ret = ep[ret].d.val;
}
if (ret >= last_node)
ret = MDESC_NODE_NULL;
return ret;
}
EXPORT_SYMBOL(mdesc_node_by_name);
const void *mdesc_get_property(struct mdesc_handle *hp, u64 node,
const char *name, int *lenp)
{
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
void *data = data_block(&hp->mdesc);
struct mdesc_elem *ep;
if (node == MDESC_NODE_NULL || node >= last_node)
return NULL;
ep = node_block(&hp->mdesc) + node;
ep++;
for (; ep->tag != MD_NODE_END; ep++) {
void *val = NULL;
int len = 0;
switch (ep->tag) {
case MD_PROP_VAL:
val = &ep->d.val;
len = 8;
break;
case MD_PROP_STR:
case MD_PROP_DATA:
val = data + ep->d.data.data_offset;
len = ep->d.data.data_len;
break;
default:
break;
}
if (!val)
continue;
if (!strcmp(names + ep->name_offset, name)) {
if (lenp)
*lenp = len;
return val;
}
}
return NULL;
}
EXPORT_SYMBOL(mdesc_get_property);
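
A minimal sketch of typical consumer use of this API (kernel context assumed; it mirrors what report_platform_properties() does below, with error handling trimmed):

static u64 example_lookup_hostid(void)
{
	struct mdesc_handle *hp = mdesc_grab();
	u64 pn, ret = 0;
	const u64 *v;

	if (!hp)
		return 0;
	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
	if (pn != MDESC_NODE_NULL) {
		v = mdesc_get_property(hp, pn, "hostid", NULL);
		if (v)
			ret = *v;
	}
	mdesc_release(hp);
	return ret;
}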
u64 mdesc_next_arc(struct mdesc_handle *hp, u64 from, const char *arc_type)
{
struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
if (from == MDESC_NODE_NULL || from >= last_node)
return MDESC_NODE_NULL;
ep = base + from;
ep++;
for (; ep->tag != MD_NODE_END; ep++) {
if (ep->tag != MD_PROP_ARC)
continue;
if (strcmp(names + ep->name_offset, arc_type))
continue;
return ep - base;
}
return MDESC_NODE_NULL;
}
EXPORT_SYMBOL(mdesc_next_arc);
u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc)
{
struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
ep = base + arc;
return ep->d.val;
}
EXPORT_SYMBOL(mdesc_arc_target);
const char *mdesc_node_name(struct mdesc_handle *hp, u64 node)
{
struct mdesc_elem *ep, *base = node_block(&hp->mdesc);
const char *names = name_block(&hp->mdesc);
u64 last_node = hp->mdesc.node_sz / 16;
if (node == MDESC_NODE_NULL || node >= last_node)
return NULL;
ep = base + node;
if (ep->tag != MD_NODE)
return NULL;
return names + ep->name_offset;
}
EXPORT_SYMBOL(mdesc_node_name);
static void __init report_platform_properties(void)
{
struct mdesc_handle *hp = mdesc_grab();
u64 pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "platform");
const char *s;
const u64 *v;
if (pn == MDESC_NODE_NULL) {
prom_printf("No platform node in machine-description.\n");
prom_halt();
}
s = mdesc_get_property(hp, pn, "banner-name", NULL);
printk("PLATFORM: banner-name [%s]\n", s);
s = mdesc_get_property(hp, pn, "name", NULL);
printk("PLATFORM: name [%s]\n", s);
v = mdesc_get_property(hp, pn, "hostid", NULL);
if (v)
printk("PLATFORM: hostid [%08lx]\n", *v);
v = mdesc_get_property(hp, pn, "serial#", NULL);
if (v)
printk("PLATFORM: serial# [%08lx]\n", *v);
v = mdesc_get_property(hp, pn, "stick-frequency", NULL);
printk("PLATFORM: stick-frequency [%08lx]\n", *v);
v = mdesc_get_property(hp, pn, "mac-address", NULL);
if (v)
printk("PLATFORM: mac-address [%lx]\n", *v);
v = mdesc_get_property(hp, pn, "watchdog-resolution", NULL);
if (v)
printk("PLATFORM: watchdog-resolution [%lu ms]\n", *v);
v = mdesc_get_property(hp, pn, "watchdog-max-timeout", NULL);
if (v)
printk("PLATFORM: watchdog-max-timeout [%lu ms]\n", *v);
v = mdesc_get_property(hp, pn, "max-cpus", NULL);
if (v)
printk("PLATFORM: max-cpus [%lu]\n", *v);
#ifdef CONFIG_SMP
{
int max_cpu, i;
if (v) {
max_cpu = *v;
if (max_cpu > NR_CPUS)
max_cpu = NR_CPUS;
} else {
max_cpu = NR_CPUS;
}
for (i = 0; i < max_cpu; i++)
cpu_set(i, cpu_possible_map);
}
#endif
mdesc_release(hp);
}
static void __devinit fill_in_one_cache(cpuinfo_sparc *c,
struct mdesc_handle *hp,
u64 mp)
{
const u64 *level = mdesc_get_property(hp, mp, "level", NULL);
const u64 *size = mdesc_get_property(hp, mp, "size", NULL);
const u64 *line_size = mdesc_get_property(hp, mp, "line-size", NULL);
const char *type;
int type_len;
type = mdesc_get_property(hp, mp, "type", &type_len);
switch (*level) {
case 1:
if (of_find_in_proplist(type, "instn", type_len)) {
c->icache_size = *size;
c->icache_line_size = *line_size;
} else if (of_find_in_proplist(type, "data", type_len)) {
c->dcache_size = *size;
c->dcache_line_size = *line_size;
}
break;
case 2:
c->ecache_size = *size;
c->ecache_line_size = *line_size;
break;
default:
break;
}
if (*level == 1) {
u64 a;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
u64 target = mdesc_arc_target(hp, a);
const char *name = mdesc_node_name(hp, target);
if (!strcmp(name, "cache"))
fill_in_one_cache(c, hp, target);
}
}
}
static void __devinit mark_core_ids(struct mdesc_handle *hp, u64 mp,
int core_id)
{
u64 a;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
u64 t = mdesc_arc_target(hp, a);
const char *name;
const u64 *id;
name = mdesc_node_name(hp, t);
if (!strcmp(name, "cpu")) {
id = mdesc_get_property(hp, t, "id", NULL);
if (*id < NR_CPUS)
cpu_data(*id).core_id = core_id;
} else {
u64 j;
mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
u64 n = mdesc_arc_target(hp, j);
const char *n_name;
n_name = mdesc_node_name(hp, n);
if (strcmp(n_name, "cpu"))
continue;
id = mdesc_get_property(hp, n, "id", NULL);
if (*id < NR_CPUS)
cpu_data(*id).core_id = core_id;
}
}
}
}
static void __devinit set_core_ids(struct mdesc_handle *hp)
{
int idx;
u64 mp;
idx = 1;
mdesc_for_each_node_by_name(hp, mp, "cache") {
const u64 *level;
const char *type;
int len;
level = mdesc_get_property(hp, mp, "level", NULL);
if (*level != 1)
continue;
type = mdesc_get_property(hp, mp, "type", &len);
if (!of_find_in_proplist(type, "instn", len))
continue;
mark_core_ids(hp, mp, idx);
idx++;
}
}
static void __devinit mark_proc_ids(struct mdesc_handle *hp, u64 mp,
int proc_id)
{
u64 a;
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
u64 t = mdesc_arc_target(hp, a);
const char *name;
const u64 *id;
name = mdesc_node_name(hp, t);
if (strcmp(name, "cpu"))
continue;
id = mdesc_get_property(hp, t, "id", NULL);
if (*id < NR_CPUS)
cpu_data(*id).proc_id = proc_id;
}
}
static void __devinit __set_proc_ids(struct mdesc_handle *hp,
const char *exec_unit_name)
{
int idx;
u64 mp;
idx = 0;
mdesc_for_each_node_by_name(hp, mp, exec_unit_name) {
const char *type;
int len;
type = mdesc_get_property(hp, mp, "type", &len);
if (!of_find_in_proplist(type, "int", len) &&
!of_find_in_proplist(type, "integer", len))
continue;
mark_proc_ids(hp, mp, idx);
idx++;
}
}
static void __devinit set_proc_ids(struct mdesc_handle *hp)
{
__set_proc_ids(hp, "exec_unit");
__set_proc_ids(hp, "exec-unit");
}
static void __devinit get_one_mondo_bits(const u64 *p, unsigned int *mask,
unsigned char def)
{
u64 val;
if (!p)
goto use_default;
val = *p;
if (!val || val >= 64)
goto use_default;
*mask = ((1U << val) * 64U) - 1U;
return;
use_default:
*mask = ((1U << def) * 64U) - 1U;
}
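
A standalone check of the queue-mask arithmetic above: a '#bits' value of v describes a queue of (1 << v) 64-byte entries, so the byte mask is (1 << v) * 64 - 1.

#include <stdio.h>

int main(void)
{
	unsigned int val = 7;	/* the q-cpu-mondo-#bits default used below */
	unsigned int mask = ((1U << val) * 64U) - 1U;

	printf("val=%u -> qmask=%u (%#x)\n", val, mask, mask); /* 8191, 0x1fff */
	return 0;
}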
static void __devinit get_mondo_data(struct mdesc_handle *hp, u64 mp,
struct trap_per_cpu *tb)
{
const u64 *val;
val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL);
get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7);
val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL);
get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7);
val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL);
get_one_mondo_bits(val, &tb->resum_qmask, 6);
val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL);
get_one_mondo_bits(val, &tb->nonresum_qmask, 2);
}
void __cpuinit mdesc_fill_in_cpu_data(cpumask_t mask)
{
struct mdesc_handle *hp = mdesc_grab();
u64 mp;
ncpus_probed = 0;
mdesc_for_each_node_by_name(hp, mp, "cpu") {
const u64 *id = mdesc_get_property(hp, mp, "id", NULL);
const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL);
struct trap_per_cpu *tb;
cpuinfo_sparc *c;
int cpuid;
u64 a;
ncpus_probed++;
cpuid = *id;
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
printk(KERN_WARNING "Ignoring CPU %d which is "
">= NR_CPUS (%d)\n",
cpuid, NR_CPUS);
continue;
}
if (!cpu_isset(cpuid, mask))
continue;
#else
/* On uniprocessor we only want the values for the
* real physical cpu the kernel booted onto, however
* cpu_data() only has one entry at index 0.
*/
if (cpuid != real_hard_smp_processor_id())
continue;
cpuid = 0;
#endif
c = &cpu_data(cpuid);
c->clock_tick = *cfreq;
tb = &trap_block[cpuid];
get_mondo_data(hp, mp, tb);
mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
u64 j, t = mdesc_arc_target(hp, a);
const char *t_name;
t_name = mdesc_node_name(hp, t);
if (!strcmp(t_name, "cache")) {
fill_in_one_cache(c, hp, t);
continue;
}
mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_FWD) {
u64 n = mdesc_arc_target(hp, j);
const char *n_name;
n_name = mdesc_node_name(hp, n);
if (!strcmp(n_name, "cache"))
fill_in_one_cache(c, hp, n);
}
}
#ifdef CONFIG_SMP
cpu_set(cpuid, cpu_present_map);
#endif
c->core_id = 0;
c->proc_id = -1;
}
#ifdef CONFIG_SMP
sparc64_multi_core = 1;
#endif
set_core_ids(hp);
set_proc_ids(hp);
smp_fill_in_sib_core_maps();
mdesc_release(hp);
}
static ssize_t mdesc_read(struct file *file, char __user *buf,
size_t len, loff_t *offp)
{
struct mdesc_handle *hp = mdesc_grab();
int err;
if (!hp)
return -ENODEV;
err = hp->handle_size;
if (len < hp->handle_size)
err = -EMSGSIZE;
else if (copy_to_user(buf, &hp->mdesc, hp->handle_size))
err = -EFAULT;
mdesc_release(hp);
return err;
}
static const struct file_operations mdesc_fops = {
.read = mdesc_read,
.owner = THIS_MODULE,
};
static struct miscdevice mdesc_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "mdesc",
.fops = &mdesc_fops,
};
static int __init mdesc_misc_init(void)
{
return misc_register(&mdesc_misc);
}
__initcall(mdesc_misc_init);
void __init sun4v_mdesc_init(void)
{
struct mdesc_handle *hp;
unsigned long len, real_len, status;
cpumask_t mask;
(void) sun4v_mach_desc(0UL, 0UL, &len);
printk("MDESC: Size is %lu bytes.\n", len);
hp = mdesc_alloc(len, &lmb_mdesc_ops);
if (hp == NULL) {
prom_printf("MDESC: alloc of %lu bytes failed.\n", len);
prom_halt();
}
status = sun4v_mach_desc(__pa(&hp->mdesc), len, &real_len);
if (status != HV_EOK || real_len > len) {
prom_printf("sun4v_mach_desc fails, err(%lu), "
"len(%lu), real_len(%lu)\n",
status, len, real_len);
mdesc_free(hp);
prom_halt();
}
cur_mdesc = hp;
report_platform_properties();
cpus_setall(mask);
mdesc_fill_in_cpu_data(mask);
}

View File

@@ -0,0 +1,97 @@
#ifdef CONFIG_KGDB
.globl arch_kgdb_breakpoint
.type arch_kgdb_breakpoint,#function
arch_kgdb_breakpoint:
ta 0x72
retl
nop
.size arch_kgdb_breakpoint,.-arch_kgdb_breakpoint
#endif
.type __do_privact,#function
__do_privact:
mov TLB_SFSR, %g3
stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
call do_privact
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size __do_privact,.-__do_privact
.type do_mna,#function
do_mna:
rdpr %tl, %g3
cmp %g3, 1
/* Setup %g4/%g5 now as they are used in the
* winfixup code.
*/
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g4
ldxa [%g4] ASI_DMMU, %g4
ldxa [%g3] ASI_DMMU, %g5
stxa %g0, [%g3] ASI_DMMU ! Clear FaultValid bit
membar #Sync
bgu,pn %icc, winfix_mna
rdpr %tpc, %g3
1: sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call mem_address_unaligned
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size do_mna,.-do_mna
.type do_lddfmna,#function
do_lddfmna:
sethi %hi(109f), %g7
mov TLB_SFSR, %g4
ldxa [%g4] ASI_DMMU, %g5
stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
membar #Sync
mov DMMU_SFAR, %g4
ldxa [%g4] ASI_DMMU, %g4
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call handle_lddfmna
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size do_lddfmna,.-do_lddfmna
.type do_stdfmna,#function
do_stdfmna:
sethi %hi(109f), %g7
mov TLB_SFSR, %g4
ldxa [%g4] ASI_DMMU, %g5
stxa %g0, [%g4] ASI_DMMU ! Clear FaultValid bit
membar #Sync
mov DMMU_SFAR, %g4
ldxa [%g4] ASI_DMMU, %g4
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size do_stdfmna,.-do_stdfmna
.type breakpoint_trap,#function
breakpoint_trap:
call sparc_breakpoint
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size breakpoint_trap,.-breakpoint_trap

View File

@@ -1,4 +1,4 @@
/* Kernel module help for sparc32.
/* Kernel module help for sparc64.
*
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2002 David S. Miller.
@@ -11,6 +11,48 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/spitfire.h>
#ifdef CONFIG_SPARC64
static void *module_map(unsigned long size)
{
struct vm_struct *area;
size = PAGE_ALIGN(size);
if (!size || size > MODULES_LEN)
return NULL;
area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
if (!area)
return NULL;
return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
}
static char *dot2underscore(char *name)
{
return name;
}
#else
static void *module_map(unsigned long size)
{
return vmalloc(size);
}
/* Replace references to .func with _Func */
static char *dot2underscore(char *name)
{
if (name[0] == '.') {
name[0] = '_';
name[1] = toupper(name[1]);
}
return name;
}
#endif /* CONFIG_SPARC64 */
void *module_alloc(unsigned long size)
{
@@ -20,7 +62,7 @@ void *module_alloc(unsigned long size)
if (size == 0)
return NULL;
ret = vmalloc(size);
ret = module_map(size);
if (!ret)
ret = ERR_PTR(-ENOMEM);
else
@@ -37,16 +79,14 @@ void module_free(struct module *mod, void *module_region)
table entries. */
}
/* Make generic code ignore STT_REGISTER dummy undefined symbols,
* and replace references to .func with _Func
*/
/* Make generic code ignore STT_REGISTER dummy undefined symbols. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
unsigned int symidx;
Elf32_Sym *sym;
Elf_Sym *sym;
char *strtab;
int i;
@@ -56,26 +96,23 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
return -ENOEXEC;
}
}
sym = (Elf32_Sym *)sechdrs[symidx].sh_addr;
sym = (Elf_Sym *)sechdrs[symidx].sh_addr;
strtab = (char *)sechdrs[sechdrs[symidx].sh_link].sh_addr;
for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) {
if (sym[i].st_shndx == SHN_UNDEF) {
if (ELF32_ST_TYPE(sym[i].st_info) == STT_REGISTER)
if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER) {
sym[i].st_shndx = SHN_ABS;
else {
} else {
char *name = strtab + sym[i].st_name;
if (name[0] == '.') {
name[0] = '_';
name[1] = toupper(name[1]);
}
dot2underscore(name);
}
}
}
return 0;
}
int apply_relocate(Elf32_Shdr *sechdrs,
int apply_relocate(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
@@ -86,32 +123,68 @@ int apply_relocate(Elf32_Shdr *sechdrs,
return -ENOEXEC;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
int apply_relocate_add(Elf_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf_Sym *sym;
u8 *location;
u32 *loc32;
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
Elf32_Addr v;
Elf_Addr v;
/* This is where to make the change */
location = (u8 *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
loc32 = (u32 *) location;
#ifdef CONFIG_SPARC64
BUG_ON(((u64)location >> (u64)32) != (u64)0);
#endif /* CONFIG_SPARC64 */
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_R_SYM(rel[i].r_info);
v = sym->st_value + rel[i].r_addend;
switch (ELF32_R_TYPE(rel[i].r_info)) {
switch (ELF_R_TYPE(rel[i].r_info) & 0xff) {
#ifdef CONFIG_SPARC64
case R_SPARC_64:
location[0] = v >> 56;
location[1] = v >> 48;
location[2] = v >> 40;
location[3] = v >> 32;
location[4] = v >> 24;
location[5] = v >> 16;
location[6] = v >> 8;
location[7] = v >> 0;
break;
case R_SPARC_DISP32:
v -= (Elf_Addr) location;
*loc32 = v;
break;
case R_SPARC_WDISP19:
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x7ffff) |
((v >> 2) & 0x7ffff);
break;
case R_SPARC_OLO10:
*loc32 = (*loc32 & ~0x1fff) |
(((v & 0x3ff) +
(ELF_R_TYPE(rel[i].r_info) >> 8))
& 0x1fff);
break;
#endif /* CONFIG_SPARC64 */
case R_SPARC_32:
case R_SPARC_UA32:
location[0] = v >> 24;
@@ -121,13 +194,13 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
break;
case R_SPARC_WDISP30:
v -= (Elf32_Addr) location;
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x3fffffff) |
((v >> 2) & 0x3fffffff);
break;
case R_SPARC_WDISP22:
v -= (Elf32_Addr) location;
v -= (Elf_Addr) location;
*loc32 = (*loc32 & ~0x3fffff) |
((v >> 2) & 0x3fffff);
break;
@@ -144,19 +217,38 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
default:
printk(KERN_ERR "module %s: Unknown relocation: %x\n",
me->name,
(int) (ELF32_R_TYPE(rel[i].r_info) & 0xff));
(int) (ELF_R_TYPE(rel[i].r_info) & 0xff));
return -ENOEXEC;
};
}
return 0;
}
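
A standalone check of the R_SPARC_WDISP30 case above, with hypothetical addresses: the 30-bit word displacement of a 'call' is (target - location) >> 2.

#include <stdio.h>

int main(void)
{
	unsigned int insn = 0x40000000;			/* call, displacement 0 */
	unsigned long location = 0x100000, v = 0x100040; /* hypothetical */

	v -= location;
	insn = (insn & ~0x3fffffff) | ((v >> 2) & 0x3fffffff);
	printf("patched insn = %#x\n", insn);		/* 0x40000010 */
	return 0;
}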
#ifdef CONFIG_SPARC64
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
/* Cheetah's I-cache is fully coherent. */
if (tlb_type == spitfire) {
unsigned long va;
flushw_all();
for (va = 0; va < (PAGE_SIZE << 1); va += 32)
spitfire_put_icache_tag(va, 0x0);
__asm__ __volatile__("flush %g6");
}
return 0;
}
#else
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
return 0;
}
#endif /* CONFIG_SPARC64 */
void module_arch_cleanup(struct module *mod)
{

View File

@@ -17,6 +17,8 @@
#include <asm/system.h>
#include <asm/uaccess.h>
#include "kernel.h"
/* #define DEBUG_MULDIV */
static inline int has_imm13(int insn)
@@ -88,9 +90,6 @@ store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs)
return (put_user(result, &win->locals[reg - 16]));
}
}
extern void handle_hw_divzero (struct pt_regs *regs, unsigned long pc,
unsigned long npc, unsigned long psr);
/* Should return 0 if mul/div emulation succeeded and SIGILL should
* not be issued.

View File

@@ -0,0 +1,898 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name)
{
unsigned long ret = res->start + offset;
struct resource *r;
if (res->flags & IORESOURCE_MEM)
r = request_mem_region(ret, size, name);
else
r = request_region(ret, size, name);
if (!r)
ret = 0;
return (void __iomem *) ret;
}
EXPORT_SYMBOL(of_ioremap);
void of_iounmap(struct resource *res, void __iomem *base, unsigned long size)
{
if (res->flags & IORESOURCE_MEM)
release_mem_region((unsigned long) base, size);
else
release_region((unsigned long) base, size);
}
EXPORT_SYMBOL(of_iounmap);
static int node_match(struct device *dev, void *data)
{
struct of_device *op = to_of_device(dev);
struct device_node *dp = data;
return (op->node == dp);
}
struct of_device *of_find_device_by_node(struct device_node *dp)
{
struct device *dev = bus_find_device(&of_platform_bus_type, NULL,
dp, node_match);
if (dev)
return to_of_device(dev);
return NULL;
}
EXPORT_SYMBOL(of_find_device_by_node);
unsigned int irq_of_parse_and_map(struct device_node *node, int index)
{
struct of_device *op = of_find_device_by_node(node);
if (!op || index >= op->num_irqs)
return 0;
return op->irqs[index];
}
EXPORT_SYMBOL(irq_of_parse_and_map);
/* Take the archdata values for IOMMU, STC, and HOSTDATA found in
* BUS and propagate to all child of_device objects.
*/
void of_propagate_archdata(struct of_device *bus)
{
struct dev_archdata *bus_sd = &bus->dev.archdata;
struct device_node *bus_dp = bus->node;
struct device_node *dp;
for (dp = bus_dp->child; dp; dp = dp->sibling) {
struct of_device *op = of_find_device_by_node(dp);
op->dev.archdata.iommu = bus_sd->iommu;
op->dev.archdata.stc = bus_sd->stc;
op->dev.archdata.host_controller = bus_sd->host_controller;
op->dev.archdata.numa_node = bus_sd->numa_node;
if (dp->child)
of_propagate_archdata(op);
}
}
struct bus_type of_platform_bus_type;
EXPORT_SYMBOL(of_platform_bus_type);
static inline u64 of_read_addr(const u32 *cell, int size)
{
u64 r = 0;
while (size--)
r = (r << 32) | *(cell++);
return r;
}
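
A standalone check of the cell concatenation above: two 32-bit cells {0x1, 0x2000} read as the 64-bit value 0x100002000.

#include <stdio.h>

static unsigned long long read_addr(const unsigned int *cell, int size)
{
	unsigned long long r = 0;

	while (size--)
		r = (r << 32) | *(cell++);
	return r;
}

int main(void)
{
	unsigned int cells[2] = { 0x1, 0x2000 };

	printf("%#llx\n", read_addr(cells, 2)); /* 0x100002000 */
	return 0;
}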
static void __init get_cells(struct device_node *dp,
int *addrc, int *sizec)
{
if (addrc)
*addrc = of_n_addr_cells(dp);
if (sizec)
*sizec = of_n_size_cells(dp);
}
/* Max address size we deal with */
#define OF_MAX_ADDR_CELLS 4
struct of_bus {
const char *name;
const char *addr_prop_name;
int (*match)(struct device_node *parent);
void (*count_cells)(struct device_node *child,
int *addrc, int *sizec);
int (*map)(u32 *addr, const u32 *range,
int na, int ns, int pna);
unsigned long (*get_flags)(const u32 *addr, unsigned long);
};
/*
* Default translator (generic bus)
*/
static void of_bus_default_count_cells(struct device_node *dev,
int *addrc, int *sizec)
{
get_cells(dev, addrc, sizec);
}
/* Make sure the least significant 64 bits are in range. Even
* for 3 or 4 cell values it is a good enough approximation.
*/
static int of_out_of_range(const u32 *addr, const u32 *base,
const u32 *size, int na, int ns)
{
u64 a = of_read_addr(addr, na);
u64 b = of_read_addr(base, na);
if (a < b)
return 1;
b += of_read_addr(size, ns);
if (a >= b)
return 1;
return 0;
}
static int of_bus_default_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
u32 result[OF_MAX_ADDR_CELLS];
int i;
if (ns > 2) {
printk("of_device: Cannot handle size cells (%d) > 2.", ns);
return -EINVAL;
}
if (of_out_of_range(addr, range, range + na + pna, na, ns))
return -EINVAL;
/* Start with the parent range base. */
memcpy(result, range + na, pna * 4);
/* Add in the child address offset. */
for (i = 0; i < na; i++)
result[pna - 1 - i] +=
(addr[na - 1 - i] -
range[na - 1 - i]);
memcpy(addr, result, pna * 4);
return 0;
}
static unsigned long of_bus_default_get_flags(const u32 *addr, unsigned long flags)
{
if (flags)
return flags;
return IORESOURCE_MEM;
}
/*
* PCI bus specific translator
*/
static int of_bus_pci_match(struct device_node *np)
{
if (!strcmp(np->name, "pci")) {
const char *model = of_get_property(np, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
return 0;
/* Do not do PCI specific frobbing if the
* PCI bridge lacks a ranges property. We
* want to pass it through up to the next
* parent as-is, not with the PCI translate
* method which chops off the top address cell.
*/
if (!of_find_property(np, "ranges", NULL))
return 0;
return 1;
}
return 0;
}
static int of_bus_simba_match(struct device_node *np)
{
const char *model = of_get_property(np, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
return 1;
/* Treat PCI busses lacking ranges property just like
* simba.
*/
if (!strcmp(np->name, "pci")) {
if (!of_find_property(np, "ranges", NULL))
return 1;
}
return 0;
}
static int of_bus_simba_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
return 0;
}
static void of_bus_pci_count_cells(struct device_node *np,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 3;
if (sizec)
*sizec = 2;
}
static int of_bus_pci_map(u32 *addr, const u32 *range,
int na, int ns, int pna)
{
u32 result[OF_MAX_ADDR_CELLS];
int i;
/* Check address type match */
if ((addr[0] ^ range[0]) & 0x03000000)
return -EINVAL;
if (of_out_of_range(addr + 1, range + 1, range + na + pna,
na - 1, ns))
return -EINVAL;
/* Start with the parent range base. */
memcpy(result, range + na, pna * 4);
/* Add in the child address offset, skipping high cell. */
for (i = 0; i < na - 1; i++)
result[pna - 1 - i] +=
(addr[na - 1 - i] -
range[na - 1 - i]);
memcpy(addr, result, pna * 4);
return 0;
}
static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
{
u32 w = addr[0];
/* For PCI, we override whatever child busses may have used. */
flags = 0;
switch((w >> 24) & 0x03) {
case 0x01:
flags |= IORESOURCE_IO;
break;
case 0x02: /* 32 bits */
case 0x03: /* 64 bits */
flags |= IORESOURCE_MEM;
break;
}
if (w & 0x40000000)
flags |= IORESOURCE_PREFETCH;
return flags;
}
/*
* SBUS bus specific translator
*/
static int of_bus_sbus_match(struct device_node *np)
{
return !strcmp(np->name, "sbus") ||
!strcmp(np->name, "sbi");
}
static void of_bus_sbus_count_cells(struct device_node *child,
int *addrc, int *sizec)
{
if (addrc)
*addrc = 2;
if (sizec)
*sizec = 1;
}
/*
* FHC/Central bus specific translator.
*
* This is just needed to hard-code the address and size cell
* counts. 'fhc' and 'central' nodes lack the #address-cells and
* #size-cells properties, and if you walk to the root on such
* Enterprise boxes all you'll get is a #size-cells of 2 which is
* not what we want to use.
*/
static int of_bus_fhc_match(struct device_node *np)
{
return !strcmp(np->name, "fhc") ||
!strcmp(np->name, "central");
}
#define of_bus_fhc_count_cells of_bus_sbus_count_cells
/*
* Array of bus specific translators
*/
static struct of_bus of_busses[] = {
/* PCI */
{
.name = "pci",
.addr_prop_name = "assigned-addresses",
.match = of_bus_pci_match,
.count_cells = of_bus_pci_count_cells,
.map = of_bus_pci_map,
.get_flags = of_bus_pci_get_flags,
},
/* SIMBA */
{
.name = "simba",
.addr_prop_name = "assigned-addresses",
.match = of_bus_simba_match,
.count_cells = of_bus_pci_count_cells,
.map = of_bus_simba_map,
.get_flags = of_bus_pci_get_flags,
},
/* SBUS */
{
.name = "sbus",
.addr_prop_name = "reg",
.match = of_bus_sbus_match,
.count_cells = of_bus_sbus_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
/* FHC */
{
.name = "fhc",
.addr_prop_name = "reg",
.match = of_bus_fhc_match,
.count_cells = of_bus_fhc_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
/* Default */
{
.name = "default",
.addr_prop_name = "reg",
.match = NULL,
.count_cells = of_bus_default_count_cells,
.map = of_bus_default_map,
.get_flags = of_bus_default_get_flags,
},
};
static struct of_bus *of_match_bus(struct device_node *np)
{
int i;
for (i = 0; i < ARRAY_SIZE(of_busses); i ++)
if (!of_busses[i].match || of_busses[i].match(np))
return &of_busses[i];
BUG();
return NULL;
}
static int __init build_one_resource(struct device_node *parent,
struct of_bus *bus,
struct of_bus *pbus,
u32 *addr,
int na, int ns, int pna)
{
const u32 *ranges;
int rone, rlen;
ranges = of_get_property(parent, "ranges", &rlen);
if (ranges == NULL || rlen == 0) {
u32 result[OF_MAX_ADDR_CELLS];
int i;
memset(result, 0, pna * 4);
for (i = 0; i < na; i++)
result[pna - 1 - i] =
addr[na - 1 - i];
memcpy(addr, result, pna * 4);
return 0;
}
/* Now walk through the ranges */
rlen /= 4;
rone = na + pna + ns;
for (; rlen >= rone; rlen -= rone, ranges += rone) {
if (!bus->map(addr, ranges, na, ns, pna))
return 0;
}
/* When we miss an I/O space match on PCI, just pass it up
* to the next PCI bridge and/or controller.
*/
if (!strcmp(bus->name, "pci") &&
(addr[0] & 0x03000000) == 0x01000000)
return 0;
return 1;
}
static int __init use_1to1_mapping(struct device_node *pp)
{
/* If we have a ranges property in the parent, use it. */
if (of_find_property(pp, "ranges", NULL) != NULL)
return 0;
/* If the parent is the dma node of an ISA bus, pass
* the translation up to the root.
*
* Some SBUS devices use intermediate nodes to express
* hierarchy within the device itself. These aren't
* real bus nodes, and don't have a 'ranges' property.
* But, we should still pass the translation work up
* to the SBUS itself.
*/
if (!strcmp(pp->name, "dma") ||
!strcmp(pp->name, "espdma") ||
!strcmp(pp->name, "ledma") ||
!strcmp(pp->name, "lebuffer"))
return 0;
/* Similarly for all PCI bridges, if we get this far
* it lacks a ranges property, and this will include
* cases like Simba.
*/
if (!strcmp(pp->name, "pci"))
return 0;
return 1;
}
static int of_resource_verbose;
static void __init build_device_resources(struct of_device *op,
struct device *parent)
{
struct of_device *p_op;
struct of_bus *bus;
int na, ns;
int index, num_reg;
const void *preg;
if (!parent)
return;
p_op = to_of_device(parent);
bus = of_match_bus(p_op->node);
bus->count_cells(op->node, &na, &ns);
preg = of_get_property(op->node, bus->addr_prop_name, &num_reg);
if (!preg || num_reg == 0)
return;
/* Convert to num-cells. */
num_reg /= 4;
/* Convert to num-entries. */
num_reg /= na + ns;
/* Prevent overrunning the op->resources[] array. */
if (num_reg > PROMREG_MAX) {
printk(KERN_WARNING "%s: Too many regs (%d), "
"limiting to %d.\n",
op->node->full_name, num_reg, PROMREG_MAX);
num_reg = PROMREG_MAX;
}
for (index = 0; index < num_reg; index++) {
struct resource *r = &op->resource[index];
u32 addr[OF_MAX_ADDR_CELLS];
const u32 *reg = (preg + (index * ((na + ns) * 4)));
struct device_node *dp = op->node;
struct device_node *pp = p_op->node;
struct of_bus *pbus, *dbus;
u64 size, result = OF_BAD_ADDR;
unsigned long flags;
int dna, dns;
int pna, pns;
size = of_read_addr(reg + na, ns);
memcpy(addr, reg, na * 4);
flags = bus->get_flags(addr, 0);
if (use_1to1_mapping(pp)) {
result = of_read_addr(addr, na);
goto build_res;
}
dna = na;
dns = ns;
dbus = bus;
while (1) {
dp = pp;
pp = dp->parent;
if (!pp) {
result = of_read_addr(addr, dna);
break;
}
pbus = of_match_bus(pp);
pbus->count_cells(dp, &pna, &pns);
if (build_one_resource(dp, dbus, pbus, addr,
dna, dns, pna))
break;
flags = pbus->get_flags(addr, flags);
dna = pna;
dns = pns;
dbus = pbus;
}
build_res:
memset(r, 0, sizeof(*r));
if (of_resource_verbose)
printk("%s reg[%d] -> %lx\n",
op->node->full_name, index,
result);
if (result != OF_BAD_ADDR) {
if (tlb_type == hypervisor)
result &= 0x0fffffffffffffffUL;
r->start = result;
r->end = result + size - 1;
r->flags = flags;
}
r->name = op->node->name;
}
}
static struct device_node * __init
apply_interrupt_map(struct device_node *dp, struct device_node *pp,
const u32 *imap, int imlen, const u32 *imask,
unsigned int *irq_p)
{
struct device_node *cp;
unsigned int irq = *irq_p;
struct of_bus *bus;
phandle handle;
const u32 *reg;
int na, num_reg, i;
bus = of_match_bus(pp);
bus->count_cells(dp, &na, NULL);
reg = of_get_property(dp, "reg", &num_reg);
if (!reg || !num_reg)
return NULL;
imlen /= ((na + 3) * 4);
handle = 0;
for (i = 0; i < imlen; i++) {
int j;
for (j = 0; j < na; j++) {
if ((reg[j] & imask[j]) != imap[j])
goto next;
}
if (imap[na] == irq) {
handle = imap[na + 1];
irq = imap[na + 2];
break;
}
next:
imap += (na + 3);
}
if (i == imlen) {
/* Psycho and Sabre PCI controllers can have 'interrupt-map'
* properties that do not include the on-board device
* interrupts. Instead, the device's 'interrupts' property
* is already a fully specified INO value.
*
* Handle this by deciding that, if we didn't get a
* match in the parent's 'interrupt-map', and the
	 * parent is an IRQ translator, then use the parent as
* our IRQ controller.
*/
if (pp->irq_trans)
return pp;
return NULL;
}
*irq_p = irq;
cp = of_find_node_by_phandle(handle);
return cp;
}
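/* Illustration of the entry layout walked above (values are
 * hypothetical): with na = 3, as on PCI, each 'interrupt-map' entry
 * is na + 3 = 6 cells:
 *
 *   <phys.hi phys.mid phys.lo child-intr parent-phandle parent-intr>
 *
 * A device whose masked 'reg' cells match the first na cells and
 * whose interrupt equals child-intr is translated to parent-intr in
 * the domain of the node named by parent-phandle.
 */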
static unsigned int __init pci_irq_swizzle(struct device_node *dp,
struct device_node *pp,
unsigned int irq)
{
const struct linux_prom_pci_registers *regs;
unsigned int bus, devfn, slot, ret;
if (irq < 1 || irq > 4)
return irq;
regs = of_get_property(dp, "reg", NULL);
if (!regs)
return irq;
bus = (regs->phys_hi >> 16) & 0xff;
devfn = (regs->phys_hi >> 8) & 0xff;
slot = (devfn >> 3) & 0x1f;
if (pp->irq_trans) {
/* Derived from Table 8-3, U2P User's Manual. This branch
* is handling a PCI controller that lacks a proper set of
* interrupt-map and interrupt-map-mask properties. The
* Ultra-E450 is one example.
*
* The bit layout is BSSLL, where:
* B: 0 on bus A, 1 on bus B
		 * S: 2-bit slot number, derived from PCI device number as
* (dev - 1) for bus A, or (dev - 2) for bus B
* L: 2-bit line number
*/
if (bus & 0x80) {
/* PBM-A */
bus = 0x00;
slot = (slot - 1) << 2;
} else {
/* PBM-B */
bus = 0x10;
slot = (slot - 2) << 2;
}
irq -= 1;
ret = (bus | slot | irq);
} else {
/* Going through a PCI-PCI bridge that lacks a set of
* interrupt-map and interrupt-map-mask properties.
*/
ret = ((irq - 1 + (slot & 3)) & 3) + 1;
}
return ret;
}
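/* Two worked examples of the swizzle (hypothetical devices):
 *
 * 1) U2P branch: a device in PCI slot 3 on PBM-B raising INTB
 *    (irq = 2) yields bus = 0x10, slot = (3 - 2) << 2 = 0x04, and
 *    irq - 1 = 0x01, so ret = 0x15 in the BSSLL encoding.
 *
 * 2) Bridge branch: INTA (irq = 1) from a device at slot 2 behind a
 *    plain PCI-PCI bridge swizzles to ((1 - 1 + 2) & 3) + 1 = 3,
 *    i.e. INTC on the bridge's upstream side.
 */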
static int of_irq_verbose;
static unsigned int __init build_one_device_irq(struct of_device *op,
struct device *parent,
unsigned int irq)
{
struct device_node *dp = op->node;
struct device_node *pp, *ip;
unsigned int orig_irq = irq;
int nid;
if (irq == 0xffffffff)
return irq;
if (dp->irq_trans) {
irq = dp->irq_trans->irq_build(dp, irq,
dp->irq_trans->data);
if (of_irq_verbose)
printk("%s: direct translate %x --> %x\n",
dp->full_name, orig_irq, irq);
goto out;
}
/* Something more complicated. Walk up to the root, applying
* interrupt-map or bus specific translations, until we hit
* an IRQ translator.
*
* If we hit a bus type or situation we cannot handle, we
* stop and assume that the original IRQ number was in a
	 * format which has special meaning to its immediate parent.
*/
pp = dp->parent;
ip = NULL;
while (pp) {
const void *imap, *imsk;
int imlen;
imap = of_get_property(pp, "interrupt-map", &imlen);
imsk = of_get_property(pp, "interrupt-map-mask", NULL);
if (imap && imsk) {
struct device_node *iret;
int this_orig_irq = irq;
iret = apply_interrupt_map(dp, pp,
imap, imlen, imsk,
&irq);
if (of_irq_verbose)
printk("%s: Apply [%s:%x] imap --> [%s:%x]\n",
op->node->full_name,
pp->full_name, this_orig_irq,
(iret ? iret->full_name : "NULL"), irq);
if (!iret)
break;
if (iret->irq_trans) {
ip = iret;
break;
}
} else {
if (!strcmp(pp->name, "pci")) {
unsigned int this_orig_irq = irq;
irq = pci_irq_swizzle(dp, pp, irq);
if (of_irq_verbose)
printk("%s: PCI swizzle [%s] "
"%x --> %x\n",
op->node->full_name,
pp->full_name, this_orig_irq,
irq);
}
if (pp->irq_trans) {
ip = pp;
break;
}
}
dp = pp;
pp = pp->parent;
}
if (!ip)
return orig_irq;
irq = ip->irq_trans->irq_build(op->node, irq,
ip->irq_trans->data);
if (of_irq_verbose)
printk("%s: Apply IRQ trans [%s] %x --> %x\n",
op->node->full_name, ip->full_name, orig_irq, irq);
out:
nid = of_node_to_nid(dp);
if (nid != -1) {
cpumask_t numa_mask = *cpumask_of_node(nid);
irq_set_affinity(irq, &numa_mask);
}
return irq;
}
static struct of_device * __init scan_one_device(struct device_node *dp,
struct device *parent)
{
struct of_device *op = kzalloc(sizeof(*op), GFP_KERNEL);
const unsigned int *irq;
struct dev_archdata *sd;
int len, i;
if (!op)
return NULL;
sd = &op->dev.archdata;
sd->prom_node = dp;
sd->op = op;
op->node = dp;
op->clock_freq = of_getintprop_default(dp, "clock-frequency",
(25*1000*1000));
op->portid = of_getintprop_default(dp, "upa-portid", -1);
if (op->portid == -1)
op->portid = of_getintprop_default(dp, "portid", -1);
irq = of_get_property(dp, "interrupts", &len);
if (irq) {
op->num_irqs = len / 4;
/* Prevent overrunning the op->irqs[] array. */
if (op->num_irqs > PROMINTR_MAX) {
printk(KERN_WARNING "%s: Too many irqs (%d), "
"limiting to %d.\n",
dp->full_name, op->num_irqs, PROMINTR_MAX);
op->num_irqs = PROMINTR_MAX;
}
memcpy(op->irqs, irq, op->num_irqs * 4);
} else {
op->num_irqs = 0;
}
build_device_resources(op, parent);
for (i = 0; i < op->num_irqs; i++)
op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
op->dev.parent = parent;
op->dev.bus = &of_platform_bus_type;
if (!parent)
dev_set_name(&op->dev, "root");
else
dev_set_name(&op->dev, "%08x", dp->node);
if (of_device_register(op)) {
printk("%s: Could not register of device.\n",
dp->full_name);
kfree(op);
op = NULL;
}
return op;
}
static void __init scan_tree(struct device_node *dp, struct device *parent)
{
while (dp) {
struct of_device *op = scan_one_device(dp, parent);
if (op)
scan_tree(dp->child, &op->dev);
dp = dp->sibling;
}
}
static void __init scan_of_devices(void)
{
struct device_node *root = of_find_node_by_path("/");
struct of_device *parent;
parent = scan_one_device(root, NULL);
if (!parent)
return;
scan_tree(root->child, &parent->dev);
}
static int __init of_bus_driver_init(void)
{
int err;
err = of_bus_type_init(&of_platform_bus_type, "of");
if (!err)
scan_of_devices();
return err;
}
postcore_initcall(of_bus_driver_init);
static int __init of_debug(char *str)
{
int val = 0;
get_option(&str, &val);
if (val & 1)
of_resource_verbose = 1;
if (val & 2)
of_irq_verbose = 1;
return 1;
}
__setup("of_debug=", of_debug);

1095
arch/sparc/kernel/pci.c Normal file
File diff suppressed because it is too large

@@ -0,0 +1,545 @@
/* pci_common.c: PCI controller common support.
*
* Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include "pci_impl.h"
#include "pci_sun4v.h"
static int config_out_of_range(struct pci_pbm_info *pbm,
unsigned long bus,
unsigned long devfn,
unsigned long reg)
{
if (bus < pbm->pci_first_busno ||
bus > pbm->pci_last_busno)
return 1;
return 0;
}
static void *sun4u_config_mkaddr(struct pci_pbm_info *pbm,
unsigned long bus,
unsigned long devfn,
unsigned long reg)
{
unsigned long rbits = pbm->config_space_reg_bits;
if (config_out_of_range(pbm, bus, devfn, reg))
return NULL;
reg = (reg & ((1 << rbits) - 1));
devfn <<= rbits;
bus <<= rbits + 8;
return (void *) (pbm->config_space | bus | devfn | reg);
}
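/* Address formation by example (hypothetical access): with
 * config_space_reg_bits = 8, the non-PCI-E case, a read of bus 1,
 * devfn 0x10 (device 2, function 0), register 0x04 computes
 *
 *   reg   = 0x04 & 0xff =    0x04
 *   devfn = 0x10 << 8   =  0x1000
 *   bus   = 1 << 16     = 0x10000
 *
 * giving pbm->config_space | 0x11004.
 */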
/* At least on Sabre, it is necessary to access all PCI host controller
* registers at their natural size, otherwise zeros are returned.
* Strange but true, and I see no language in the UltraSPARC-IIi
* programmer's manual that mentions this even indirectly.
*/
static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm,
unsigned char bus, unsigned int devfn,
int where, int size, u32 *value)
{
u32 tmp32, *addr;
u16 tmp16;
u8 tmp8;
addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
switch (size) {
case 1:
if (where < 8) {
unsigned long align = (unsigned long) addr;
align &= ~1;
pci_config_read16((u16 *)align, &tmp16);
if (where & 1)
*value = tmp16 >> 8;
else
*value = tmp16 & 0xff;
} else {
pci_config_read8((u8 *)addr, &tmp8);
*value = (u32) tmp8;
}
break;
case 2:
if (where < 8) {
pci_config_read16((u16 *)addr, &tmp16);
*value = (u32) tmp16;
} else {
pci_config_read8((u8 *)addr, &tmp8);
*value = (u32) tmp8;
pci_config_read8(((u8 *)addr) + 1, &tmp8);
*value |= ((u32) tmp8) << 8;
}
break;
case 4:
tmp32 = 0xffffffff;
sun4u_read_pci_cfg_host(pbm, bus, devfn,
where, 2, &tmp32);
*value = tmp32;
tmp32 = 0xffffffff;
sun4u_read_pci_cfg_host(pbm, bus, devfn,
where + 2, 2, &tmp32);
*value |= tmp32 << 16;
break;
}
return PCIBIOS_SUCCESSFUL;
}
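/* So a 32-bit read of the host bridge's vendor/device ID word at
 * offset 0 is decomposed by the size-4 case into two naturally
 * aligned 16-bit reads at offsets 0 and 2, recombined as
 * lo16 | (hi16 << 16), honoring the natural-size restriction
 * described above.
 */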
static int sun4u_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
int where, int size, u32 *value)
{
struct pci_pbm_info *pbm = bus_dev->sysdata;
unsigned char bus = bus_dev->number;
u32 *addr;
u16 tmp16;
u8 tmp8;
switch (size) {
case 1:
*value = 0xff;
break;
case 2:
*value = 0xffff;
break;
case 4:
*value = 0xffffffff;
break;
}
if (!bus_dev->number && !PCI_SLOT(devfn))
return sun4u_read_pci_cfg_host(pbm, bus, devfn, where,
size, value);
addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
switch (size) {
case 1:
pci_config_read8((u8 *)addr, &tmp8);
*value = (u32) tmp8;
break;
case 2:
if (where & 0x01) {
printk("pci_read_config_word: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_read16((u16 *)addr, &tmp16);
*value = (u32) tmp16;
break;
case 4:
if (where & 0x03) {
printk("pci_read_config_dword: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_read32(addr, value);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int sun4u_write_pci_cfg_host(struct pci_pbm_info *pbm,
unsigned char bus, unsigned int devfn,
int where, int size, u32 value)
{
u32 *addr;
addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
switch (size) {
case 1:
if (where < 8) {
unsigned long align = (unsigned long) addr;
u16 tmp16;
align &= ~1;
pci_config_read16((u16 *)align, &tmp16);
if (where & 1) {
tmp16 &= 0x00ff;
tmp16 |= value << 8;
} else {
tmp16 &= 0xff00;
tmp16 |= value;
}
pci_config_write16((u16 *)align, tmp16);
} else
pci_config_write8((u8 *)addr, value);
break;
case 2:
if (where < 8) {
pci_config_write16((u16 *)addr, value);
} else {
pci_config_write8((u8 *)addr, value & 0xff);
pci_config_write8(((u8 *)addr) + 1, value >> 8);
}
break;
case 4:
sun4u_write_pci_cfg_host(pbm, bus, devfn,
where, 2, value & 0xffff);
sun4u_write_pci_cfg_host(pbm, bus, devfn,
where + 2, 2, value >> 16);
break;
}
return PCIBIOS_SUCCESSFUL;
}
static int sun4u_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
int where, int size, u32 value)
{
struct pci_pbm_info *pbm = bus_dev->sysdata;
unsigned char bus = bus_dev->number;
u32 *addr;
if (!bus_dev->number && !PCI_SLOT(devfn))
return sun4u_write_pci_cfg_host(pbm, bus, devfn, where,
size, value);
addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
if (!addr)
return PCIBIOS_SUCCESSFUL;
switch (size) {
case 1:
pci_config_write8((u8 *)addr, value);
break;
case 2:
if (where & 0x01) {
printk("pci_write_config_word: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_write16((u16 *)addr, value);
break;
case 4:
if (where & 0x03) {
printk("pci_write_config_dword: misaligned reg [%x]\n",
where);
return PCIBIOS_SUCCESSFUL;
}
pci_config_write32(addr, value);
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops sun4u_pci_ops = {
.read = sun4u_read_pci_cfg,
.write = sun4u_write_pci_cfg,
};
static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
int where, int size, u32 *value)
{
struct pci_pbm_info *pbm = bus_dev->sysdata;
u32 devhandle = pbm->devhandle;
unsigned int bus = bus_dev->number;
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);
unsigned long ret;
if (config_out_of_range(pbm, bus, devfn, where)) {
ret = ~0UL;
} else {
ret = pci_sun4v_config_get(devhandle,
HV_PCI_DEVICE_BUILD(bus, device, func),
where, size);
}
switch (size) {
case 1:
*value = ret & 0xff;
break;
case 2:
*value = ret & 0xffff;
break;
case 4:
*value = ret & 0xffffffff;
break;
	}
return PCIBIOS_SUCCESSFUL;
}
static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
int where, int size, u32 value)
{
struct pci_pbm_info *pbm = bus_dev->sysdata;
u32 devhandle = pbm->devhandle;
unsigned int bus = bus_dev->number;
unsigned int device = PCI_SLOT(devfn);
unsigned int func = PCI_FUNC(devfn);
unsigned long ret;
if (config_out_of_range(pbm, bus, devfn, where)) {
/* Do nothing. */
} else {
ret = pci_sun4v_config_put(devhandle,
HV_PCI_DEVICE_BUILD(bus, device, func),
where, size, value);
}
return PCIBIOS_SUCCESSFUL;
}
struct pci_ops sun4v_pci_ops = {
.read = sun4v_read_pci_cfg,
.write = sun4v_write_pci_cfg,
};
void pci_get_pbm_props(struct pci_pbm_info *pbm)
{
const u32 *val = of_get_property(pbm->op->node, "bus-range", NULL);
pbm->pci_first_busno = val[0];
pbm->pci_last_busno = val[1];
val = of_get_property(pbm->op->node, "ino-bitmap", NULL);
if (val) {
pbm->ino_bitmap = (((u64)val[1] << 32UL) |
((u64)val[0] << 0UL));
}
}
static void pci_register_legacy_regions(struct resource *io_res,
struct resource *mem_res)
{
struct resource *p;
/* VGA Video RAM. */
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return;
p->name = "Video RAM area";
p->start = mem_res->start + 0xa0000UL;
p->end = p->start + 0x1ffffUL;
p->flags = IORESOURCE_BUSY;
request_resource(mem_res, p);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return;
p->name = "System ROM";
p->start = mem_res->start + 0xf0000UL;
p->end = p->start + 0xffffUL;
p->flags = IORESOURCE_BUSY;
request_resource(mem_res, p);
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (!p)
return;
p->name = "Video ROM";
p->start = mem_res->start + 0xc0000UL;
p->end = p->start + 0x7fffUL;
p->flags = IORESOURCE_BUSY;
request_resource(mem_res, p);
}
static void pci_register_iommu_region(struct pci_pbm_info *pbm)
{
const u32 *vdma = of_get_property(pbm->op->node, "virtual-dma", NULL);
if (vdma) {
struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL);
if (!rp) {
prom_printf("Cannot allocate IOMMU resource.\n");
prom_halt();
}
rp->name = "IOMMU";
rp->start = pbm->mem_space.start + (unsigned long) vdma[0];
rp->end = rp->start + (unsigned long) vdma[1] - 1UL;
rp->flags = IORESOURCE_BUSY;
request_resource(&pbm->mem_space, rp);
}
}
void pci_determine_mem_io_space(struct pci_pbm_info *pbm)
{
const struct linux_prom_pci_ranges *pbm_ranges;
int i, saw_mem, saw_io;
int num_pbm_ranges;
saw_mem = saw_io = 0;
pbm_ranges = of_get_property(pbm->op->node, "ranges", &i);
if (!pbm_ranges) {
prom_printf("PCI: Fatal error, missing PBM ranges property "
" for %s\n",
pbm->name);
prom_halt();
}
num_pbm_ranges = i / sizeof(*pbm_ranges);
for (i = 0; i < num_pbm_ranges; i++) {
const struct linux_prom_pci_ranges *pr = &pbm_ranges[i];
unsigned long a, size;
u32 parent_phys_hi, parent_phys_lo;
u32 size_hi, size_lo;
int type;
parent_phys_hi = pr->parent_phys_hi;
parent_phys_lo = pr->parent_phys_lo;
if (tlb_type == hypervisor)
parent_phys_hi &= 0x0fffffff;
size_hi = pr->size_hi;
size_lo = pr->size_lo;
type = (pr->child_phys_hi >> 24) & 0x3;
a = (((unsigned long)parent_phys_hi << 32UL) |
((unsigned long)parent_phys_lo << 0UL));
size = (((unsigned long)size_hi << 32UL) |
((unsigned long)size_lo << 0UL));
switch (type) {
case 0:
/* PCI config space, 16MB */
pbm->config_space = a;
break;
case 1:
/* 16-bit IO space, 16MB */
pbm->io_space.start = a;
pbm->io_space.end = a + size - 1UL;
pbm->io_space.flags = IORESOURCE_IO;
saw_io = 1;
break;
case 2:
/* 32-bit MEM space, 2GB */
pbm->mem_space.start = a;
pbm->mem_space.end = a + size - 1UL;
pbm->mem_space.flags = IORESOURCE_MEM;
saw_mem = 1;
break;
case 3:
/* XXX 64-bit MEM handling XXX */
default:
break;
		}
}
if (!saw_io || !saw_mem) {
prom_printf("%s: Fatal error, missing %s PBM range.\n",
pbm->name,
(!saw_io ? "IO" : "MEM"));
prom_halt();
}
printk("%s: PCI IO[%lx] MEM[%lx]\n",
pbm->name,
pbm->io_space.start,
pbm->mem_space.start);
pbm->io_space.name = pbm->mem_space.name = pbm->name;
request_resource(&ioport_resource, &pbm->io_space);
request_resource(&iomem_resource, &pbm->mem_space);
pci_register_legacy_regions(&pbm->io_space,
&pbm->mem_space);
pci_register_iommu_region(pbm);
}
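/* The type decode above follows the OpenFirmware PCI binding: bits
 * 25:24 of child_phys_hi are the 'ss' space code.  For example, a
 * ranges entry with child_phys_hi = 0x02000000 decodes as type 2, a
 * 32-bit memory range, and fills in pbm->mem_space.
 */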
/* Generic helper routines for PCI error reporting. */
void pci_scan_for_target_abort(struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
struct pci_dev *pdev;
struct pci_bus *bus;
list_for_each_entry(pdev, &pbus->devices, bus_list) {
u16 status, error_bits;
pci_read_config_word(pdev, PCI_STATUS, &status);
error_bits =
(status & (PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
printk("%s: Device %s saw Target Abort [%016x]\n",
pbm->name, pci_name(pdev), status);
}
}
list_for_each_entry(bus, &pbus->children, node)
pci_scan_for_target_abort(pbm, bus);
}
void pci_scan_for_master_abort(struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
struct pci_dev *pdev;
struct pci_bus *bus;
list_for_each_entry(pdev, &pbus->devices, bus_list) {
u16 status, error_bits;
pci_read_config_word(pdev, PCI_STATUS, &status);
error_bits =
(status & (PCI_STATUS_REC_MASTER_ABORT));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
printk("%s: Device %s received Master Abort [%016x]\n",
pbm->name, pci_name(pdev), status);
}
}
list_for_each_entry(bus, &pbus->children, node)
pci_scan_for_master_abort(pbm, bus);
}
void pci_scan_for_parity_error(struct pci_pbm_info *pbm,
struct pci_bus *pbus)
{
struct pci_dev *pdev;
struct pci_bus *bus;
list_for_each_entry(pdev, &pbus->devices, bus_list) {
u16 status, error_bits;
pci_read_config_word(pdev, PCI_STATUS, &status);
error_bits =
(status & (PCI_STATUS_PARITY |
PCI_STATUS_DETECTED_PARITY));
if (error_bits) {
pci_write_config_word(pdev, PCI_STATUS, error_bits);
printk("%s: Device %s saw Parity Error [%016x]\n",
pbm->name, pci_name(pdev), status);
}
}
list_for_each_entry(bus, &pbus->children, node)
pci_scan_for_parity_error(pbm, bus);
}


@@ -0,0 +1,521 @@
/* pci_fire.c: Sun4u platform PCI-E controller support.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include "pci_impl.h"
#define DRIVER_NAME "fire"
#define PFX DRIVER_NAME ": "
#define FIRE_IOMMU_CONTROL 0x40000UL
#define FIRE_IOMMU_TSBBASE 0x40008UL
#define FIRE_IOMMU_FLUSH 0x40100UL
#define FIRE_IOMMU_FLUSHINV 0x40108UL
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
struct iommu *iommu = pbm->iommu;
u32 vdma[2], dma_mask;
u64 control;
int tsbsize, err;
/* No virtual-dma property on these guys, use largest size. */
vdma[0] = 0xc0000000; /* base */
vdma[1] = 0x40000000; /* size */
dma_mask = 0xffffffff;
tsbsize = 128;
/* Register addresses. */
iommu->iommu_control = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;
/* We use the main control/status register of FIRE as the write
* completion register.
*/
iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;
/*
* Invalidate TLB Entries.
*/
upa_writeq(~(u64)0, iommu->iommu_flushinv);
err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
pbm->numa_node);
if (err)
return err;
upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);
control = upa_readq(iommu->iommu_control);
control |= (0x00000400 /* TSB cache snoop enable */ |
0x00000300 /* Cache mode */ |
0x00000002 /* Bypass enable */ |
0x00000001 /* Translation enable */);
upa_writeq(control, iommu->iommu_control);
return 0;
}
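/* A quick check of the sizing above (taking iommu_table_init's
 * second argument as a byte count): tsbsize = 128 gives a
 * 128 * 8K = 1MB TSB, i.e. 128K eight-byte IOPTEs.  At the 8KB IO
 * page size that maps 128K * 8KB = 1GB of DVMA space, matching the
 * hard-coded vdma[1] window of 0x40000000.
 */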
#ifdef CONFIG_PCI_MSI
struct pci_msiq_entry {
u64 word0;
#define MSIQ_WORD0_RESV 0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE 0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT 56
#define MSIQ_WORD0_LEN 0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT 46
#define MSIQ_WORD0_ADDR0 0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT 32
#define MSIQ_WORD0_RID 0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT 16
#define MSIQ_WORD0_DATA0 0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT 0
#define MSIQ_TYPE_MSG 0x6
#define MSIQ_TYPE_MSI32 0xb
#define MSIQ_TYPE_MSI64 0xf
u64 word1;
#define MSIQ_WORD1_ADDR1 0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT 16
#define MSIQ_WORD1_DATA1 0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT 0
u64 resv[6];
};
/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG 0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES 0xfffc000000000000UL
#define EVENT_QUEUE_CONTROL_SET(EQ) (0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN 0x0000100000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR(EQ) (0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF 0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I 0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS 0x0000100000000000UL
#define EVENT_QUEUE_STATE(EQ) (0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK 0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE 0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE 0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR 0x0000000000000004UL
#define EVENT_QUEUE_TAIL(EQ) (0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW 0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL 0x000000000000007fUL
#define EVENT_QUEUE_HEAD(EQ) (0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL 0x000000000000007fUL
#define MSI_MAP(MSI) (0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID 0x8000000000000000UL
#define MSI_MAP_EQWR_N 0x4000000000000000UL
#define MSI_MAP_EQNUM 0x000000000000003fUL
#define MSI_CLEAR(MSI) (0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N 0x4000000000000000UL
#define IMONDO_DATA0 0x02C000UL
#define IMONDO_DATA0_DATA 0xffffffffffffffc0UL
#define IMONDO_DATA1 0x02C008UL
#define IMONDO_DATA1_DATA 0xffffffffffffffffUL
#define MSI_32BIT_ADDR 0x034000UL
#define MSI_32BIT_ADDR_VAL 0x00000000ffff0000UL
#define MSI_64BIT_ADDR 0x034008UL
#define MSI_64BIT_ADDR_VAL 0xffffffffffff0000UL
static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head)
{
*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
return 0;
}
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head, unsigned long *msi)
{
unsigned long type_fmt, type, msi_num;
struct pci_msiq_entry *base, *ep;
base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
ep = &base[*head];
if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
return 0;
type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
MSIQ_WORD0_FMT_TYPE_SHIFT);
type = (type_fmt >> 3);
if (unlikely(type != MSIQ_TYPE_MSI32 &&
type != MSIQ_TYPE_MSI64))
return -EINVAL;
*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
MSIQ_WORD0_DATA0_SHIFT);
upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));
/* Clear the entry. */
ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;
/* Go to next entry in ring. */
(*head)++;
if (*head >= pbm->msiq_ent_count)
*head = 0;
return 1;
}
static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head)
{
upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
return 0;
}
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64)
{
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val &= ~(MSI_MAP_EQNUM);
val |= msiqid;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
val |= MSI_MAP_VALID;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
return 0;
}
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
unsigned long msiqid;
u64 val;
val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
msiqid = (val & MSI_MAP_EQNUM);
val &= ~MSI_MAP_VALID;
upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));
return 0;
}
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
unsigned long pages, order, i;
order = get_order(512 * 1024);
pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
if (pages == 0UL) {
printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
order);
return -ENOMEM;
}
memset((char *)pages, 0, PAGE_SIZE << order);
pbm->msi_queues = (void *) pages;
upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
__pa(pbm->msi_queues)),
pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);
upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);
upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);
for (i = 0; i < pbm->msiq_num; i++) {
upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
}
return 0;
}
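/* Queue geometry implied by the code: struct pci_msiq_entry is eight
 * u64s, 64 bytes, and queues sit at a fixed 8192-byte stride (see
 * pci_fire_dequeue_msi), i.e. 128 entries per queue -- consistent
 * with the 0x7f EVENT_QUEUE_{HEAD,TAIL}_VAL masks.  The 512K
 * allocation therefore covers up to 64 event queues.
 */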
static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
unsigned long pages, order;
order = get_order(512 * 1024);
pages = (unsigned long) pbm->msi_queues;
free_pages(pages, order);
pbm->msi_queues = NULL;
}
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
unsigned long msiqid,
unsigned long devino)
{
unsigned long cregs = (unsigned long) pbm->pbm_regs;
unsigned long imap_reg, iclr_reg, int_ctrlr;
unsigned int virt_irq;
int fixup;
u64 val;
imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));
/* XXX iterate amongst the 4 IRQ controllers XXX */
int_ctrlr = (1UL << 6);
val = upa_readq(imap_reg);
val |= (1UL << 63) | int_ctrlr;
upa_writeq(val, imap_reg);
fixup = ((pbm->portid << 6) | devino) - int_ctrlr;
virt_irq = build_irq(fixup, iclr_reg, imap_reg);
if (!virt_irq)
return -ENOMEM;
upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));
return virt_irq;
}
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
.get_head = pci_fire_get_head,
.dequeue_msi = pci_fire_dequeue_msi,
.set_head = pci_fire_set_head,
.msi_setup = pci_fire_msi_setup,
.msi_teardown = pci_fire_msi_teardown,
.msiq_alloc = pci_fire_msiq_alloc,
.msiq_free = pci_fire_msiq_free,
.msiq_build_irq = pci_fire_msiq_build_irq,
};
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */
/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL 0x470010UL
#define FIRE_PARITY_ENAB 0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL 0x471028UL
#define FIRE_FATAL_RESET_SPARE 0x0000000004000000UL
#define FIRE_FATAL_RESET_MB 0x0000000002000000UL
#define FIRE_FATAL_RESET_CPE 0x0000000000008000UL
#define FIRE_FATAL_RESET_APE 0x0000000000004000UL
#define FIRE_FATAL_RESET_PIO 0x0000000000000040UL
#define FIRE_FATAL_RESET_JW 0x0000000000000004UL
#define FIRE_FATAL_RESET_JI 0x0000000000000002UL
#define FIRE_FATAL_RESET_JR 0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE 0x471800UL
/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL 0x80000UL
#define FIRE_TLU_CTRL_TIM 0x00000000da000000UL
#define FIRE_TLU_CTRL_QDET 0x0000000000000100UL
#define FIRE_TLU_CTRL_CFG 0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL 0x90008UL
#define FIRE_TLU_LINK_CTRL 0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK 0x0000000000000040UL
#define FIRE_LPU_RESET 0xe2008UL
#define FIRE_LPU_LLCFG 0xe2200UL
#define FIRE_LPU_LLCFG_VC0 0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL 0xe2240UL
#define FIRE_LPU_FCTRL_UCTRL_N 0x0000000000000002UL
#define FIRE_LPU_FCTRL_UCTRL_P 0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP 0xe2430UL
#define FIRE_LPU_LTSSM_CFG2 0xe2788UL
#define FIRE_LPU_LTSSM_CFG3 0xe2790UL
#define FIRE_LPU_LTSSM_CFG4 0xe2798UL
#define FIRE_LPU_LTSSM_CFG5 0xe27a0UL
#define FIRE_DMC_IENAB 0x31800UL
#define FIRE_DMC_DBG_SEL_A 0x53000UL
#define FIRE_DMC_DBG_SEL_B 0x53008UL
#define FIRE_PEC_IENAB 0x51800UL
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
u64 val;
upa_writeq(FIRE_PARITY_ENAB,
pbm->controller_regs + FIRE_PARITY_CONTROL);
upa_writeq((FIRE_FATAL_RESET_SPARE |
FIRE_FATAL_RESET_MB |
FIRE_FATAL_RESET_CPE |
FIRE_FATAL_RESET_APE |
FIRE_FATAL_RESET_PIO |
FIRE_FATAL_RESET_JW |
FIRE_FATAL_RESET_JI |
FIRE_FATAL_RESET_JR),
pbm->controller_regs + FIRE_FATAL_RESET_CTL);
upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);
val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
val |= (FIRE_TLU_CTRL_TIM |
FIRE_TLU_CTRL_QDET |
FIRE_TLU_CTRL_CFG);
upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
pbm->pbm_regs + FIRE_TLU_LINK_CTRL);
upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
upa_writeq((FIRE_LPU_FCTRL_UCTRL_N | FIRE_LPU_FCTRL_UCTRL_P),
pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
upa_writeq(((0xffff << 16) | (0x0000 << 0)),
pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
upa_writeq((2 << 16) | (140 << 8),
pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);
upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);
upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}
static int __init pci_fire_pbm_init(struct pci_pbm_info *pbm,
struct of_device *op, u32 portid)
{
const struct linux_prom64_registers *regs;
struct device_node *dp = op->node;
int err;
pbm->numa_node = -1;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 12;
pbm->index = pci_num_pbms++;
pbm->portid = portid;
pbm->op = op;
pbm->name = dp->full_name;
regs = of_get_property(dp, "reg", NULL);
pbm->pbm_regs = regs[0].phys_addr;
pbm->controller_regs = regs[1].phys_addr - 0x410000UL;
printk("%s: SUN4U PCIE Bus Module\n", pbm->name);
pci_determine_mem_io_space(pbm);
pci_get_pbm_props(pbm);
pci_fire_hw_init(pbm);
err = pci_fire_pbm_iommu_init(pbm);
if (err)
return err;
pci_fire_msi_init(pbm);
pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);
/* XXX register error interrupt handlers XXX */
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
return 0;
}
static int __devinit fire_probe(struct of_device *op,
const struct of_device_id *match)
{
struct device_node *dp = op->node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
u32 portid;
int err;
portid = of_getintprop_default(dp, "portid", 0xff);
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
goto out_err;
}
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
goto out_free_controller;
}
pbm->iommu = iommu;
err = pci_fire_pbm_init(pbm, op, portid);
if (err)
goto out_free_iommu;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static struct of_device_id __initdata fire_match[] = {
{
.name = "pci",
.compatible = "pciex108e,80f0",
},
{},
};
static struct of_platform_driver fire_driver = {
.name = DRIVER_NAME,
.match_table = fire_match,
.probe = fire_probe,
};
static int __init fire_init(void)
{
return of_register_driver(&fire_driver, &of_bus_type);
}
subsys_initcall(fire_init);


@@ -0,0 +1,185 @@
/* pci_impl.h: Helper definitions for PCI controller support.
*
* Copyright (C) 1999, 2007 David S. Miller (davem@davemloft.net)
*/
#ifndef PCI_IMPL_H
#define PCI_IMPL_H
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
/* The abstraction used here is that there are PCI controllers,
* each with one (Sabre) or two (PSYCHO/SCHIZO) PCI bus modules
* underneath. Each PCI bus module uses an IOMMU (shared by both
* PBMs of a controller, or per-PBM), and if a streaming buffer
 * is present, each PCI bus module has its own.  (i.e. the IOMMU
 * might be shared between PBMs, the STC is never shared.)
 * Furthermore, each PCI bus module controls its own autonomous
* PCI bus.
*/
#define PCI_STC_FLUSHFLAG_INIT(STC) \
(*((STC)->strbuf_flushflag) = 0UL)
#define PCI_STC_FLUSHFLAG_SET(STC) \
(*((STC)->strbuf_flushflag) != 0UL)
#ifdef CONFIG_PCI_MSI
struct pci_pbm_info;
struct sparc64_msiq_ops {
int (*get_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head);
int (*dequeue_msi)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long *head, unsigned long *msi);
int (*set_head)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long head);
int (*msi_setup)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long msi, int is_msi64);
int (*msi_teardown)(struct pci_pbm_info *pbm, unsigned long msi);
int (*msiq_alloc)(struct pci_pbm_info *pbm);
void (*msiq_free)(struct pci_pbm_info *pbm);
int (*msiq_build_irq)(struct pci_pbm_info *pbm, unsigned long msiqid,
unsigned long devino);
};
extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops);
struct sparc64_msiq_cookie {
struct pci_pbm_info *pbm;
unsigned long msiqid;
};
#endif
struct pci_pbm_info {
struct pci_pbm_info *next;
struct pci_pbm_info *sibling;
int index;
/* Physical address base of controller registers. */
unsigned long controller_regs;
/* Physical address base of PBM registers. */
unsigned long pbm_regs;
/* Physical address of DMA sync register, if any. */
unsigned long sync_reg;
/* Opaque 32-bit system bus Port ID. */
u32 portid;
/* Opaque 32-bit handle used for hypervisor calls. */
u32 devhandle;
/* Chipset version information. */
int chip_type;
#define PBM_CHIP_TYPE_SABRE 1
#define PBM_CHIP_TYPE_PSYCHO 2
#define PBM_CHIP_TYPE_SCHIZO 3
#define PBM_CHIP_TYPE_SCHIZO_PLUS 4
#define PBM_CHIP_TYPE_TOMATILLO 5
int chip_version;
int chip_revision;
/* Name used for top-level resources. */
char *name;
/* OBP specific information. */
struct of_device *op;
u64 ino_bitmap;
/* PBM I/O and Memory space resources. */
struct resource io_space;
struct resource mem_space;
/* Base of PCI Config space, can be per-PBM or shared. */
unsigned long config_space;
/* This will be 12 on PCI-E controllers, 8 elsewhere. */
unsigned long config_space_reg_bits;
unsigned long pci_afsr;
unsigned long pci_afar;
unsigned long pci_csr;
/* State of 66MHz capabilities on this PBM. */
int is_66mhz_capable;
int all_devs_66mhz;
#ifdef CONFIG_PCI_MSI
/* MSI info. */
u32 msiq_num;
u32 msiq_ent_count;
u32 msiq_first;
u32 msiq_first_devino;
u32 msiq_rotor;
struct sparc64_msiq_cookie *msiq_irq_cookies;
u32 msi_num;
u32 msi_first;
u32 msi_data_mask;
u32 msix_data_width;
u64 msi32_start;
u64 msi64_start;
u32 msi32_len;
u32 msi64_len;
void *msi_queues;
unsigned long *msi_bitmap;
unsigned int *msi_irq_table;
int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev,
struct msi_desc *entry);
void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev);
const struct sparc64_msiq_ops *msi_ops;
#endif /* !(CONFIG_PCI_MSI) */
/* This PBM's streaming buffer. */
struct strbuf stc;
/* IOMMU state, potentially shared by both PBM segments. */
struct iommu *iommu;
/* Now things for the actual PCI bus probes. */
unsigned int pci_first_busno;
unsigned int pci_last_busno;
struct pci_bus *pci_bus;
struct pci_ops *pci_ops;
int numa_node;
};
extern struct pci_pbm_info *pci_pbm_root;
extern int pci_num_pbms;
/* PCI bus scanning and fixup support. */
extern void pci_get_pbm_props(struct pci_pbm_info *pbm);
extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
struct device *parent);
extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
/* Error reporting support. */
extern void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
extern void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
extern void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
/* Configuration space access. */
extern void pci_config_read8(u8 *addr, u8 *ret);
extern void pci_config_read16(u16 *addr, u16 *ret);
extern void pci_config_read32(u32 *addr, u32 *ret);
extern void pci_config_write8(u8 *addr, u8 val);
extern void pci_config_write16(u16 *addr, u16 val);
extern void pci_config_write32(u32 *addr, u32 val);
extern struct pci_ops sun4u_pci_ops;
extern struct pci_ops sun4v_pci_ops;
extern volatile int pci_poke_in_progress;
extern volatile int pci_poke_cpu;
extern volatile int pci_poke_faulted;
#endif /* !(PCI_IMPL_H) */

447
arch/sparc/kernel/pci_msi.c Normal file

@@ -0,0 +1,447 @@
/* pci_msi.c: Sparc64 MSI support common layer.
*
* Copyright (C) 2007 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include "pci_impl.h"
static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie)
{
struct sparc64_msiq_cookie *msiq_cookie = cookie;
struct pci_pbm_info *pbm = msiq_cookie->pbm;
unsigned long msiqid = msiq_cookie->msiqid;
const struct sparc64_msiq_ops *ops;
unsigned long orig_head, head;
int err;
ops = pbm->msi_ops;
err = ops->get_head(pbm, msiqid, &head);
if (unlikely(err < 0))
goto err_get_head;
orig_head = head;
for (;;) {
unsigned long msi;
err = ops->dequeue_msi(pbm, msiqid, &head, &msi);
if (likely(err > 0)) {
struct irq_desc *desc;
unsigned int virt_irq;
virt_irq = pbm->msi_irq_table[msi - pbm->msi_first];
desc = irq_desc + virt_irq;
desc->handle_irq(virt_irq, desc);
}
if (unlikely(err < 0))
goto err_dequeue;
if (err == 0)
break;
}
if (likely(head != orig_head)) {
err = ops->set_head(pbm, msiqid, head);
if (unlikely(err < 0))
goto err_set_head;
}
return IRQ_HANDLED;
err_get_head:
printk(KERN_EMERG "MSI: Get head on msiqid[%lu] gives error %d\n",
msiqid, err);
goto err_out;
err_dequeue:
printk(KERN_EMERG "MSI: Dequeue head[%lu] from msiqid[%lu] "
"gives error %d\n",
head, msiqid, err);
goto err_out;
err_set_head:
printk(KERN_EMERG "MSI: Set head[%lu] on msiqid[%lu] "
"gives error %d\n",
head, msiqid, err);
goto err_out;
err_out:
return IRQ_NONE;
}
static u32 pick_msiq(struct pci_pbm_info *pbm)
{
static DEFINE_SPINLOCK(rotor_lock);
unsigned long flags;
u32 ret, rotor;
spin_lock_irqsave(&rotor_lock, flags);
rotor = pbm->msiq_rotor;
ret = pbm->msiq_first + rotor;
if (++rotor >= pbm->msiq_num)
rotor = 0;
pbm->msiq_rotor = rotor;
spin_unlock_irqrestore(&rotor_lock, flags);
return ret;
}
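/* For example (hypothetical PBM): with msiq_first = 8 and
 * msiq_num = 4, successive calls return 8, 9, 10, 11, 8, ...
 * spreading MSI event queues round-robin across the PBM's range.
 */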
static int alloc_msi(struct pci_pbm_info *pbm)
{
int i;
for (i = 0; i < pbm->msi_num; i++) {
if (!test_and_set_bit(i, pbm->msi_bitmap))
return i + pbm->msi_first;
}
return -ENOENT;
}
static void free_msi(struct pci_pbm_info *pbm, int msi_num)
{
msi_num -= pbm->msi_first;
clear_bit(msi_num, pbm->msi_bitmap);
}
static struct irq_chip msi_irq = {
.typename = "PCI-MSI",
.mask = mask_msi_irq,
.unmask = unmask_msi_irq,
.enable = unmask_msi_irq,
.disable = mask_msi_irq,
/* XXX affinity XXX */
};
static int sparc64_setup_msi_irq(unsigned int *virt_irq_p,
struct pci_dev *pdev,
struct msi_desc *entry)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;
struct msi_msg msg;
int msi, err;
u32 msiqid;
*virt_irq_p = virt_irq_alloc(0, 0);
err = -ENOMEM;
if (!*virt_irq_p)
goto out_err;
set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq,
handle_simple_irq, "MSI");
err = alloc_msi(pbm);
if (unlikely(err < 0))
goto out_virt_irq_free;
msi = err;
msiqid = pick_msiq(pbm);
err = ops->msi_setup(pbm, msiqid, msi,
(entry->msi_attrib.is_64 ? 1 : 0));
if (err)
goto out_msi_free;
pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p;
if (entry->msi_attrib.is_64) {
msg.address_hi = pbm->msi64_start >> 32;
msg.address_lo = pbm->msi64_start & 0xffffffff;
} else {
msg.address_hi = 0;
msg.address_lo = pbm->msi32_start;
}
msg.data = msi;
set_irq_msi(*virt_irq_p, entry);
write_msi_msg(*virt_irq_p, &msg);
return 0;
out_msi_free:
free_msi(pbm, msi);
out_virt_irq_free:
set_irq_chip(*virt_irq_p, NULL);
virt_irq_free(*virt_irq_p);
*virt_irq_p = 0;
out_err:
return err;
}
static void sparc64_teardown_msi_irq(unsigned int virt_irq,
struct pci_dev *pdev)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
const struct sparc64_msiq_ops *ops = pbm->msi_ops;
unsigned int msi_num;
int i, err;
for (i = 0; i < pbm->msi_num; i++) {
if (pbm->msi_irq_table[i] == virt_irq)
break;
}
if (i >= pbm->msi_num) {
printk(KERN_ERR "%s: teardown: No MSI for irq %u\n",
pbm->name, virt_irq);
return;
}
msi_num = pbm->msi_first + i;
pbm->msi_irq_table[i] = ~0U;
err = ops->msi_teardown(pbm, msi_num);
if (err) {
printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, "
"irq %u, gives error %d\n",
pbm->name, msi_num, virt_irq, err);
return;
}
free_msi(pbm, msi_num);
set_irq_chip(virt_irq, NULL);
virt_irq_free(virt_irq);
}
static int msi_bitmap_alloc(struct pci_pbm_info *pbm)
{
unsigned long size, bits_per_ulong;
bits_per_ulong = sizeof(unsigned long) * 8;
size = (pbm->msi_num + (bits_per_ulong - 1)) & ~(bits_per_ulong - 1);
size /= 8;
BUG_ON(size % sizeof(unsigned long));
pbm->msi_bitmap = kzalloc(size, GFP_KERNEL);
if (!pbm->msi_bitmap)
return -ENOMEM;
return 0;
}
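/* The size arithmetic, by example: a hypothetical msi_num of 768 on
 * a 64-bit kernel rounds up to a multiple of 64 bits (768 already
 * is) and divides by 8, giving a 96-byte bitmap -- twelve unsigned
 * longs.
 */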
static void msi_bitmap_free(struct pci_pbm_info *pbm)
{
kfree(pbm->msi_bitmap);
pbm->msi_bitmap = NULL;
}
static int msi_table_alloc(struct pci_pbm_info *pbm)
{
int size, i;
size = pbm->msiq_num * sizeof(struct sparc64_msiq_cookie);
pbm->msiq_irq_cookies = kzalloc(size, GFP_KERNEL);
if (!pbm->msiq_irq_cookies)
return -ENOMEM;
for (i = 0; i < pbm->msiq_num; i++) {
struct sparc64_msiq_cookie *p;
p = &pbm->msiq_irq_cookies[i];
p->pbm = pbm;
p->msiqid = pbm->msiq_first + i;
}
size = pbm->msi_num * sizeof(unsigned int);
pbm->msi_irq_table = kzalloc(size, GFP_KERNEL);
if (!pbm->msi_irq_table) {
kfree(pbm->msiq_irq_cookies);
pbm->msiq_irq_cookies = NULL;
return -ENOMEM;
}
return 0;
}
static void msi_table_free(struct pci_pbm_info *pbm)
{
kfree(pbm->msiq_irq_cookies);
pbm->msiq_irq_cookies = NULL;
kfree(pbm->msi_irq_table);
pbm->msi_irq_table = NULL;
}
static int bringup_one_msi_queue(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops,
unsigned long msiqid,
unsigned long devino)
{
int irq = ops->msiq_build_irq(pbm, msiqid, devino);
int err, nid;
if (irq < 0)
return irq;
nid = pbm->numa_node;
if (nid != -1) {
cpumask_t numa_mask = *cpumask_of_node(nid);
irq_set_affinity(irq, &numa_mask);
}
err = request_irq(irq, sparc64_msiq_interrupt, 0,
"MSIQ",
&pbm->msiq_irq_cookies[msiqid - pbm->msiq_first]);
if (err)
return err;
return 0;
}
static int sparc64_bringup_msi_queues(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops)
{
int i;
for (i = 0; i < pbm->msiq_num; i++) {
unsigned long msiqid = i + pbm->msiq_first;
unsigned long devino = i + pbm->msiq_first_devino;
int err;
err = bringup_one_msi_queue(pbm, ops, msiqid, devino);
if (err)
return err;
}
return 0;
}
void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
const struct sparc64_msiq_ops *ops)
{
const u32 *val;
int len;
val = of_get_property(pbm->op->node, "#msi-eqs", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_num = *val;
if (pbm->msiq_num) {
const struct msiq_prop {
u32 first_msiq;
u32 num_msiq;
u32 first_devino;
} *mqp;
const struct msi_range_prop {
u32 first_msi;
u32 num_msi;
} *mrng;
const struct addr_range_prop {
u32 msi32_high;
u32 msi32_low;
u32 msi32_len;
u32 msi64_high;
u32 msi64_low;
u32 msi64_len;
} *arng;
val = of_get_property(pbm->op->node, "msi-eq-size", &len);
if (!val || len != 4)
goto no_msi;
pbm->msiq_ent_count = *val;
mqp = of_get_property(pbm->op->node,
"msi-eq-to-devino", &len);
if (!mqp)
mqp = of_get_property(pbm->op->node,
"msi-eq-devino", &len);
if (!mqp || len != sizeof(struct msiq_prop))
goto no_msi;
pbm->msiq_first = mqp->first_msiq;
pbm->msiq_first_devino = mqp->first_devino;
val = of_get_property(pbm->op->node, "#msi", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_num = *val;
mrng = of_get_property(pbm->op->node, "msi-ranges", &len);
if (!mrng || len != sizeof(struct msi_range_prop))
goto no_msi;
pbm->msi_first = mrng->first_msi;
val = of_get_property(pbm->op->node, "msi-data-mask", &len);
if (!val || len != 4)
goto no_msi;
pbm->msi_data_mask = *val;
val = of_get_property(pbm->op->node, "msix-data-width", &len);
if (!val || len != 4)
goto no_msi;
pbm->msix_data_width = *val;
arng = of_get_property(pbm->op->node, "msi-address-ranges",
&len);
if (!arng || len != sizeof(struct addr_range_prop))
goto no_msi;
pbm->msi32_start = ((u64)arng->msi32_high << 32) |
(u64) arng->msi32_low;
pbm->msi64_start = ((u64)arng->msi64_high << 32) |
(u64) arng->msi64_low;
pbm->msi32_len = arng->msi32_len;
pbm->msi64_len = arng->msi64_len;
if (msi_bitmap_alloc(pbm))
goto no_msi;
if (msi_table_alloc(pbm)) {
msi_bitmap_free(pbm);
goto no_msi;
}
if (ops->msiq_alloc(pbm)) {
msi_table_free(pbm);
msi_bitmap_free(pbm);
goto no_msi;
}
if (sparc64_bringup_msi_queues(pbm, ops)) {
ops->msiq_free(pbm);
msi_table_free(pbm);
msi_bitmap_free(pbm);
goto no_msi;
}
printk(KERN_INFO "%s: MSI Queue first[%u] num[%u] count[%u] "
"devino[0x%x]\n",
pbm->name,
pbm->msiq_first, pbm->msiq_num,
pbm->msiq_ent_count,
pbm->msiq_first_devino);
printk(KERN_INFO "%s: MSI first[%u] num[%u] mask[0x%x] "
"width[%u]\n",
pbm->name,
pbm->msi_first, pbm->msi_num, pbm->msi_data_mask,
pbm->msix_data_width);
printk(KERN_INFO "%s: MSI addr32[0x%lx:0x%x] "
"addr64[0x%lx:0x%x]\n",
pbm->name,
pbm->msi32_start, pbm->msi32_len,
pbm->msi64_start, pbm->msi64_len);
printk(KERN_INFO "%s: MSI queues at RA [%016lx]\n",
pbm->name,
__pa(pbm->msi_queues));
pbm->msi_ops = ops;
pbm->setup_msi_irq = sparc64_setup_msi_irq;
pbm->teardown_msi_irq = sparc64_teardown_msi_irq;
}
return;
no_msi:
pbm->msiq_num = 0;
printk(KERN_INFO "%s: No MSI support.\n", pbm->name);
}
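/* The OBP properties consumed above, with hypothetical values for
 * illustration:
 *
 *   #msi-eqs           = 36         number of event queues
 *   msi-eq-size        = 128        entries per queue
 *   msi-eq-to-devino   = <0 36 24>  first EQ, EQ count, first devino
 *   #msi               = 256        number of MSIs
 *   msi-ranges         = <0 256>    first MSI, MSI count
 *   msi-address-ranges =            32-bit and 64-bit MSI windows
 *
 * Any missing or malformed property branches to no_msi, leaving the
 * PBM without MSI support.
 */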


@@ -0,0 +1,618 @@
/* pci_psycho.c: PSYCHO/U2P specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/starfire.h>
#include <asm/prom.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "psycho_common.h"
#define DRIVER_NAME "psycho"
#define PFX DRIVER_NAME ": "
/* Misc. PSYCHO PCI controller register offsets and definitions. */
#define PSYCHO_CONTROL 0x0010UL
#define PSYCHO_CONTROL_IMPL 0xf000000000000000UL /* Implementation of this PSYCHO*/
#define PSYCHO_CONTROL_VER 0x0f00000000000000UL /* Version of this PSYCHO */
#define PSYCHO_CONTROL_MID 0x00f8000000000000UL /* UPA Module ID of PSYCHO */
#define PSYCHO_CONTROL_IGN 0x0007c00000000000UL /* Interrupt Group Number */
#define PSYCHO_CONTROL_RESV 0x00003ffffffffff0UL /* Reserved */
#define PSYCHO_CONTROL_APCKEN 0x0000000000000008UL /* Address Parity Check Enable */
#define PSYCHO_CONTROL_APERR 0x0000000000000004UL /* Incoming System Addr Parerr */
#define PSYCHO_CONTROL_IAP 0x0000000000000002UL /* Invert UPA Parity */
#define PSYCHO_CONTROL_MODE 0x0000000000000001UL /* PSYCHO clock mode */
#define PSYCHO_PCIA_CTRL 0x2000UL
#define PSYCHO_PCIB_CTRL 0x4000UL
#define PSYCHO_PCICTRL_RESV1 0xfffffff000000000UL /* Reserved */
#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL /* Streaming byte hole error */
#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL /* SERR signal asserted */
#define PSYCHO_PCICTRL_SPEED 0x0000000200000000UL /* PCI speed (1 is U2P clock) */
#define PSYCHO_PCICTRL_RESV2 0x00000001ffc00000UL /* Reserved */
#define PSYCHO_PCICTRL_ARB_PARK 0x0000000000200000UL /* PCI arbitration parking */
#define PSYCHO_PCICTRL_RESV3 0x00000000001ff800UL /* Reserved */
#define PSYCHO_PCICTRL_SBH_INT 0x0000000000000400UL /* Streaming byte hole int enab */
#define PSYCHO_PCICTRL_WEN 0x0000000000000200UL /* Power Mgmt Wake Enable */
#define PSYCHO_PCICTRL_EEN 0x0000000000000100UL /* PCI Error Interrupt Enable */
#define PSYCHO_PCICTRL_RESV4 0x00000000000000c0UL /* Reserved */
#define PSYCHO_PCICTRL_AEN 0x000000000000003fUL /* PCI DVMA Arbitration Enable */
/* PSYCHO error handling support. */
/* Helper function for IOMMU error checking, which checks out
* the state of the streaming buffers. The IOMMU lock is
* held when this is called.
*
* For the PCI error case we know which PBM (and thus which
* streaming buffer) caused the error, but for the uncorrectable
* error case we do not. So we always check both streaming caches.
*/
#define PSYCHO_STRBUF_CONTROL_A 0x2800UL
#define PSYCHO_STRBUF_CONTROL_B 0x4800UL
#define PSYCHO_STRBUF_CTRL_LPTR 0x00000000000000f0UL /* LRU Lock Pointer */
#define PSYCHO_STRBUF_CTRL_LENAB 0x0000000000000008UL /* LRU Lock Enable */
#define PSYCHO_STRBUF_CTRL_RRDIS 0x0000000000000004UL /* Rerun Disable */
#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
#define PSYCHO_STRBUF_CTRL_ENAB 0x0000000000000001UL /* Streaming Buffer Enable */
#define PSYCHO_STRBUF_FLUSH_A 0x2808UL
#define PSYCHO_STRBUF_FLUSH_B 0x4808UL
#define PSYCHO_STRBUF_FSYNC_A 0x2810UL
#define PSYCHO_STRBUF_FSYNC_B 0x4810UL
#define PSYCHO_STC_DATA_A 0xb000UL
#define PSYCHO_STC_DATA_B 0xc000UL
#define PSYCHO_STC_ERR_A 0xb400UL
#define PSYCHO_STC_ERR_B 0xc400UL
#define PSYCHO_STC_TAG_A 0xb800UL
#define PSYCHO_STC_TAG_B 0xc800UL
#define PSYCHO_STC_LINE_A 0xb900UL
#define PSYCHO_STC_LINE_B 0xc900UL
/* When an Uncorrectable Error or a PCI Error happens, we
* interrogate the IOMMU state to see if it is the cause.
*/
#define PSYCHO_IOMMU_CONTROL 0x0200UL
#define PSYCHO_IOMMU_CTRL_RESV 0xfffffffff9000000UL /* Reserved */
#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL /* Translation Error Status */
#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL /* Translation Error encountered */
#define PSYCHO_IOMMU_CTRL_LCKEN 0x0000000000800000UL /* Enable translation locking */
#define PSYCHO_IOMMU_CTRL_LCKPTR 0x0000000000780000UL /* Translation lock pointer */
#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL /* TSB Table 1024 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL /* TSB Table 2048 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL /* TSB Table 4096 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL /* TSB Table 8192 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL /* TSB Table 16k 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL /* TSB Table 32k 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL /* TSB Table 64k 8-byte entries */
#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL /* TSB Table 128k 8-byte entries */
#define PSYCHO_IOMMU_CTRL_RESV2 0x000000000000fff8UL /* Reserved */
#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL /* Assumed page size, 0=8k 1=64k */
#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL /* Diagnostic mode enable */
#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
#define PSYCHO_IOMMU_TSBBASE 0x0208UL
#define PSYCHO_IOMMU_FLUSH 0x0210UL
#define PSYCHO_IOMMU_TAG 0xa580UL
#define PSYCHO_IOMMU_DATA 0xa600UL
/* Uncorrectable Errors. Cause of the error and the address are
* recorded in the UE_AFSR and UE_AFAR of PSYCHO. They are errors
* relating to UPA interface transactions.
*/
#define PSYCHO_UE_AFSR 0x0030UL
#define PSYCHO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
#define PSYCHO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
#define PSYCHO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
#define PSYCHO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
#define PSYCHO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
#define PSYCHO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
#define PSYCHO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define PSYCHO_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
#define PSYCHO_UEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */
#define PSYCHO_UEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
#define PSYCHO_UEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
#define PSYCHO_UEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
#define PSYCHO_UE_AFAR 0x0038UL
static irqreturn_t psycho_ue_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + PSYCHO_UE_AFSR;
unsigned long afar_reg = pbm->controller_regs + PSYCHO_UE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch uncorrectable error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear the primary/secondary error status bits. */
error_bits = afsr &
(PSYCHO_UEAFSR_PPIO | PSYCHO_UEAFSR_PDRD | PSYCHO_UEAFSR_PDWR |
PSYCHO_UEAFSR_SPIO | PSYCHO_UEAFSR_SDRD | PSYCHO_UEAFSR_SDWR);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Uncorrectable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & PSYCHO_UEAFSR_PPIO) ?
"PIO" :
((error_bits & PSYCHO_UEAFSR_PDRD) ?
"DMA Read" :
((error_bits & PSYCHO_UEAFSR_PDWR) ?
"DMA Write" : "???")))));
printk("%s: bytemask[%04lx] dword_offset[%lx] UPA_MID[%02lx] was_block(%d)\n",
pbm->name,
(afsr & PSYCHO_UEAFSR_BMSK) >> 32UL,
(afsr & PSYCHO_UEAFSR_DOFF) >> 29UL,
(afsr & PSYCHO_UEAFSR_MID) >> 24UL,
((afsr & PSYCHO_UEAFSR_BLK) ? 1 : 0));
printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: UE Secondary errors [", pbm->name);
reported = 0;
if (afsr & PSYCHO_UEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & PSYCHO_UEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & PSYCHO_UEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* Interrogate both IOMMUs for error status. */
psycho_check_iommu_error(pbm, afsr, afar, UE_ERR);
if (pbm->sibling)
psycho_check_iommu_error(pbm->sibling, afsr, afar, UE_ERR);
return IRQ_HANDLED;
}
/* Correctable Errors. */
#define PSYCHO_CE_AFSR 0x0040UL
#define PSYCHO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO is cause */
#define PSYCHO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read is cause */
#define PSYCHO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write is cause */
#define PSYCHO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
#define PSYCHO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read is cause */
#define PSYCHO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write is cause*/
#define PSYCHO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
#define PSYCHO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
#define PSYCHO_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask of failed transfer */
#define PSYCHO_CEAFSR_DOFF 0x00000000e0000000UL /* Doubleword Offset */
#define PSYCHO_CEAFSR_MID 0x000000001f000000UL /* UPA MID causing the fault */
#define PSYCHO_CEAFSR_BLK 0x0000000000800000UL /* Trans was block operation */
#define PSYCHO_CEAFSR_RESV2 0x00000000007fffffUL /* Reserved */
#define PSYCHO_CE_AFAR 0x0048UL /* AFSR + 8, matching the UE register pair */
static irqreturn_t psycho_ce_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + PSYCHO_CE_AFSR;
unsigned long afar_reg = pbm->controller_regs + PSYCHO_CE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(PSYCHO_CEAFSR_PPIO | PSYCHO_CEAFSR_PDRD | PSYCHO_CEAFSR_PDWR |
PSYCHO_CEAFSR_SPIO | PSYCHO_CEAFSR_SDRD | PSYCHO_CEAFSR_SDWR);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Correctable Error, primary error type[%s]\n",
pbm->name,
(((error_bits & PSYCHO_CEAFSR_PPIO) ?
"PIO" :
((error_bits & PSYCHO_CEAFSR_PDRD) ?
"DMA Read" :
((error_bits & PSYCHO_CEAFSR_PDWR) ?
"DMA Write" : "???")))));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
"UPA_MID[%02lx] was_block(%d)\n",
pbm->name,
(afsr & PSYCHO_CEAFSR_ESYND) >> 48UL,
(afsr & PSYCHO_CEAFSR_BMSK) >> 32UL,
(afsr & PSYCHO_CEAFSR_DOFF) >> 29UL,
(afsr & PSYCHO_CEAFSR_MID) >> 24UL,
((afsr & PSYCHO_CEAFSR_BLK) ? 1 : 0));
printk("%s: CE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: CE Secondary errors [", pbm->name);
reported = 0;
if (afsr & PSYCHO_CEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & PSYCHO_CEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & PSYCHO_CEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
/* PCI Errors. They are signalled by the PCI bus module since they
* are associated with a specific bus segment.
*/
#define PSYCHO_PCI_AFSR_A 0x2010UL
#define PSYCHO_PCI_AFSR_B 0x4010UL
#define PSYCHO_PCI_AFAR_A 0x2018UL
#define PSYCHO_PCI_AFAR_B 0x4018UL
/* XXX What about PowerFail/PowerManagement??? -DaveM */
#define PSYCHO_ECC_CTRL 0x0020
#define PSYCHO_ECCCTRL_EE 0x8000000000000000UL /* Enable ECC Checking */
#define PSYCHO_ECCCTRL_UE 0x4000000000000000UL /* Enable UE Interrupts */
#define PSYCHO_ECCCTRL_CE 0x2000000000000000UL /* Enable CE Interrupts */
static void psycho_register_error_handlers(struct pci_pbm_info *pbm)
{
struct of_device *op = of_find_device_by_node(pbm->op->node);
unsigned long base = pbm->controller_regs;
u64 tmp;
int err;
if (!op)
return;
/* Psycho interrupt property order is:
* 0: PCIERR INO for this PBM
* 1: UE ERR
* 2: CE ERR
* 3: POWER FAIL
* 4: SPARE HARDWARE
* 5: POWER MANAGEMENT
*/
if (op->num_irqs < 6)
return;
/* We really mean to ignore the return result here.  Two
 * PCI controllers share the same interrupt numbers and
 * drive the same front-end hardware, so both register the
 * handlers with IRQF_SHARED; whichever controller did not
 * raise the interrupt simply returns IRQ_NONE from its
 * handler.
 */
err = request_irq(op->irqs[1], psycho_ue_intr, IRQF_SHARED,
"PSYCHO_UE", pbm);
err = request_irq(op->irqs[2], psycho_ce_intr, IRQF_SHARED,
"PSYCHO_CE", pbm);
/* This one, however, ought not to fail. We can just warn
* about it since the system can still operate properly even
* if this fails.
*/
err = request_irq(op->irqs[0], psycho_pcierr_intr, IRQF_SHARED,
"PSYCHO_PCIERR", pbm);
if (err)
printk(KERN_WARNING "%s: Could not register PCIERR, "
"err=%d\n", pbm->name, err);
/* Enable UE and CE interrupts for controller. */
upa_writeq((PSYCHO_ECCCTRL_EE |
PSYCHO_ECCCTRL_UE |
PSYCHO_ECCCTRL_CE), base + PSYCHO_ECC_CTRL);
/* Enable PCI Error interrupts and clear error
* bits for each PBM.
*/
tmp = upa_readq(base + PSYCHO_PCIA_CTRL);
tmp |= (PSYCHO_PCICTRL_SERR |
PSYCHO_PCICTRL_SBH_ERR |
PSYCHO_PCICTRL_EEN);
tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
upa_writeq(tmp, base + PSYCHO_PCIA_CTRL);
tmp = upa_readq(base + PSYCHO_PCIB_CTRL);
tmp |= (PSYCHO_PCICTRL_SERR |
PSYCHO_PCICTRL_SBH_ERR |
PSYCHO_PCICTRL_EEN);
tmp &= ~(PSYCHO_PCICTRL_SBH_INT);
upa_writeq(tmp, base + PSYCHO_PCIB_CTRL);
}
/* PSYCHO boot time probing and initialization. */
static void pbm_config_busmastering(struct pci_pbm_info *pbm)
{
u8 *addr;
/* Set cache-line size to 64 bytes, this is actually
* a nop but I do it for completeness.
*/
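/* (PCI_CACHE_LINE_SIZE is programmed in units of 32-bit words per the
 * PCI spec, so a 64-byte line is written as 64 / sizeof(u32) == 16.)
 */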
addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_CACHE_LINE_SIZE);
pci_config_write8(addr, 64 / sizeof(u32));
/* Set PBM latency timer to 64 PCI clocks. */
addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_LATENCY_TIMER);
pci_config_write8(addr, 64);
}
static void __init psycho_scan_bus(struct pci_pbm_info *pbm,
struct device *parent)
{
pbm_config_busmastering(pbm);
pbm->is_66mhz_capable = 0;
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
/* After the PCI bus scan is complete, we can register
* the error interrupt handlers.
*/
psycho_register_error_handlers(pbm);
}
#define PSYCHO_IRQ_RETRY 0x1a00UL
#define PSYCHO_PCIA_DIAG 0x2020UL
#define PSYCHO_PCIB_DIAG 0x4020UL
#define PSYCHO_PCIDIAG_RESV 0xffffffffffffff80UL /* Reserved */
#define PSYCHO_PCIDIAG_DRETRY 0x0000000000000040UL /* Disable retry limit */
#define PSYCHO_PCIDIAG_DISYNC 0x0000000000000020UL /* Disable DMA wr / irq sync */
#define PSYCHO_PCIDIAG_DDWSYNC 0x0000000000000010UL /* Disable DMA wr / PIO rd sync */
#define PSYCHO_PCIDIAG_IDDPAR 0x0000000000000008UL /* Invert DMA data parity */
#define PSYCHO_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO data parity */
#define PSYCHO_PCIDIAG_IPAPAR 0x0000000000000002UL /* Invert PIO address parity */
#define PSYCHO_PCIDIAG_LPBACK 0x0000000000000001UL /* Enable loopback mode */
static void psycho_controller_hwinit(struct pci_pbm_info *pbm)
{
u64 tmp;
upa_writeq(5, pbm->controller_regs + PSYCHO_IRQ_RETRY);
/* Enable arbiter for all PCI slots. */
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_CTRL);
tmp |= PSYCHO_PCICTRL_AEN;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_CTRL);
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_CTRL);
tmp |= PSYCHO_PCICTRL_AEN;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_CTRL);
/* Disable DMA write / PIO read synchronization on
* both PCI bus segments.
* [ U2P Erratum 1243770, STP2223BGA data sheet ]
*/
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIA_DIAG);
tmp |= PSYCHO_PCIDIAG_DDWSYNC;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIA_DIAG);
tmp = upa_readq(pbm->controller_regs + PSYCHO_PCIB_DIAG);
tmp |= PSYCHO_PCIDIAG_DDWSYNC;
upa_writeq(tmp, pbm->controller_regs + PSYCHO_PCIB_DIAG);
}
static void psycho_pbm_strbuf_init(struct pci_pbm_info *pbm,
int is_pbm_a)
{
unsigned long base = pbm->controller_regs;
u64 control;
if (is_pbm_a) {
pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_A;
pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_A;
pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_A;
pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_A;
pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_A;
pbm->stc.strbuf_line_diag= base + PSYCHO_STC_LINE_A;
} else {
pbm->stc.strbuf_control = base + PSYCHO_STRBUF_CONTROL_B;
pbm->stc.strbuf_pflush = base + PSYCHO_STRBUF_FLUSH_B;
pbm->stc.strbuf_fsync = base + PSYCHO_STRBUF_FSYNC_B;
pbm->stc.strbuf_err_stat = base + PSYCHO_STC_ERR_B;
pbm->stc.strbuf_tag_diag = base + PSYCHO_STC_TAG_B;
pbm->stc.strbuf_line_diag= base + PSYCHO_STC_LINE_B;
}
/* PSYCHO's streaming buffer lacks ctx flushing. */
pbm->stc.strbuf_ctxflush = 0;
pbm->stc.strbuf_ctxmatch_base = 0;
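/* The streaming-buffer hardware writes the flush flag at sync time,
 * so give it a cache line of its own: round the buffer address up to
 * the next 64-byte boundary.
 */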
pbm->stc.strbuf_flushflag = (volatile unsigned long *)
((((unsigned long)&pbm->stc.__flushflag_buf[0])
+ 63UL)
& ~63UL);
pbm->stc.strbuf_flushflag_pa = (unsigned long)
__pa(pbm->stc.strbuf_flushflag);
/* Enable the streaming buffer. We have to be careful
* just in case OBP left it with LRU locking enabled.
*
* It is possible to control if PBM will be rerun on
* line misses. Currently I just retain whatever setting
* OBP left us with. All checks so far show it having
* a value of zero.
*/
#undef PSYCHO_STRBUF_RERUN_ENABLE
#undef PSYCHO_STRBUF_RERUN_DISABLE
control = upa_readq(pbm->stc.strbuf_control);
control |= PSYCHO_STRBUF_CTRL_ENAB;
control &= ~(PSYCHO_STRBUF_CTRL_LENAB | PSYCHO_STRBUF_CTRL_LPTR);
#ifdef PSYCHO_STRBUF_RERUN_ENABLE
control &= ~(PSYCHO_STRBUF_CTRL_RRDIS);
#else
#ifdef PSYCHO_STRBUF_RERUN_DISABLE
control |= PSYCHO_STRBUF_CTRL_RRDIS;
#endif
#endif
upa_writeq(control, pbm->stc.strbuf_control);
pbm->stc.strbuf_enabled = 1;
}
#define PSYCHO_IOSPACE_A 0x002000000UL
#define PSYCHO_IOSPACE_B 0x002010000UL
#define PSYCHO_IOSPACE_SIZE 0x00000ffffUL
#define PSYCHO_MEMSPACE_A 0x100000000UL
#define PSYCHO_MEMSPACE_B 0x180000000UL
#define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL
static void __init psycho_pbm_init(struct pci_pbm_info *pbm,
struct of_device *op, int is_pbm_a)
{
psycho_pbm_init_common(pbm, op, "PSYCHO", PBM_CHIP_TYPE_PSYCHO);
psycho_pbm_strbuf_init(pbm, is_pbm_a);
psycho_scan_bus(pbm, &op->dev);
}
static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid)
{
struct pci_pbm_info *pbm;
for (pbm = pci_pbm_root; pbm; pbm = pbm->next) {
if (pbm->portid == upa_portid)
return pbm;
}
return NULL;
}
#define PSYCHO_CONFIGSPACE 0x001000000UL
static int __devinit psycho_probe(struct of_device *op,
const struct of_device_id *match)
{
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->node;
struct pci_pbm_info *pbm;
struct iommu *iommu;
int is_pbm_a, err;
u32 upa_portid;
upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
goto out_err;
}
pbm->sibling = psycho_find_sibling(upa_portid);
if (pbm->sibling) {
iommu = pbm->sibling->iommu;
} else {
iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
goto out_free_controller;
}
}
pbm->iommu = iommu;
pbm->portid = upa_portid;
pr_regs = of_get_property(dp, "reg", NULL);
err = -ENODEV;
if (!pr_regs) {
printk(KERN_ERR PFX "No reg property.\n");
goto out_free_iommu;
}
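/* Editor's note: PBM A vs. PBM B is inferred from the UPA physical
 * address of the first "reg" entry; the 0x6000 mask isolates address
 * bits [14:13] and the 0x2000 pattern selects PBM A.
 */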
is_pbm_a = ((pr_regs[0].phys_addr & 0x6000) == 0x2000);
pbm->controller_regs = pr_regs[2].phys_addr;
pbm->config_space = (pr_regs[2].phys_addr + PSYCHO_CONFIGSPACE);
if (is_pbm_a) {
pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_A;
pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_A;
pbm->pci_csr = pbm->controller_regs + PSYCHO_PCIA_CTRL;
} else {
pbm->pci_afsr = pbm->controller_regs + PSYCHO_PCI_AFSR_B;
pbm->pci_afar = pbm->controller_regs + PSYCHO_PCI_AFAR_B;
pbm->pci_csr = pbm->controller_regs + PSYCHO_PCIB_CTRL;
}
psycho_controller_hwinit(pbm);
if (!pbm->sibling) {
err = psycho_iommu_init(pbm, 128, 0xc0000000,
0xffffffff, PSYCHO_CONTROL);
if (err)
goto out_free_iommu;
/* If necessary, hook us up for starfire IRQ translations. */
if (this_is_starfire)
starfire_hookup(pbm->portid);
}
psycho_pbm_init(pbm, op, is_pbm_a);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
if (pbm->sibling)
pbm->sibling->sibling = pbm;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
if (!pbm->sibling)
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static struct of_device_id __initdata psycho_match[] = {
{
.name = "pci",
.compatible = "pci108e,8000",
},
{},
};
static struct of_platform_driver psycho_driver = {
.name = DRIVER_NAME,
.match_table = psycho_match,
.probe = psycho_probe,
};
static int __init psycho_init(void)
{
return of_register_driver(&psycho_driver, &of_bus_type);
}
subsys_initcall(psycho_init);

View File

@@ -0,0 +1,609 @@
/* pci_sabre.c: Sabre specific PCI controller support.
*
* Copyright (C) 1997, 1998, 1999, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jakub@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/of_device.h>
#include <asm/apb.h>
#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "psycho_common.h"
#define DRIVER_NAME "sabre"
#define PFX DRIVER_NAME ": "
/* SABRE PCI controller register offsets and definitions. */
#define SABRE_UE_AFSR 0x0030UL
#define SABRE_UEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
#define SABRE_UEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
#define SABRE_UEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
#define SABRE_UEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
#define SABRE_UEAFSR_SDTE 0x0200000000000000UL /* Secondary DMA Translation Error */
#define SABRE_UEAFSR_PDTE 0x0100000000000000UL /* Primary DMA Translation Error */
#define SABRE_UEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
#define SABRE_UEAFSR_OFF 0x00000000e0000000UL /* Offset (AFAR bits [5:3]) */
#define SABRE_UEAFSR_BLK 0x0000000000800000UL /* Was block operation */
#define SABRE_UECE_AFAR 0x0038UL
#define SABRE_CE_AFSR 0x0040UL
#define SABRE_CEAFSR_PDRD 0x4000000000000000UL /* Primary PCI DMA Read */
#define SABRE_CEAFSR_PDWR 0x2000000000000000UL /* Primary PCI DMA Write */
#define SABRE_CEAFSR_SDRD 0x0800000000000000UL /* Secondary PCI DMA Read */
#define SABRE_CEAFSR_SDWR 0x0400000000000000UL /* Secondary PCI DMA Write */
#define SABRE_CEAFSR_ESYND 0x00ff000000000000UL /* ECC Syndrome */
#define SABRE_CEAFSR_BMSK 0x0000ffff00000000UL /* Bytemask */
#define SABRE_CEAFSR_OFF 0x00000000e0000000UL /* Offset */
#define SABRE_CEAFSR_BLK 0x0000000000800000UL /* Was block operation */
#define SABRE_UECE_AFAR_ALIAS 0x0048UL /* Aliases to 0x0038 */
#define SABRE_IOMMU_CONTROL 0x0200UL
#define SABRE_IOMMUCTRL_ERRSTS 0x0000000006000000UL /* Error status bits */
#define SABRE_IOMMUCTRL_ERR 0x0000000001000000UL /* Error present in IOTLB */
#define SABRE_IOMMUCTRL_LCKEN 0x0000000000800000UL /* IOTLB lock enable */
#define SABRE_IOMMUCTRL_LCKPTR 0x0000000000780000UL /* IOTLB lock pointer */
#define SABRE_IOMMUCTRL_TSBSZ 0x0000000000070000UL /* TSB Size */
#define SABRE_IOMMU_TSBSZ_1K 0x0000000000000000
#define SABRE_IOMMU_TSBSZ_2K 0x0000000000010000
#define SABRE_IOMMU_TSBSZ_4K 0x0000000000020000
#define SABRE_IOMMU_TSBSZ_8K 0x0000000000030000
#define SABRE_IOMMU_TSBSZ_16K 0x0000000000040000
#define SABRE_IOMMU_TSBSZ_32K 0x0000000000050000
#define SABRE_IOMMU_TSBSZ_64K 0x0000000000060000
#define SABRE_IOMMU_TSBSZ_128K 0x0000000000070000
#define SABRE_IOMMUCTRL_TBWSZ 0x0000000000000004UL /* TSB assumed page size */
#define SABRE_IOMMUCTRL_DENAB 0x0000000000000002UL /* Diagnostic Mode Enable */
#define SABRE_IOMMUCTRL_ENAB 0x0000000000000001UL /* IOMMU Enable */
#define SABRE_IOMMU_TSBBASE 0x0208UL
#define SABRE_IOMMU_FLUSH 0x0210UL
#define SABRE_IMAP_A_SLOT0 0x0c00UL
#define SABRE_IMAP_B_SLOT0 0x0c20UL
#define SABRE_IMAP_SCSI 0x1000UL
#define SABRE_IMAP_ETH 0x1008UL
#define SABRE_IMAP_BPP 0x1010UL
#define SABRE_IMAP_AU_REC 0x1018UL
#define SABRE_IMAP_AU_PLAY 0x1020UL
#define SABRE_IMAP_PFAIL 0x1028UL
#define SABRE_IMAP_KMS 0x1030UL
#define SABRE_IMAP_FLPY 0x1038UL
#define SABRE_IMAP_SHW 0x1040UL
#define SABRE_IMAP_KBD 0x1048UL
#define SABRE_IMAP_MS 0x1050UL
#define SABRE_IMAP_SER 0x1058UL
#define SABRE_IMAP_UE 0x1070UL
#define SABRE_IMAP_CE 0x1078UL
#define SABRE_IMAP_PCIERR 0x1080UL
#define SABRE_IMAP_GFX 0x1098UL
#define SABRE_IMAP_EUPA 0x10a0UL
#define SABRE_ICLR_A_SLOT0 0x1400UL
#define SABRE_ICLR_B_SLOT0 0x1480UL
#define SABRE_ICLR_SCSI 0x1800UL
#define SABRE_ICLR_ETH 0x1808UL
#define SABRE_ICLR_BPP 0x1810UL
#define SABRE_ICLR_AU_REC 0x1818UL
#define SABRE_ICLR_AU_PLAY 0x1820UL
#define SABRE_ICLR_PFAIL 0x1828UL
#define SABRE_ICLR_KMS 0x1830UL
#define SABRE_ICLR_FLPY 0x1838UL
#define SABRE_ICLR_SHW 0x1840UL
#define SABRE_ICLR_KBD 0x1848UL
#define SABRE_ICLR_MS 0x1850UL
#define SABRE_ICLR_SER 0x1858UL
#define SABRE_ICLR_UE 0x1870UL
#define SABRE_ICLR_CE 0x1878UL
#define SABRE_ICLR_PCIERR 0x1880UL
#define SABRE_WRSYNC 0x1c20UL
#define SABRE_PCICTRL 0x2000UL
#define SABRE_PCICTRL_MRLEN 0x0000001000000000UL /* Use MemoryReadLine for block loads/stores */
#define SABRE_PCICTRL_SERR 0x0000000400000000UL /* Set when SERR asserted on PCI bus */
#define SABRE_PCICTRL_ARBPARK 0x0000000000200000UL /* Bus Parking 0=Ultra-IIi 1=prev-bus-owner */
#define SABRE_PCICTRL_CPUPRIO 0x0000000000100000UL /* Ultra-IIi granted every other bus cycle */
#define SABRE_PCICTRL_ARBPRIO 0x00000000000f0000UL /* Slot which is granted every other bus cycle */
#define SABRE_PCICTRL_ERREN 0x0000000000000100UL /* PCI Error Interrupt Enable */
#define SABRE_PCICTRL_RTRYWE 0x0000000000000080UL /* DMA Flow Control 0=wait-if-possible 1=retry */
#define SABRE_PCICTRL_AEN 0x000000000000000fUL /* Slot PCI arbitration enables */
#define SABRE_PIOAFSR 0x2010UL
#define SABRE_PIOAFSR_PMA 0x8000000000000000UL /* Primary Master Abort */
#define SABRE_PIOAFSR_PTA 0x4000000000000000UL /* Primary Target Abort */
#define SABRE_PIOAFSR_PRTRY 0x2000000000000000UL /* Primary Excessive Retries */
#define SABRE_PIOAFSR_PPERR 0x1000000000000000UL /* Primary Parity Error */
#define SABRE_PIOAFSR_SMA 0x0800000000000000UL /* Secondary Master Abort */
#define SABRE_PIOAFSR_STA 0x0400000000000000UL /* Secondary Target Abort */
#define SABRE_PIOAFSR_SRTRY 0x0200000000000000UL /* Secondary Excessive Retries */
#define SABRE_PIOAFSR_SPERR 0x0100000000000000UL /* Secondary Parity Error */
#define SABRE_PIOAFSR_BMSK 0x0000ffff00000000UL /* Byte Mask */
#define SABRE_PIOAFSR_BLK 0x0000000080000000UL /* Was Block Operation */
#define SABRE_PIOAFAR 0x2018UL
#define SABRE_PCIDIAG 0x2020UL
#define SABRE_PCIDIAG_DRTRY 0x0000000000000040UL /* Disable PIO Retry Limit */
#define SABRE_PCIDIAG_IPAPAR 0x0000000000000008UL /* Invert PIO Address Parity */
#define SABRE_PCIDIAG_IPDPAR 0x0000000000000004UL /* Invert PIO Data Parity */
#define SABRE_PCIDIAG_IDDPAR 0x0000000000000002UL /* Invert DMA Data Parity */
#define SABRE_PCIDIAG_ELPBK 0x0000000000000001UL /* Loopback Enable - not supported */
#define SABRE_PCITASR 0x2028UL
#define SABRE_PCITASR_EF 0x0000000000000080UL /* Respond to 0xe0000000-0xffffffff */
#define SABRE_PCITASR_CD 0x0000000000000040UL /* Respond to 0xc0000000-0xdfffffff */
#define SABRE_PCITASR_AB 0x0000000000000020UL /* Respond to 0xa0000000-0xbfffffff */
#define SABRE_PCITASR_89 0x0000000000000010UL /* Respond to 0x80000000-0x9fffffff */
#define SABRE_PCITASR_67 0x0000000000000008UL /* Respond to 0x60000000-0x7fffffff */
#define SABRE_PCITASR_45 0x0000000000000004UL /* Respond to 0x40000000-0x5fffffff */
#define SABRE_PCITASR_23 0x0000000000000002UL /* Respond to 0x20000000-0x3fffffff */
#define SABRE_PCITASR_01 0x0000000000000001UL /* Respond to 0x00000000-0x1fffffff */
#define SABRE_PIOBUF_DIAG 0x5000UL
#define SABRE_DMABUF_DIAGLO 0x5100UL
#define SABRE_DMABUF_DIAGHI 0x51c0UL
#define SABRE_IMAP_GFX_ALIAS 0x6000UL /* Aliases to 0x1098 */
#define SABRE_IMAP_EUPA_ALIAS 0x8000UL /* Aliases to 0x10a0 */
#define SABRE_IOMMU_VADIAG 0xa400UL
#define SABRE_IOMMU_TCDIAG 0xa408UL
#define SABRE_IOMMU_TAG 0xa580UL
#define SABRE_IOMMUTAG_ERRSTS 0x0000000001800000UL /* Error status bits */
#define SABRE_IOMMUTAG_ERR 0x0000000000400000UL /* Error present */
#define SABRE_IOMMUTAG_WRITE 0x0000000000200000UL /* Page is writable */
#define SABRE_IOMMUTAG_STREAM 0x0000000000100000UL /* Streamable bit - unused */
#define SABRE_IOMMUTAG_SIZE 0x0000000000080000UL /* 0=8k 1=16k */
#define SABRE_IOMMUTAG_VPN 0x000000000007ffffUL /* Virtual Page Number [31:13] */
#define SABRE_IOMMU_DATA 0xa600UL
#define SABRE_IOMMUDATA_VALID 0x0000000040000000UL /* Valid */
#define SABRE_IOMMUDATA_USED 0x0000000020000000UL /* Used (for LRU algorithm) */
#define SABRE_IOMMUDATA_CACHE 0x0000000010000000UL /* Cacheable */
#define SABRE_IOMMUDATA_PPN 0x00000000001fffffUL /* Physical Page Number [33:13] */
#define SABRE_PCI_IRQSTATE 0xa800UL
#define SABRE_OBIO_IRQSTATE 0xa808UL
#define SABRE_FFBCFG 0xf000UL
#define SABRE_FFBCFG_SPRQS 0x000000000f000000 /* Slave P_RQST queue size */
#define SABRE_FFBCFG_ONEREAD 0x0000000000004000 /* Slave supports one outstanding read */
#define SABRE_MCCTRL0 0xf010UL
#define SABRE_MCCTRL0_RENAB 0x0000000080000000 /* Refresh Enable */
#define SABRE_MCCTRL0_EENAB 0x0000000010000000 /* Enable all ECC functions */
#define SABRE_MCCTRL0_11BIT 0x0000000000001000 /* Enable 11-bit column addressing */
#define SABRE_MCCTRL0_DPP 0x0000000000000f00 /* DIMM Pair Present Bits */
#define SABRE_MCCTRL0_RINTVL 0x00000000000000ff /* Refresh Interval */
#define SABRE_MCCTRL1 0xf018UL
#define SABRE_MCCTRL1_AMDC 0x0000000038000000 /* Advance Memdata Clock */
#define SABRE_MCCTRL1_ARDC 0x0000000007000000 /* Advance DRAM Read Data Clock */
#define SABRE_MCCTRL1_CSR 0x0000000000e00000 /* CAS to RAS delay for CBR refresh */
#define SABRE_MCCTRL1_CASRW 0x00000000001c0000 /* CAS length for read/write */
#define SABRE_MCCTRL1_RCD 0x0000000000038000 /* RAS to CAS delay */
#define SABRE_MCCTRL1_CP 0x0000000000007000 /* CAS Precharge */
#define SABRE_MCCTRL1_RP 0x0000000000000e00 /* RAS Precharge */
#define SABRE_MCCTRL1_RAS 0x00000000000001c0 /* Length of RAS for refresh */
#define SABRE_MCCTRL1_CASRW2 0x0000000000000038 /* Must be same as CASRW */
#define SABRE_MCCTRL1_RSC 0x0000000000000007 /* RAS after CAS hold time */
#define SABRE_RESETCTRL 0xf020UL
#define SABRE_CONFIGSPACE 0x001000000UL
#define SABRE_IOSPACE 0x002000000UL
#define SABRE_IOSPACE_SIZE 0x000ffffffUL
#define SABRE_MEMSPACE 0x100000000UL
#define SABRE_MEMSPACE_SIZE 0x07fffffffUL
static int hummingbird_p;
static struct pci_bus *sabre_root_bus;
static irqreturn_t sabre_ue_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SABRE_UE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch uncorrectable error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear the primary/secondary error status bits. */
error_bits = afsr &
(SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Uncorrectable Error, primary error type[%s%s]\n",
pbm->name,
((error_bits & SABRE_UEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SABRE_UEAFSR_PDWR) ?
"DMA Write" : "???")),
((error_bits & SABRE_UEAFSR_PDTE) ?
":Translation Error" : ""));
printk("%s: bytemask[%04lx] dword_offset[%lx] was_block(%d)\n",
pbm->name,
(afsr & SABRE_UEAFSR_BMSK) >> 32UL,
(afsr & SABRE_UEAFSR_OFF) >> 29UL,
((afsr & SABRE_UEAFSR_BLK) ? 1 : 0));
printk("%s: UE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: UE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SABRE_UEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & SABRE_UEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (afsr & SABRE_UEAFSR_SDTE) {
reported++;
printk("(Translation Error)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* Interrogate IOMMU for error status. */
psycho_check_iommu_error(pbm, afsr, afar, UE_ERR);
return IRQ_HANDLED;
}
static irqreturn_t sabre_ce_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
unsigned long afsr_reg = pbm->controller_regs + SABRE_CE_AFSR;
unsigned long afar_reg = pbm->controller_regs + SABRE_UECE_AFAR;
unsigned long afsr, afar, error_bits;
int reported;
/* Latch error status. */
afar = upa_readq(afar_reg);
afsr = upa_readq(afsr_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR);
if (!error_bits)
return IRQ_NONE;
upa_writeq(error_bits, afsr_reg);
/* Log the error. */
printk("%s: Correctable Error, primary error type[%s]\n",
pbm->name,
((error_bits & SABRE_CEAFSR_PDRD) ?
"DMA Read" :
((error_bits & SABRE_CEAFSR_PDWR) ?
"DMA Write" : "???")));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("%s: syndrome[%02lx] bytemask[%04lx] dword_offset[%lx] "
"was_block(%d)\n",
pbm->name,
(afsr & SABRE_CEAFSR_ESYND) >> 48UL,
(afsr & SABRE_CEAFSR_BMSK) >> 32UL,
(afsr & SABRE_CEAFSR_OFF) >> 29UL,
((afsr & SABRE_CEAFSR_BLK) ? 1 : 0));
printk("%s: CE AFAR [%016lx]\n", pbm->name, afar);
printk("%s: CE Secondary errors [", pbm->name);
reported = 0;
if (afsr & SABRE_CEAFSR_SDRD) {
reported++;
printk("(DMA Read)");
}
if (afsr & SABRE_CEAFSR_SDWR) {
reported++;
printk("(DMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
static void sabre_register_error_handlers(struct pci_pbm_info *pbm)
{
struct device_node *dp = pbm->op->node;
struct of_device *op;
unsigned long base = pbm->controller_regs;
u64 tmp;
int err;
if (pbm->chip_type == PBM_CHIP_TYPE_SABRE)
dp = dp->parent;
op = of_find_device_by_node(dp);
if (!op)
return;
/* Sabre/Hummingbird IRQ property layout is:
* 0: PCI ERR
* 1: UE ERR
* 2: CE ERR
* 3: POWER FAIL
*/
if (op->num_irqs < 4)
return;
/* We clear the error bits in the appropriate AFSR before
* registering the handler so that we don't get spurious
* interrupts.
*/
upa_writeq((SABRE_UEAFSR_PDRD | SABRE_UEAFSR_PDWR |
SABRE_UEAFSR_SDRD | SABRE_UEAFSR_SDWR |
SABRE_UEAFSR_SDTE | SABRE_UEAFSR_PDTE),
base + SABRE_UE_AFSR);
err = request_irq(op->irqs[1], sabre_ue_intr, 0, "SABRE_UE", pbm);
if (err)
printk(KERN_WARNING "%s: Couldn't register UE, err=%d.\n",
pbm->name, err);
upa_writeq((SABRE_CEAFSR_PDRD | SABRE_CEAFSR_PDWR |
SABRE_CEAFSR_SDRD | SABRE_CEAFSR_SDWR),
base + SABRE_CE_AFSR);
err = request_irq(op->irqs[2], sabre_ce_intr, 0, "SABRE_CE", pbm);
if (err)
printk(KERN_WARNING "%s: Couldn't register CE, err=%d.\n",
pbm->name, err);
err = request_irq(op->irqs[0], psycho_pcierr_intr, 0,
"SABRE_PCIERR", pbm);
if (err)
printk(KERN_WARNING "%s: Couldn't register PCIERR, err=%d.\n",
pbm->name, err);
tmp = upa_readq(base + SABRE_PCICTRL);
tmp |= SABRE_PCICTRL_ERREN;
upa_writeq(tmp, base + SABRE_PCICTRL);
}
static void apb_init(struct pci_bus *sabre_bus)
{
struct pci_dev *pdev;
list_for_each_entry(pdev, &sabre_bus->devices, bus_list) {
if (pdev->vendor == PCI_VENDOR_ID_SUN &&
pdev->device == PCI_DEVICE_ID_SUN_SIMBA) {
u16 word16;
pci_read_config_word(pdev, PCI_COMMAND, &word16);
word16 |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY |
PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY |
PCI_COMMAND_IO;
pci_write_config_word(pdev, PCI_COMMAND, word16);
/* Status register bits are "write 1 to clear". */
pci_write_config_word(pdev, PCI_STATUS, 0xffff);
pci_write_config_word(pdev, PCI_SEC_STATUS, 0xffff);
/* Use a primary/secondary latency timer value
 * of 64.
 */
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 64);
pci_write_config_byte(pdev, PCI_SEC_LATENCY_TIMER, 64);
/* Enable reporting/forwarding of master aborts,
* parity, and SERR.
*/
pci_write_config_byte(pdev, PCI_BRIDGE_CONTROL,
(PCI_BRIDGE_CTL_PARITY |
PCI_BRIDGE_CTL_SERR |
PCI_BRIDGE_CTL_MASTER_ABORT));
}
}
}
static void __init sabre_scan_bus(struct pci_pbm_info *pbm,
struct device *parent)
{
static int once;
/* The APB bridge speaks to the Sabre host PCI bridge
* at 66MHz, but the front side of APB runs at 33MHz
* for both segments.
*
* Hummingbird systems do not use APB, so they run
* at 66MHz.
*/
if (hummingbird_p)
pbm->is_66mhz_capable = 1;
else
pbm->is_66mhz_capable = 0;
/* This driver has not been verified to handle
* multiple SABREs yet, so trap this.
*
* Also note that the SABRE host bridge is hardwired
* to live at bus 0.
*/
if (once != 0) {
printk(KERN_ERR PFX "Multiple controllers unsupported.\n");
return;
}
once++;
pbm->pci_bus = pci_scan_one_pbm(pbm, parent);
if (!pbm->pci_bus)
return;
sabre_root_bus = pbm->pci_bus;
apb_init(pbm->pci_bus);
sabre_register_error_handlers(pbm);
}
static void __init sabre_pbm_init(struct pci_pbm_info *pbm,
struct of_device *op)
{
psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE);
pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR;
pbm->pci_afar = pbm->controller_regs + SABRE_PIOAFAR;
pbm->pci_csr = pbm->controller_regs + SABRE_PCICTRL;
sabre_scan_bus(pbm, &op->dev);
}
static int __devinit sabre_probe(struct of_device *op,
const struct of_device_id *match)
{
const struct linux_prom64_registers *pr_regs;
struct device_node *dp = op->node;
struct pci_pbm_info *pbm;
u32 upa_portid, dma_mask;
struct iommu *iommu;
int tsbsize, err;
const u32 *vdma;
u64 clear_irq;
hummingbird_p = (match->data != NULL);
if (!hummingbird_p) {
struct device_node *cpu_dp;
/* Of course, Sun has to encode things a thousand
* different ways, inconsistently.
*/
for_each_node_by_type(cpu_dp, "cpu") {
if (!strcmp(cpu_dp->name, "SUNW,UltraSPARC-IIe"))
hummingbird_p = 1;
}
}
err = -ENOMEM;
pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
if (!pbm) {
printk(KERN_ERR PFX "Cannot allocate pci_pbm_info.\n");
goto out_err;
}
iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
if (!iommu) {
printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
goto out_free_controller;
}
pbm->iommu = iommu;
upa_portid = of_getintprop_default(dp, "upa-portid", 0xff);
pbm->portid = upa_portid;
/*
* Map in SABRE register set and report the presence of this SABRE.
*/
pr_regs = of_get_property(dp, "reg", NULL);
err = -ENODEV;
if (!pr_regs) {
printk(KERN_ERR PFX "No reg property\n");
goto out_free_iommu;
}
/*
* First REG in property is base of entire SABRE register space.
*/
pbm->controller_regs = pr_regs[0].phys_addr;
/* Clear interrupts */
/* PCI first */
for (clear_irq = SABRE_ICLR_A_SLOT0; clear_irq < SABRE_ICLR_B_SLOT0 + 0x80; clear_irq += 8)
upa_writeq(0x0UL, pbm->controller_regs + clear_irq);
/* Then OBIO */
for (clear_irq = SABRE_ICLR_SCSI; clear_irq < SABRE_ICLR_SCSI + 0x80; clear_irq += 8)
upa_writeq(0x0UL, pbm->controller_regs + clear_irq);
/* Error interrupts are enabled later after the bus scan. */
upa_writeq((SABRE_PCICTRL_MRLEN | SABRE_PCICTRL_SERR |
SABRE_PCICTRL_ARBPARK | SABRE_PCICTRL_AEN),
pbm->controller_regs + SABRE_PCICTRL);
/* Now map in PCI config space for entire SABRE. */
pbm->config_space = pbm->controller_regs + SABRE_CONFIGSPACE;
vdma = of_get_property(dp, "virtual-dma", NULL);
if (!vdma) {
printk(KERN_ERR PFX "No virtual-dma property\n");
goto out_free_iommu;
}
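/* The "virtual-dma" property is a (base, size) pair: the DMA mask
 * spans base..base+size-1 and the IOMMU TSB is sized to the window
 * (tsbsize counts 1K blocks of 8K-page entries, so 64 covers the
 * 512MB case exactly; the bigger windows use the 128 maximum).
 */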
dma_mask = vdma[0];
switch(vdma[1]) {
case 0x20000000:
dma_mask |= 0x1fffffff;
tsbsize = 64;
break;
case 0x40000000:
dma_mask |= 0x3fffffff;
tsbsize = 128;
break;
case 0x80000000:
dma_mask |= 0x7fffffff;
tsbsize = 128;
break;
default:
printk(KERN_ERR PFX "Strange virtual-dma size.\n");
goto out_free_iommu;
}
err = psycho_iommu_init(pbm, tsbsize, vdma[0], dma_mask, SABRE_WRSYNC);
if (err)
goto out_free_iommu;
/*
* Look for APB underneath.
*/
sabre_pbm_init(pbm, op);
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
dev_set_drvdata(&op->dev, pbm);
return 0;
out_free_iommu:
kfree(pbm->iommu);
out_free_controller:
kfree(pbm);
out_err:
return err;
}
static struct of_device_id __initdata sabre_match[] = {
{
.name = "pci",
.compatible = "pci108e,a001",
.data = (void *) 1,
},
{
.name = "pci",
.compatible = "pci108e,a000",
},
{},
};
static struct of_platform_driver sabre_driver = {
.name = DRIVER_NAME,
.match_table = sabre_match,
.probe = sabre_probe,
};
static int __init sabre_init(void)
{
return of_register_driver(&sabre_driver, &of_bus_type);
}
subsys_initcall(sabre_init);

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,92 @@
/* pci_sun4v.h: SUN4V specific PCI controller support.
*
* Copyright (C) 2006 David S. Miller (davem@davemloft.net)
*/
#ifndef _PCI_SUN4V_H
#define _PCI_SUN4V_H
extern long pci_sun4v_iommu_map(unsigned long devhandle,
unsigned long tsbid,
unsigned long num_ttes,
unsigned long io_attributes,
unsigned long io_page_list_pa);
extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
unsigned long tsbid,
unsigned long num_ttes);
extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
unsigned long tsbid,
unsigned long *io_attributes,
unsigned long *real_address);
extern unsigned long pci_sun4v_config_get(unsigned long devhandle,
unsigned long pci_device,
unsigned long config_offset,
unsigned long size);
extern int pci_sun4v_config_put(unsigned long devhandle,
unsigned long pci_device,
unsigned long config_offset,
unsigned long size,
unsigned long data);
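/* Editor's sketch (names assumed from asm/hypervisor.h): a 32-bit
 * config-space read/modify/write through these wrappers might look
 * like:
 *
 *	unsigned long dev = HV_PCI_DEVICE_BUILD(bus, dev_num, fn);
 *	unsigned long val = pci_sun4v_config_get(devhandle, dev, where, 4);
 *	pci_sun4v_config_put(devhandle, dev, where, 4, val | enable_bit);
 */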
extern unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
unsigned long msiqid,
unsigned long msiq_paddr,
unsigned long num_entries);
extern unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
unsigned long msiqid,
unsigned long *msiq_paddr,
unsigned long *num_entries);
extern unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
unsigned long msiqid,
unsigned long *valid);
extern unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
unsigned long msiqid,
unsigned long valid);
extern unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
unsigned long msiqid,
unsigned long *state);
extern unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
unsigned long msiqid,
unsigned long state);
extern unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
unsigned long msiqid,
unsigned long *head);
extern unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
unsigned long msiqid,
unsigned long head);
extern unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
unsigned long msiqid,
unsigned long *tail);
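/* Editor's sketch: the head/tail calls above support the usual
 * event-queue drain pattern (assuming fixed-size queue records):
 *
 *	unsigned long head, tail;
 *	pci_sun4v_msiq_gethead(devhandle, msiqid, &head);
 *	pci_sun4v_msiq_gettail(devhandle, msiqid, &tail);
 *	while (head != tail) {
 *		... process the record at offset 'head' ...
 *		head = (head + sizeof(record)) % queue_size;
 *	}
 *	pci_sun4v_msiq_sethead(devhandle, msiqid, head);
 */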
extern unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
unsigned long msinum,
unsigned long *valid);
extern unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
unsigned long msinum,
unsigned long valid);
extern unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
unsigned long msinum,
unsigned long *msiq);
extern unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
unsigned long msinum,
unsigned long msiq,
unsigned long msitype);
extern unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
unsigned long msinum,
unsigned long *state);
extern unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
unsigned long msinum,
unsigned long state);
extern unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
unsigned long msinum,
unsigned long *msiq);
extern unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
unsigned long msinum,
unsigned long msiq);
extern unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
unsigned long msinum,
unsigned long *valid);
extern unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
unsigned long msinum,
unsigned long valid);
#endif /* !(_PCI_SUN4V_H) */

View File

@@ -0,0 +1,362 @@
/* pci_sun4v_asm: Hypervisor calls for PCI support.
*
* Copyright (C) 2006, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/linkage.h>
#include <asm/hypervisor.h>
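/* Editor's note: every wrapper below follows the same sun4v fast-trap
 * shape: the HV_FAST_* function number is loaded into %o5, arguments
 * stay in %o0-%o4, "ta HV_FAST_TRAP" enters the hypervisor, and the
 * status returns in %o0 (with any output value in %o1).
 */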
/* %o0: devhandle
* %o1: tsbid
* %o2: num ttes
* %o3: io_attributes
* %o4: io_page_list phys address
*
* returns %o0: -status if status was non-zero, else
* %o0: num pages mapped
*/
ENTRY(pci_sun4v_iommu_map)
mov %o5, %g1
mov HV_FAST_PCI_IOMMU_MAP, %o5
ta HV_FAST_TRAP
brnz,pn %o0, 1f
sub %g0, %o0, %o0
mov %o1, %o0
1: retl
nop
ENDPROC(pci_sun4v_iommu_map)
/* %o0: devhandle
* %o1: tsbid
* %o2: num ttes
*
* returns %o0: num ttes demapped
*/
ENTRY(pci_sun4v_iommu_demap)
mov HV_FAST_PCI_IOMMU_DEMAP, %o5
ta HV_FAST_TRAP
retl
mov %o1, %o0
ENDPROC(pci_sun4v_iommu_demap)
/* %o0: devhandle
* %o1: tsbid
* %o2: &io_attributes
* %o3: &real_address
*
* returns %o0: status
*/
ENTRY(pci_sun4v_iommu_getmap)
mov %o2, %o4
mov HV_FAST_PCI_IOMMU_GETMAP, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
stx %o2, [%o3]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_iommu_getmap)
/* %o0: devhandle
* %o1: pci_device
* %o2: pci_config_offset
* %o3: size
*
* returns %o0: data
*
* If there is an error, the data will be returned
* as all 1's.
*/
ENTRY(pci_sun4v_config_get)
mov HV_FAST_PCI_CONFIG_GET, %o5
ta HV_FAST_TRAP
brnz,a,pn %o1, 1f
mov -1, %o2
1: retl
mov %o2, %o0
ENDPROC(pci_sun4v_config_get)
/* %o0: devhandle
* %o1: pci_device
* %o2: pci_config_offset
* %o3: size
* %o4: data
*
* returns %o0: status
*
* status will be zero if the operation completed
* successfully, else -1 if not
*/
ENTRY(pci_sun4v_config_put)
mov HV_FAST_PCI_CONFIG_PUT, %o5
ta HV_FAST_TRAP
brnz,a,pn %o1, 1f
mov -1, %o1
1: retl
mov %o1, %o0
ENDPROC(pci_sun4v_config_put)
/* %o0: devhandle
* %o1: msiqid
* %o2: msiq phys address
* %o3: num entries
*
* returns %o0: status
*
* status will be zero if the operation completed
* successfully, else -1 if not
*/
ENTRY(pci_sun4v_msiq_conf)
mov HV_FAST_PCI_MSIQ_CONF, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_conf)
/* %o0: devhandle
* %o1: msiqid
* %o2: &msiq_phys_addr
* %o3: &msiq_num_entries
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_info)
mov %o2, %o4
mov HV_FAST_PCI_MSIQ_INFO, %o5
ta HV_FAST_TRAP
stx %o1, [%o4]
stx %o2, [%o3]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_info)
/* %o0: devhandle
* %o1: msiqid
* %o2: &valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_getvalid)
mov HV_FAST_PCI_MSIQ_GETVALID, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_getvalid)
/* %o0: devhandle
* %o1: msiqid
* %o2: valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_setvalid)
mov HV_FAST_PCI_MSIQ_SETVALID, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_setvalid)
/* %o0: devhandle
* %o1: msiqid
* %o2: &state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_getstate)
mov HV_FAST_PCI_MSIQ_GETSTATE, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_getstate)
/* %o0: devhandle
* %o1: msiqid
* %o2: state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_setstate)
mov HV_FAST_PCI_MSIQ_SETSTATE, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_setstate)
/* %o0: devhandle
* %o1: msiqid
* %o2: &head
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_gethead)
mov HV_FAST_PCI_MSIQ_GETHEAD, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_gethead)
/* %o0: devhandle
* %o1: msiqid
* %o2: head
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_sethead)
mov HV_FAST_PCI_MSIQ_SETHEAD, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_sethead)
/* %o0: devhandle
* %o1: msiqid
* %o2: &tail
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msiq_gettail)
mov HV_FAST_PCI_MSIQ_GETTAIL, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msiq_gettail)
/* %o0: devhandle
* %o1: msinum
* %o2: &valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_getvalid)
mov HV_FAST_PCI_MSI_GETVALID, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_getvalid)
/* %o0: devhandle
* %o1: msinum
* %o2: valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_setvalid)
mov HV_FAST_PCI_MSI_SETVALID, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_setvalid)
/* %o0: devhandle
* %o1: msinum
* %o2: &msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_getmsiq)
mov HV_FAST_PCI_MSI_GETMSIQ, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_getmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: msitype
* %o3: msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_setmsiq)
mov HV_FAST_PCI_MSI_SETMSIQ, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_setmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: &state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_getstate)
mov HV_FAST_PCI_MSI_GETSTATE, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_getstate)
/* %o0: devhandle
* %o1: msinum
* %o2: state
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msi_setstate)
mov HV_FAST_PCI_MSI_SETSTATE, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msi_setstate)
/* %o0: devhandle
* %o1: msinum
* %o2: &msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_getmsiq)
mov HV_FAST_PCI_MSG_GETMSIQ, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_getmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: msiq
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_setmsiq)
mov HV_FAST_PCI_MSG_SETMSIQ, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setmsiq)
/* %o0: devhandle
* %o1: msinum
* %o2: &valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_getvalid)
mov HV_FAST_PCI_MSG_GETVALID, %o5
ta HV_FAST_TRAP
stx %o1, [%o2]
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_getvalid)
/* %o0: devhandle
* %o1: msinum
* %o2: valid
*
* returns %o0: status
*/
ENTRY(pci_sun4v_msg_setvalid)
mov HV_FAST_PCI_MSG_SETVALID, %o5
ta HV_FAST_TRAP
retl
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)

View File

@@ -436,7 +436,7 @@ int pcic_present(void)
return pcic0_up;
}
static int __devinit pdev_to_pnode(struct linux_pbm_info *pbm,
struct pci_dev *pdev)
{
struct linux_prom_pci_registers regs[PROMREG_MAX];

View File

@@ -24,32 +24,32 @@
*/
#define PMC_OBPNAME "SUNW,pmc"
#define PMC_DEVNAME "pmc"
#define PMC_IDLE_REG 0x00
#define PMC_IDLE_ON 0x01
static u8 __iomem *regs;
#define pmc_readb(offs) (sbus_readb(regs+offs))
#define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs))
/*
* CPU idle callback function
* See .../arch/sparc/kernel/process.c
*/
static void pmc_swift_idle(void)
{
#ifdef PMC_DEBUG_LED
set_auxio(0x00, AUXIO_LED);
#endif
pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG);
#ifdef PMC_DEBUG_LED
set_auxio(AUXIO_LED, 0x00);
#endif
}
static int __devinit pmc_probe(struct of_device *op,
const struct of_device_id *match)
@@ -63,7 +63,7 @@ static int __devinit pmc_probe(struct of_device *op,
#ifndef PMC_NO_IDLE
/* Assign power management IDLE handler */
pm_idle = pmc_swift_idle;
#endif
printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME);

75
arch/sparc/kernel/power.c Normal file
View File

@@ -0,0 +1,75 @@
/* power.c: Power management driver.
*
* Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/io.h>
static void __iomem *power_reg;
static irqreturn_t power_handler(int irq, void *dev_id)
{
orderly_poweroff(true);
/* FIXME: Check registers for status... */
return IRQ_HANDLED;
}
static int __init has_button_interrupt(unsigned int irq, struct device_node *dp)
{
if (irq == 0xffffffff)
return 0;
if (!of_find_property(dp, "button", NULL))
return 0;
return 1;
}
static int __devinit power_probe(struct of_device *op, const struct of_device_id *match)
{
struct resource *res = &op->resource[0];
unsigned int irq = op->irqs[0];
power_reg = of_ioremap(res, 0, 0x4, "power");
printk(KERN_INFO "%s: Control reg at %lx\n",
op->node->name, res->start);
if (has_button_interrupt(irq, op->node)) {
if (request_irq(irq,
power_handler, 0, "power", NULL) < 0)
printk(KERN_ERR "power: Cannot setup IRQ handler.\n");
}
return 0;
}
static struct of_device_id __initdata power_match[] = {
{
.name = "power",
},
{},
};
static struct of_platform_driver power_driver = {
.match_table = power_match,
.probe = power_probe,
.driver = {
.name = "power",
},
};
static int __init power_init(void)
{
return of_register_driver(&power_driver, &of_platform_bus_type);
}
device_initcall(power_init);

View File

@@ -168,11 +168,9 @@ void machine_restart(char * cmd)
void machine_power_off(void)
{
#ifdef CONFIG_SUN_AUXIO
if (auxio_power_register &&
(strcmp(of_console_device->type, "serial") || scons_pwroff))
*auxio_power_register |= AUXIO_POWER_OFF;
#endif
machine_halt();
}

View File

@@ -0,0 +1,812 @@
/* arch/sparc64/kernel/process.c
*
* Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997, 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
/*
* This file handles the architecture-dependent parts of process handling.
*/
#include <stdarg.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include "kstack.h"
static void sparc64_yield(int cpu)
{
if (tlb_type != hypervisor)
return;
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
while (!need_resched() && !cpu_is_offline(cpu)) {
unsigned long pstate;
/* Disable interrupts. */
__asm__ __volatile__(
"rdpr %%pstate, %0\n\t"
"andn %0, %1, %0\n\t"
"wrpr %0, %%g0, %%pstate"
: "=&r" (pstate)
: "i" (PSTATE_IE));
if (!need_resched() && !cpu_is_offline(cpu))
sun4v_cpu_yield();
/* Re-enable interrupts. */
__asm__ __volatile__(
"rdpr %%pstate, %0\n\t"
"or %0, %1, %0\n\t"
"wrpr %0, %%g0, %%pstate"
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
set_thread_flag(TIF_POLLING_NRFLAG);
}
/* The idle loop on sparc64. */
void cpu_idle(void)
{
int cpu = smp_processor_id();
set_thread_flag(TIF_POLLING_NRFLAG);
while(1) {
tick_nohz_stop_sched_tick(1);
while (!need_resched() && !cpu_is_offline(cpu))
sparc64_yield(cpu);
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(cpu))
cpu_play_dead();
#endif
schedule();
preempt_disable();
}
}
#ifdef CONFIG_COMPAT
static void show_regwindow32(struct pt_regs *regs)
{
struct reg_window32 __user *rw;
struct reg_window32 r_w;
mm_segment_t old_fs;
__asm__ __volatile__ ("flushw");
rw = compat_ptr((unsigned)regs->u_regs[14]);
old_fs = get_fs();
set_fs (USER_DS);
if (copy_from_user (&r_w, rw, sizeof(r_w))) {
set_fs (old_fs);
return;
}
set_fs (old_fs);
printk("l0: %08x l1: %08x l2: %08x l3: %08x "
"l4: %08x l5: %08x l6: %08x l7: %08x\n",
r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
printk("i0: %08x i1: %08x i2: %08x i3: %08x "
"i4: %08x i5: %08x i6: %08x i7: %08x\n",
r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs) do { } while (0)
#endif
static void show_regwindow(struct pt_regs *regs)
{
struct reg_window __user *rw;
struct reg_window *rwk;
struct reg_window r_w;
mm_segment_t old_fs;
if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
__asm__ __volatile__ ("flushw");
rw = (struct reg_window __user *)
(regs->u_regs[14] + STACK_BIAS);
rwk = (struct reg_window *)
(regs->u_regs[14] + STACK_BIAS);
if (!(regs->tstate & TSTATE_PRIV)) {
old_fs = get_fs();
set_fs (USER_DS);
if (copy_from_user (&r_w, rw, sizeof(r_w))) {
set_fs (old_fs);
return;
}
rwk = &r_w;
set_fs (old_fs);
}
} else {
show_regwindow32(regs);
return;
}
printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
if (regs->tstate & TSTATE_PRIV)
printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}
void show_regs(struct pt_regs *regs)
{
printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
regs->tpc, regs->tnpc, regs->y, print_tainted());
printk("TPC: <%pS>\n", (void *) regs->tpc);
printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
regs->u_regs[3]);
printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
regs->u_regs[7]);
printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
regs->u_regs[11]);
printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
regs->u_regs[15]);
printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
show_regwindow(regs);
}
struct global_reg_snapshot global_reg_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_reg_snapshot_lock);
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
int this_cpu)
{
flushw_all();
global_reg_snapshot[this_cpu].tstate = regs->tstate;
global_reg_snapshot[this_cpu].tpc = regs->tpc;
global_reg_snapshot[this_cpu].tnpc = regs->tnpc;
global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7];
if (regs->tstate & TSTATE_PRIV) {
struct reg_window *rw;
rw = (struct reg_window *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
if (kstack_valid(tp, (unsigned long) rw)) {
global_reg_snapshot[this_cpu].i7 = rw->ins[7];
rw = (struct reg_window *)
(rw->ins[6] + STACK_BIAS);
if (kstack_valid(tp, (unsigned long) rw))
global_reg_snapshot[this_cpu].rpc = rw->ins[7];
}
} else {
global_reg_snapshot[this_cpu].i7 = 0;
global_reg_snapshot[this_cpu].rpc = 0;
}
global_reg_snapshot[this_cpu].thread = tp;
}
/* In order to avoid hangs we do not try to synchronize with the
* global register dump client cpus. The last store they make is to
* the thread pointer, so do a short poll waiting for that to become
* non-NULL.
*/
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
int limit = 0;
while (!gp->thread && ++limit < 100) {
barrier();
udelay(1);
}
}
void __trigger_all_cpu_backtrace(void)
{
struct thread_info *tp = current_thread_info();
struct pt_regs *regs = get_irq_regs();
unsigned long flags;
int this_cpu, cpu;
if (!regs)
regs = tp->kregs;
spin_lock_irqsave(&global_reg_snapshot_lock, flags);
memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
this_cpu = raw_smp_processor_id();
__global_reg_self(tp, regs, this_cpu);
smp_fetch_global_regs();
for_each_online_cpu(cpu) {
struct global_reg_snapshot *gp = &global_reg_snapshot[cpu];
__global_reg_poll(gp);
tp = gp->thread;
printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
(cpu == this_cpu ? '*' : ' '), cpu,
gp->tstate, gp->tpc, gp->tnpc,
((tp && tp->task) ? tp->task->comm : "NULL"),
((tp && tp->task) ? tp->task->pid : -1));
if (gp->tstate & TSTATE_PRIV) {
printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
(void *) gp->tpc,
(void *) gp->o7,
(void *) gp->i7,
(void *) gp->rpc);
} else {
printk(" TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
gp->tpc, gp->o7, gp->i7, gp->rpc);
}
}
memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot));
spin_unlock_irqrestore(&global_reg_snapshot_lock, flags);
}
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_globreg(int key, struct tty_struct *tty)
{
__trigger_all_cpu_backtrace();
}
static struct sysrq_key_op sparc_globalreg_op = {
.handler = sysrq_handle_globreg,
.help_msg = "Globalregs",
.action_msg = "Show Global CPU Regs",
};
static int __init sparc_globreg_init(void)
{
return register_sysrq_key('y', &sparc_globalreg_op);
}
core_initcall(sparc_globreg_init);
#endif
unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct thread_info *ti = task_thread_info(tsk);
unsigned long ret = 0xdeadbeefUL;
if (ti && ti->ksp) {
unsigned long *sp;
sp = (unsigned long *)(ti->ksp + STACK_BIAS);
if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
sp[14]) {
unsigned long *fp;
fp = (unsigned long *)(sp[14] + STACK_BIAS);
if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
ret = fp[15];
}
}
return ret;
}
/* Free current thread data structures etc.. */
void exit_thread(void)
{
struct thread_info *t = current_thread_info();
if (t->utraps) {
if (t->utraps[0] < 2)
kfree (t->utraps);
else
t->utraps[0]--;
}
if (test_and_clear_thread_flag(TIF_PERFCTR)) {
t->user_cntd0 = t->user_cntd1 = NULL;
t->pcr_reg = 0;
write_pcr(0);
}
}
void flush_thread(void)
{
struct thread_info *t = current_thread_info();
struct mm_struct *mm;
if (test_ti_thread_flag(t, TIF_ABI_PENDING)) {
clear_ti_thread_flag(t, TIF_ABI_PENDING);
if (test_ti_thread_flag(t, TIF_32BIT))
clear_ti_thread_flag(t, TIF_32BIT);
else
set_ti_thread_flag(t, TIF_32BIT);
}
mm = t->task->mm;
if (mm)
tsb_context_switch(mm);
set_thread_wsaved(0);
/* Turn off performance counters if on. */
if (test_and_clear_thread_flag(TIF_PERFCTR)) {
t->user_cntd0 = t->user_cntd1 = NULL;
t->pcr_reg = 0;
write_pcr(0);
}
/* Clear FPU register state. */
t->fpsaved[0] = 0;
if (get_thread_current_ds() != ASI_AIUS)
set_fs(USER_DS);
}
/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
unsigned long fp, distance, rval;
if (!(test_thread_flag(TIF_32BIT))) {
csp += STACK_BIAS;
psp += STACK_BIAS;
__get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
fp += STACK_BIAS;
} else
__get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));
/* Now 8-byte align the stack as this is mandatory in the
* Sparc ABI due to how register windows work. This hides
* the restriction from thread libraries etc. -DaveM
*/
csp &= ~7UL;
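/* Copy the parent's live stack span (from %sp up to the saved
 * frame pointer) to the same relative offset beneath the new
 * child stack pointer.
 */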
distance = fp - psp;
rval = (csp - distance);
if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
rval = 0;
else if (test_thread_flag(TIF_32BIT)) {
if (put_user(((u32)csp),
&(((struct reg_window32 __user *)rval)->ins[6])))
rval = 0;
} else {
if (put_user(((u64)csp - STACK_BIAS),
&(((struct reg_window __user *)rval)->ins[6])))
rval = 0;
else
rval = rval - STACK_BIAS;
}
return rval;
}
/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
struct thread_info *t)
{
int i;
for (i = first_win; i < last_win; i++) {
t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
memcpy(&t->reg_window[i], &t->reg_window[i+1],
sizeof(struct reg_window));
}
}
void synchronize_user_stack(void)
{
struct thread_info *t = current_thread_info();
unsigned long window;
flush_user_windows();
if ((window = get_thread_wsaved()) != 0) {
int winsize = sizeof(struct reg_window);
int bias = 0;
if (test_thread_flag(TIF_32BIT))
winsize = sizeof(struct reg_window32);
else
bias = STACK_BIAS;
window -= 1;
do {
unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
if (!copy_to_user((char __user *)sp, rwin, winsize)) {
shift_window_buffer(window, get_thread_wsaved() - 1, t);
set_thread_wsaved(get_thread_wsaved() - 1);
}
} while (window--);
}
}
static void stack_unaligned(unsigned long sp)
{
siginfo_t info;
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void __user *) sp;
info.si_trapno = 0;
force_sig_info(SIGBUS, &info, current);
}
void fault_in_user_windows(void)
{
struct thread_info *t = current_thread_info();
unsigned long window;
int winsize = sizeof(struct reg_window);
int bias = 0;
if (test_thread_flag(TIF_32BIT))
winsize = sizeof(struct reg_window32);
else
bias = STACK_BIAS;
flush_user_windows();
window = get_thread_wsaved();
if (likely(window != 0)) {
window -= 1;
do {
unsigned long sp = (t->rwbuf_stkptrs[window] + bias);
struct reg_window *rwin = &t->reg_window[window];
if (unlikely(sp & 0x7UL))
stack_unaligned(sp);
if (unlikely(copy_to_user((char __user *)sp,
rwin, winsize)))
goto barf;
} while (window--);
}
set_thread_wsaved(0);
return;
barf:
set_thread_wsaved(window + 1);
do_exit(SIGILL);
}
asmlinkage long sparc_do_fork(unsigned long clone_flags,
unsigned long stack_start,
struct pt_regs *regs,
unsigned long stack_size)
{
int __user *parent_tid_ptr, *child_tid_ptr;
unsigned long orig_i1 = regs->u_regs[UREG_I1];
long ret;
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
} else
#endif
{
parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
}
ret = do_fork(clone_flags, stack_start,
regs, stack_size,
parent_tid_ptr, child_tid_ptr);
/* If we get an error and potentially restart the system
* call, we're screwed because copy_thread() clobbered
* the parent's %o1. So detect that case and restore it
* here.
*/
if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
regs->u_regs[UREG_I1] = orig_i1;
return ret;
}
/* Copy a Sparc thread. The fork() return value conventions
* under SunOS are nothing short of bletcherous:
 * Parent --> %o0 == child's pid, %o1 == 0
 * Child  --> %o0 == parent's pid, %o1 == 1
*/
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
unsigned long unused,
struct task_struct *p, struct pt_regs *regs)
{
struct thread_info *t = task_thread_info(p);
struct sparc_stackf *parent_sf;
unsigned long child_stack_sz;
char *child_trap_frame;
int kernel_thread;
kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0;
parent_sf = ((struct sparc_stackf *) regs) - 1;
/* Calculate offset to stack_frame & pt_regs */
child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) +
(kernel_thread ? STACKFRAME_SZ : 0));
child_trap_frame = (task_stack_page(p) +
(THREAD_SIZE - child_stack_sz));
memcpy(child_trap_frame, parent_sf, child_stack_sz);
t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) |
(0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) |
(((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT);
t->new_child = 1;
t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
t->kregs = (struct pt_regs *) (child_trap_frame +
sizeof(struct sparc_stackf));
t->fpsaved[0] = 0;
if (kernel_thread) {
struct sparc_stackf *child_sf = (struct sparc_stackf *)
(child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ));
/* Zero terminate the stack backtrace. */
child_sf->fp = NULL;
t->kregs->u_regs[UREG_FP] =
((unsigned long) child_sf) - STACK_BIAS;
/* Special case, if we are spawning a kernel thread from
* a userspace task (usermode helper, NFS or similar), we
* must disable performance counters in the child because
* the address space and protection realm are changing.
*/
if (t->flags & _TIF_PERFCTR) {
t->user_cntd0 = t->user_cntd1 = NULL;
t->pcr_reg = 0;
t->flags &= ~_TIF_PERFCTR;
}
t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT);
t->kregs->u_regs[UREG_G6] = (unsigned long) t;
t->kregs->u_regs[UREG_G4] = (unsigned long) t->task;
} else {
if (t->flags & _TIF_32BIT) {
sp &= 0x00000000ffffffffUL;
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
}
t->kregs->u_regs[UREG_FP] = sp;
t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT);
if (sp != regs->u_regs[UREG_FP]) {
unsigned long csp;
csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
if (!csp)
return -EFAULT;
t->kregs->u_regs[UREG_FP] = csp;
}
if (t->utraps)
t->utraps[0]++;
}
/* Set the return value for the child. */
t->kregs->u_regs[UREG_I0] = current->pid;
t->kregs->u_regs[UREG_I1] = 1;
/* Set the second return value for the parent. */
regs->u_regs[UREG_I1] = 0;
if (clone_flags & CLONE_SETTLS)
t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];
return 0;
}
/*
* This is the mechanism for creating a new kernel thread.
*
 * NOTE! Only a kernel-only process (i.e. the swapper or direct descendants
* who haven't done an "execve()") should use this: it will work within
* a system call from a "real" process, but the process memory space will
* not be freed until both the parent and the child have exited.
*/
pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
long retval;
/* If the parent runs before fn(arg) is called by the child,
* the input registers of this function can be clobbered.
* So we stash 'fn' and 'arg' into global registers which
* will not be modified by the parent.
*/
__asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */
"mov %5, %%g3\n\t" /* Save ARG into global */
"mov %1, %%g1\n\t" /* Clone syscall nr. */
"mov %2, %%o0\n\t" /* Clone flags. */
"mov 0, %%o1\n\t" /* usp arg == 0 */
"t 0x6d\n\t" /* Linux/Sparc clone(). */
"brz,a,pn %%o1, 1f\n\t" /* Parent, just return. */
" mov %%o0, %0\n\t"
"jmpl %%g2, %%o7\n\t" /* Call the function. */
" mov %%g3, %%o0\n\t" /* Set arg in delay. */
"mov %3, %%g1\n\t"
"t 0x6d\n\t" /* Linux/Sparc exit(). */
/* Notreached by child. */
"1:" :
"=r" (retval) :
"i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED),
"i" (__NR_exit), "r" (fn), "r" (arg) :
"g1", "g2", "g3", "o0", "o1", "memory", "cc");
return retval;
}
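/* Usage sketch (illustrative; my_worker and its argument are
 * hypothetical, not part of this file):
 *
 *	static int my_worker(void *arg)
 *	{
 *		... do work, then return to fall into the exit trap ...
 *	}
 *
 *	pid_t pid = kernel_thread(my_worker, &ctx, CLONE_FS | CLONE_FILES);
 *	if (pid < 0)
 *		printk(KERN_ERR "worker spawn failed: %d\n", (int) pid);
 */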
typedef struct {
union {
unsigned int pr_regs[32];
unsigned long pr_dregs[16];
} pr_fr;
unsigned int __unused;
unsigned int pr_fsr;
unsigned char pr_qcnt;
unsigned char pr_q_entrysize;
unsigned char pr_en;
unsigned int pr_q[64];
} elf_fpregset_t32;
/*
* fill in the fpu structure for a core dump.
*/
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
unsigned long *kfpregs = current_thread_info()->fpregs;
unsigned long fprs = current_thread_info()->fpsaved[0];
if (test_thread_flag(TIF_32BIT)) {
elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;
if (fprs & FPRS_DL)
memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
sizeof(unsigned int) * 32);
else
memset(&fpregs32->pr_fr.pr_regs[0], 0,
sizeof(unsigned int) * 32);
fpregs32->pr_qcnt = 0;
fpregs32->pr_q_entrysize = 8;
memset(&fpregs32->pr_q[0], 0,
(sizeof(unsigned int) * 64));
if (fprs & FPRS_FEF) {
fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
fpregs32->pr_en = 1;
} else {
fpregs32->pr_fsr = 0;
fpregs32->pr_en = 0;
}
} else {
if (fprs & FPRS_DL)
memcpy(&fpregs->pr_regs[0], kfpregs,
sizeof(unsigned int) * 32);
else
memset(&fpregs->pr_regs[0], 0,
sizeof(unsigned int) * 32);
if (fprs & FPRS_DU)
memcpy(&fpregs->pr_regs[16], kfpregs+16,
sizeof(unsigned int) * 32);
else
memset(&fpregs->pr_regs[16], 0,
sizeof(unsigned int) * 32);
if (fprs & FPRS_FEF) {
fpregs->pr_fsr = current_thread_info()->xfsr[0];
fpregs->pr_gsr = current_thread_info()->gsr[0];
} else {
fpregs->pr_fsr = fpregs->pr_gsr = 0;
}
fpregs->pr_fprs = fprs;
}
return 1;
}
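/* Note: FPRS_DL and FPRS_DU mark the lower and upper halves of the FPU
 * register file as live, and FPRS_FEF means the FPU was enabled; only
 * live halves are copied above, the rest are zero-filled.
 */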
/*
* sparc_execve() executes a new program after the asm stub has set
* things up for us. This should basically do what I want it to.
*/
asmlinkage int sparc_execve(struct pt_regs *regs)
{
int error, base = 0;
char *filename;
/* User register window flush is done by entry.S */
/* Check for indirect call. */
if (regs->u_regs[UREG_G1] == 0)
base = 1;
filename = getname((char __user *)regs->u_regs[base + UREG_I0]);
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = do_execve(filename,
(char __user * __user *)
regs->u_regs[base + UREG_I1],
(char __user * __user *)
regs->u_regs[base + UREG_I2], regs);
putname(filename);
if (!error) {
fprs_write(0);
current_thread_info()->xfsr[0] = 0;
current_thread_info()->fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
}
out:
return error;
}
unsigned long get_wchan(struct task_struct *task)
{
unsigned long pc, fp, bias = 0;
struct thread_info *tp;
struct reg_window *rw;
unsigned long ret = 0;
int count = 0;
if (!task || task == current ||
task->state == TASK_RUNNING)
goto out;
tp = task_thread_info(task);
bias = STACK_BIAS;
fp = task_thread_info(task)->ksp + bias;
do {
if (!kstack_valid(tp, fp))
break;
rw = (struct reg_window *) fp;
pc = rw->ins[7];
if (!in_sched_functions(pc)) {
ret = pc;
goto out;
}
fp = rw->ins[6] + bias;
} while (++count < 16);
out:
return ret;
}
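/* Note: each sparc64 stack frame begins with a struct reg_window in
 * which ins[6] holds the caller's frame pointer and ins[7] the return
 * address, so the loop above is a bounded (16-frame) walk up the call
 * chain looking for the first PC outside the scheduler.
 */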

29 arch/sparc/kernel/prom.h Normal file

@@ -0,0 +1,29 @@
#ifndef __PROM_H
#define __PROM_H
#include <linux/spinlock.h>
#include <asm/prom.h>
extern struct device_node *allnodes; /* temporary while merging */
extern rwlock_t devtree_lock; /* temporary while merging */
extern void * prom_early_alloc(unsigned long size);
extern void irq_trans_init(struct device_node *dp);
extern unsigned int prom_unique_id;
static inline int is_root_node(const struct device_node *dp)
{
if (!dp)
return 0;
return (dp->parent == NULL);
}
extern char *build_path_component(struct device_node *dp);
extern void of_console_init(void);
extern void of_fill_in_cpu_data(void);
extern unsigned int prom_early_allocated;
#endif /* __PROM_H */


@@ -25,107 +25,9 @@
#include <asm/prom.h>
#include <asm/oplib.h>
extern struct device_node *allnodes; /* temporary while merging */
#include "prom.h"
extern rwlock_t devtree_lock; /* temporary while merging */
struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np;
for (np = allnodes; np != 0; np = np->allnext)
if (np->node == handle)
break;
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
int of_getintprop_default(struct device_node *np, const char *name, int def)
{
struct property *prop;
int len;
prop = of_find_property(np, name, &len);
if (!prop || len != 4)
return def;
return *(int *) prop->value;
}
EXPORT_SYMBOL(of_getintprop_default);
DEFINE_MUTEX(of_set_property_mutex);
EXPORT_SYMBOL(of_set_property_mutex);
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
struct property **prevp;
void *new_val;
int err;
new_val = kmalloc(len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
memcpy(new_val, val, len);
err = -ENODEV;
write_lock(&devtree_lock);
prevp = &dp->properties;
while (*prevp) {
struct property *prop = *prevp;
if (!strcasecmp(prop->name, name)) {
void *old_val = prop->value;
int ret;
mutex_lock(&of_set_property_mutex);
ret = prom_setprop(dp->node, (char *) name, val, len);
mutex_unlock(&of_set_property_mutex);
err = -EINVAL;
if (ret >= 0) {
prop->value = new_val;
prop->length = len;
if (OF_IS_DYNAMIC(prop))
kfree(old_val);
OF_MARK_DYNAMIC(prop);
err = 0;
}
break;
}
prevp = &(*prevp)->next;
}
write_unlock(&devtree_lock);
/* XXX Update procfs if necessary... */
return err;
}
EXPORT_SYMBOL(of_set_property);
int of_find_in_proplist(const char *list, const char *match, int len)
{
while (len > 0) {
int l;
if (!strcmp(list, match))
return 1;
l = strlen(list) + 1;
list += l;
len -= l;
}
return 0;
}
EXPORT_SYMBOL(of_find_in_proplist);
static unsigned int prom_early_allocated;
static void * __init prom_early_alloc(unsigned long size)
void * __init prom_early_alloc(unsigned long size)
{
void *ret;
@@ -138,14 +40,6 @@ static void * __init prom_early_alloc(unsigned long size)
return ret;
}
static int is_root_node(const struct device_node *dp)
{
if (!dp)
return 0;
return (dp->parent == NULL);
}
/* The following routines deal with the black magic of fully naming a
* node.
*
@@ -257,7 +151,7 @@ static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
return sparc32_path_component(dp, tmp_buf);
}
static char * __init build_path_component(struct device_node *dp)
char * __init build_path_component(struct device_node *dp)
{
char tmp_buf[64], *n;
@@ -272,164 +166,9 @@ static char * __init build_path_component(struct device_node *dp)
return n;
}
static char * __init build_full_name(struct device_node *dp)
{
int len, ourlen, plen;
char *n;
plen = strlen(dp->parent->full_name);
ourlen = strlen(dp->path_component_name);
len = ourlen + plen + 2;
n = prom_early_alloc(len);
strcpy(n, dp->parent->full_name);
if (!is_root_node(dp->parent)) {
strcpy(n + plen, "/");
plen++;
}
strcpy(n + plen, dp->path_component_name);
return n;
}
static unsigned int unique_id;
static struct property * __init build_one_prop(phandle node, char *prev, char *special_name, void *special_val, int special_len)
{
static struct property *tmp = NULL;
struct property *p;
int len;
const char *name;
if (tmp) {
p = tmp;
memset(p, 0, sizeof(*p) + 32);
tmp = NULL;
} else {
p = prom_early_alloc(sizeof(struct property) + 32);
p->unique_id = unique_id++;
}
p->name = (char *) (p + 1);
if (special_name) {
strcpy(p->name, special_name);
p->length = special_len;
p->value = prom_early_alloc(special_len);
memcpy(p->value, special_val, special_len);
} else {
if (prev == NULL) {
name = prom_firstprop(node, NULL);
} else {
name = prom_nextprop(node, prev, NULL);
}
if (strlen(name) == 0) {
tmp = p;
return NULL;
}
strcpy(p->name, name);
p->length = prom_getproplen(node, p->name);
if (p->length <= 0) {
p->length = 0;
} else {
p->value = prom_early_alloc(p->length + 1);
len = prom_getproperty(node, p->name, p->value,
p->length);
if (len <= 0)
p->length = 0;
((unsigned char *)p->value)[p->length] = '\0';
}
}
return p;
}
static struct property * __init build_prop_list(phandle node)
{
struct property *head, *tail;
head = tail = build_one_prop(node, NULL,
".node", &node, sizeof(node));
tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
tail = tail->next;
while (tail) {
tail->next = build_one_prop(node, tail->name,
NULL, NULL, 0);
tail = tail->next;
}
return head;
}
static char * __init get_one_property(phandle node, char *name)
{
char *buf = "<NULL>";
int len;
len = prom_getproplen(node, name);
if (len > 0) {
buf = prom_early_alloc(len);
len = prom_getproperty(node, name, buf, len);
}
return buf;
}
static struct device_node * __init create_node(phandle node)
{
struct device_node *dp;
if (!node)
return NULL;
dp = prom_early_alloc(sizeof(*dp));
dp->unique_id = unique_id++;
kref_init(&dp->kref);
dp->name = get_one_property(node, "name");
dp->type = get_one_property(node, "device_type");
dp->node = node;
/* Build interrupts later... */
dp->properties = build_prop_list(node);
return dp;
}
static struct device_node * __init build_tree(struct device_node *parent, phandle node, struct device_node ***nextp)
{
struct device_node *dp;
dp = create_node(node);
if (dp) {
*(*nextp) = dp;
*nextp = &dp->allnext;
dp->parent = parent;
dp->path_component_name = build_path_component(dp);
dp->full_name = build_full_name(dp);
dp->child = build_tree(dp, prom_getchild(node), nextp);
dp->sibling = build_tree(parent, prom_getsibling(node), nextp);
}
return dp;
}
struct device_node *of_console_device;
EXPORT_SYMBOL(of_console_device);
char *of_console_path;
EXPORT_SYMBOL(of_console_path);
char *of_console_options;
EXPORT_SYMBOL(of_console_options);
extern void restore_current(void);
static void __init of_console_init(void)
void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
struct device_node *dp;
@@ -547,20 +286,10 @@ static void __init of_console_init(void)
printk(msg, of_console_path);
}
void __init prom_build_devicetree(void)
void __init of_fill_in_cpu_data(void)
{
}
void __init irq_trans_init(struct device_node *dp)
{
struct device_node **nextp;
allnodes = create_node(prom_root_node);
allnodes->path_component_name = "";
allnodes->full_name = "/";
nextp = &allnodes->allnext;
allnodes->child = build_tree(allnodes,
prom_getchild(allnodes->node),
&nextp);
of_console_init();
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
}

571 arch/sparc/kernel/prom_64.c Normal file

@@ -0,0 +1,571 @@
/*
* Procedures for creating, accessing and interpreting the device tree.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc64 by David S. Miller davem@davemloft.net
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/lmb.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/irq.h>
#include <asm/asi.h>
#include <asm/upa.h>
#include <asm/smp.h>
#include "prom.h"
void * __init prom_early_alloc(unsigned long size)
{
unsigned long paddr = lmb_alloc(size, SMP_CACHE_BYTES);
void *ret;
if (!paddr) {
prom_printf("prom_early_alloc(%lu) failed\n");
prom_halt();
}
ret = __va(paddr);
memset(ret, 0, size);
prom_early_allocated += size;
return ret;
}
/* The following routines deal with the black magic of fully naming a
* node.
*
* Certain well known named nodes are just the simple name string.
*
* Actual devices have an address specifier appended to the base name
* string, like this "foo@addr". The "addr" can be in any number of
* formats, and the platform plus the type of the node determine the
* format and how it is constructed.
*
* For children of the ROOT node, the naming convention is fixed and
* determined by whether this is a sun4u or sun4v system.
*
* For children of other nodes, it is bus type specific. So
* we walk up the tree until we discover a "device_type" property
* we recognize and we go from there.
*
* As an example, the boot device on my workstation has a full path:
*
* /pci@1e,600000/ide@d/disk@0,0:c
*/
static void __init sun4v_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom64_registers *regs;
struct property *rprop;
u32 high_bits, low_bits, type;
rprop = of_find_property(dp, "reg", NULL);
if (!rprop)
return;
regs = rprop->value;
if (!is_root_node(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
}
type = regs->phys_addr >> 60UL;
high_bits = (regs->phys_addr >> 32UL) & 0x0fffffffUL;
low_bits = (regs->phys_addr & 0xffffffffUL);
if (type == 0 || type == 8) {
const char *prefix = (type == 0) ? "m" : "i";
if (low_bits)
sprintf(tmp_buf, "%s@%s%x,%x",
dp->name, prefix,
high_bits, low_bits);
else
sprintf(tmp_buf, "%s@%s%x",
dp->name,
prefix,
high_bits);
} else if (type == 12) {
sprintf(tmp_buf, "%s@%x",
dp->name, high_bits);
}
}
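/* Worked example (illustrative): a root child named "memory" with a
 * 64-bit reg value of 0x0000000080000000 has type 0 ("m" prefix),
 * high_bits 0 and low_bits 0x80000000, and is therefore rendered as
 * "memory@m0,80000000".
 */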
static void __init sun4u_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom64_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
if (!is_root_node(dp->parent)) {
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
return;
}
prop = of_find_property(dp, "upa-portid", NULL);
if (!prop)
prop = of_find_property(dp, "portid", NULL);
if (prop) {
unsigned long mask = 0xffffffffUL;
if (tlb_type >= cheetah)
mask = 0x7fffff;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
*(u32 *)prop->value,
(unsigned int) (regs->phys_addr & mask));
}
}
/* "name@slot,offset" */
static void __init sbus_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
regs->which_io,
regs->phys_addr);
}
/* "name@devnum[,func]" */
static void __init pci_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom_pci_registers *regs;
struct property *prop;
unsigned int devfn;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
devfn = (regs->phys_hi >> 8) & 0xff;
if (devfn & 0x07) {
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
devfn >> 3,
devfn & 0x07);
} else {
sprintf(tmp_buf, "%s@%x",
dp->name,
devfn >> 3);
}
}
/* "name@UPA_PORTID,offset" */
static void __init upa_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom64_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
prop = of_find_property(dp, "upa-portid", NULL);
if (!prop)
return;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
*(u32 *) prop->value,
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
/* "name@reg" */
static void __init vdev_path_component(struct device_node *dp, char *tmp_buf)
{
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x", dp->name, *regs);
}
/* "name@addrhi,addrlo" */
static void __init ebus_path_component(struct device_node *dp, char *tmp_buf)
{
struct linux_prom64_registers *regs;
struct property *prop;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
sprintf(tmp_buf, "%s@%x,%x",
dp->name,
(unsigned int) (regs->phys_addr >> 32UL),
(unsigned int) (regs->phys_addr & 0xffffffffUL));
}
/* "name@bus,addr" */
static void __init i2c_path_component(struct device_node *dp, char *tmp_buf)
{
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
/* This actually isn't right... should look at the #address-cells
* property of the i2c bus node etc. etc.
*/
sprintf(tmp_buf, "%s@%x,%x",
dp->name, regs[0], regs[1]);
}
/* "name@reg0[,reg1]" */
static void __init usb_path_component(struct device_node *dp, char *tmp_buf)
{
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
if (prop->length == sizeof(u32) || regs[1] == 1) {
sprintf(tmp_buf, "%s@%x",
dp->name, regs[0]);
} else {
sprintf(tmp_buf, "%s@%x,%x",
dp->name, regs[0], regs[1]);
}
}
/* "name@reg0reg1[,reg2reg3]" */
static void __init ieee1394_path_component(struct device_node *dp, char *tmp_buf)
{
struct property *prop;
u32 *regs;
prop = of_find_property(dp, "reg", NULL);
if (!prop)
return;
regs = prop->value;
if (regs[2] || regs[3]) {
sprintf(tmp_buf, "%s@%08x%08x,%04x%08x",
dp->name, regs[0], regs[1], regs[2], regs[3]);
} else {
sprintf(tmp_buf, "%s@%08x%08x",
dp->name, regs[0], regs[1]);
}
}
static void __init __build_path_component(struct device_node *dp, char *tmp_buf)
{
struct device_node *parent = dp->parent;
if (parent != NULL) {
if (!strcmp(parent->type, "pci") ||
!strcmp(parent->type, "pciex")) {
pci_path_component(dp, tmp_buf);
return;
}
if (!strcmp(parent->type, "sbus")) {
sbus_path_component(dp, tmp_buf);
return;
}
if (!strcmp(parent->type, "upa")) {
upa_path_component(dp, tmp_buf);
return;
}
if (!strcmp(parent->type, "ebus")) {
ebus_path_component(dp, tmp_buf);
return;
}
if (!strcmp(parent->name, "usb") ||
!strcmp(parent->name, "hub")) {
usb_path_component(dp, tmp_buf);
return;
}
if (!strcmp(parent->type, "i2c")) {
i2c_path_component(dp, tmp_buf);
return;
}
if (!strcmp(parent->type, "firewire")) {
ieee1394_path_component(dp, tmp_buf);
return;
}
if (!strcmp(parent->type, "virtual-devices")) {
vdev_path_component(dp, tmp_buf);
return;
}
/* "isa" is handled with platform naming */
}
/* Use platform naming convention. */
if (tlb_type == hypervisor) {
sun4v_path_component(dp, tmp_buf);
return;
} else {
sun4u_path_component(dp, tmp_buf);
}
}
char * __init build_path_component(struct device_node *dp)
{
char tmp_buf[64], *n;
tmp_buf[0] = '\0';
__build_path_component(dp, tmp_buf);
if (tmp_buf[0] == '\0')
strcpy(tmp_buf, dp->name);
n = prom_early_alloc(strlen(tmp_buf) + 1);
strcpy(n, tmp_buf);
return n;
}
static const char *get_mid_prop(void)
{
return (tlb_type == spitfire ? "upa-portid" : "portid");
}
struct device_node *of_find_node_by_cpuid(int cpuid)
{
struct device_node *dp;
const char *mid_prop = get_mid_prop();
for_each_node_by_type(dp, "cpu") {
int id = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
if (id < 0) {
this_mid_prop = "cpuid";
id = of_getintprop_default(dp, this_mid_prop, -1);
}
if (id < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
if (cpuid == id)
return dp;
}
return NULL;
}
void __init of_fill_in_cpu_data(void)
{
struct device_node *dp;
const char *mid_prop;
if (tlb_type == hypervisor)
return;
mid_prop = get_mid_prop();
ncpus_probed = 0;
for_each_node_by_type(dp, "cpu") {
int cpuid = of_getintprop_default(dp, mid_prop, -1);
const char *this_mid_prop = mid_prop;
struct device_node *portid_parent;
int portid = -1;
portid_parent = NULL;
if (cpuid < 0) {
this_mid_prop = "cpuid";
cpuid = of_getintprop_default(dp, this_mid_prop, -1);
if (cpuid >= 0) {
int limit = 2;
portid_parent = dp;
while (limit--) {
portid_parent = portid_parent->parent;
if (!portid_parent)
break;
portid = of_getintprop_default(portid_parent,
"portid", -1);
if (portid >= 0)
break;
}
}
}
if (cpuid < 0) {
prom_printf("OF: Serious problem, cpu lacks "
"%s property", this_mid_prop);
prom_halt();
}
ncpus_probed++;
#ifdef CONFIG_SMP
if (cpuid >= NR_CPUS) {
printk(KERN_WARNING "Ignoring CPU %d which is "
">= NR_CPUS (%d)\n",
cpuid, NR_CPUS);
continue;
}
#else
/* On uniprocessor we only want the values for the
* real physical cpu the kernel booted onto, however
* cpu_data() only has one entry at index 0.
*/
if (cpuid != real_hard_smp_processor_id())
continue;
cpuid = 0;
#endif
cpu_data(cpuid).clock_tick =
of_getintprop_default(dp, "clock-frequency", 0);
if (portid_parent) {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "l1-dcache-size",
16 * 1024);
cpu_data(cpuid).dcache_line_size =
of_getintprop_default(dp, "l1-dcache-line-size",
32);
cpu_data(cpuid).icache_size =
of_getintprop_default(dp, "l1-icache-size",
8 * 1024);
cpu_data(cpuid).icache_line_size =
of_getintprop_default(dp, "l1-icache-line-size",
32);
cpu_data(cpuid).ecache_size =
of_getintprop_default(dp, "l2-cache-size", 0);
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(dp, "l2-cache-line-size", 0);
if (!cpu_data(cpuid).ecache_size ||
!cpu_data(cpuid).ecache_line_size) {
cpu_data(cpuid).ecache_size =
of_getintprop_default(portid_parent,
"l2-cache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(portid_parent,
"l2-cache-line-size", 64);
}
cpu_data(cpuid).core_id = portid + 1;
cpu_data(cpuid).proc_id = portid;
#ifdef CONFIG_SMP
sparc64_multi_core = 1;
#endif
} else {
cpu_data(cpuid).dcache_size =
of_getintprop_default(dp, "dcache-size", 16 * 1024);
cpu_data(cpuid).dcache_line_size =
of_getintprop_default(dp, "dcache-line-size", 32);
cpu_data(cpuid).icache_size =
of_getintprop_default(dp, "icache-size", 16 * 1024);
cpu_data(cpuid).icache_line_size =
of_getintprop_default(dp, "icache-line-size", 32);
cpu_data(cpuid).ecache_size =
of_getintprop_default(dp, "ecache-size",
(4 * 1024 * 1024));
cpu_data(cpuid).ecache_line_size =
of_getintprop_default(dp, "ecache-line-size", 64);
cpu_data(cpuid).core_id = 0;
cpu_data(cpuid).proc_id = -1;
}
#ifdef CONFIG_SMP
cpu_set(cpuid, cpu_present_map);
cpu_set(cpuid, cpu_possible_map);
#endif
}
smp_fill_in_sib_core_maps();
}
void __init of_console_init(void)
{
char *msg = "OF stdout device is: %s\n";
struct device_node *dp;
const char *type;
phandle node;
of_console_path = prom_early_alloc(256);
if (prom_ihandle2path(prom_stdout, of_console_path, 256) < 0) {
prom_printf("Cannot obtain path of stdout.\n");
prom_halt();
}
of_console_options = strrchr(of_console_path, ':');
if (of_console_options) {
of_console_options++;
if (*of_console_options == '\0')
of_console_options = NULL;
}
node = prom_inst2pkg(prom_stdout);
if (!node) {
prom_printf("Cannot resolve stdout node from "
"instance %08x.\n", prom_stdout);
prom_halt();
}
dp = of_find_node_by_phandle(node);
type = of_get_property(dp, "device_type", NULL);
if (!type) {
prom_printf("Console stdout lacks device_type property.\n");
prom_halt();
}
if (strcmp(type, "display") && strcmp(type, "serial")) {
prom_printf("Console device_type is neither display "
"nor serial.\n");
prom_halt();
}
of_console_device = dp;
printk(msg, of_console_path);
}


@@ -0,0 +1,326 @@
/* prom_common.c: OF device tree support common code.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996-2005 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* Adapted for sparc by David S. Miller davem@davemloft.net
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include "prom.h"
struct device_node *of_console_device;
EXPORT_SYMBOL(of_console_device);
char *of_console_path;
EXPORT_SYMBOL(of_console_path);
char *of_console_options;
EXPORT_SYMBOL(of_console_options);
struct device_node *of_find_node_by_phandle(phandle handle)
{
struct device_node *np;
for (np = allnodes; np; np = np->allnext)
if (np->node == handle)
break;
return np;
}
EXPORT_SYMBOL(of_find_node_by_phandle);
int of_getintprop_default(struct device_node *np, const char *name, int def)
{
struct property *prop;
int len;
prop = of_find_property(np, name, &len);
if (!prop || len != 4)
return def;
return *(int *) prop->value;
}
EXPORT_SYMBOL(of_getintprop_default);
DEFINE_MUTEX(of_set_property_mutex);
EXPORT_SYMBOL(of_set_property_mutex);
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
struct property **prevp;
void *new_val;
int err;
new_val = kmalloc(len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
memcpy(new_val, val, len);
err = -ENODEV;
write_lock(&devtree_lock);
prevp = &dp->properties;
while (*prevp) {
struct property *prop = *prevp;
if (!strcasecmp(prop->name, name)) {
void *old_val = prop->value;
int ret;
mutex_lock(&of_set_property_mutex);
ret = prom_setprop(dp->node, name, val, len);
mutex_unlock(&of_set_property_mutex);
err = -EINVAL;
if (ret >= 0) {
prop->value = new_val;
prop->length = len;
if (OF_IS_DYNAMIC(prop))
kfree(old_val);
OF_MARK_DYNAMIC(prop);
err = 0;
}
break;
}
prevp = &(*prevp)->next;
}
write_unlock(&devtree_lock);
/* XXX Update procfs if necessary... */
return err;
}
EXPORT_SYMBOL(of_set_property);
int of_find_in_proplist(const char *list, const char *match, int len)
{
while (len > 0) {
int l;
if (!strcmp(list, match))
return 1;
l = strlen(list) + 1;
list += l;
len -= l;
}
return 0;
}
EXPORT_SYMBOL(of_find_in_proplist);
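/* Usage sketch (illustrative): "compatible" properties are
 * NUL-separated string lists, so matching is exact per entry:
 *
 *	int len;
 *	const char *list = of_get_property(dp, "compatible", &len);
 *	if (list && of_find_in_proplist(list, "SUNW,schizo", len))
 *		...
 */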
unsigned int prom_unique_id;
static struct property * __init build_one_prop(phandle node, char *prev,
char *special_name,
void *special_val,
int special_len)
{
static struct property *tmp = NULL;
struct property *p;
const char *name;
if (tmp) {
p = tmp;
memset(p, 0, sizeof(*p) + 32);
tmp = NULL;
} else {
p = prom_early_alloc(sizeof(struct property) + 32);
p->unique_id = prom_unique_id++;
}
p->name = (char *) (p + 1);
if (special_name) {
strcpy(p->name, special_name);
p->length = special_len;
p->value = prom_early_alloc(special_len);
memcpy(p->value, special_val, special_len);
} else {
#ifdef CONFIG_SPARC32
if (prev == NULL) {
name = prom_firstprop(node, NULL);
} else {
name = prom_nextprop(node, prev, NULL);
}
#else
if (prev == NULL) {
prom_firstprop(node, p->name);
} else {
prom_nextprop(node, prev, p->name);
}
name = p->name;
#endif
if (strlen(name) == 0) {
tmp = p;
return NULL;
}
#ifdef CONFIG_SPARC32
strcpy(p->name, name);
#endif
p->length = prom_getproplen(node, p->name);
if (p->length <= 0) {
p->length = 0;
} else {
int len;
p->value = prom_early_alloc(p->length + 1);
len = prom_getproperty(node, p->name, p->value,
p->length);
if (len <= 0)
p->length = 0;
((unsigned char *)p->value)[p->length] = '\0';
}
}
return p;
}
static struct property * __init build_prop_list(phandle node)
{
struct property *head, *tail;
head = tail = build_one_prop(node, NULL,
".node", &node, sizeof(node));
tail->next = build_one_prop(node, NULL, NULL, NULL, 0);
tail = tail->next;
while (tail) {
tail->next = build_one_prop(node, tail->name,
NULL, NULL, 0);
tail = tail->next;
}
return head;
}
static char * __init get_one_property(phandle node, const char *name)
{
char *buf = "<NULL>";
int len;
len = prom_getproplen(node, name);
if (len > 0) {
buf = prom_early_alloc(len);
len = prom_getproperty(node, name, buf, len);
}
return buf;
}
static struct device_node * __init prom_create_node(phandle node,
struct device_node *parent)
{
struct device_node *dp;
if (!node)
return NULL;
dp = prom_early_alloc(sizeof(*dp));
dp->unique_id = prom_unique_id++;
dp->parent = parent;
kref_init(&dp->kref);
dp->name = get_one_property(node, "name");
dp->type = get_one_property(node, "device_type");
dp->node = node;
dp->properties = build_prop_list(node);
irq_trans_init(dp);
return dp;
}
static char * __init build_full_name(struct device_node *dp)
{
int len, ourlen, plen;
char *n;
plen = strlen(dp->parent->full_name);
ourlen = strlen(dp->path_component_name);
len = ourlen + plen + 2;
n = prom_early_alloc(len);
strcpy(n, dp->parent->full_name);
if (!is_root_node(dp->parent)) {
strcpy(n + plen, "/");
plen++;
}
strcpy(n + plen, dp->path_component_name);
return n;
}
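/* Example (illustrative): a node whose path_component_name is "ide@d"
 * under a parent with full_name "/pci@1e,600000" gets the full name
 * "/pci@1e,600000/ide@d"; the extra '/' is skipped when the parent is
 * the root node, whose full_name is already "/".
 */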
static struct device_node * __init prom_build_tree(struct device_node *parent,
phandle node,
struct device_node ***nextp)
{
struct device_node *ret = NULL, *prev_sibling = NULL;
struct device_node *dp;
while (1) {
dp = prom_create_node(node, parent);
if (!dp)
break;
if (prev_sibling)
prev_sibling->sibling = dp;
if (!ret)
ret = dp;
prev_sibling = dp;
*(*nextp) = dp;
*nextp = &dp->allnext;
dp->path_component_name = build_path_component(dp);
dp->full_name = build_full_name(dp);
dp->child = prom_build_tree(dp, prom_getchild(node), nextp);
node = prom_getsibling(node);
}
return ret;
}
unsigned int prom_early_allocated __initdata;
void __init prom_build_devicetree(void)
{
struct device_node **nextp;
allnodes = prom_create_node(prom_root_node, NULL);
allnodes->path_component_name = "";
allnodes->full_name = "/";
nextp = &allnodes->allnext;
allnodes->child = prom_build_tree(allnodes,
prom_getchild(allnodes->node),
&nextp);
of_console_init();
printk("PROM: Built device tree with %u bytes of memory.\n",
prom_early_allocated);
of_fill_in_cpu_data();
}


@@ -0,0 +1,842 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>
#include "prom.h"
#ifdef CONFIG_PCI
/* PSYCHO interrupt mapping support. */
#define PSYCHO_IMAP_A_SLOT0 0x0c00UL
#define PSYCHO_IMAP_B_SLOT0 0x0c20UL
static unsigned long psycho_pcislot_imap_offset(unsigned long ino)
{
unsigned int bus = (ino & 0x10) >> 4;
unsigned int slot = (ino & 0x0c) >> 2;
if (bus == 0)
return PSYCHO_IMAP_A_SLOT0 + (slot * 8);
else
return PSYCHO_IMAP_B_SLOT0 + (slot * 8);
}
#define PSYCHO_OBIO_IMAP_BASE 0x1000UL
#define PSYCHO_ONBOARD_IRQ_BASE 0x20
#define psycho_onboard_imap_offset(__ino) \
(PSYCHO_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
#define PSYCHO_ICLR_A_SLOT0 0x1400UL
#define PSYCHO_ICLR_SCSI 0x1800UL
#define psycho_iclr_offset(ino) \
((ino & 0x20) ? (PSYCHO_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
(PSYCHO_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
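/* Worked example (illustrative): onboard INO 0x21 yields
 * imap_off = 0x1000 + ((0x21 & 0x1f) << 3) = 0x1008 and
 * iclr_off = 0x1800 + ((0x21 & 0x1f) << 3) = 0x1808, while PCI INO
 * 0x06 (bus A, slot 1) yields imap_off = 0x0c00 + 1 * 8 = 0x0c08.
 */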
static unsigned int psycho_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
unsigned long controller_regs = (unsigned long) _data;
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
ino &= 0x3f;
if (ino < PSYCHO_ONBOARD_IRQ_BASE) {
/* PCI slot */
imap_off = psycho_pcislot_imap_offset(ino);
} else {
/* Onboard device */
imap_off = psycho_onboard_imap_offset(ino);
}
/* Now build the IRQ bucket. */
imap = controller_regs + imap_off;
iclr_off = psycho_iclr_offset(ino);
iclr = controller_regs + iclr_off;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
return build_irq(inofixup, iclr, imap);
}
static void __init psycho_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = psycho_irq_build;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) regs[2].phys_addr;
}
#define sabre_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
struct sabre_irq_data {
unsigned long controller_regs;
unsigned int pci_first_busno;
};
#define SABRE_CONFIGSPACE 0x001000000UL
#define SABRE_WRSYNC 0x1c20UL
#define SABRE_CONFIG_BASE(CONFIG_SPACE) \
(CONFIG_SPACE | (1UL << 24))
#define SABRE_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 16) | \
((unsigned long)(DEVFN) << 8) | \
((unsigned long)(REG)))
/* When a device lives behind a bridge deeper in the PCI bus topology
* than APB, a special sequence must run to make sure all pending DMA
* transfers at the time of IRQ delivery are visible in the coherency
* domain by the cpu. This sequence is to perform a read on the far
* side of the non-APB bridge, then perform a read of Sabre's DMA
* write-sync register.
*/
static void sabre_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
unsigned int phys_hi = (unsigned int) (unsigned long) _arg1;
struct sabre_irq_data *irq_data = _arg2;
unsigned long controller_regs = irq_data->controller_regs;
unsigned long sync_reg = controller_regs + SABRE_WRSYNC;
unsigned long config_space = controller_regs + SABRE_CONFIGSPACE;
unsigned int bus, devfn;
u16 _unused;
config_space = SABRE_CONFIG_BASE(config_space);
bus = (phys_hi >> 16) & 0xff;
devfn = (phys_hi >> 8) & 0xff;
config_space |= SABRE_CONFIG_ENCODE(bus, devfn, 0x00);
__asm__ __volatile__("membar #Sync\n\t"
"lduha [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (_unused)
: "r" ((u16 *) config_space),
"i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
sabre_read(sync_reg);
}
#define SABRE_IMAP_A_SLOT0 0x0c00UL
#define SABRE_IMAP_B_SLOT0 0x0c20UL
#define SABRE_ICLR_A_SLOT0 0x1400UL
#define SABRE_ICLR_B_SLOT0 0x1480UL
#define SABRE_ICLR_SCSI 0x1800UL
#define SABRE_ICLR_ETH 0x1808UL
#define SABRE_ICLR_BPP 0x1810UL
#define SABRE_ICLR_AU_REC 0x1818UL
#define SABRE_ICLR_AU_PLAY 0x1820UL
#define SABRE_ICLR_PFAIL 0x1828UL
#define SABRE_ICLR_KMS 0x1830UL
#define SABRE_ICLR_FLPY 0x1838UL
#define SABRE_ICLR_SHW 0x1840UL
#define SABRE_ICLR_KBD 0x1848UL
#define SABRE_ICLR_MS 0x1850UL
#define SABRE_ICLR_SER 0x1858UL
#define SABRE_ICLR_UE 0x1870UL
#define SABRE_ICLR_CE 0x1878UL
#define SABRE_ICLR_PCIERR 0x1880UL
static unsigned long sabre_pcislot_imap_offset(unsigned long ino)
{
unsigned int bus = (ino & 0x10) >> 4;
unsigned int slot = (ino & 0x0c) >> 2;
if (bus == 0)
return SABRE_IMAP_A_SLOT0 + (slot * 8);
else
return SABRE_IMAP_B_SLOT0 + (slot * 8);
}
#define SABRE_OBIO_IMAP_BASE 0x1000UL
#define SABRE_ONBOARD_IRQ_BASE 0x20
#define sabre_onboard_imap_offset(__ino) \
(SABRE_OBIO_IMAP_BASE + (((__ino) & 0x1f) << 3))
#define sabre_iclr_offset(ino) \
((ino & 0x20) ? (SABRE_ICLR_SCSI + (((ino) & 0x1f) << 3)) : \
(SABRE_ICLR_A_SLOT0 + (((ino) & 0x1f)<<3)))
static int sabre_device_needs_wsync(struct device_node *dp)
{
struct device_node *parent = dp->parent;
const char *parent_model, *parent_compat;
/* This traversal up towards the root is meant to
* handle two cases:
*
* 1) non-PCI bus sitting under PCI, such as 'ebus'
* 2) the PCI controller interrupts themselves, which
* will use the sabre_irq_build but do not need
* the DMA synchronization handling
*/
while (parent) {
if (!strcmp(parent->type, "pci"))
break;
parent = parent->parent;
}
if (!parent)
return 0;
parent_model = of_get_property(parent,
"model", NULL);
if (parent_model &&
(!strcmp(parent_model, "SUNW,sabre") ||
!strcmp(parent_model, "SUNW,simba")))
return 0;
parent_compat = of_get_property(parent,
"compatible", NULL);
if (parent_compat &&
(!strcmp(parent_compat, "pci108e,a000") ||
!strcmp(parent_compat, "pci108e,a001")))
return 0;
return 1;
}
static unsigned int sabre_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct sabre_irq_data *irq_data = _data;
unsigned long controller_regs = irq_data->controller_regs;
const struct linux_prom_pci_registers *regs;
unsigned long imap, iclr;
unsigned long imap_off, iclr_off;
int inofixup = 0;
int virt_irq;
ino &= 0x3f;
if (ino < SABRE_ONBOARD_IRQ_BASE) {
/* PCI slot */
imap_off = sabre_pcislot_imap_offset(ino);
} else {
/* onboard device */
imap_off = sabre_onboard_imap_offset(ino);
}
/* Now build the IRQ bucket. */
imap = controller_regs + imap_off;
iclr_off = sabre_iclr_offset(ino);
iclr = controller_regs + iclr_off;
if ((ino & 0x20) == 0)
inofixup = ino & 0x03;
virt_irq = build_irq(inofixup, iclr, imap);
/* If the parent device is a PCI<->PCI bridge other than
* APB, we have to install a pre-handler to ensure that
* all pending DMA is drained before the interrupt handler
* is run.
*/
regs = of_get_property(dp, "reg", NULL);
if (regs && sabre_device_needs_wsync(dp)) {
irq_install_pre_handler(virt_irq,
sabre_wsync_handler,
(void *) (long) regs->phys_hi,
(void *) irq_data);
}
return virt_irq;
}
static void __init sabre_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
struct sabre_irq_data *irq_data;
const u32 *busrange;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sabre_irq_build;
irq_data = prom_early_alloc(sizeof(struct sabre_irq_data));
regs = of_get_property(dp, "reg", NULL);
irq_data->controller_regs = regs[0].phys_addr;
busrange = of_get_property(dp, "bus-range", NULL);
irq_data->pci_first_busno = busrange[0];
dp->irq_trans->data = irq_data;
}
/* SCHIZO interrupt mapping support. Unlike Psycho, for this controller the
* imap/iclr registers are per-PBM.
*/
#define SCHIZO_IMAP_BASE 0x1000UL
#define SCHIZO_ICLR_BASE 0x1400UL
static unsigned long schizo_imap_offset(unsigned long ino)
{
return SCHIZO_IMAP_BASE + (ino * 8UL);
}
static unsigned long schizo_iclr_offset(unsigned long ino)
{
return SCHIZO_ICLR_BASE + (ino * 8UL);
}
static unsigned long schizo_ino_to_iclr(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + schizo_iclr_offset(ino);
}
static unsigned long schizo_ino_to_imap(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + schizo_imap_offset(ino);
}
#define schizo_read(__reg) \
({ u64 __ret; \
__asm__ __volatile__("ldxa [%1] %2, %0" \
: "=r" (__ret) \
: "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
: "memory"); \
__ret; \
})
#define schizo_write(__reg, __val) \
__asm__ __volatile__("stxa %0, [%1] %2" \
: /* no outputs */ \
: "r" (__val), "r" (__reg), \
"i" (ASI_PHYS_BYPASS_EC_E) \
: "memory")
static void tomatillo_wsync_handler(unsigned int ino, void *_arg1, void *_arg2)
{
unsigned long sync_reg = (unsigned long) _arg2;
u64 mask = 1UL << (ino & IMAP_INO);
u64 val;
int limit;
schizo_write(sync_reg, mask);
limit = 100000;
val = 0;
while (--limit) {
val = schizo_read(sync_reg);
if (!(val & mask))
break;
}
if (limit <= 0) {
printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
val, mask);
}
if (_arg1) {
static unsigned char cacheline[64]
__attribute__ ((aligned (64)));
__asm__ __volatile__("rd %%fprs, %0\n\t"
"or %0, %4, %1\n\t"
"wr %1, 0x0, %%fprs\n\t"
"stda %%f0, [%5] %6\n\t"
"wr %0, 0x0, %%fprs\n\t"
"membar #Sync"
: "=&r" (mask), "=&r" (val)
: "0" (mask), "1" (val),
"i" (FPRS_FEF), "r" (&cacheline[0]),
"i" (ASI_BLK_COMMIT_P));
}
}
struct schizo_irq_data {
unsigned long pbm_regs;
unsigned long sync_reg;
u32 portid;
int chip_version;
};
static unsigned int schizo_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct schizo_irq_data *irq_data = _data;
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
int ign_fixup;
int virt_irq;
int is_tomatillo;
ino &= 0x3f;
/* Now build the IRQ bucket. */
imap = schizo_ino_to_imap(pbm_regs, ino);
iclr = schizo_ino_to_iclr(pbm_regs, ino);
/* On Schizo, no inofixup occurs. This is because each
 * INO has its own IMAP register. On Psycho and Sabre
* there is only one IMAP register for each PCI slot even
* though four different INOs can be generated by each
* PCI slot.
*
* But, for JBUS variants (essentially, Tomatillo), we have
* to fixup the lowest bit of the interrupt group number.
*/
ign_fixup = 0;
is_tomatillo = (irq_data->sync_reg != 0UL);
if (is_tomatillo) {
if (irq_data->portid & 1)
ign_fixup = (1 << 6);
}
virt_irq = build_irq(ign_fixup, iclr, imap);
if (is_tomatillo) {
irq_install_pre_handler(virt_irq,
tomatillo_wsync_handler,
((irq_data->chip_version <= 4) ?
(void *) 1 : (void *) 0),
(void *) irq_data->sync_reg);
}
return virt_irq;
}
static void __init __schizo_irq_trans_init(struct device_node *dp,
int is_tomatillo)
{
const struct linux_prom64_registers *regs;
struct schizo_irq_data *irq_data;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = schizo_irq_build;
irq_data = prom_early_alloc(sizeof(struct schizo_irq_data));
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = irq_data;
irq_data->pbm_regs = regs[0].phys_addr;
if (is_tomatillo)
irq_data->sync_reg = regs[3].phys_addr + 0x1a18UL;
else
irq_data->sync_reg = 0UL;
irq_data->portid = of_getintprop_default(dp, "portid", 0);
irq_data->chip_version = of_getintprop_default(dp, "version#", 0);
}
static void __init schizo_irq_trans_init(struct device_node *dp)
{
__schizo_irq_trans_init(dp, 0);
}
static void __init tomatillo_irq_trans_init(struct device_node *dp)
{
__schizo_irq_trans_init(dp, 1);
}
static unsigned int pci_sun4v_irq_build(struct device_node *dp,
unsigned int devino,
void *_data)
{
u32 devhandle = (u32) (unsigned long) _data;
return sun4v_build_irq(devhandle, devino);
}
static void __init pci_sun4v_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = pci_sun4v_irq_build;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) (unsigned long)
((regs->phys_addr >> 32UL) & 0x0fffffff);
}
struct fire_irq_data {
unsigned long pbm_regs;
u32 portid;
};
#define FIRE_IMAP_BASE 0x001000
#define FIRE_ICLR_BASE 0x001400
static unsigned long fire_imap_offset(unsigned long ino)
{
return FIRE_IMAP_BASE + (ino * 8UL);
}
static unsigned long fire_iclr_offset(unsigned long ino)
{
return FIRE_ICLR_BASE + (ino * 8UL);
}
static unsigned long fire_ino_to_iclr(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + fire_iclr_offset(ino);
}
static unsigned long fire_ino_to_imap(unsigned long pbm_regs,
unsigned int ino)
{
return pbm_regs + fire_imap_offset(ino);
}
static unsigned int fire_irq_build(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct fire_irq_data *irq_data = _data;
unsigned long pbm_regs = irq_data->pbm_regs;
unsigned long imap, iclr;
unsigned long int_ctrlr;
ino &= 0x3f;
/* Now build the IRQ bucket. */
imap = fire_ino_to_imap(pbm_regs, ino);
iclr = fire_ino_to_iclr(pbm_regs, ino);
/* Set the interrupt controller number. */
int_ctrlr = 1 << 6;
upa_writeq(int_ctrlr, imap);
/* The interrupt map registers do not have an INO field
* like other chips do. They return zero in the INO
* field, and the interrupt controller number is controlled
* in bits 6 to 9. So in order for build_irq() to get
* the INO right we pass it in as part of the fixup
* which will get added to the map register zero value
* read by build_irq().
*/
ino |= (irq_data->portid << 6);
ino -= int_ctrlr;
return build_irq(ino, iclr, imap);
}
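/* Worked example (illustrative): with portid 2 and devino 0x10 the
 * fixup passed to build_irq() is ((2 << 6) | 0x10) - 0x40 = 0x50;
 * build_irq() then adds the 0x40 controller number it reads back from
 * the map register, reconstructing the full INO (2 << 6) | 0x10 = 0x90.
 */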
static void __init fire_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
struct fire_irq_data *irq_data;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = fire_irq_build;
irq_data = prom_early_alloc(sizeof(struct fire_irq_data));
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = irq_data;
irq_data->pbm_regs = regs[0].phys_addr;
irq_data->portid = of_getintprop_default(dp, "portid", 0);
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_SBUS
/* INO number to IMAP register offset for SYSIO external IRQ's.
* This should conform to both Sunfire/Wildfire server and Fusion
* desktop designs.
*/
#define SYSIO_IMAP_SLOT0 0x2c00UL
#define SYSIO_IMAP_SLOT1 0x2c08UL
#define SYSIO_IMAP_SLOT2 0x2c10UL
#define SYSIO_IMAP_SLOT3 0x2c18UL
#define SYSIO_IMAP_SCSI 0x3000UL
#define SYSIO_IMAP_ETH 0x3008UL
#define SYSIO_IMAP_BPP 0x3010UL
#define SYSIO_IMAP_AUDIO 0x3018UL
#define SYSIO_IMAP_PFAIL 0x3020UL
#define SYSIO_IMAP_KMS 0x3028UL
#define SYSIO_IMAP_FLPY 0x3030UL
#define SYSIO_IMAP_SHW 0x3038UL
#define SYSIO_IMAP_KBD 0x3040UL
#define SYSIO_IMAP_MS 0x3048UL
#define SYSIO_IMAP_SER 0x3050UL
#define SYSIO_IMAP_TIM0 0x3060UL
#define SYSIO_IMAP_TIM1 0x3068UL
#define SYSIO_IMAP_UE 0x3070UL
#define SYSIO_IMAP_CE 0x3078UL
#define SYSIO_IMAP_SBERR 0x3080UL
#define SYSIO_IMAP_PMGMT 0x3088UL
#define SYSIO_IMAP_GFX 0x3090UL
#define SYSIO_IMAP_EUPA 0x3098UL
#define bogon ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
/* SBUS Slot 0 --> 3, level 1 --> 7 */
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
/* Onboard devices (not relevant/used on SunFire). */
SYSIO_IMAP_SCSI,
SYSIO_IMAP_ETH,
SYSIO_IMAP_BPP,
bogon,
SYSIO_IMAP_AUDIO,
SYSIO_IMAP_PFAIL,
bogon,
bogon,
SYSIO_IMAP_KMS,
SYSIO_IMAP_FLPY,
SYSIO_IMAP_SHW,
SYSIO_IMAP_KBD,
SYSIO_IMAP_MS,
SYSIO_IMAP_SER,
bogon,
bogon,
SYSIO_IMAP_TIM0,
SYSIO_IMAP_TIM1,
bogon,
bogon,
SYSIO_IMAP_UE,
SYSIO_IMAP_CE,
SYSIO_IMAP_SBERR,
SYSIO_IMAP_PMGMT,
SYSIO_IMAP_GFX,
SYSIO_IMAP_EUPA,
};
#undef bogon
#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
/* Convert Interrupt Mapping register pointer to associated
* Interrupt Clear register pointer, SYSIO specific version.
*/
#define SYSIO_ICLR_UNUSED0 0x3400UL
#define SYSIO_ICLR_SLOT0 0x3408UL
#define SYSIO_ICLR_SLOT1 0x3448UL
#define SYSIO_ICLR_SLOT2 0x3488UL
#define SYSIO_ICLR_SLOT3 0x34c8UL
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
return imap + diff;
}
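/* Worked example (illustrative): the fixed IMAP-to-ICLR distance is
 * 0x3400 - 0x2c00 = 0x800, so e.g. SYSIO_IMAP_SCSI at 0x3000 pairs
 * with its ICLR register at 0x3800.
 */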
static unsigned int sbus_of_build_irq(struct device_node *dp,
unsigned int ino,
void *_data)
{
unsigned long reg_base = (unsigned long) _data;
const struct linux_prom_registers *regs;
unsigned long imap, iclr;
int sbus_slot = 0;
int sbus_level = 0;
ino &= 0x3f;
regs = of_get_property(dp, "reg", NULL);
if (regs)
sbus_slot = regs->which_io;
if (ino < 0x20)
ino += (sbus_slot * 8);
imap = sysio_irq_offsets[ino];
if (imap == ((unsigned long)-1)) {
prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
ino);
prom_halt();
}
imap += reg_base;
/* SYSIO inconsistency. For external SLOTS, we have to select
* the right ICLR register based upon the lower SBUS irq level
* bits.
*/
if (ino >= 0x20) {
iclr = sysio_imap_to_iclr(imap);
} else {
sbus_level = ino & 0x7;
switch (sbus_slot) {
case 0:
iclr = reg_base + SYSIO_ICLR_SLOT0;
break;
case 1:
iclr = reg_base + SYSIO_ICLR_SLOT1;
break;
case 2:
iclr = reg_base + SYSIO_ICLR_SLOT2;
break;
default:
case 3:
iclr = reg_base + SYSIO_ICLR_SLOT3;
break;
}
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
return build_irq(sbus_level, iclr, imap);
}
static void __init sbus_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sbus_of_build_irq;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) (unsigned long) regs->phys_addr;
}
#endif /* CONFIG_SBUS */
static unsigned int central_build_irq(struct device_node *dp,
unsigned int ino,
void *_data)
{
struct device_node *central_dp = _data;
struct of_device *central_op = of_find_device_by_node(central_dp);
struct resource *res;
unsigned long imap, iclr;
u32 tmp;
if (!strcmp(dp->name, "eeprom")) {
res = &central_op->resource[5];
} else if (!strcmp(dp->name, "zs")) {
res = &central_op->resource[4];
} else if (!strcmp(dp->name, "clock-board")) {
res = &central_op->resource[3];
} else {
return ino;
}
imap = res->start + 0x00UL;
iclr = res->start + 0x10UL;
/* Set the INO state to idle, and disable. */
upa_writel(0, iclr);
upa_readl(iclr);
tmp = upa_readl(imap);
tmp &= ~0x80000000;
upa_writel(tmp, imap);
return build_irq(0, iclr, imap);
}
static void __init central_irq_trans_init(struct device_node *dp)
{
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = central_build_irq;
dp->irq_trans->data = dp;
}
struct irq_trans {
const char *name;
void (*init)(struct device_node *);
};
#ifdef CONFIG_PCI
static struct irq_trans __initdata pci_irq_trans_table[] = {
{ "SUNW,sabre", sabre_irq_trans_init },
{ "pci108e,a000", sabre_irq_trans_init },
{ "pci108e,a001", sabre_irq_trans_init },
{ "SUNW,psycho", psycho_irq_trans_init },
{ "pci108e,8000", psycho_irq_trans_init },
{ "SUNW,schizo", schizo_irq_trans_init },
{ "pci108e,8001", schizo_irq_trans_init },
{ "SUNW,schizo+", schizo_irq_trans_init },
{ "pci108e,8002", schizo_irq_trans_init },
{ "SUNW,tomatillo", tomatillo_irq_trans_init },
{ "pci108e,a801", tomatillo_irq_trans_init },
{ "SUNW,sun4v-pci", pci_sun4v_irq_trans_init },
{ "pciex108e,80f0", fire_irq_trans_init },
};
#endif
static unsigned int sun4v_vdev_irq_build(struct device_node *dp,
unsigned int devino,
void *_data)
{
u32 devhandle = (u32) (unsigned long) _data;
return sun4v_build_irq(devhandle, devino);
}
static void __init sun4v_vdev_irq_trans_init(struct device_node *dp)
{
const struct linux_prom64_registers *regs;
dp->irq_trans = prom_early_alloc(sizeof(struct of_irq_controller));
dp->irq_trans->irq_build = sun4v_vdev_irq_build;
regs = of_get_property(dp, "reg", NULL);
dp->irq_trans->data = (void *) (unsigned long)
((regs->phys_addr >> 32UL) & 0x0fffffff);
}
void __init irq_trans_init(struct device_node *dp)
{
#ifdef CONFIG_PCI
const char *model;
int i;
#endif
#ifdef CONFIG_PCI
model = of_get_property(dp, "model", NULL);
if (!model)
model = of_get_property(dp, "compatible", NULL);
if (model) {
for (i = 0; i < ARRAY_SIZE(pci_irq_trans_table); i++) {
struct irq_trans *t = &pci_irq_trans_table[i];
if (!strcmp(model, t->name)) {
t->init(dp);
return;
}
}
}
#endif
#ifdef CONFIG_SBUS
if (!strcmp(dp->name, "sbus") ||
!strcmp(dp->name, "sbi")) {
sbus_irq_trans_init(dp);
return;
}
#endif
if (!strcmp(dp->name, "fhc") &&
!strcmp(dp->parent->name, "central")) {
central_irq_trans_init(dp);
return;
}
if (!strcmp(dp->name, "virtual-devices") ||
!strcmp(dp->name, "niu")) {
sun4v_vdev_irq_trans_init(dp);
return;
}
}


@@ -0,0 +1,470 @@
/* psycho_common.c: Code common to PSYCHO and derivative PCI controllers.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/upa.h>
#include "pci_impl.h"
#include "iommu_common.h"
#include "psycho_common.h"
#define PSYCHO_STRBUF_CTRL_DENAB 0x0000000000000002UL
#define PSYCHO_STCERR_WRITE 0x0000000000000002UL
#define PSYCHO_STCERR_READ 0x0000000000000001UL
#define PSYCHO_STCTAG_PPN 0x0fffffff00000000UL
#define PSYCHO_STCTAG_VPN 0x00000000ffffe000UL
#define PSYCHO_STCTAG_VALID 0x0000000000000002UL
#define PSYCHO_STCTAG_WRITE 0x0000000000000001UL
#define PSYCHO_STCLINE_LINDX 0x0000000001e00000UL
#define PSYCHO_STCLINE_SPTR 0x00000000001f8000UL
#define PSYCHO_STCLINE_LADDR 0x0000000000007f00UL
#define PSYCHO_STCLINE_EPTR 0x00000000000000fcUL
#define PSYCHO_STCLINE_VALID 0x0000000000000002UL
#define PSYCHO_STCLINE_FOFN 0x0000000000000001UL
static DEFINE_SPINLOCK(stc_buf_lock);
static unsigned long stc_error_buf[128];
static unsigned long stc_tag_buf[16];
static unsigned long stc_line_buf[16];
static void psycho_check_stc_error(struct pci_pbm_info *pbm)
{
unsigned long err_base, tag_base, line_base;
struct strbuf *strbuf = &pbm->stc;
u64 control;
int i;
if (!strbuf->strbuf_control)
return;
err_base = strbuf->strbuf_err_stat;
tag_base = strbuf->strbuf_tag_diag;
line_base = strbuf->strbuf_line_diag;
spin_lock(&stc_buf_lock);
/* This is __REALLY__ dangerous. When we put the streaming
 * buffer into diagnostic mode to probe its tags and error
* status, we _must_ clear all of the line tag valid bits
* before re-enabling the streaming buffer. If any dirty data
* lives in the STC when we do this, we will end up
* invalidating it before it has a chance to reach main
* memory.
*/
control = upa_readq(strbuf->strbuf_control);
upa_writeq(control | PSYCHO_STRBUF_CTRL_DENAB, strbuf->strbuf_control);
for (i = 0; i < 128; i++) {
u64 val;
val = upa_readq(err_base + (i * 8UL));
upa_writeq(0UL, err_base + (i * 8UL));
stc_error_buf[i] = val;
}
for (i = 0; i < 16; i++) {
stc_tag_buf[i] = upa_readq(tag_base + (i * 8UL));
stc_line_buf[i] = upa_readq(line_base + (i * 8UL));
upa_writeq(0UL, tag_base + (i * 8UL));
upa_writeq(0UL, line_base + (i * 8UL));
}
/* OK, state is logged, exit diagnostic mode. */
upa_writeq(control, strbuf->strbuf_control);
for (i = 0; i < 16; i++) {
int j, saw_error, first, last;
saw_error = 0;
first = i * 8;
last = first + 8;
for (j = first; j < last; j++) {
u64 errval = stc_error_buf[j];
if (errval != 0) {
saw_error++;
printk(KERN_ERR "%s: STC_ERR(%d)[wr(%d)"
"rd(%d)]\n",
pbm->name,
j,
(errval & PSYCHO_STCERR_WRITE) ? 1 : 0,
(errval & PSYCHO_STCERR_READ) ? 1 : 0);
}
}
if (saw_error != 0) {
u64 tagval = stc_tag_buf[i];
u64 lineval = stc_line_buf[i];
printk(KERN_ERR "%s: STC_TAG(%d)[PA(%016lx)VA(%08lx)"
"V(%d)W(%d)]\n",
pbm->name,
i,
((tagval & PSYCHO_STCTAG_PPN) >> 19UL),
(tagval & PSYCHO_STCTAG_VPN),
((tagval & PSYCHO_STCTAG_VALID) ? 1 : 0),
((tagval & PSYCHO_STCTAG_WRITE) ? 1 : 0));
printk(KERN_ERR "%s: STC_LINE(%d)[LIDX(%lx)SP(%lx)"
"LADDR(%lx)EP(%lx)V(%d)FOFN(%d)]\n",
pbm->name,
i,
((lineval & PSYCHO_STCLINE_LINDX) >> 21UL),
((lineval & PSYCHO_STCLINE_SPTR) >> 15UL),
((lineval & PSYCHO_STCLINE_LADDR) >> 8UL),
((lineval & PSYCHO_STCLINE_EPTR) >> 2UL),
((lineval & PSYCHO_STCLINE_VALID) ? 1 : 0),
((lineval & PSYCHO_STCLINE_FOFN) ? 1 : 0));
}
}
spin_unlock(&stc_buf_lock);
}
#define PSYCHO_IOMMU_TAG 0xa580UL
#define PSYCHO_IOMMU_DATA 0xa600UL
static void psycho_record_iommu_tags_and_data(struct pci_pbm_info *pbm,
u64 *tag, u64 *data)
{
int i;
for (i = 0; i < 16; i++) {
unsigned long base = pbm->controller_regs;
unsigned long off = i * 8UL;
tag[i] = upa_readq(base + PSYCHO_IOMMU_TAG+off);
data[i] = upa_readq(base + PSYCHO_IOMMU_DATA+off);
/* Now clear out the entry. */
upa_writeq(0, base + PSYCHO_IOMMU_TAG + off);
upa_writeq(0, base + PSYCHO_IOMMU_DATA + off);
}
}
#define PSYCHO_IOMMU_TAG_ERRSTS (0x3UL << 23UL)
#define PSYCHO_IOMMU_TAG_ERR (0x1UL << 22UL)
#define PSYCHO_IOMMU_TAG_WRITE (0x1UL << 21UL)
#define PSYCHO_IOMMU_TAG_STREAM (0x1UL << 20UL)
#define PSYCHO_IOMMU_TAG_SIZE (0x1UL << 19UL)
#define PSYCHO_IOMMU_TAG_VPAGE 0x7ffffUL
#define PSYCHO_IOMMU_DATA_VALID (1UL << 30UL)
#define PSYCHO_IOMMU_DATA_CACHE (1UL << 28UL)
#define PSYCHO_IOMMU_DATA_PPAGE 0xfffffffUL
static void psycho_dump_iommu_tags_and_data(struct pci_pbm_info *pbm,
u64 *tag, u64 *data)
{
int i;
for (i = 0; i < 16; i++) {
u64 tag_val, data_val;
const char *type_str;
tag_val = tag[i];
if (!(tag_val & PSYCHO_IOMMU_TAG_ERR))
continue;
data_val = data[i];
switch((tag_val & PSYCHO_IOMMU_TAG_ERRSTS) >> 23UL) {
case 0:
type_str = "Protection Error";
break;
case 1:
type_str = "Invalid Error";
break;
case 2:
type_str = "TimeOut Error";
break;
case 3:
default:
type_str = "ECC Error";
break;
}
printk(KERN_ERR "%s: IOMMU TAG(%d)[error(%s) wr(%d) "
"str(%d) sz(%dK) vpg(%08lx)]\n",
pbm->name, i, type_str,
((tag_val & PSYCHO_IOMMU_TAG_WRITE) ? 1 : 0),
((tag_val & PSYCHO_IOMMU_TAG_STREAM) ? 1 : 0),
((tag_val & PSYCHO_IOMMU_TAG_SIZE) ? 64 : 8),
(tag_val & PSYCHO_IOMMU_TAG_VPAGE) << IOMMU_PAGE_SHIFT);
printk(KERN_ERR "%s: IOMMU DATA(%d)[valid(%d) cache(%d) "
"ppg(%016lx)]\n",
pbm->name, i,
((data_val & PSYCHO_IOMMU_DATA_VALID) ? 1 : 0),
((data_val & PSYCHO_IOMMU_DATA_CACHE) ? 1 : 0),
(data_val & PSYCHO_IOMMU_DATA_PPAGE)<<IOMMU_PAGE_SHIFT);
}
}
#define PSYCHO_IOMMU_CTRL_XLTESTAT 0x0000000006000000UL
#define PSYCHO_IOMMU_CTRL_XLTEERR 0x0000000001000000UL
void psycho_check_iommu_error(struct pci_pbm_info *pbm,
unsigned long afsr,
unsigned long afar,
enum psycho_error_type type)
{
u64 control, iommu_tag[16], iommu_data[16];
struct iommu *iommu = pbm->iommu;
unsigned long flags;
spin_lock_irqsave(&iommu->lock, flags);
control = upa_readq(iommu->iommu_control);
if (control & PSYCHO_IOMMU_CTRL_XLTEERR) {
const char *type_str;
control &= ~PSYCHO_IOMMU_CTRL_XLTEERR;
upa_writeq(control, iommu->iommu_control);
switch ((control & PSYCHO_IOMMU_CTRL_XLTESTAT) >> 25UL) {
case 0:
type_str = "Protection Error";
break;
case 1:
type_str = "Invalid Error";
break;
case 2:
type_str = "TimeOut Error";
break;
case 3:
default:
type_str = "ECC Error";
break;
}
printk(KERN_ERR "%s: IOMMU Error, type[%s]\n",
pbm->name, type_str);
/* It is very possible for another DVMA to occur while
* we do this probe, and corrupt the system further.
* But we are so screwed at this point that we are
* likely to crash hard anyway, so get as much
* diagnostic information to the console as we can.
*/
psycho_record_iommu_tags_and_data(pbm, iommu_tag, iommu_data);
psycho_dump_iommu_tags_and_data(pbm, iommu_tag, iommu_data);
}
psycho_check_stc_error(pbm);
spin_unlock_irqrestore(&iommu->lock, flags);
}
#define PSYCHO_PCICTRL_SBH_ERR 0x0000000800000000UL
#define PSYCHO_PCICTRL_SERR 0x0000000400000000UL
static irqreturn_t psycho_pcierr_intr_other(struct pci_pbm_info *pbm)
{
irqreturn_t ret = IRQ_NONE;
u64 csr, csr_error_bits;
u16 stat, *addr;
csr = upa_readq(pbm->pci_csr);
csr_error_bits = csr & (PSYCHO_PCICTRL_SBH_ERR | PSYCHO_PCICTRL_SERR);
if (csr_error_bits) {
/* Clear the errors. */
upa_writeq(csr, pbm->pci_csr);
/* Log 'em. */
if (csr_error_bits & PSYCHO_PCICTRL_SBH_ERR)
printk(KERN_ERR "%s: PCI streaming byte hole "
"error asserted.\n", pbm->name);
if (csr_error_bits & PSYCHO_PCICTRL_SERR)
printk(KERN_ERR "%s: PCI SERR signal asserted.\n",
pbm->name);
ret = IRQ_HANDLED;
}
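/* Also latch the host bridge's own PCI_STATUS and write the set
 * bits back to clear them.
 */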
addr = psycho_pci_config_mkaddr(pbm, pbm->pci_first_busno,
0, PCI_STATUS);
pci_config_read16(addr, &stat);
if (stat & (PCI_STATUS_PARITY |
PCI_STATUS_SIG_TARGET_ABORT |
PCI_STATUS_REC_TARGET_ABORT |
PCI_STATUS_REC_MASTER_ABORT |
PCI_STATUS_SIG_SYSTEM_ERROR)) {
printk(KERN_ERR "%s: PCI bus error, PCI_STATUS[%04x]\n",
pbm->name, stat);
pci_config_write16(addr, 0xffff);
ret = IRQ_HANDLED;
}
return ret;
}
#define PSYCHO_PCIAFSR_PMA 0x8000000000000000UL
#define PSYCHO_PCIAFSR_PTA 0x4000000000000000UL
#define PSYCHO_PCIAFSR_PRTRY 0x2000000000000000UL
#define PSYCHO_PCIAFSR_PPERR 0x1000000000000000UL
#define PSYCHO_PCIAFSR_SMA 0x0800000000000000UL
#define PSYCHO_PCIAFSR_STA 0x0400000000000000UL
#define PSYCHO_PCIAFSR_SRTRY 0x0200000000000000UL
#define PSYCHO_PCIAFSR_SPERR 0x0100000000000000UL
#define PSYCHO_PCIAFSR_RESV1 0x00ff000000000000UL
#define PSYCHO_PCIAFSR_BMSK 0x0000ffff00000000UL
#define PSYCHO_PCIAFSR_BLK 0x0000000080000000UL
#define PSYCHO_PCIAFSR_RESV2 0x0000000040000000UL
#define PSYCHO_PCIAFSR_MID 0x000000003e000000UL
#define PSYCHO_PCIAFSR_RESV3 0x0000000001ffffffUL
irqreturn_t psycho_pcierr_intr(int irq, void *dev_id)
{
struct pci_pbm_info *pbm = dev_id;
u64 afsr, afar, error_bits;
int reported;
afsr = upa_readq(pbm->pci_afsr);
afar = upa_readq(pbm->pci_afar);
error_bits = afsr &
(PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_PTA |
PSYCHO_PCIAFSR_PRTRY | PSYCHO_PCIAFSR_PPERR |
PSYCHO_PCIAFSR_SMA | PSYCHO_PCIAFSR_STA |
PSYCHO_PCIAFSR_SRTRY | PSYCHO_PCIAFSR_SPERR);
if (!error_bits)
return psycho_pcierr_intr_other(pbm);
upa_writeq(error_bits, pbm->pci_afsr);
printk(KERN_ERR "%s: PCI Error, primary error type[%s]\n",
pbm->name,
(((error_bits & PSYCHO_PCIAFSR_PMA) ?
"Master Abort" :
((error_bits & PSYCHO_PCIAFSR_PTA) ?
"Target Abort" :
((error_bits & PSYCHO_PCIAFSR_PRTRY) ?
"Excessive Retries" :
((error_bits & PSYCHO_PCIAFSR_PPERR) ?
"Parity Error" : "???"))))));
printk(KERN_ERR "%s: bytemask[%04lx] UPA_MID[%02lx] was_block(%d)\n",
pbm->name,
(afsr & PSYCHO_PCIAFSR_BMSK) >> 32UL,
(afsr & PSYCHO_PCIAFSR_MID) >> 25UL,
(afsr & PSYCHO_PCIAFSR_BLK) ? 1 : 0);
printk(KERN_ERR "%s: PCI AFAR [%016lx]\n", pbm->name, afar);
printk(KERN_ERR "%s: PCI Secondary errors [", pbm->name);
reported = 0;
if (afsr & PSYCHO_PCIAFSR_SMA) {
reported++;
printk("(Master Abort)");
}
if (afsr & PSYCHO_PCIAFSR_STA) {
reported++;
printk("(Target Abort)");
}
if (afsr & PSYCHO_PCIAFSR_SRTRY) {
reported++;
printk("(Excessive Retries)");
}
if (afsr & PSYCHO_PCIAFSR_SPERR) {
reported++;
printk("(Parity Error)");
}
if (!reported)
printk("(none)");
printk("]\n");
if (error_bits & (PSYCHO_PCIAFSR_PTA | PSYCHO_PCIAFSR_STA)) {
psycho_check_iommu_error(pbm, afsr, afar, PCI_ERR);
pci_scan_for_target_abort(pbm, pbm->pci_bus);
}
if (error_bits & (PSYCHO_PCIAFSR_PMA | PSYCHO_PCIAFSR_SMA))
pci_scan_for_master_abort(pbm, pbm->pci_bus);
if (error_bits & (PSYCHO_PCIAFSR_PPERR | PSYCHO_PCIAFSR_SPERR))
pci_scan_for_parity_error(pbm, pbm->pci_bus);
return IRQ_HANDLED;
}
static void psycho_iommu_flush(struct pci_pbm_info *pbm)
{
int i;
for (i = 0; i < 16; i++) {
unsigned long off = i * 8;
upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_TAG + off);
upa_writeq(0, pbm->controller_regs + PSYCHO_IOMMU_DATA + off);
}
}
#define PSYCHO_IOMMU_CONTROL 0x0200UL
#define PSYCHO_IOMMU_CTRL_TSBSZ 0x0000000000070000UL
#define PSYCHO_IOMMU_TSBSZ_1K 0x0000000000000000UL
#define PSYCHO_IOMMU_TSBSZ_2K 0x0000000000010000UL
#define PSYCHO_IOMMU_TSBSZ_4K 0x0000000000020000UL
#define PSYCHO_IOMMU_TSBSZ_8K 0x0000000000030000UL
#define PSYCHO_IOMMU_TSBSZ_16K 0x0000000000040000UL
#define PSYCHO_IOMMU_TSBSZ_32K 0x0000000000050000UL
#define PSYCHO_IOMMU_TSBSZ_64K 0x0000000000060000UL
#define PSYCHO_IOMMU_TSBSZ_128K 0x0000000000070000UL
#define PSYCHO_IOMMU_CTRL_TBWSZ 0x0000000000000004UL
#define PSYCHO_IOMMU_CTRL_DENAB 0x0000000000000002UL
#define PSYCHO_IOMMU_CTRL_ENAB 0x0000000000000001UL
#define PSYCHO_IOMMU_FLUSH 0x0210UL
#define PSYCHO_IOMMU_TSBBASE 0x0208UL
int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
u32 dvma_offset, u32 dma_mask,
unsigned long write_complete_offset)
{
struct iommu *iommu = pbm->iommu;
u64 control;
int err;
iommu->iommu_control = pbm->controller_regs + PSYCHO_IOMMU_CONTROL;
iommu->iommu_tsbbase = pbm->controller_regs + PSYCHO_IOMMU_TSBBASE;
iommu->iommu_flush = pbm->controller_regs + PSYCHO_IOMMU_FLUSH;
iommu->iommu_tags = pbm->controller_regs + PSYCHO_IOMMU_TAG;
iommu->write_complete_reg = (pbm->controller_regs +
write_complete_offset);
iommu->iommu_ctxflush = 0;
control = upa_readq(iommu->iommu_control);
control |= PSYCHO_IOMMU_CTRL_DENAB;
upa_writeq(control, iommu->iommu_control);
psycho_iommu_flush(pbm);
/* Leave diag mode enabled for full-flushing done in pci_iommu.c */
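/* tsbsize is in units of 1K entries; each IOTTE is 8 bytes. */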
err = iommu_table_init(iommu, tsbsize * 1024 * 8,
dvma_offset, dma_mask, pbm->numa_node);
if (err)
return err;
upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
control = upa_readq(iommu->iommu_control);
control &= ~(PSYCHO_IOMMU_CTRL_TSBSZ | PSYCHO_IOMMU_CTRL_TBWSZ);
control |= PSYCHO_IOMMU_CTRL_ENAB;
switch (tsbsize) {
case 64:
control |= PSYCHO_IOMMU_TSBSZ_64K;
break;
case 128:
control |= PSYCHO_IOMMU_TSBSZ_128K;
break;
default:
return -EINVAL;
}
upa_writeq(control, iommu->iommu_control);
return 0;
}
void psycho_pbm_init_common(struct pci_pbm_info *pbm, struct of_device *op,
const char *chip_name, int chip_type)
{
struct device_node *dp = op->node;
pbm->name = dp->full_name;
pbm->numa_node = -1;
pbm->chip_type = chip_type;
pbm->chip_version = of_getintprop_default(dp, "version#", 0);
pbm->chip_revision = of_getintprop_default(dp, "module-revision#", 0);
pbm->op = op;
pbm->pci_ops = &sun4u_pci_ops;
pbm->config_space_reg_bits = 8;
pbm->index = pci_num_pbms++;
pci_get_pbm_props(pbm);
pci_determine_mem_io_space(pbm);
printk(KERN_INFO "%s: %s PCI Bus Module ver[%x:%x]\n",
pbm->name, chip_name,
pbm->chip_version, pbm->chip_revision);
}

View File

@@ -0,0 +1,48 @@
#ifndef _PSYCHO_COMMON_H
#define _PSYCHO_COMMON_H
/* U2P Programmer's Manual, page 13-55, configuration space
* address format:
*
* 32 24 23 16 15 11 10 8 7 2 1 0
* ---------------------------------------------------------
* |0 0 0 0 0 0 0 0 1| bus | device | function | reg | 0 0 |
* ---------------------------------------------------------
*/
#define PSYCHO_CONFIG_BASE(PBM) \
((PBM)->config_space | (1UL << 24))
#define PSYCHO_CONFIG_ENCODE(BUS, DEVFN, REG) \
(((unsigned long)(BUS) << 16) | \
((unsigned long)(DEVFN) << 8) | \
((unsigned long)(REG)))
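/* Worked example (illustrative values): bus 0x01, devfn 0x08
 * (device 1, function 0), reg 0x04 encodes to
 * (0x01 << 16) | (0x08 << 8) | 0x04 == 0x010804, which is then
 * OR'd with PSYCHO_CONFIG_BASE() below.
 */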
static inline void *psycho_pci_config_mkaddr(struct pci_pbm_info *pbm,
unsigned char bus,
unsigned int devfn,
int where)
{
return (void *)
(PSYCHO_CONFIG_BASE(pbm) |
PSYCHO_CONFIG_ENCODE(bus, devfn, where));
}
enum psycho_error_type {
UE_ERR, CE_ERR, PCI_ERR
};
extern void psycho_check_iommu_error(struct pci_pbm_info *pbm,
unsigned long afsr,
unsigned long afar,
enum psycho_error_type type);
extern irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
extern int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
u32 dvma_offset, u32 dma_mask,
unsigned long write_complete_offset);
extern void psycho_pbm_init_common(struct pci_pbm_info *pbm,
struct of_device *op,
const char *chip_name, int chip_type);
#endif /* _PSYCHO_COMMON_H */

File diff suppressed because it is too large

View File

@@ -0,0 +1,53 @@
/* reboot.c: reboot/shutdown/halt/poweroff handling
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/reboot.h>
#include <linux/module.h>
#include <linux/pm.h>
#include <asm/system.h>
#include <asm/oplib.h>
#include <asm/prom.h>
/* sysctl - toggle power-off restriction for serial console
* systems in machine_power_off()
*/
int scons_pwroff = 1;
/* This isn't actually used; it exists merely to satisfy the
* reference in kernel/sys.c
*/
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
void machine_power_off(void)
{
if (strcmp(of_console_device->type, "serial") || scons_pwroff)
prom_halt_power_off();
prom_halt();
}
void machine_halt(void)
{
prom_halt();
panic("Halt failed!");
}
void machine_restart(char *cmd)
{
char *p;
p = strchr(reboot_command, '\n');
if (p)
*p = 0;
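/* Preference order: the explicit cmd, then the saved
 * reboot_command, then a bare PROM reset.
 */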
if (cmd)
prom_reboot(cmd);
if (*reboot_command)
prom_reboot(reboot_command);
prom_reboot("");
panic("Reboot failed!");
}

View File

@@ -0,0 +1,450 @@
/*
* rtrap.S: Preparing for return from trap on Sparc V9.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
*/
#include <asm/asi.h>
#include <asm/pstate.h>
#include <asm/ptrace.h>
#include <asm/spitfire.h>
#include <asm/head.h>
#include <asm/visasm.h>
#include <asm/processor.h>
#define RTRAP_PSTATE (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_IE)
#define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV)
#define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG)
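/* The three PSTATE values used on the return path: interrupts on,
 * interrupts off, and interrupts off with the alternate-globals (AG)
 * bit set for the pre-sun4v trap-global switch.
 */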
.text
.align 32
__handle_softirq:
call do_softirq
nop
ba,a,pt %xcc, __handle_softirq_continue
nop
__handle_preemption:
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
ba,pt %xcc, __handle_preemption_continue
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
__handle_user_windows:
call fault_in_user_windows
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Redo sched+sig checks */
ldx [%g6 + TI_FLAGS], %l0
andcc %l0, _TIF_NEED_RESCHED, %g0
be,pt %xcc, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldx [%g6 + TI_FLAGS], %l0
1: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
be,pt %xcc, __handle_user_windows_continue
nop
mov %l5, %o1
add %sp, PTREGS_OFF, %o0
mov %l0, %o2
call do_notify_resume
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Signal delivery can modify pt_regs tstate, so we must
* reload it.
*/
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
ba,pt %xcc, __handle_user_windows_continue
andn %l1, %l4, %l1
__handle_perfctrs:
call update_perfctrs
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldub [%g6 + TI_WSAVED], %o2
brz,pt %o2, 1f
nop
/* Redo userwin+sched+sig checks */
call fault_in_user_windows
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldx [%g6 + TI_FLAGS], %l0
andcc %l0, _TIF_NEED_RESCHED, %g0
be,pt %xcc, 1f
nop
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
ldx [%g6 + TI_FLAGS], %l0
1: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
be,pt %xcc, __handle_perfctrs_continue
sethi %hi(TSTATE_PEF), %o0
mov %l5, %o1
add %sp, PTREGS_OFF, %o0
mov %l0, %o2
call do_notify_resume
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Signal delivery can modify pt_regs tstate, so we must
* reload it.
*/
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
andn %l1, %l4, %l1
ba,pt %xcc, __handle_perfctrs_continue
sethi %hi(TSTATE_PEF), %o0
__handle_userfpu:
rd %fprs, %l5
andcc %l5, FPRS_FEF, %g0
sethi %hi(TSTATE_PEF), %o0
be,a,pn %icc, __handle_userfpu_continue
andn %l1, %o0, %l1
ba,a,pt %xcc, __handle_userfpu_continue
__handle_signal:
mov %l5, %o1
add %sp, PTREGS_OFF, %o0
mov %l0, %o2
call do_notify_resume
wrpr %g0, RTRAP_PSTATE, %pstate
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
/* Signal delivery can modify pt_regs tstate, so we must
* reload it.
*/
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
ba,pt %xcc, __handle_signal_continue
andn %l1, %l4, %l1
/* When returning from an NMI (%pil==15) interrupt we want to
* avoid running softirqs, doing IRQ tracing, preempting, etc.
*/
.globl rtrap_nmi
rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
andn %l1, %l4, %l1
srl %l4, 20, %l4
ba,pt %xcc, rtrap_no_irq_enable
wrpr %l4, %pil
.align 64
.globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall
rtrap_irq:
rtrap:
#ifndef CONFIG_SMP
sethi %hi(per_cpu____cpu_data), %l0
lduw [%l0 + %lo(per_cpu____cpu_data)], %l1
#else
sethi %hi(per_cpu____cpu_data), %l0
or %l0, %lo(per_cpu____cpu_data), %l0
lduw [%l0 + %g5], %l1
#endif
cmp %l1, 0
/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
bne,pn %icc, __handle_softirq
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
__handle_softirq_continue:
rtrap_xcall:
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
andn %l1, %l4, %l1
srl %l4, 20, %l4
#ifdef CONFIG_TRACE_IRQFLAGS
brnz,pn %l4, rtrap_no_irq_enable
nop
call trace_hardirqs_on
nop
wrpr %l4, %pil
#endif
rtrap_no_irq_enable:
andcc %l1, TSTATE_PRIV, %l3
bne,pn %icc, to_kernel
nop
/* We must hold IRQs off and atomically test schedule+signal
* state, then hold them off all the way back to userspace.
* If we are returning to kernel, none of this matters. Note
* that we are disabling interrupts via PSTATE_IE, not using
* %pil.
*
* If we do not do this, there is a window where we would do
* the tests, later the signal/resched event arrives but we do
* not process it since we are still in kernel mode. It would
* take until the next local IRQ before the signal/resched
* event would be handled.
*
* This also means that if we have to deal with performance
* counters or user windows, we have to redo all of these
* sched+signal checks with IRQs disabled.
*/
to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
wrpr 0, %pil
__handle_preemption_continue:
ldx [%g6 + TI_FLAGS], %l0
sethi %hi(_TIF_USER_WORK_MASK), %o0
or %o0, %lo(_TIF_USER_WORK_MASK), %o0
andcc %l0, %o0, %g0
sethi %hi(TSTATE_PEF), %o0
be,pt %xcc, user_nowork
andcc %l1, %o0, %g0
andcc %l0, _TIF_NEED_RESCHED, %g0
bne,pn %xcc, __handle_preemption
andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0
bne,pn %xcc, __handle_signal
__handle_signal_continue:
ldub [%g6 + TI_WSAVED], %o2
brnz,pn %o2, __handle_user_windows
nop
__handle_user_windows_continue:
ldx [%g6 + TI_FLAGS], %l5
andcc %l5, _TIF_PERFCTR, %g0
sethi %hi(TSTATE_PEF), %o0
bne,pn %xcc, __handle_perfctrs
__handle_perfctrs_continue:
andcc %l1, %o0, %g0
/* This fpdepth clear is necessary for non-syscall rtraps only */
user_nowork:
bne,pn %xcc, __handle_userfpu
stb %g0, [%g6 + TI_FPDEPTH]
__handle_userfpu_continue:
rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
ldx [%sp + PTREGS_OFF + PT_V9_G2], %g2
ldx [%sp + PTREGS_OFF + PT_V9_G3], %g3
ldx [%sp + PTREGS_OFF + PT_V9_G4], %g4
ldx [%sp + PTREGS_OFF + PT_V9_G5], %g5
brz,pt %l3, 1f
mov %g6, %l2
/* Must do this before thread reg is clobbered below. */
LOAD_PER_CPU_BASE(%g5, %g6, %i0, %i1, %i2)
1:
ldx [%sp + PTREGS_OFF + PT_V9_G6], %g6
ldx [%sp + PTREGS_OFF + PT_V9_G7], %g7
/* Normal globals are restored, go to trap globals. */
661: wrpr %g0, RTRAP_PSTATE_AG_IRQOFF, %pstate
nop
.section .sun4v_2insn_patch, "ax"
.word 661b
wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
SET_GL(1)
.previous
mov %l2, %g6
ldx [%sp + PTREGS_OFF + PT_V9_I0], %i0
ldx [%sp + PTREGS_OFF + PT_V9_I1], %i1
ldx [%sp + PTREGS_OFF + PT_V9_I2], %i2
ldx [%sp + PTREGS_OFF + PT_V9_I3], %i3
ldx [%sp + PTREGS_OFF + PT_V9_I4], %i4
ldx [%sp + PTREGS_OFF + PT_V9_I5], %i5
ldx [%sp + PTREGS_OFF + PT_V9_I6], %i6
ldx [%sp + PTREGS_OFF + PT_V9_I7], %i7
ldx [%sp + PTREGS_OFF + PT_V9_TPC], %l2
ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %o2
ld [%sp + PTREGS_OFF + PT_V9_Y], %o3
wr %o3, %g0, %y
wrpr %l4, 0x0, %pil
wrpr %g0, 0x1, %tl
andn %l1, TSTATE_SYSCALL, %l1
wrpr %l1, %g0, %tstate
wrpr %l2, %g0, %tpc
wrpr %o2, %g0, %tnpc
brnz,pn %l3, kern_rtt
mov PRIMARY_CONTEXT, %l7
661: ldxa [%l7 + %l7] ASI_DMMU, %l0
.section .sun4v_1insn_patch, "ax"
.word 661b
ldxa [%l7 + %l7] ASI_MMU, %l0
.previous
sethi %hi(sparc64_kern_pri_nuc_bits), %l1
ldx [%l1 + %lo(sparc64_kern_pri_nuc_bits)], %l1
or %l0, %l1, %l0
661: stxa %l0, [%l7] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %l0, [%l7] ASI_MMU
.previous
sethi %hi(KERNBASE), %l7
flush %l7
rdpr %wstate, %l1
rdpr %otherwin, %l2
srl %l1, 3, %l1
wrpr %l2, %g0, %canrestore
wrpr %l1, %g0, %wstate
brnz,pt %l2, user_rtt_restore
wrpr %g0, %g0, %otherwin
ldx [%g6 + TI_FLAGS], %g3
wr %g0, ASI_AIUP, %asi
rdpr %cwp, %g1
andcc %g3, _TIF_32BIT, %g0
sub %g1, 1, %g1
bne,pt %xcc, user_rtt_fill_32bit
wrpr %g1, %cwp
ba,a,pt %xcc, user_rtt_fill_64bit
user_rtt_fill_fixup:
rdpr %cwp, %g1
add %g1, 1, %g1
wrpr %g1, 0x0, %cwp
rdpr %wstate, %g2
sll %g2, 3, %g2
wrpr %g2, 0x0, %wstate
/* We know %canrestore and %otherwin are both zero. */
sethi %hi(sparc64_kern_pri_context), %g2
ldx [%g2 + %lo(sparc64_kern_pri_context)], %g2
mov PRIMARY_CONTEXT, %g1
661: stxa %g2, [%g1] ASI_DMMU
.section .sun4v_1insn_patch, "ax"
.word 661b
stxa %g2, [%g1] ASI_MMU
.previous
sethi %hi(KERNBASE), %g1
flush %g1
or %g4, FAULT_CODE_WINFIXUP, %g4
stb %g4, [%g6 + TI_FAULT_CODE]
stx %g5, [%g6 + TI_FAULT_ADDR]
mov %g6, %l1
wrpr %g0, 0x0, %tl
661: nop
.section .sun4v_1insn_patch, "ax"
.word 661b
SET_GL(0)
.previous
wrpr %g0, RTRAP_PSTATE, %pstate
mov %l1, %g6
ldx [%g6 + TI_TASK], %g4
LOAD_PER_CPU_BASE(%g5, %g6, %g1, %g2, %g3)
call do_sparc64_fault
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
user_rtt_pre_restore:
add %g1, 1, %g1
wrpr %g1, 0x0, %cwp
user_rtt_restore:
restore
rdpr %canrestore, %g1
wrpr %g1, 0x0, %cleanwin
retry
nop
kern_rtt: rdpr %canrestore, %g1
brz,pn %g1, kern_rtt_fill
nop
kern_rtt_restore:
stw %g0, [%sp + PTREGS_OFF + PT_V9_MAGIC]
restore
retry
to_kernel:
#ifdef CONFIG_PREEMPT
ldsw [%g6 + TI_PRE_COUNT], %l5
brnz %l5, kern_fpucheck
ldx [%g6 + TI_FLAGS], %l5
andcc %l5, _TIF_NEED_RESCHED, %g0
be,pt %xcc, kern_fpucheck
nop
cmp %l4, 0
bne,pn %xcc, kern_fpucheck
sethi %hi(PREEMPT_ACTIVE), %l6
stw %l6, [%g6 + TI_PRE_COUNT]
call schedule
nop
ba,pt %xcc, rtrap
stw %g0, [%g6 + TI_PRE_COUNT]
#endif
kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5
brz,pt %l5, rt_continue
srl %l5, 1, %o0
add %g6, TI_FPSAVED, %l6
ldub [%l6 + %o0], %l2
sub %l5, 2, %l5
add %g6, TI_GSR, %o1
andcc %l2, (FPRS_FEF|FPRS_DU), %g0
be,pt %icc, 2f
and %l2, FPRS_DL, %l6
andcc %l2, FPRS_FEF, %g0
be,pn %icc, 5f
sll %o0, 3, %o5
rd %fprs, %g1
wr %g1, FPRS_FEF, %fprs
ldx [%o1 + %o5], %g1
add %g6, TI_XFSR, %o1
sll %o0, 8, %o2
add %g6, TI_FPREGS, %o3
brz,pn %l6, 1f
add %g6, TI_FPREGS+0x40, %o4
membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f0
ldda [%o4 + %o2] ASI_BLK_P, %f16
membar #Sync
1: andcc %l2, FPRS_DU, %g0
be,pn %icc, 1f
wr %g1, 0, %gsr
add %o2, 0x80, %o2
membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
1: membar #Sync
ldx [%o1 + %o5], %fsr
2: stb %l5, [%g6 + TI_FPDEPTH]
ba,pt %xcc, rt_continue
nop
5: wr %g0, FPRS_FEF, %fprs
sll %o0, 8, %o2
add %g6, TI_FPREGS+0x80, %o3
add %g6, TI_FPREGS+0xc0, %o4
membar #Sync
ldda [%o3 + %o2] ASI_BLK_P, %f32
ldda [%o4 + %o2] ASI_BLK_P, %f48
membar #Sync
wr %g0, FPRS_DU, %fprs
ba,pt %xcc, rt_continue
stb %l5, [%g6 + TI_FPDEPTH]

674
arch/sparc/kernel/sbus.c Normal file
View File

@@ -0,0 +1,674 @@
/*
* sbus.c: UltraSparc SBUS controller support.
*
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/upa.h>
#include <asm/cache.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/oplib.h>
#include <asm/starfire.h>
#include "iommu_common.h"
#define MAP_BASE ((u32)0xc0000000)
/* Offsets from iommu_regs */
#define SYSIO_IOMMUREG_BASE 0x2400UL
#define IOMMU_CONTROL (0x2400UL - 0x2400UL) /* IOMMU control register */
#define IOMMU_TSBBASE (0x2408UL - 0x2400UL) /* TSB base address register */
#define IOMMU_FLUSH (0x2410UL - 0x2400UL) /* IOMMU flush register */
#define IOMMU_VADIAG (0x4400UL - 0x2400UL) /* SBUS virtual address diagnostic */
#define IOMMU_TAGCMP (0x4408UL - 0x2400UL) /* TLB tag compare diagnostics */
#define IOMMU_LRUDIAG (0x4500UL - 0x2400UL) /* IOMMU LRU queue diagnostics */
#define IOMMU_TAGDIAG (0x4580UL - 0x2400UL) /* TLB tag diagnostics */
#define IOMMU_DRAMDIAG (0x4600UL - 0x2400UL) /* TLB data RAM diagnostics */
#define IOMMU_DRAM_VALID (1UL << 30UL)
/* Offsets from strbuf_regs */
#define SYSIO_STRBUFREG_BASE 0x2800UL
#define STRBUF_CONTROL (0x2800UL - 0x2800UL) /* Control */
#define STRBUF_PFLUSH (0x2808UL - 0x2800UL) /* Page flush/invalidate */
#define STRBUF_FSYNC (0x2810UL - 0x2800UL) /* Flush synchronization */
#define STRBUF_DRAMDIAG (0x5000UL - 0x2800UL) /* data RAM diagnostic */
#define STRBUF_ERRDIAG (0x5400UL - 0x2800UL) /* error status diagnostics */
#define STRBUF_PTAGDIAG (0x5800UL - 0x2800UL) /* Page tag diagnostics */
#define STRBUF_LTAGDIAG (0x5900UL - 0x2800UL) /* Line tag diagnostics */
#define STRBUF_TAG_VALID 0x02UL
/* Enable 64-bit DVMA mode for the given device. */
void sbus_set_sbus64(struct device *dev, int bursts)
{
struct iommu *iommu = dev->archdata.iommu;
struct of_device *op = to_of_device(dev);
const struct linux_prom_registers *regs;
unsigned long cfg_reg;
int slot;
u64 val;
regs = of_get_property(op->node, "reg", NULL);
if (!regs) {
printk(KERN_ERR "sbus_set_sbus64: Cannot find regs for %s\n",
op->node->full_name);
return;
}
slot = regs->which_io;
cfg_reg = iommu->write_complete_reg;
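/* Each SBUS slot's config register sits at a fixed offset from
 * the SYSIO control space.
 */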
switch (slot) {
case 0:
cfg_reg += 0x20UL;
break;
case 1:
cfg_reg += 0x28UL;
break;
case 2:
cfg_reg += 0x30UL;
break;
case 3:
cfg_reg += 0x38UL;
break;
case 13:
cfg_reg += 0x40UL;
break;
case 14:
cfg_reg += 0x48UL;
break;
case 15:
cfg_reg += 0x50UL;
break;
default:
return;
}
val = upa_readq(cfg_reg);
if (val & (1UL << 14UL)) {
/* Extended transfer mode already enabled. */
return;
}
val |= (1UL << 14UL);
if (bursts & DMA_BURST8)
val |= (1UL << 1UL);
if (bursts & DMA_BURST16)
val |= (1UL << 2UL);
if (bursts & DMA_BURST32)
val |= (1UL << 3UL);
if (bursts & DMA_BURST64)
val |= (1UL << 4UL);
upa_writeq(val, cfg_reg);
}
/* INO number to IMAP register offset for SYSIO external IRQs.
* This should conform to both Sunfire/Wildfire server and Fusion
* desktop designs.
*/
#define SYSIO_IMAP_SLOT0 0x2c00UL
#define SYSIO_IMAP_SLOT1 0x2c08UL
#define SYSIO_IMAP_SLOT2 0x2c10UL
#define SYSIO_IMAP_SLOT3 0x2c18UL
#define SYSIO_IMAP_SCSI 0x3000UL
#define SYSIO_IMAP_ETH 0x3008UL
#define SYSIO_IMAP_BPP 0x3010UL
#define SYSIO_IMAP_AUDIO 0x3018UL
#define SYSIO_IMAP_PFAIL 0x3020UL
#define SYSIO_IMAP_KMS 0x3028UL
#define SYSIO_IMAP_FLPY 0x3030UL
#define SYSIO_IMAP_SHW 0x3038UL
#define SYSIO_IMAP_KBD 0x3040UL
#define SYSIO_IMAP_MS 0x3048UL
#define SYSIO_IMAP_SER 0x3050UL
#define SYSIO_IMAP_TIM0 0x3060UL
#define SYSIO_IMAP_TIM1 0x3068UL
#define SYSIO_IMAP_UE 0x3070UL
#define SYSIO_IMAP_CE 0x3078UL
#define SYSIO_IMAP_SBERR 0x3080UL
#define SYSIO_IMAP_PMGMT 0x3088UL
#define SYSIO_IMAP_GFX 0x3090UL
#define SYSIO_IMAP_EUPA 0x3098UL
#define bogon ((unsigned long) -1)
static unsigned long sysio_irq_offsets[] = {
/* SBUS Slot 0 --> 3, level 1 --> 7 */
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0, SYSIO_IMAP_SLOT0,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1, SYSIO_IMAP_SLOT1,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2, SYSIO_IMAP_SLOT2,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3, SYSIO_IMAP_SLOT3,
/* Onboard devices (not relevant/used on SunFire). */
SYSIO_IMAP_SCSI,
SYSIO_IMAP_ETH,
SYSIO_IMAP_BPP,
bogon,
SYSIO_IMAP_AUDIO,
SYSIO_IMAP_PFAIL,
bogon,
bogon,
SYSIO_IMAP_KMS,
SYSIO_IMAP_FLPY,
SYSIO_IMAP_SHW,
SYSIO_IMAP_KBD,
SYSIO_IMAP_MS,
SYSIO_IMAP_SER,
bogon,
bogon,
SYSIO_IMAP_TIM0,
SYSIO_IMAP_TIM1,
bogon,
bogon,
SYSIO_IMAP_UE,
SYSIO_IMAP_CE,
SYSIO_IMAP_SBERR,
SYSIO_IMAP_PMGMT,
};
#undef bogon
#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
/* Convert Interrupt Mapping register pointer to associated
* Interrupt Clear register pointer, SYSIO specific version.
*/
#define SYSIO_ICLR_UNUSED0 0x3400UL
#define SYSIO_ICLR_SLOT0 0x3408UL
#define SYSIO_ICLR_SLOT1 0x3448UL
#define SYSIO_ICLR_SLOT2 0x3488UL
#define SYSIO_ICLR_SLOT3 0x34c8UL
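/* The ICLR registers sit at a constant offset above their IMAP
 * counterparts, so the conversion is a single add.
 */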
static unsigned long sysio_imap_to_iclr(unsigned long imap)
{
unsigned long diff = SYSIO_ICLR_UNUSED0 - SYSIO_IMAP_SLOT0;
return imap + diff;
}
static unsigned int sbus_build_irq(struct of_device *op, unsigned int ino)
{
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long imap, iclr;
int sbus_level = 0;
imap = sysio_irq_offsets[ino];
if (imap == ((unsigned long)-1)) {
prom_printf("get_irq_translations: Bad SYSIO INO[%x]\n",
ino);
prom_halt();
}
imap += reg_base;
/* SYSIO inconsistency. For external SLOTS, we have to select
* the right ICLR register based upon the lower SBUS irq level
* bits.
*/
if (ino >= 0x20) {
iclr = sysio_imap_to_iclr(imap);
} else {
int sbus_slot = (ino & 0x18)>>3;
sbus_level = ino & 0x7;
switch(sbus_slot) {
case 0:
iclr = reg_base + SYSIO_ICLR_SLOT0;
break;
case 1:
iclr = reg_base + SYSIO_ICLR_SLOT1;
break;
case 2:
iclr = reg_base + SYSIO_ICLR_SLOT2;
break;
default:
case 3:
iclr = reg_base + SYSIO_ICLR_SLOT3;
break;
}
iclr += ((unsigned long)sbus_level - 1UL) * 8UL;
}
return build_irq(sbus_level, iclr, imap);
}
/* Error interrupt handling. */
#define SYSIO_UE_AFSR 0x0030UL
#define SYSIO_UE_AFAR 0x0038UL
#define SYSIO_UEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_UEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_UEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_UEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO is cause */
#define SYSIO_UEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_UEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_UEAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define SYSIO_UEAFSR_DOFF 0x0000e00000000000UL /* Doubleword Offset */
#define SYSIO_UEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_UEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_UEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ue_handler(int irq, void *dev_id)
{
struct of_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported, portid;
afsr_reg = reg_base + SYSIO_UE_AFSR;
afar_reg = reg_base + SYSIO_UE_AFAR;
/* Latch error status. */
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_UEAFSR_PPIO | SYSIO_UEAFSR_PDRD | SYSIO_UEAFSR_PDWR |
SYSIO_UEAFSR_SPIO | SYSIO_UEAFSR_SDRD | SYSIO_UEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: Uncorrectable ECC Error, primary error type[%s]\n",
portid,
(((error_bits & SYSIO_UEAFSR_PPIO) ?
"PIO" :
((error_bits & SYSIO_UEAFSR_PDRD) ?
"DVMA Read" :
((error_bits & SYSIO_UEAFSR_PDWR) ?
"DVMA Write" : "???")))));
printk("SYSIO[%x]: DOFF[%lx] SIZE[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_UEAFSR_DOFF) >> 45UL,
(afsr & SYSIO_UEAFSR_SIZE) >> 42UL,
(afsr & SYSIO_UEAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary UE errors [", portid);
reported = 0;
if (afsr & SYSIO_UEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SYSIO_UEAFSR_SDRD) {
reported++;
printk("(DVMA Read)");
}
if (afsr & SYSIO_UEAFSR_SDWR) {
reported++;
printk("(DVMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SYSIO_CE_AFSR 0x0040UL
#define SYSIO_CE_AFAR 0x0048UL
#define SYSIO_CEAFSR_PPIO 0x8000000000000000UL /* Primary PIO cause */
#define SYSIO_CEAFSR_PDRD 0x4000000000000000UL /* Primary DVMA read cause */
#define SYSIO_CEAFSR_PDWR 0x2000000000000000UL /* Primary DVMA write cause */
#define SYSIO_CEAFSR_SPIO 0x1000000000000000UL /* Secondary PIO cause */
#define SYSIO_CEAFSR_SDRD 0x0800000000000000UL /* Secondary DVMA read cause */
#define SYSIO_CEAFSR_SDWR 0x0400000000000000UL /* Secondary DVMA write cause*/
#define SYSIO_CEAFSR_RESV1 0x0300000000000000UL /* Reserved */
#define SYSIO_CEAFSR_ESYND 0x00ff000000000000UL /* Syndrome Bits */
#define SYSIO_CEAFSR_DOFF 0x0000e00000000000UL /* Double Offset */
#define SYSIO_CEAFSR_SIZE 0x00001c0000000000UL /* Bad transfer size 2^SIZE */
#define SYSIO_CEAFSR_MID 0x000003e000000000UL /* UPA MID causing the fault */
#define SYSIO_CEAFSR_RESV2 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_ce_handler(int irq, void *dev_id)
{
struct of_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned long afsr_reg, afar_reg;
unsigned long afsr, afar, error_bits;
int reported, portid;
afsr_reg = reg_base + SYSIO_CE_AFSR;
afar_reg = reg_base + SYSIO_CE_AFAR;
/* Latch error status. */
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_CEAFSR_PPIO | SYSIO_CEAFSR_PDRD | SYSIO_CEAFSR_PDWR |
SYSIO_CEAFSR_SPIO | SYSIO_CEAFSR_SDRD | SYSIO_CEAFSR_SDWR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->node, "portid", -1);
printk("SYSIO[%x]: Correctable ECC Error, primary error type[%s]\n",
portid,
(((error_bits & SYSIO_CEAFSR_PPIO) ?
"PIO" :
((error_bits & SYSIO_CEAFSR_PDRD) ?
"DVMA Read" :
((error_bits & SYSIO_CEAFSR_PDWR) ?
"DVMA Write" : "???")))));
/* XXX Use syndrome and afar to print out module string just like
* XXX UDB CE trap handler does... -DaveM
*/
printk("SYSIO[%x]: DOFF[%lx] ECC Syndrome[%lx] Size[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_CEAFSR_DOFF) >> 45UL,
(afsr & SYSIO_CEAFSR_ESYND) >> 48UL,
(afsr & SYSIO_CEAFSR_SIZE) >> 42UL,
(afsr & SYSIO_CEAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary CE errors [", portid);
reported = 0;
if (afsr & SYSIO_CEAFSR_SPIO) {
reported++;
printk("(PIO)");
}
if (afsr & SYSIO_CEAFSR_SDRD) {
reported++;
printk("(DVMA Read)");
}
if (afsr & SYSIO_CEAFSR_SDWR) {
reported++;
printk("(DVMA Write)");
}
if (!reported)
printk("(none)");
printk("]\n");
return IRQ_HANDLED;
}
#define SYSIO_SBUS_AFSR 0x2010UL
#define SYSIO_SBUS_AFAR 0x2018UL
#define SYSIO_SBAFSR_PLE 0x8000000000000000UL /* Primary Late PIO Error */
#define SYSIO_SBAFSR_PTO 0x4000000000000000UL /* Primary SBUS Timeout */
#define SYSIO_SBAFSR_PBERR 0x2000000000000000UL /* Primary SBUS Error ACK */
#define SYSIO_SBAFSR_SLE 0x1000000000000000UL /* Secondary Late PIO Error */
#define SYSIO_SBAFSR_STO 0x0800000000000000UL /* Secondary SBUS Timeout */
#define SYSIO_SBAFSR_SBERR 0x0400000000000000UL /* Secondary SBUS Error ACK */
#define SYSIO_SBAFSR_RESV1 0x03ff000000000000UL /* Reserved */
#define SYSIO_SBAFSR_RD 0x0000800000000000UL /* Primary was late PIO read */
#define SYSIO_SBAFSR_RESV2 0x0000600000000000UL /* Reserved */
#define SYSIO_SBAFSR_SIZE 0x00001c0000000000UL /* Size of transfer */
#define SYSIO_SBAFSR_MID 0x000003e000000000UL /* MID causing the error */
#define SYSIO_SBAFSR_RESV3 0x0000001fffffffffUL /* Reserved */
static irqreturn_t sysio_sbus_error_handler(int irq, void *dev_id)
{
struct of_device *op = dev_id;
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long afsr_reg, afar_reg, reg_base;
unsigned long afsr, afar, error_bits;
int reported, portid;
reg_base = iommu->write_complete_reg - 0x2000UL;
afsr_reg = reg_base + SYSIO_SBUS_AFSR;
afar_reg = reg_base + SYSIO_SBUS_AFAR;
afsr = upa_readq(afsr_reg);
afar = upa_readq(afar_reg);
/* Clear primary/secondary error status bits. */
error_bits = afsr &
(SYSIO_SBAFSR_PLE | SYSIO_SBAFSR_PTO | SYSIO_SBAFSR_PBERR |
SYSIO_SBAFSR_SLE | SYSIO_SBAFSR_STO | SYSIO_SBAFSR_SBERR);
upa_writeq(error_bits, afsr_reg);
portid = of_getintprop_default(op->node, "portid", -1);
/* Log the error. */
printk("SYSIO[%x]: SBUS Error, primary error type[%s] read(%d)\n",
portid,
(((error_bits & SYSIO_SBAFSR_PLE) ?
"Late PIO Error" :
((error_bits & SYSIO_SBAFSR_PTO) ?
"Time Out" :
((error_bits & SYSIO_SBAFSR_PBERR) ?
"Error Ack" : "???")))),
(afsr & SYSIO_SBAFSR_RD) ? 1 : 0);
printk("SYSIO[%x]: size[%lx] MID[%lx]\n",
portid,
(afsr & SYSIO_SBAFSR_SIZE) >> 42UL,
(afsr & SYSIO_SBAFSR_MID) >> 37UL);
printk("SYSIO[%x]: AFAR[%016lx]\n", portid, afar);
printk("SYSIO[%x]: Secondary SBUS errors [", portid);
reported = 0;
if (afsr & SYSIO_SBAFSR_SLE) {
reported++;
printk("(Late PIO Error)");
}
if (afsr & SYSIO_SBAFSR_STO) {
reported++;
printk("(Time Out)");
}
if (afsr & SYSIO_SBAFSR_SBERR) {
reported++;
printk("(Error Ack)");
}
if (!reported)
printk("(none)");
printk("]\n");
/* XXX check iommu/strbuf for further error status XXX */
return IRQ_HANDLED;
}
#define ECC_CONTROL 0x0020UL
#define SYSIO_ECNTRL_ECCEN 0x8000000000000000UL /* Enable ECC Checking */
#define SYSIO_ECNTRL_UEEN 0x4000000000000000UL /* Enable UE Interrupts */
#define SYSIO_ECNTRL_CEEN 0x2000000000000000UL /* Enable CE Interrupts */
#define SYSIO_UE_INO 0x34
#define SYSIO_CE_INO 0x35
#define SYSIO_SBUSERR_INO 0x36
static void __init sysio_register_error_handlers(struct of_device *op)
{
struct iommu *iommu = op->dev.archdata.iommu;
unsigned long reg_base = iommu->write_complete_reg - 0x2000UL;
unsigned int irq;
u64 control;
int portid;
portid = of_getintprop_default(op->node, "portid", -1);
irq = sbus_build_irq(op, SYSIO_UE_INO);
if (request_irq(irq, sysio_ue_handler, 0,
"SYSIO_UE", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register UE interrupt.\n",
portid);
prom_halt();
}
irq = sbus_build_irq(op, SYSIO_CE_INO);
if (request_irq(irq, sysio_ce_handler, 0,
"SYSIO_CE", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register CE interrupt.\n",
portid);
prom_halt();
}
irq = sbus_build_irq(op, SYSIO_SBUSERR_INO);
if (request_irq(irq, sysio_sbus_error_handler, 0,
"SYSIO_SBERR", op) < 0) {
prom_printf("SYSIO[%x]: Cannot register SBUS Error interrupt.\n",
portid);
prom_halt();
}
/* Now turn the error interrupts on and also enable ECC checking. */
upa_writeq((SYSIO_ECNTRL_ECCEN |
SYSIO_ECNTRL_UEEN |
SYSIO_ECNTRL_CEEN),
reg_base + ECC_CONTROL);
control = upa_readq(iommu->write_complete_reg);
control |= 0x100UL; /* SBUS Error Interrupt Enable */
upa_writeq(control, iommu->write_complete_reg);
}
/* Boot time initialization. */
static void __init sbus_iommu_init(struct of_device *op)
{
const struct linux_prom64_registers *pr;
struct device_node *dp = op->node;
struct iommu *iommu;
struct strbuf *strbuf;
unsigned long regs, reg_base;
int i, portid;
u64 control;
pr = of_get_property(dp, "reg", NULL);
if (!pr) {
prom_printf("sbus_iommu_init: Cannot map SYSIO "
"control registers.\n");
prom_halt();
}
regs = pr->phys_addr;
iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC);
if (!iommu)
goto fatal_memory_error;
strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC);
if (!strbuf)
goto fatal_memory_error;
op->dev.archdata.iommu = iommu;
op->dev.archdata.stc = strbuf;
op->dev.archdata.numa_node = -1;
reg_base = regs + SYSIO_IOMMUREG_BASE;
iommu->iommu_control = reg_base + IOMMU_CONTROL;
iommu->iommu_tsbbase = reg_base + IOMMU_TSBBASE;
iommu->iommu_flush = reg_base + IOMMU_FLUSH;
iommu->iommu_tags = iommu->iommu_control +
(IOMMU_TAGDIAG - IOMMU_CONTROL);
reg_base = regs + SYSIO_STRBUFREG_BASE;
strbuf->strbuf_control = reg_base + STRBUF_CONTROL;
strbuf->strbuf_pflush = reg_base + STRBUF_PFLUSH;
strbuf->strbuf_fsync = reg_base + STRBUF_FSYNC;
strbuf->strbuf_enabled = 1;
strbuf->strbuf_flushflag = (volatile unsigned long *)
((((unsigned long)&strbuf->__flushflag_buf[0])
+ 63UL)
& ~63UL);
strbuf->strbuf_flushflag_pa = (unsigned long)
__pa(strbuf->strbuf_flushflag);
/* The SYSIO SBUS control register is used for dummy reads
* in order to ensure write completion.
*/
iommu->write_complete_reg = regs + 0x2000UL;
portid = of_getintprop_default(op->node, "portid", -1);
printk(KERN_INFO "SYSIO: UPA portID %x, at %016lx\n",
portid, regs);
/* Setup for TSB_SIZE=7, TBW_SIZE=0, MMU_DE=1, MMU_EN=1 */
if (iommu_table_init(iommu, IO_TSB_SIZE, MAP_BASE, 0xffffffff, -1))
goto fatal_memory_error;
control = upa_readq(iommu->iommu_control);
control = ((7UL << 16UL) |
(0UL << 2UL) |
(1UL << 1UL) |
(1UL << 0UL));
upa_writeq(control, iommu->iommu_control);
/* Clean out any cruft in the IOMMU using
* diagnostic accesses.
*/
for (i = 0; i < 16; i++) {
unsigned long dram, tag;
dram = iommu->iommu_control + (IOMMU_DRAMDIAG - IOMMU_CONTROL);
tag = iommu->iommu_control + (IOMMU_TAGDIAG - IOMMU_CONTROL);
dram += (unsigned long)i * 8UL;
tag += (unsigned long)i * 8UL;
upa_writeq(0, dram);
upa_writeq(0, tag);
}
upa_readq(iommu->write_complete_reg);
/* Give the TSB to SYSIO. */
upa_writeq(__pa(iommu->page_table), iommu->iommu_tsbbase);
/* Setup streaming buffer, DE=1 SB_EN=1 */
control = (1UL << 1UL) | (1UL << 0UL);
upa_writeq(control, strbuf->strbuf_control);
/* Clear out the tags using diagnostics. */
for (i = 0; i < 16; i++) {
unsigned long ptag, ltag;
ptag = strbuf->strbuf_control +
(STRBUF_PTAGDIAG - STRBUF_CONTROL);
ltag = strbuf->strbuf_control +
(STRBUF_LTAGDIAG - STRBUF_CONTROL);
ptag += (unsigned long)i * 8UL;
ltag += (unsigned long)i * 8UL;
upa_writeq(0UL, ptag);
upa_writeq(0UL, ltag);
}
/* Enable DVMA arbitration for all devices/slots. */
control = upa_readq(iommu->write_complete_reg);
control |= 0x3fUL;
upa_writeq(control, iommu->write_complete_reg);
/* Now some Xfire specific grot... */
if (this_is_starfire)
starfire_hookup(portid);
sysio_register_error_handlers(op);
return;
fatal_memory_error:
prom_printf("sbus_iommu_init: Fatal memory allocation error.\n");
}
static int __init sbus_init(void)
{
struct device_node *dp;
for_each_node_by_name(dp, "sbus") {
struct of_device *op = of_find_device_by_node(dp);
sbus_iommu_init(op);
of_propagate_archdata(op);
}
return 0;
}
subsys_initcall(sbus_init);

View File

@@ -46,6 +46,8 @@
#include <asm/cpudata.h>
#include <asm/setup.h>
#include "kernel.h"
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
0, /* unused */
@@ -308,9 +310,6 @@ void __init setup_arch(char **cmdline_p)
smp_setup_cpu_possible_map();
}
extern char *sparc_cpu_type;
extern char *sparc_fpu_type;
static int ncpus_probed;
static int show_cpuinfo(struct seq_file *m, void *__unused)
@@ -328,8 +327,8 @@ static int show_cpuinfo(struct seq_file *m, void *__unused)
"CPU0ClkTck\t: %ld\n"
#endif
,
sparc_cpu_type ? sparc_cpu_type : "undetermined",
sparc_fpu_type ? sparc_fpu_type : "undetermined",
sparc_cpu_type,
sparc_fpu_type,
romvec->pv_romvers,
prom_rev,
romvec->pv_printrev >> 16,

View File

@@ -0,0 +1,429 @@
/*
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif
#include "entry.h"
#include "kernel.h"
/* Used to synchronize accesses to NatSemi SUPER I/O chip configuration
* operations in asm/ns87303.h
*/
DEFINE_SPINLOCK(ns87303_lock);
struct screen_info screen_info = {
0, 0, /* orig-x, orig-y */
0, /* unused */
0, /* orig-video-page */
0, /* orig-video-mode */
128, /* orig-video-cols */
0, 0, 0, /* unused, ega_bx, unused */
54, /* orig-video-lines */
0, /* orig-video-isVGA */
16 /* orig-video-points */
};
static void
prom_console_write(struct console *con, const char *s, unsigned n)
{
prom_write(s, n);
}
/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;
static struct console prom_early_console = {
.name = "earlyprom",
.write = prom_console_write,
.flags = CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
.index = -1,
};
/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
static void __init process_switch(char c)
{
switch (c) {
case 'd':
case 's':
break;
case 'h':
prom_printf("boot_flags_init: Halt!\n");
prom_halt();
break;
case 'p':
/* Just ignore, this behavior is now the default. */
break;
case 'P':
/* Force UltraSPARC-III P-Cache on. */
if (tlb_type != cheetah) {
printk("BOOT: Ignoring P-Cache force option.\n");
break;
}
cheetah_pcache_forced_on = 1;
add_taint(TAINT_MACHINE_CHECK);
cheetah_enable_pcache();
break;
default:
printk("Unknown boot switch (-%c)\n", c);
break;
}
}
static void __init boot_flags_init(char *commands)
{
while (*commands) {
/* Move to the start of the next "argument". */
while (*commands && *commands == ' ')
commands++;
/* Process any command switches, otherwise skip it. */
if (*commands == '\0')
break;
if (*commands == '-') {
commands++;
while (*commands && *commands != ' ')
process_switch(*commands++);
continue;
}
if (!strncmp(commands, "mem=", 4)) {
/*
* "mem=XXX[kKmM]" overrides the PROM-reported
* memory size.
*/
cmdline_memory_size = simple_strtoul(commands + 4,
&commands, 0);
if (*commands == 'K' || *commands == 'k') {
cmdline_memory_size <<= 10;
commands++;
} else if (*commands=='M' || *commands=='m') {
cmdline_memory_size <<= 20;
commands++;
}
}
while (*commands && *commands != ' ')
commands++;
}
}
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
void __init per_cpu_patch(void)
{
struct cpuid_patch_entry *p;
unsigned long ver;
int is_jbus;
if (tlb_type == spitfire && !this_is_starfire)
return;
is_jbus = 0;
if (tlb_type != hypervisor) {
__asm__ ("rdpr %%ver, %0" : "=r" (ver));
is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
(ver >> 32UL) == __SERRANO_ID);
}
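/* Each patch site is four instructions long; copy in the variant
 * for the detected CPU type and flush the I-cache after each word.
 */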
p = &__cpuid_patch;
while (p < &__cpuid_patch_end) {
unsigned long addr = p->addr;
unsigned int *insns;
switch (tlb_type) {
case spitfire:
insns = &p->starfire[0];
break;
case cheetah:
case cheetah_plus:
if (is_jbus)
insns = &p->cheetah_jbus[0];
else
insns = &p->cheetah_safari[0];
break;
case hypervisor:
insns = &p->sun4v[0];
break;
default:
prom_printf("Unknown cpu type, halting.\n");
prom_halt();
}
*(unsigned int *) (addr + 0) = insns[0];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
*(unsigned int *) (addr + 4) = insns[1];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 4));
*(unsigned int *) (addr + 8) = insns[2];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 8));
*(unsigned int *) (addr + 12) = insns[3];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 12));
p++;
}
}
void __init sun4v_patch(void)
{
extern void sun4v_hvapi_init(void);
struct sun4v_1insn_patch_entry *p1;
struct sun4v_2insn_patch_entry *p2;
if (tlb_type != hypervisor)
return;
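/* Rewrite the one- and two-instruction patch sites with their
 * sun4v (hypervisor) equivalents.
 */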
p1 = &__sun4v_1insn_patch;
while (p1 < &__sun4v_1insn_patch_end) {
unsigned long addr = p1->addr;
*(unsigned int *) (addr + 0) = p1->insn;
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
p1++;
}
p2 = &__sun4v_2insn_patch;
while (p2 < &__sun4v_2insn_patch_end) {
unsigned long addr = p2->addr;
*(unsigned int *) (addr + 0) = p2->insns[0];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 0));
*(unsigned int *) (addr + 4) = p2->insns[1];
wmb();
__asm__ __volatile__("flush %0" : : "r" (addr + 4));
p2++;
}
sun4v_hvapi_init();
}
#ifdef CONFIG_SMP
void __init boot_cpu_id_too_large(int cpu)
{
prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
cpu, NR_CPUS);
prom_halt();
}
#endif
void __init setup_arch(char **cmdline_p)
{
/* Initialize PROM console and command line. */
*cmdline_p = prom_getbootargs();
strcpy(boot_command_line, *cmdline_p);
parse_early_param();
boot_flags_init(*cmdline_p);
register_console(&prom_early_console);
if (tlb_type == hypervisor)
printk("ARCH: SUN4V\n");
else
printk("ARCH: SUN4U\n");
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#elif defined(CONFIG_PROM_CONSOLE)
conswitchp = &prom_con;
#endif
idprom_init();
if (!root_flags)
root_mountflags &= ~MS_RDONLY;
ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif
task_thread_info(&init_task)->kregs = &fake_swapper_regs;
#ifdef CONFIG_IP_PNP
if (!ic_set_manually) {
int chosen = prom_finddevice ("/chosen");
u32 cl, sv, gw;
cl = prom_getintdefault (chosen, "client-ip", 0);
sv = prom_getintdefault (chosen, "server-ip", 0);
gw = prom_getintdefault (chosen, "gateway-ip", 0);
if (cl && sv) {
ic_myaddr = cl;
ic_servaddr = sv;
if (gw)
ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
ic_proto_enabled = 0;
#endif
}
}
#endif
/* Get boot processor trap_block[] setup. */
init_cur_cpu_trap(current_thread_info());
paging_init();
}
/* BUFFER is PAGE_SIZE bytes long. */
extern void smp_info(struct seq_file *);
extern void smp_bogo(struct seq_file *);
extern void mmu_info(struct seq_file *);
unsigned int dcache_parity_tl1_occurred;
unsigned int icache_parity_tl1_occurred;
int ncpus_probed;
static int show_cpuinfo(struct seq_file *m, void *__unused)
{
seq_printf(m,
"cpu\t\t: %s\n"
"fpu\t\t: %s\n"
"prom\t\t: %s\n"
"type\t\t: %s\n"
"ncpus probed\t: %d\n"
"ncpus active\t: %d\n"
"D$ parity tl1\t: %u\n"
"I$ parity tl1\t: %u\n"
#ifndef CONFIG_SMP
"Cpu0ClkTck\t: %016lx\n"
#endif
,
sparc_cpu_type,
sparc_fpu_type,
prom_version,
((tlb_type == hypervisor) ?
"sun4v" :
"sun4u"),
ncpus_probed,
num_online_cpus(),
dcache_parity_tl1_occurred,
icache_parity_tl1_occurred
#ifndef CONFIG_SMP
, cpu_data(0).clock_tick
#endif
);
#ifdef CONFIG_SMP
smp_bogo(m);
#endif
mmu_info(m);
#ifdef CONFIG_SMP
smp_info(m);
#endif
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
/* The pointer we are returning is arbitrary;
* it just has to be non-NULL and not IS_ERR
* in the success case.
*/
return *pos == 0 ? &c_start : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start =c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
extern int stop_a_enabled;
void sun_do_break(void)
{
if (!stop_a_enabled)
return;
prom_printf("\n");
flush_user_windows();
prom_cmdline();
}
int stop_a_enabled = 1;

View File

@@ -0,0 +1,899 @@
/* arch/sparc64/kernel/signal32.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/compat.h>
#include <linux/bitops.h>
#include <linux/tracehook.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/psrcompat.h>
#include <asm/fpumacro.h>
#include <asm/visasm.h>
#include <asm/compat_signal.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/* This magic should be in g_upper[0] for all upper parts
* to be valid.
*/
#define SIGINFO_EXTRA_V8PLUS_MAGIC 0x130e269
typedef struct {
unsigned int g_upper[8];
unsigned int o_upper[8];
unsigned int asi;
} siginfo_extra_v8plus_t;
struct signal_frame32 {
struct sparc_stackf32 ss;
__siginfo32_t info;
/* __siginfo_fpu32_t * */ u32 fpu_save;
unsigned int insns[2];
unsigned int extramask[_COMPAT_NSIG_WORDS - 1];
unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
/* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
siginfo_extra_v8plus_t v8plus;
__siginfo_fpu_t fpu_state;
};
typedef struct compat_siginfo {
int si_signo;
int si_errno;
int si_code;
union {
int _pad[SI_PAD_SIZE32];
/* kill() */
struct {
compat_pid_t _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
compat_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
compat_sigval_t _sigval; /* same as below */
int _sys_private; /* not to be passed to user */
} _timer;
/* POSIX.1b signals */
struct {
compat_pid_t _pid; /* sender's pid */
unsigned int _uid; /* sender's uid */
compat_sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
compat_pid_t _pid; /* which child */
unsigned int _uid; /* sender's uid */
int _status; /* exit code */
compat_clock_t _utime;
compat_clock_t _stime;
} _sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */
struct {
u32 _addr; /* faulting insn/memory ref. */
int _trapno;
} _sigfault;
/* SIGPOLL */
struct {
int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
int _fd;
} _sigpoll;
} _sifields;
} compat_siginfo_t;
struct rt_signal_frame32 {
struct sparc_stackf32 ss;
compat_siginfo_t info;
struct pt_regs32 regs;
compat_sigset_t mask;
/* __siginfo_fpu32_t * */ u32 fpu_save;
unsigned int insns[2];
stack_t32 stack;
unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */
/* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */
siginfo_extra_v8plus_t v8plus;
__siginfo_fpu_t fpu_state;
};
/* Align macros */
#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7)))
#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7)))
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
int err;
if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
return -EFAULT;
/* If you change the siginfo_t structure, be sure this code is
   fixed accordingly.  To avoid leaking kernel memory it must never
   copy any padding contained in the structure; it copies the
   generic three ints plus the relevant union member, converting
   from the 64-bit to the 32-bit layout at the same time. */
err = __put_user(from->si_signo, &to->si_signo);
err |= __put_user(from->si_errno, &to->si_errno);
err |= __put_user((short)from->si_code, &to->si_code);
if (from->si_code < 0)
err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
else {
switch (from->si_code >> 16) {
case __SI_TIMER >> 16:
err |= __put_user(from->si_tid, &to->si_tid);
err |= __put_user(from->si_overrun, &to->si_overrun);
err |= __put_user(from->si_int, &to->si_int);
break;
case __SI_CHLD >> 16:
err |= __put_user(from->si_utime, &to->si_utime);
err |= __put_user(from->si_stime, &to->si_stime);
err |= __put_user(from->si_status, &to->si_status);
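/* fall through: SIGCHLD also reports si_pid/si_uid below */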
default:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
break;
case __SI_FAULT >> 16:
err |= __put_user(from->si_trapno, &to->si_trapno);
err |= __put_user((unsigned long)from->si_addr, &to->si_addr);
break;
case __SI_POLL >> 16:
err |= __put_user(from->si_band, &to->si_band);
err |= __put_user(from->si_fd, &to->si_fd);
break;
case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
case __SI_MESGQ >> 16:
err |= __put_user(from->si_pid, &to->si_pid);
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
}
}
return err;
}
/* CAUTION: This is just a very minimalist implementation for the
* sake of compat_sys_rt_sigqueueinfo()
*/
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
if (!access_ok(VERIFY_WRITE, from, sizeof(compat_siginfo_t)))
return -EFAULT;
if (copy_from_user(to, from, 3*sizeof(int)) ||
copy_from_user(to->_sifields._pad, from->_sifields._pad,
SI_PAD_SIZE))
return -EFAULT;
return 0;
}
static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err;
err = __get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
if (fprs & FPRS_DL)
err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32));
err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
current_thread_info()->fpsaved[0] |= fprs;
return err;
}
void do_sigreturn32(struct pt_regs *regs)
{
struct signal_frame32 __user *sf;
unsigned int psr;
unsigned int pc, npc, fpu_save;
sigset_t set;
unsigned int seta[_COMPAT_NSIG_WORDS];
int err, i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
(((unsigned long) sf) & 3))
goto segv;
get_user(pc, &sf->info.si_regs.pc);
__get_user(npc, &sf->info.si_regs.npc);
if ((pc | npc) & 3)
goto segv;
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
npc &= 0xffffffff;
}
regs->tpc = pc;
regs->tnpc = npc;
/* 2. Restore the state */
err = __get_user(regs->y, &sf->info.si_regs.y);
err |= __get_user(psr, &sf->info.si_regs.psr);
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
err |= __get_user(i, &sf->v8plus.g_upper[0]);
if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
unsigned long asi;
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
err |= __get_user(asi, &sf->v8plus.asi);
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= ((asi & 0xffUL) << 24UL);
}
}
/* User can only change condition codes in %tstate. */
regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
regs->tstate |= psr_to_tstate_icc(psr);
/* Prevent syscall restart. */
pt_regs_clear_syscall(regs);
err |= __get_user(fpu_save, &sf->fpu_save);
if (fpu_save)
err |= restore_fpu_state32(regs, &sf->fpu_state);
err |= __get_user(seta[0], &sf->info.si_mask);
err |= copy_from_user(seta+1, &sf->extramask,
(_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
if (err)
goto segv;
switch (_NSIG_WORDS) {
case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
}
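/* The compat frame stores the mask as _COMPAT_NSIG_WORDS 32-bit
 * words; the switch above fuses each pair back into one 64-bit
 * sigset word.  For example, seta[0] == 0x00000001 and
 * seta[1] == 0x80000000 reassemble into
 * set.sig[0] == 0x8000000000000001UL.
 */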
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return;
segv:
force_sig(SIGSEGV, current);
}
asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
{
struct rt_signal_frame32 __user *sf;
unsigned int psr, pc, npc, fpu_save, u_ss_sp;
mm_segment_t old_fs;
sigset_t set;
compat_sigset_t seta;
stack_t st;
int err, i;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
(((unsigned long) sf) & 3))
goto segv;
get_user(pc, &sf->regs.pc);
__get_user(npc, &sf->regs.npc);
if ((pc | npc) & 3)
goto segv;
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
npc &= 0xffffffff;
}
regs->tpc = pc;
regs->tnpc = npc;
/* 2. Restore the state */
err = __get_user(regs->y, &sf->regs.y);
err |= __get_user(psr, &sf->regs.psr);
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(regs->u_regs[i], &sf->regs.u_regs[i]);
if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) {
err |= __get_user(i, &sf->v8plus.g_upper[0]);
if (i == SIGINFO_EXTRA_V8PLUS_MAGIC) {
unsigned long asi;
for (i = UREG_G1; i <= UREG_I7; i++)
err |= __get_user(((u32 *)regs->u_regs)[2*i], &sf->v8plus.g_upper[i]);
err |= __get_user(asi, &sf->v8plus.asi);
regs->tstate &= ~TSTATE_ASI;
regs->tstate |= ((asi & 0xffUL) << 24UL);
}
}
/* User can only change condition codes in %tstate. */
regs->tstate &= ~(TSTATE_ICC|TSTATE_XCC);
regs->tstate |= psr_to_tstate_icc(psr);
/* Prevent syscall restart. */
pt_regs_clear_syscall(regs);
err |= __get_user(fpu_save, &sf->fpu_save);
if (fpu_save)
err |= restore_fpu_state32(regs, &sf->fpu_state);
err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t));
err |= __get_user(u_ss_sp, &sf->stack.ss_sp);
st.ss_sp = compat_ptr(u_ss_sp);
err |= __get_user(st.ss_flags, &sf->stack.ss_flags);
err |= __get_user(st.ss_size, &sf->stack.ss_size);
if (err)
goto segv;
/* It is more difficult to avoid calling this function than to
call it and ignore errors. */
old_fs = get_fs();
set_fs(KERNEL_DS);
do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf);
set_fs(old_fs);
switch (_NSIG_WORDS) {
case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
}
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return;
segv:
force_sig(SIGSEGV, current);
}
/* Checks whether the frame pointer is valid */
static int invalid_frame_pointer(void __user *fp, int fplen)
{
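/* A compat signal frame must be 8-byte aligned and must fit
 * entirely below the 4GB boundary a 32-bit task can address.
 */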
if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
return 1;
return 0;
}
static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp;
regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
sp = regs->u_regs[UREG_FP];
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
return (void __user *) -1L;
/* This is the X/Open sanctioned signal stack switching. */
if (sa->sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform-specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~7UL;
return (void __user *)(sp - framesize);
}
static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err = 0;
fprs = current_thread_info()->fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
(sizeof(unsigned int) * 32));
err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
err |= __put_user(fprs, &fpu->si_fprs);
return err;
}
static void setup_frame32(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset)
{
struct signal_frame32 __user *sf;
int sigframe_size;
u32 psr;
int i, err;
unsigned int seta[_COMPAT_NSIG_WORDS];
/* 1. Make sure everything is clean */
synchronize_user_stack();
save_and_clear_fpu();
sigframe_size = SF_ALIGNEDSZ;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
sigframe_size -= sizeof(__siginfo_fpu_t);
sf = (struct signal_frame32 __user *)
get_sigframe(&ka->sa, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size))
goto sigill;
if (get_thread_wsaved() != 0)
goto sigill;
/* 2. Save the current process state */
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
err = put_user(regs->tpc, &sf->info.si_regs.pc);
err |= __put_user(regs->tnpc, &sf->info.si_regs.npc);
err |= __put_user(regs->y, &sf->info.si_regs.y);
psr = tstate_to_psr(regs->tstate);
if (current_thread_info()->fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &sf->info.si_regs.psr);
for (i = 0; i < 16; i++)
err |= __put_user(regs->u_regs[i], &sf->info.si_regs.u_regs[i]);
err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
for (i = 1; i < 16; i++)
err |= __put_user(((u32 *)regs->u_regs)[2*i],
&sf->v8plus.g_upper[i]);
err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
&sf->v8plus.asi);
if (psr & PSR_EF) {
err |= save_fpu_state32(regs, &sf->fpu_state);
err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
} else {
err |= __put_user(0, &sf->fpu_save);
}
switch (_NSIG_WORDS) {
case 4: seta[7] = (oldset->sig[3] >> 32);
seta[6] = oldset->sig[3];
case 3: seta[5] = (oldset->sig[2] >> 32);
seta[4] = oldset->sig[2];
case 2: seta[3] = (oldset->sig[1] >> 32);
seta[2] = oldset->sig[1];
case 1: seta[1] = (oldset->sig[0] >> 32);
seta[0] = oldset->sig[0];
}
err |= __put_user(seta[0], &sf->info.si_mask);
err |= __copy_to_user(sf->extramask, seta + 1,
(_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
err |= copy_in_user((u32 __user *)sf,
(u32 __user *)(regs->u_regs[UREG_FP]),
sizeof(struct reg_window32));
if (err)
goto sigsegv;
/* 3. signal handler back-trampoline and parameters */
regs->u_regs[UREG_FP] = (unsigned long) sf;
regs->u_regs[UREG_I0] = signo;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
/* 4. signal handler */
regs->tpc = (unsigned long) ka->sa.sa_handler;
regs->tnpc = (regs->tpc + 4);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
/* 5. return to kernel instructions */
if (ka->ka_restorer) {
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
} else {
/* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
pgd_t *pgdp = pgd_offset(current->mm, address);
pud_t *pudp = pud_offset(pgdp, address);
pmd_t *pmdp = pmd_offset(pudp, address);
pte_t *ptep;
pte_t pte;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
err = __put_user(0x821020d8, &sf->insns[0]); /* mov __NR_sigreturn, %g1 */
err |= __put_user(0x91d02010, &sf->insns[1]); /* t 0x10 */
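/* These are raw SPARC format-3 instruction encodings: 0x821020d8
 * is "or %g0, 0xd8, %g1" (0xd8 == 216 == __NR_sigreturn), which
 * the assembler spells "mov", and 0x91d02010 is "ta 0x10", the
 * software trap used for Linux system calls.
 */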
if (err)
goto sigsegv;
preempt_disable();
ptep = pte_offset_map(pmdp, address);
pte = *ptep;
if (pte_present(pte)) {
unsigned long page = (unsigned long)
page_address(pte_page(pte));
wmb();
__asm__ __volatile__("flush %0 + %1"
: /* no outputs */
: "r" (page),
"r" (address & (PAGE_SIZE - 1))
: "memory");
}
pte_unmap(ptep);
preempt_enable();
}
return;
sigill:
do_exit(SIGILL);
sigsegv:
force_sigsegv(signo, current);
}
static void setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs,
unsigned long signr, sigset_t *oldset,
siginfo_t *info)
{
struct rt_signal_frame32 __user *sf;
int sigframe_size;
u32 psr;
int i, err;
compat_sigset_t seta;
/* 1. Make sure everything is clean */
synchronize_user_stack();
save_and_clear_fpu();
sigframe_size = RT_ALIGNEDSZ;
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
sigframe_size -= sizeof(__siginfo_fpu_t);
sf = (struct rt_signal_frame32 __user *)
get_sigframe(&ka->sa, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size))
goto sigill;
if (get_thread_wsaved() != 0)
goto sigill;
/* 2. Save the current process state */
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
err = put_user(regs->tpc, &sf->regs.pc);
err |= __put_user(regs->tnpc, &sf->regs.npc);
err |= __put_user(regs->y, &sf->regs.y);
psr = tstate_to_psr(regs->tstate);
if (current_thread_info()->fpsaved[0] & FPRS_FEF)
psr |= PSR_EF;
err |= __put_user(psr, &sf->regs.psr);
for (i = 0; i < 16; i++)
err |= __put_user(regs->u_regs[i], &sf->regs.u_regs[i]);
err |= __put_user(sizeof(siginfo_extra_v8plus_t), &sf->extra_size);
err |= __put_user(SIGINFO_EXTRA_V8PLUS_MAGIC, &sf->v8plus.g_upper[0]);
for (i = 1; i < 16; i++)
err |= __put_user(((u32 *)regs->u_regs)[2*i],
&sf->v8plus.g_upper[i]);
err |= __put_user((regs->tstate & TSTATE_ASI) >> 24UL,
&sf->v8plus.asi);
if (psr & PSR_EF) {
err |= save_fpu_state32(regs, &sf->fpu_state);
err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
} else {
err |= __put_user(0, &sf->fpu_save);
}
/* Update the siginfo structure. */
err |= copy_siginfo_to_user32(&sf->info, info);
/* Setup sigaltstack */
err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
switch (_NSIG_WORDS) {
case 4: seta.sig[7] = (oldset->sig[3] >> 32);
seta.sig[6] = oldset->sig[3];
case 3: seta.sig[5] = (oldset->sig[2] >> 32);
seta.sig[4] = oldset->sig[2];
case 2: seta.sig[3] = (oldset->sig[1] >> 32);
seta.sig[2] = oldset->sig[1];
case 1: seta.sig[1] = (oldset->sig[0] >> 32);
seta.sig[0] = oldset->sig[0];
}
err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
err |= copy_in_user((u32 __user *)sf,
(u32 __user *)(regs->u_regs[UREG_FP]),
sizeof(struct reg_window32));
if (err)
goto sigsegv;
/* 3. signal handler back-trampoline and parameters */
regs->u_regs[UREG_FP] = (unsigned long) sf;
regs->u_regs[UREG_I0] = signr;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
regs->u_regs[UREG_I2] = (unsigned long) &sf->regs;
/* 4. signal handler */
regs->tpc = (unsigned long) ka->sa.sa_handler;
regs->tnpc = (regs->tpc + 4);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
/* 5. return to kernel instructions */
if (ka->ka_restorer)
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
else {
/* Flush instruction space. */
unsigned long address = ((unsigned long)&(sf->insns[0]));
pgd_t *pgdp = pgd_offset(current->mm, address);
pud_t *pudp = pud_offset(pgdp, address);
pmd_t *pmdp = pmd_offset(pudp, address);
pte_t *ptep;
regs->u_regs[UREG_I7] = (unsigned long) (&(sf->insns[0]) - 2);
/* mov __NR_rt_sigreturn, %g1 */
err |= __put_user(0x82102065, &sf->insns[0]);
/* t 0x10 */
err |= __put_user(0x91d02010, &sf->insns[1]);
if (err)
goto sigsegv;
preempt_disable();
ptep = pte_offset_map(pmdp, address);
if (pte_present(*ptep)) {
unsigned long page = (unsigned long)
page_address(pte_page(*ptep));
wmb();
__asm__ __volatile__("flush %0 + %1"
: /* no outputs */
: "r" (page),
"r" (address & (PAGE_SIZE - 1))
: "memory");
}
pte_unmap(ptep);
preempt_enable();
}
return;
sigill:
do_exit(SIGILL);
sigsegv:
force_sigsegv(signr, current);
}
static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
if (ka->sa.sa_flags & SA_SIGINFO)
setup_rt_frame32(ka, regs, signr, oldset, info);
else
setup_frame32(ka, regs, signr, oldset);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
struct sigaction *sa)
{
switch (regs->u_regs[UREG_I0]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
no_system_call_restart:
regs->u_regs[UREG_I0] = EINTR;
regs->tstate |= TSTATE_ICARRY;
break;
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
/* fallthrough */
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
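/* Backing the PC and nPC up by one instruction re-executes the
 * trap that entered the kernel, replaying the system call with
 * its original first argument.
 */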
regs->tpc -= 4;
regs->tnpc -= 4;
}
}
/* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
void do_signal32(sigset_t *oldset, struct pt_regs * regs,
int restart_syscall, unsigned long orig_i0)
{
struct k_sigaction ka;
siginfo_t info;
int signr;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
/* If the debugger messes with the program counter, it clears
* the "in syscall" bit, directing us to not perform a syscall
* restart.
*/
if (restart_syscall && !pt_regs_is_syscall(regs))
restart_syscall = 0;
if (signr > 0) {
if (restart_syscall)
syscall_restart32(orig_i0, regs, &ka.sa);
handle_signal32(signr, &ka, &info, oldset, regs);
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs, 0);
return;
}
if (restart_syscall &&
(regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
regs->u_regs[UREG_I0] == ERESTARTSYS ||
regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
/* replay the system call when we are done */
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
}
/* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
struct sigstack32 {
u32 the_stack;
int cur_status;
};
asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp)
{
struct sigstack32 __user *ssptr =
(struct sigstack32 __user *)((unsigned long)(u_ssptr));
struct sigstack32 __user *ossptr =
(struct sigstack32 __user *)((unsigned long)(u_ossptr));
int ret = -EFAULT;
/* First see if old state is wanted. */
if (ossptr) {
if (put_user(current->sas_ss_sp + current->sas_ss_size,
&ossptr->the_stack) ||
__put_user(on_sig_stack(sp), &ossptr->cur_status))
goto out;
}
/* Now see if we want to update the new state. */
if (ssptr) {
u32 ss_sp;
if (get_user(ss_sp, &ssptr->the_stack))
goto out;
/* If the current stack was set with sigaltstack, don't
* swap stacks while we are on it.
*/
ret = -EPERM;
if (current->sas_ss_sp && on_sig_stack(sp))
goto out;
/* Since we don't know the extent of the stack, and we don't
* track onstack-ness, but rather calculate it, we must
* presume a size. Ho hum, this interface is lossy.
*/
current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
current->sas_ss_size = SIGSTKSZ;
}
ret = 0;
out:
return ret;
}
asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp)
{
stack_t uss, uoss;
u32 u_ss_sp = 0;
int ret;
mm_segment_t old_fs;
stack_t32 __user *uss32 = compat_ptr(ussa);
stack_t32 __user *uoss32 = compat_ptr(uossa);
if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) ||
__get_user(uss.ss_flags, &uss32->ss_flags) ||
__get_user(uss.ss_size, &uss32->ss_size)))
return -EFAULT;
uss.ss_sp = compat_ptr(u_ss_sp);
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL,
uossa ? (stack_t __user *) &uoss : NULL, sp);
set_fs(old_fs);
if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) ||
__put_user(uoss.ss_flags, &uoss32->ss_flags) ||
__put_user(uoss.ss_size, &uoss32->ss_size)))
return -EFAULT;
return ret;
}

@@ -0,0 +1,617 @@
/*
* arch/sparc64/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1995, 2008 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx)
* Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#ifdef CONFIG_COMPAT
#include <linux/compat.h> /* for compat_old_sigset_t */
#endif
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/tracehook.h>
#include <linux/unistd.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/pgtable.h>
#include <asm/fpumacro.h>
#include <asm/uctx.h>
#include <asm/siginfo.h>
#include <asm/visasm.h>
#include "entry.h"
#include "systbls.h"
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
/* {set, get}context() needed for 64-bit SparcLinux userland. */
asmlinkage void sparc64_set_context(struct pt_regs *regs)
{
struct ucontext __user *ucp = (struct ucontext __user *)
regs->u_regs[UREG_I0];
mc_gregset_t __user *grp;
unsigned long pc, npc, tstate;
unsigned long fp, i7;
unsigned char fenab;
int err;
flush_user_windows();
if (get_thread_wsaved() ||
(((unsigned long)ucp) & (sizeof(unsigned long)-1)) ||
(!__access_ok(ucp, sizeof(*ucp))))
goto do_sigsegv;
grp = &ucp->uc_mcontext.mc_gregs;
err = __get_user(pc, &((*grp)[MC_PC]));
err |= __get_user(npc, &((*grp)[MC_NPC]));
if (err || ((pc | npc) & 3))
goto do_sigsegv;
if (regs->u_regs[UREG_I1]) {
sigset_t set;
if (_NSIG_WORDS == 1) {
if (__get_user(set.sig[0], &ucp->uc_sigmask.sig[0]))
goto do_sigsegv;
} else {
if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
goto do_sigsegv;
}
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
if (test_thread_flag(TIF_32BIT)) {
pc &= 0xffffffff;
npc &= 0xffffffff;
}
regs->tpc = pc;
regs->tnpc = npc;
err |= __get_user(regs->y, &((*grp)[MC_Y]));
err |= __get_user(tstate, &((*grp)[MC_TSTATE]));
regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
err |= __get_user(regs->u_regs[UREG_G1], (&(*grp)[MC_G1]));
err |= __get_user(regs->u_regs[UREG_G2], (&(*grp)[MC_G2]));
err |= __get_user(regs->u_regs[UREG_G3], (&(*grp)[MC_G3]));
err |= __get_user(regs->u_regs[UREG_G4], (&(*grp)[MC_G4]));
err |= __get_user(regs->u_regs[UREG_G5], (&(*grp)[MC_G5]));
err |= __get_user(regs->u_regs[UREG_G6], (&(*grp)[MC_G6]));
/* Skip %g7 as that's the thread register in userspace. */
err |= __get_user(regs->u_regs[UREG_I0], (&(*grp)[MC_O0]));
err |= __get_user(regs->u_regs[UREG_I1], (&(*grp)[MC_O1]));
err |= __get_user(regs->u_regs[UREG_I2], (&(*grp)[MC_O2]));
err |= __get_user(regs->u_regs[UREG_I3], (&(*grp)[MC_O3]));
err |= __get_user(regs->u_regs[UREG_I4], (&(*grp)[MC_O4]));
err |= __get_user(regs->u_regs[UREG_I5], (&(*grp)[MC_O5]));
err |= __get_user(regs->u_regs[UREG_I6], (&(*grp)[MC_O6]));
err |= __get_user(regs->u_regs[UREG_I7], (&(*grp)[MC_O7]));
err |= __get_user(fp, &(ucp->uc_mcontext.mc_fp));
err |= __get_user(i7, &(ucp->uc_mcontext.mc_i7));
err |= __put_user(fp,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
err |= __put_user(i7,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
err |= __get_user(fenab, &(ucp->uc_mcontext.mc_fpregs.mcfpu_enab));
if (fenab) {
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
fprs_write(0);
err |= __get_user(fprs, &(ucp->uc_mcontext.mc_fpregs.mcfpu_fprs));
if (fprs & FPRS_DL)
err |= copy_from_user(fpregs,
&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs),
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16,
((unsigned long __user *)&(ucp->uc_mcontext.mc_fpregs.mcfpu_fregs))+16,
(sizeof(unsigned int) * 32));
err |= __get_user(current_thread_info()->xfsr[0],
&(ucp->uc_mcontext.mc_fpregs.mcfpu_fsr));
err |= __get_user(current_thread_info()->gsr[0],
&(ucp->uc_mcontext.mc_fpregs.mcfpu_gsr));
regs->tstate &= ~TSTATE_PEF;
}
if (err)
goto do_sigsegv;
return;
do_sigsegv:
force_sig(SIGSEGV, current);
}
asmlinkage void sparc64_get_context(struct pt_regs *regs)
{
struct ucontext __user *ucp = (struct ucontext __user *)
regs->u_regs[UREG_I0];
mc_gregset_t __user *grp;
mcontext_t __user *mcp;
unsigned long fp, i7;
unsigned char fenab;
int err;
synchronize_user_stack();
if (get_thread_wsaved() || clear_user(ucp, sizeof(*ucp)))
goto do_sigsegv;
#if 1
fenab = 0; /* IMO get_context is like any other system call, thus modifies FPU state -jj */
#else
fenab = (current_thread_info()->fpsaved[0] & FPRS_FEF);
#endif
mcp = &ucp->uc_mcontext;
grp = &mcp->mc_gregs;
/* Skip over the trap instruction first. */
if (test_thread_flag(TIF_32BIT)) {
regs->tpc = (regs->tnpc & 0xffffffff);
regs->tnpc = (regs->tnpc + 4) & 0xffffffff;
} else {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
}
err = 0;
if (_NSIG_WORDS == 1)
err |= __put_user(current->blocked.sig[0],
(unsigned long __user *)&ucp->uc_sigmask);
else
err |= __copy_to_user(&ucp->uc_sigmask, &current->blocked,
sizeof(sigset_t));
err |= __put_user(regs->tstate, &((*grp)[MC_TSTATE]));
err |= __put_user(regs->tpc, &((*grp)[MC_PC]));
err |= __put_user(regs->tnpc, &((*grp)[MC_NPC]));
err |= __put_user(regs->y, &((*grp)[MC_Y]));
err |= __put_user(regs->u_regs[UREG_G1], &((*grp)[MC_G1]));
err |= __put_user(regs->u_regs[UREG_G2], &((*grp)[MC_G2]));
err |= __put_user(regs->u_regs[UREG_G3], &((*grp)[MC_G3]));
err |= __put_user(regs->u_regs[UREG_G4], &((*grp)[MC_G4]));
err |= __put_user(regs->u_regs[UREG_G5], &((*grp)[MC_G5]));
err |= __put_user(regs->u_regs[UREG_G6], &((*grp)[MC_G6]));
err |= __put_user(regs->u_regs[UREG_G7], &((*grp)[MC_G7]));
err |= __put_user(regs->u_regs[UREG_I0], &((*grp)[MC_O0]));
err |= __put_user(regs->u_regs[UREG_I1], &((*grp)[MC_O1]));
err |= __put_user(regs->u_regs[UREG_I2], &((*grp)[MC_O2]));
err |= __put_user(regs->u_regs[UREG_I3], &((*grp)[MC_O3]));
err |= __put_user(regs->u_regs[UREG_I4], &((*grp)[MC_O4]));
err |= __put_user(regs->u_regs[UREG_I5], &((*grp)[MC_O5]));
err |= __put_user(regs->u_regs[UREG_I6], &((*grp)[MC_O6]));
err |= __put_user(regs->u_regs[UREG_I7], &((*grp)[MC_O7]));
err |= __get_user(fp,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[6])));
err |= __get_user(i7,
(&(((struct reg_window __user *)(STACK_BIAS+regs->u_regs[UREG_I6]))->ins[7])));
err |= __put_user(fp, &(mcp->mc_fp));
err |= __put_user(i7, &(mcp->mc_i7));
err |= __put_user(fenab, &(mcp->mc_fpregs.mcfpu_enab));
if (fenab) {
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
fprs = current_thread_info()->fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&(mcp->mc_fpregs.mcfpu_fregs), fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(
((unsigned long __user *)&(mcp->mc_fpregs.mcfpu_fregs))+16, fpregs+16,
(sizeof(unsigned int) * 32));
err |= __put_user(current_thread_info()->xfsr[0], &(mcp->mc_fpregs.mcfpu_fsr));
err |= __put_user(current_thread_info()->gsr[0], &(mcp->mc_fpregs.mcfpu_gsr));
err |= __put_user(fprs, &(mcp->mc_fpregs.mcfpu_fprs));
}
if (err)
goto do_sigsegv;
return;
do_sigsegv:
force_sig(SIGSEGV, current);
}
struct rt_signal_frame {
struct sparc_stackf ss;
siginfo_t info;
struct pt_regs regs;
__siginfo_fpu_t __user *fpu_save;
stack_t stack;
sigset_t mask;
__siginfo_fpu_t fpu_state;
};
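/* Common body of sigpause() and sigsuspend(): install the
 * caller's mask, sleep until a signal arrives, and return
 * -ERESTARTNOHAND so a caught signal's handler runs and the
 * original mask is restored afterwards.
 */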
static long _sigpause_common(old_sigset_t set)
{
set &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
siginitset(&current->blocked, set);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_restore_sigmask();
return -ERESTARTNOHAND;
}
asmlinkage long sys_sigpause(unsigned int set)
{
return _sigpause_common(set);
}
asmlinkage long sys_sigsuspend(old_sigset_t set)
{
return _sigpause_common(set);
}
static inline int
restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err;
err = __get_user(fprs, &fpu->si_fprs);
fprs_write(0);
regs->tstate &= ~TSTATE_PEF;
if (fprs & FPRS_DL)
err |= copy_from_user(fpregs, &fpu->si_float_regs[0],
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32],
(sizeof(unsigned int) * 32));
err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr);
current_thread_info()->fpsaved[0] |= fprs;
return err;
}
void do_rt_sigreturn(struct pt_regs *regs)
{
struct rt_signal_frame __user *sf;
unsigned long tpc, tnpc, tstate;
__siginfo_fpu_t __user *fpu_save;
sigset_t set;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
synchronize_user_stack();
sf = (struct rt_signal_frame __user *)
(regs->u_regs[UREG_FP] + STACK_BIAS);
/* 1. Make sure we are not getting garbage from the user */
if (((unsigned long) sf) & 3)
goto segv;
err = get_user(tpc, &sf->regs.tpc);
err |= __get_user(tnpc, &sf->regs.tnpc);
if (test_thread_flag(TIF_32BIT)) {
tpc &= 0xffffffff;
tnpc &= 0xffffffff;
}
err |= ((tpc | tnpc) & 3);
/* 2. Restore the state */
err |= __get_user(regs->y, &sf->regs.y);
err |= __get_user(tstate, &sf->regs.tstate);
err |= copy_from_user(regs->u_regs, sf->regs.u_regs, sizeof(regs->u_regs));
/* User can only change condition codes and %asi in %tstate. */
regs->tstate &= ~(TSTATE_ASI | TSTATE_ICC | TSTATE_XCC);
regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC));
err |= __get_user(fpu_save, &sf->fpu_save);
if (fpu_save)
err |= restore_fpu_state(regs, &sf->fpu_state);
err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
if (err)
goto segv;
regs->tpc = tpc;
regs->tnpc = tnpc;
/* Prevent syscall restart. */
pt_regs_clear_syscall(regs);
sigdelsetmask(&set, ~_BLOCKABLE);
spin_lock_irq(&current->sighand->siglock);
current->blocked = set;
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
return;
segv:
force_sig(SIGSEGV, current);
}
/* Checks whether the frame pointer is valid */
static int invalid_frame_pointer(void __user *fp, int fplen)
{
if (((unsigned long) fp) & 7)
return 1;
return 0;
}
static inline int
save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
{
unsigned long *fpregs = current_thread_info()->fpregs;
unsigned long fprs;
int err = 0;
fprs = current_thread_info()->fpsaved[0];
if (fprs & FPRS_DL)
err |= copy_to_user(&fpu->si_float_regs[0], fpregs,
(sizeof(unsigned int) * 32));
if (fprs & FPRS_DU)
err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16,
(sizeof(unsigned int) * 32));
err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr);
err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr);
err |= __put_user(fprs, &fpu->si_fprs);
return err;
}
static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize)
{
unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
/*
* If we are on the alternate signal stack and would overflow it, don't.
* Return an always-bogus address instead so we will die with SIGSEGV.
*/
if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
return (void __user *) -1L;
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (sas_ss_flags(sp) == 0)
sp = current->sas_ss_sp + current->sas_ss_size;
}
/* Always align the stack frame. This handles two cases. First,
* sigaltstack need not be mindful of platform-specific stack
* alignment. Second, if we took this signal because the stack
* is not aligned properly, we'd like to take the signal cleanly
* and report that.
*/
sp &= ~7UL;
return (void __user *)(sp - framesize);
}
static inline void
setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs,
int signo, sigset_t *oldset, siginfo_t *info)
{
struct rt_signal_frame __user *sf;
int sigframe_size, err;
/* 1. Make sure everything is clean */
synchronize_user_stack();
save_and_clear_fpu();
sigframe_size = sizeof(struct rt_signal_frame);
if (!(current_thread_info()->fpsaved[0] & FPRS_FEF))
sigframe_size -= sizeof(__siginfo_fpu_t);
sf = (struct rt_signal_frame __user *)
get_sigframe(ka, regs, sigframe_size);
if (invalid_frame_pointer(sf, sigframe_size))
goto sigill;
if (get_thread_wsaved() != 0)
goto sigill;
/* 2. Save the current process state */
err = copy_to_user(&sf->regs, regs, sizeof(*regs));
if (current_thread_info()->fpsaved[0] & FPRS_FEF) {
err |= save_fpu_state(regs, &sf->fpu_state);
err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save);
} else {
err |= __put_user(0, &sf->fpu_save);
}
/* Setup sigaltstack */
err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags);
err |= __put_user(current->sas_ss_size, &sf->stack.ss_size);
err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t));
err |= copy_in_user((u64 __user *)sf,
(u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS),
sizeof(struct reg_window));
if (info)
err |= copy_siginfo_to_user(&sf->info, info);
else {
err |= __put_user(signo, &sf->info.si_signo);
err |= __put_user(SI_NOINFO, &sf->info.si_code);
}
if (err)
goto sigsegv;
/* 3. signal handler back-trampoline and parameters */
regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS;
regs->u_regs[UREG_I0] = signo;
regs->u_regs[UREG_I1] = (unsigned long) &sf->info;
/* The sigcontext is passed in this way because of how it
* is defined in GLIBC's /usr/include/bits/sigcontext.h
* for sparc64. It includes the 128 bytes of siginfo_t.
*/
regs->u_regs[UREG_I2] = (unsigned long) &sf->info;
/* 4. signal handler */
regs->tpc = (unsigned long) ka->sa.sa_handler;
regs->tnpc = (regs->tpc + 4);
if (test_thread_flag(TIF_32BIT)) {
regs->tpc &= 0xffffffff;
regs->tnpc &= 0xffffffff;
}
/* 5. return to kernel instructions */
regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer;
return;
sigill:
do_exit(SIGILL);
sigsegv:
force_sigsegv(signo, current);
}
static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
setup_rt_frame(ka, regs, signr, oldset,
(ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
spin_lock_irq(&current->sighand->siglock);
sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NOMASK))
sigaddset(&current->blocked, signr);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
}
static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
struct sigaction *sa)
{
switch (regs->u_regs[UREG_I0]) {
case ERESTART_RESTARTBLOCK:
case ERESTARTNOHAND:
no_system_call_restart:
regs->u_regs[UREG_I0] = EINTR;
regs->tstate |= (TSTATE_ICARRY|TSTATE_XCARRY);
break;
case ERESTARTSYS:
if (!(sa->sa_flags & SA_RESTART))
goto no_system_call_restart;
/* fallthrough */
case ERESTARTNOINTR:
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
}
}
/* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
{
struct k_sigaction ka;
int restart_syscall;
sigset_t *oldset;
siginfo_t info;
int signr;
if (pt_regs_is_syscall(regs) &&
(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) {
restart_syscall = 1;
} else
restart_syscall = 0;
if (current_thread_info()->status & TS_RESTORE_SIGMASK)
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT)) {
extern void do_signal32(sigset_t *, struct pt_regs *,
int restart_syscall,
unsigned long orig_i0);
do_signal32(oldset, regs, restart_syscall, orig_i0);
return;
}
#endif
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
/* If the debugger messes with the program counter, it clears
* the software "in syscall" bit, directing us to not perform
* a syscall restart.
*/
if (restart_syscall && !pt_regs_is_syscall(regs))
restart_syscall = 0;
if (signr > 0) {
if (restart_syscall)
syscall_restart(orig_i0, regs, &ka.sa);
handle_signal(signr, &ka, &info, oldset, regs);
/* A signal was successfully delivered; the saved
* sigmask will have been stored in the signal frame,
* and will be restored by sigreturn, so we can simply
* clear the TS_RESTORE_SIGMASK flag.
*/
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
tracehook_signal_handler(signr, &info, &ka, regs, 0);
return;
}
if (restart_syscall &&
(regs->u_regs[UREG_I0] == ERESTARTNOHAND ||
regs->u_regs[UREG_I0] == ERESTARTSYS ||
regs->u_regs[UREG_I0] == ERESTARTNOINTR)) {
/* replay the system call when we are done */
regs->u_regs[UREG_I0] = orig_i0;
regs->tpc -= 4;
regs->tnpc -= 4;
}
if (restart_syscall &&
regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) {
regs->u_regs[UREG_G1] = __NR_restart_syscall;
regs->tpc -= 4;
regs->tnpc -= 4;
}
/* If there's no signal to deliver, we just put the saved sigmask
* back
*/
if (current_thread_info()->status & TS_RESTORE_SIGMASK) {
current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
{
if (thread_info_flags & _TIF_SIGPENDING)
do_signal(regs, orig_i0);
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
}
}

arch/sparc/kernel/smp_64.c (new file, 1408 lines): diff suppressed because it is too large

@@ -61,7 +61,6 @@ extern void (*bzero_1page)(void *);
extern void *__bzero(void *, size_t);
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
extern int __memcmp(const void *, const void *, __kernel_size_t);
extern int __strncmp(const char *, const char *, __kernel_size_t);
extern int __ashrdi3(int, int);
@@ -118,10 +117,8 @@ EXPORT_SYMBOL(synchronize_irq);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(rtc_lock);
#ifdef CONFIG_SUN_AUXIO
EXPORT_SYMBOL(set_auxio);
EXPORT_SYMBOL(get_auxio);
#endif
EXPORT_SYMBOL(io_remap_pfn_range);
#ifndef CONFIG_SMP
@@ -209,7 +206,6 @@ EXPORT_SYMBOL(bzero_1page);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__memscan_zero);
EXPORT_SYMBOL(__memscan_generic);
EXPORT_SYMBOL(__memcmp);
EXPORT_SYMBOL(__strncmp);
EXPORT_SYMBOL(__memmove);

@@ -0,0 +1,284 @@
/* arch/sparc64/kernel/sparc64_ksyms.c: Sparc64 specific ksyms support.
*
* Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net)
* Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*/
/* Tell string.h we don't want memcpy etc. as cpp defines */
#define EXPORT_SYMTAB_STROPS
#define PROMLIB_INTERNAL
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/in6.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/fs_struct.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/rwsem.h>
#include <net/compat.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/idprom.h>
#include <asm/elf.h>
#include <asm/head.h>
#include <asm/smp.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/checksum.h>
#include <asm/fpumacro.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SBUS
#include <asm/dma.h>
#endif
#include <asm/ns87303.h>
#include <asm/timer.h>
#include <asm/cpudata.h>
#include <asm/ftrace.h>
#include <asm/hypervisor.h>
struct poll {
int fd;
short events;
short revents;
};
extern void die_if_kernel(char *str, struct pt_regs *regs);
extern pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
extern void *__bzero(void *, size_t);
extern void *__memscan_zero(void *, size_t);
extern void *__memscan_generic(void *, int, size_t);
extern __kernel_size_t strlen(const char *);
extern void sys_sigsuspend(void);
extern int compat_sys_ioctl(unsigned int fd, unsigned int cmd, u32 arg);
extern int (*handle_mathemu)(struct pt_regs *, struct fpustate *);
extern long sparc32_open(const char __user * filename, int flags, int mode);
extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
unsigned long pfn, unsigned long size, pgprot_t prot);
extern int __ashrdi3(int, int);
extern int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs);
extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *);
extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *, unsigned long *);
extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
unsigned long *);
extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *);
extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
unsigned long *, unsigned long *, unsigned long *);
/* Per-CPU information table */
EXPORT_PER_CPU_SYMBOL(__cpu_data);
/* used by various drivers */
#ifdef CONFIG_SMP
/* Out of line rw-locking implementation. */
EXPORT_SYMBOL(__read_lock);
EXPORT_SYMBOL(__read_unlock);
EXPORT_SYMBOL(__write_lock);
EXPORT_SYMBOL(__write_unlock);
EXPORT_SYMBOL(__write_trylock);
#endif /* CONFIG_SMP */
#ifdef CONFIG_MCOUNT
EXPORT_SYMBOL(_mcount);
#endif
EXPORT_SYMBOL(sparc64_get_clock_tick);
/* RW semaphores */
EXPORT_SYMBOL(__down_read);
EXPORT_SYMBOL(__down_read_trylock);
EXPORT_SYMBOL(__down_write);
EXPORT_SYMBOL(__down_write_trylock);
EXPORT_SYMBOL(__up_read);
EXPORT_SYMBOL(__up_write);
EXPORT_SYMBOL(__downgrade_write);
/* Atomic counter implementation. */
EXPORT_SYMBOL(atomic_add);
EXPORT_SYMBOL(atomic_add_ret);
EXPORT_SYMBOL(atomic_sub);
EXPORT_SYMBOL(atomic_sub_ret);
EXPORT_SYMBOL(atomic64_add);
EXPORT_SYMBOL(atomic64_add_ret);
EXPORT_SYMBOL(atomic64_sub);
EXPORT_SYMBOL(atomic64_sub_ret);
/* Atomic bit operations. */
EXPORT_SYMBOL(test_and_set_bit);
EXPORT_SYMBOL(test_and_clear_bit);
EXPORT_SYMBOL(test_and_change_bit);
EXPORT_SYMBOL(set_bit);
EXPORT_SYMBOL(clear_bit);
EXPORT_SYMBOL(change_bit);
EXPORT_SYMBOL(__flushw_user);
EXPORT_SYMBOL(tlb_type);
EXPORT_SYMBOL(sun4v_chip_type);
EXPORT_SYMBOL(get_fb_unmapped_area);
EXPORT_SYMBOL(flush_icache_range);
EXPORT_SYMBOL(flush_dcache_page);
#ifdef DCACHE_ALIASING_POSSIBLE
EXPORT_SYMBOL(__flush_dcache_range);
#endif
EXPORT_SYMBOL(sun4v_niagara_getperf);
EXPORT_SYMBOL(sun4v_niagara_setperf);
EXPORT_SYMBOL(sun4v_niagara2_getperf);
EXPORT_SYMBOL(sun4v_niagara2_setperf);
EXPORT_SYMBOL(auxio_set_led);
EXPORT_SYMBOL(auxio_set_lte);
#ifdef CONFIG_SBUS
EXPORT_SYMBOL(sbus_set_sbus64);
#endif
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);
EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
#ifdef CONFIG_PCI
EXPORT_SYMBOL(pci_alloc_consistent);
EXPORT_SYMBOL(pci_free_consistent);
EXPORT_SYMBOL(pci_map_single);
EXPORT_SYMBOL(pci_unmap_single);
EXPORT_SYMBOL(pci_map_sg);
EXPORT_SYMBOL(pci_unmap_sg);
EXPORT_SYMBOL(pci_dma_sync_single_for_cpu);
EXPORT_SYMBOL(pci_dma_sync_sg_for_cpu);
EXPORT_SYMBOL(pci_dma_supported);
#endif
/* I/O device mmaping on Sparc64. */
EXPORT_SYMBOL(io_remap_pfn_range);
EXPORT_SYMBOL(dump_fpu);
/* math-emu wants this */
EXPORT_SYMBOL(die_if_kernel);
/* Kernel thread creation. */
EXPORT_SYMBOL(kernel_thread);
/* prom symbols */
EXPORT_SYMBOL(idprom);
EXPORT_SYMBOL(prom_root_node);
EXPORT_SYMBOL(prom_getchild);
EXPORT_SYMBOL(prom_getsibling);
EXPORT_SYMBOL(prom_searchsiblings);
EXPORT_SYMBOL(prom_firstprop);
EXPORT_SYMBOL(prom_nextprop);
EXPORT_SYMBOL(prom_getproplen);
EXPORT_SYMBOL(prom_getproperty);
EXPORT_SYMBOL(prom_node_has_property);
EXPORT_SYMBOL(prom_setprop);
EXPORT_SYMBOL(saved_command_line);
EXPORT_SYMBOL(prom_finddevice);
EXPORT_SYMBOL(prom_feval);
EXPORT_SYMBOL(prom_getbool);
EXPORT_SYMBOL(prom_getstring);
EXPORT_SYMBOL(prom_getint);
EXPORT_SYMBOL(prom_getintdefault);
EXPORT_SYMBOL(__prom_getchild);
EXPORT_SYMBOL(__prom_getsibling);
/* sparc library symbols */
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(__strlen_user);
EXPORT_SYMBOL(__strnlen_user);
/* Special internal versions of library functions. */
EXPORT_SYMBOL(_clear_page);
EXPORT_SYMBOL(clear_user_page);
EXPORT_SYMBOL(copy_user_page);
EXPORT_SYMBOL(__bzero);
EXPORT_SYMBOL(__memscan_zero);
EXPORT_SYMBOL(__memscan_generic);
EXPORT_SYMBOL(__memset);
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_nocheck);
EXPORT_SYMBOL(__csum_partial_copy_from_user);
EXPORT_SYMBOL(__csum_partial_copy_to_user);
EXPORT_SYMBOL(ip_fast_csum);
/* Moving data to/from/in userspace. */
EXPORT_SYMBOL(___copy_to_user);
EXPORT_SYMBOL(___copy_from_user);
EXPORT_SYMBOL(___copy_in_user);
EXPORT_SYMBOL(copy_to_user_fixup);
EXPORT_SYMBOL(copy_from_user_fixup);
EXPORT_SYMBOL(copy_in_user_fixup);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__clear_user);
/* Various address conversion macros use this. */
EXPORT_SYMBOL(sparc64_valid_addr_bitmap);
/* No version information on this, heavily used in inline asm,
* and will always be 'void __ret_efault(void)'.
*/
EXPORT_SYMBOL(__ret_efault);
/* No version information on these, as gcc produces such symbols. */
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(strncmp);
void VISenter(void);
/* RAID code needs this */
EXPORT_SYMBOL(VISenter);
/* for input/keybdev */
EXPORT_SYMBOL(sun_do_break);
EXPORT_SYMBOL(stop_a_enabled);
#ifdef CONFIG_DEBUG_BUGVERBOSE
EXPORT_SYMBOL(do_BUG);
#endif
/* for ns87303 */
EXPORT_SYMBOL(ns87303_lock);
EXPORT_SYMBOL(tick_ops);
EXPORT_SYMBOL(xor_vis_2);
EXPORT_SYMBOL(xor_vis_3);
EXPORT_SYMBOL(xor_vis_4);
EXPORT_SYMBOL(xor_vis_5);
EXPORT_SYMBOL(xor_niagara_2);
EXPORT_SYMBOL(xor_niagara_3);
EXPORT_SYMBOL(xor_niagara_4);
EXPORT_SYMBOL(xor_niagara_5);
EXPORT_SYMBOL_GPL(real_hard_smp_processor_id);

@@ -0,0 +1,245 @@
/* We need to carefully read the error status, ACK the errors,
* prevent recursive traps, and pass the information on to C
* code for logging.
*
* We pass the AFAR in as-is, and we encode the status
* information as described in asm-sparc64/sfafsr.h
*/
.type __spitfire_access_error,#function
__spitfire_access_error:
/* Disable ESTATE error reporting so that we do not take
* recursive traps and RED state the processor.
*/
stxa %g0, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
mov UDBE_UE, %g1
ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
/* __spitfire_cee_trap branches here with AFSR in %g4 and
* UDBE_CE in %g1. It only clears ESTATE_ERR_CE in the ESTATE
* Error Enable register.
*/
__spitfire_cee_trap_continue:
ldxa [%g0] ASI_AFAR, %g5 ! Get AFAR
rdpr %tt, %g3
and %g3, 0x1ff, %g3 ! Paranoia
sllx %g3, SFSTAT_TRAP_TYPE_SHIFT, %g3
or %g4, %g3, %g4
rdpr %tl, %g3
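/* Flag TL > 1: the delay-slot sllx always executes, but the
 * "or" merging the flag into %g4 is branched over when TL <= 1.
 */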
cmp %g3, 1
mov 1, %g3
bleu %xcc, 1f
sllx %g3, SFSTAT_TL_GT_ONE_SHIFT, %g3
or %g4, %g3, %g4
/* Read in the UDB error register state, clearing the sticky
* error bits as-needed. We only clear them if the UE bit is
* set. Likewise, __spitfire_cee_trap below will only do so
* if the CE bit is set.
*
* NOTE: UltraSparc-I/II have high and low UDB error
* registers, corresponding to the two UDB units
* present on those chips. UltraSparc-IIi only
* has a single UDB, called "SDB" in the manual.
* For IIi the upper UDB register always reads
* as zero so for our purposes things will just
* work with the checks below.
*/
1: ldxa [%g0] ASI_UDBH_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBH_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop
stxa %g3, [%g0] ASI_UDB_ERROR_W
membar #Sync
1: mov 0x18, %g3
ldxa [%g3] ASI_UDBL_ERROR_R, %g3
and %g3, 0x3ff, %g7 ! Paranoia
sllx %g7, SFSTAT_UDBL_SHIFT, %g7
or %g4, %g7, %g4
andcc %g3, %g1, %g3 ! UDBE_UE or UDBE_CE
be,pn %xcc, 1f
nop
mov 0x18, %g7
stxa %g3, [%g7] ASI_UDB_ERROR_W
membar #Sync
1: /* Ok, now that we've latched the error state, clear the
* sticky bits in the AFSR.
*/
stxa %g4, [%g0] ASI_AFSR
membar #Sync
rdpr %tl, %g2
cmp %g2, 1
rdpr %pil, %g2
bleu,pt %xcc, 1f
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etraptl1
rd %pc, %g7
ba,pt %xcc, 2f
nop
1: ba,pt %xcc, etrap_irq
rd %pc, %g7
2:
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
mov %l4, %o1
mov %l5, %o2
call spitfire_access_error
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size __spitfire_access_error,.-__spitfire_access_error
/* This is the trap handler entry point for ECC correctable
* errors. They are corrected, but we listen for the trap so
* that the event can be logged.
*
* Disrupting errors are either:
* 1) single-bit ECC errors during UDB reads to system
* memory
* 2) data parity errors during write-back events
*
* As far as I can make out from the manual, the CEE trap is
* only for correctable errors during memory read accesses by
* the front-end of the processor.
*
* The code below is only for trap level 1 CEE events, as it
* is the only situation where we can safely record and log.
* For trap level >1 we just clear the CE bit in the AFSR and
* return.
*
* This is just like __spitfire_access_error above, but it
* specifically handles correctable errors. If an
* uncorrectable error is indicated in the AFSR we will branch
* directly above to __spitfire_access_error to handle it
* instead. Uncorrectable therefore takes priority over
* correctable, and the error logging C code will notice this
* case by inspecting the trap type.
*/
.type __spitfire_cee_trap,#function
__spitfire_cee_trap:
ldxa [%g0] ASI_AFSR, %g4 ! Get AFSR
mov 1, %g3
sllx %g3, SFAFSR_UE_SHIFT, %g3
andcc %g4, %g3, %g0 ! Check for UE
bne,pn %xcc, __spitfire_access_error
nop
/* Ok, in this case we only have a correctable error.
* Indicate we only wish to capture that state in register
* %g1, and we only disable CE error reporting unlike UE
* handling which disables all errors.
*/
ldxa [%g0] ASI_ESTATE_ERROR_EN, %g3
andn %g3, ESTATE_ERR_CE, %g3
stxa %g3, [%g0] ASI_ESTATE_ERROR_EN
membar #Sync
/* Preserve AFSR in %g4, indicate UDB state to capture in %g1 */
ba,pt %xcc, __spitfire_cee_trap_continue
mov UDBE_CE, %g1
.size __spitfire_cee_trap,.-__spitfire_cee_trap
.type __spitfire_data_access_exception_tl1,#function
__spitfire_data_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g5
ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
rdpr %tt, %g3
cmp %g3, 0x80 ! first win spill/fill trap
blu,pn %xcc, 1f
cmp %g3, 0xff ! last win spill/fill trap
bgu,pn %xcc, 1f
nop
ba,pt %xcc, winfix_dax
rdpr %tpc, %g3
1: sethi %hi(109f), %g7
ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size __spitfire_data_access_exception_tl1,.-__spitfire_data_access_exception_tl1
.type __spitfire_data_access_exception,#function
__spitfire_data_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
mov DMMU_SFAR, %g5
ldxa [%g3] ASI_DMMU, %g4 ! Get SFSR
ldxa [%g5] ASI_DMMU, %g5 ! Get SFAR
stxa %g0, [%g3] ASI_DMMU ! Clear SFSR.FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size __spitfire_data_access_exception,.-__spitfire_data_access_exception
.type __spitfire_insn_access_exception_tl1,#function
__spitfire_insn_access_exception_tl1:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etraptl1
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size __spitfire_insn_access_exception_tl1,.-__spitfire_insn_access_exception_tl1
.type __spitfire_insn_access_exception,#function
__spitfire_insn_access_exception:
rdpr %pstate, %g4
wrpr %g4, PSTATE_MG|PSTATE_AG, %pstate
mov TLB_SFSR, %g3
ldxa [%g3] ASI_IMMU, %g4 ! Get SFSR
rdpr %tpc, %g5 ! IMMU has no SFAR, use TPC
stxa %g0, [%g3] ASI_IMMU ! Clear FaultValid bit
membar #Sync
sethi %hi(109f), %g7
ba,pt %xcc, etrap
109: or %g7, %lo(109b), %g7
mov %l4, %o1
mov %l5, %o2
call spitfire_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap
nop
.size __spitfire_insn_access_exception,.-__spitfire_insn_access_exception

arch/sparc/kernel/sstate.c (new file)

@@ -0,0 +1,127 @@
/* sstate.c: System soft state support.
*
* Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <asm/hypervisor.h>
#include <asm/spitfire.h>
#include <asm/oplib.h>
#include <asm/head.h>
#include <asm/io.h>
static int hv_supports_soft_state;
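/* The hypervisor wants the real (physical) address of the status
 * string, so kernel-image virtual addresses are rebased from
 * KERNBASE onto kern_base, the physical load address.
 */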
static unsigned long kimage_addr_to_ra(const char *p)
{
unsigned long val = (unsigned long) p;
return kern_base + (val - KERNBASE);
}
static void do_set_sstate(unsigned long state, const char *msg)
{
unsigned long err;
if (!hv_supports_soft_state)
return;
err = sun4v_mach_set_soft_state(state, kimage_addr_to_ra(msg));
if (err) {
printk(KERN_WARNING "SSTATE: Failed to set soft-state to "
"state[%lx] msg[%s], err=%lu\n",
state, msg, err);
}
}
static const char booting_msg[32] __attribute__((aligned(32))) =
"Linux booting";
static const char running_msg[32] __attribute__((aligned(32))) =
"Linux running";
static const char halting_msg[32] __attribute__((aligned(32))) =
"Linux halting";
static const char poweroff_msg[32] __attribute__((aligned(32))) =
"Linux powering off";
static const char rebooting_msg[32] __attribute__((aligned(32))) =
"Linux rebooting";
static const char panicing_msg[32] __attribute__((aligned(32))) =
"Linux panicing";
static int sstate_reboot_call(struct notifier_block *np, unsigned long type, void *_unused)
{
const char *msg;
switch (type) {
case SYS_DOWN:
default:
msg = rebooting_msg;
break;
case SYS_HALT:
msg = halting_msg;
break;
case SYS_POWER_OFF:
msg = poweroff_msg;
break;
}
do_set_sstate(HV_SOFT_STATE_TRANSITION, msg);
return NOTIFY_OK;
}
static struct notifier_block sstate_reboot_notifier = {
.notifier_call = sstate_reboot_call,
};
static int sstate_panic_event(struct notifier_block *n, unsigned long event, void *ptr)
{
do_set_sstate(HV_SOFT_STATE_TRANSITION, panicing_msg);
return NOTIFY_DONE;
}
static struct notifier_block sstate_panic_block = {
.notifier_call = sstate_panic_event,
.priority = INT_MAX,
};
static int __init sstate_init(void)
{
unsigned long major, minor;
if (tlb_type != hypervisor)
return 0;
major = 1;
minor = 0;
if (sun4v_hvapi_register(HV_GRP_SOFT_STATE, major, &minor))
return 0;
hv_supports_soft_state = 1;
prom_sun4v_guest_soft_state();
do_set_sstate(HV_SOFT_STATE_TRANSITION, booting_msg);
atomic_notifier_chain_register(&panic_notifier_list,
&sstate_panic_block);
register_reboot_notifier(&sstate_reboot_notifier);
return 0;
}
core_initcall(sstate_init);
static int __init sstate_running(void)
{
do_set_sstate(HV_SOFT_STATE_NORMAL, running_msg);
return 0;
}
late_initcall(sstate_running);

@@ -0,0 +1,64 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/thread_info.h>
#include <linux/module.h>
#include <asm/ptrace.h>
#include <asm/stacktrace.h>
#include "kstack.h"
static void __save_stack_trace(struct thread_info *tp,
struct stack_trace *trace,
bool skip_sched)
{
unsigned long ksp, fp;
if (tp == current_thread_info()) {
stack_trace_flush();
__asm__ __volatile__("mov %%fp, %0" : "=r" (ksp));
} else {
ksp = tp->ksp;
}
fp = ksp + STACK_BIAS;
do {
struct sparc_stackf *sf;
struct pt_regs *regs;
unsigned long pc;
if (!kstack_valid(tp, fp))
break;
sf = (struct sparc_stackf *) fp;
regs = (struct pt_regs *) (sf + 1);
if (kstack_is_trap_frame(tp, regs)) {
if (!(regs->tstate & TSTATE_PRIV))
break;
pc = regs->tpc;
fp = regs->u_regs[UREG_I6] + STACK_BIAS;
} else {
pc = sf->callers_pc;
fp = (unsigned long)sf->fp + STACK_BIAS;
}
if (trace->skip > 0)
trace->skip--;
else if (!skip_sched || !in_sched_functions(pc))
trace->entries[trace->nr_entries++] = pc;
} while (trace->nr_entries < trace->max_entries);
}
void save_stack_trace(struct stack_trace *trace)
{
__save_stack_trace(current_thread_info(), trace, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
struct thread_info *tp = task_thread_info(tsk);
__save_stack_trace(tp, trace, true);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

@@ -0,0 +1,116 @@
/*
* starfire.c: Starfire/E10000 support.
*
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
* Copyright (C) 2000 Anton Blanchard (anton@samba.org)
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/page.h>
#include <asm/oplib.h>
#include <asm/smp.h>
#include <asm/upa.h>
#include <asm/starfire.h>
/*
* A few places around the kernel check this to see if
* they need to call us to do things in a Starfire-specific
* way.
*/
int this_is_starfire = 0;
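/* prom_finddevice() hands back 0 or -1 when no node matches, so
 * any other handle means the PROM really has an /ssp-serial node,
 * which identifies the machine as a Starfire.
 */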
void check_if_starfire(void)
{
int ssnode = prom_finddevice("/ssp-serial");
if (ssnode != 0 && ssnode != -1)
this_is_starfire = 1;
}
int starfire_hard_smp_processor_id(void)
{
return upa_readl(0x1fff40000d0UL);
}
/*
* Each Starfire board has 32 registers which perform translation
* and delivery of traditional interrupt packets into the extended
* Starfire hardware format. Essentially UPAID's now have 2 more
* bits than in all previous Sun5 systems.
*/
struct starfire_irqinfo {
unsigned long imap_slots[32];
unsigned long tregs[32];
struct starfire_irqinfo *next;
int upaid, hwmid;
};
static struct starfire_irqinfo *sflist = NULL;
/* Beam me up Scott(McNeil)y... */
void starfire_hookup(int upaid)
{
struct starfire_irqinfo *p;
unsigned long treg_base, hwmid, i;
p = kmalloc(sizeof(*p), GFP_KERNEL);
if (!p) {
prom_printf("starfire_hookup: No memory, this is insane.\n");
prom_halt();
}
treg_base = 0x100fc000000UL;
hwmid = ((upaid & 0x3c) << 1) |
((upaid & 0x40) >> 4) |
(upaid & 0x3);
p->hwmid = hwmid;
treg_base += (hwmid << 33UL);
treg_base += 0x200UL;
for (i = 0; i < 32; i++) {
p->imap_slots[i] = 0UL;
p->tregs[i] = treg_base + (i * 0x10UL);
/* Let's play it safe and not overwrite existing mappings */
if (upa_readl(p->tregs[i]) != 0)
p->imap_slots[i] = 0xdeadbeaf;
}
p->upaid = upaid;
p->next = sflist;
sflist = p;
}
unsigned int starfire_translate(unsigned long imap,
unsigned int upaid)
{
struct starfire_irqinfo *p;
unsigned int bus_hwmid;
unsigned int i;
bus_hwmid = (((unsigned long)imap) >> 33) & 0x7f;
for (p = sflist; p != NULL; p = p->next)
if (p->hwmid == bus_hwmid)
break;
if (p == NULL) {
prom_printf("XFIRE: Cannot find irqinfo for imap %016lx\n",
((unsigned long)imap));
prom_halt();
}
for (i = 0; i < 32; i++) {
if (p->imap_slots[i] == imap ||
p->imap_slots[i] == 0UL)
break;
}
if (i == 32) {
printk("starfire_translate: Are you kidding me?\n");
panic("Lucy in the sky....");
}
p->imap_slots[i] = imap;
/* map to real upaid */
upaid = (((upaid & 0x3c) << 1) |
((upaid & 0x40) >> 4) |
(upaid & 0x3));
upa_writel(upaid, p->tregs[i]);
return i;
}
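
The same 7-bit UPAID permutation appears twice above, once in starfire_hookup() and once in starfire_translate(): upaid bits [5:2] move to hwmid bits [6:3], upaid bit 6 drops to bit 2, and bits [1:0] pass through. A standalone sketch with a worked value (helper name is illustrative):

#include <stdio.h>

static unsigned int upaid_to_hwmid(unsigned int upaid)
{
	return ((upaid & 0x3c) << 1) |
	       ((upaid & 0x40) >> 4) |
		(upaid & 0x3);
}

int main(void)
{
	/* 0x45 has bits 6, 2 and 0 set; they land at 2, 3 and 0: 0x0d */
	printf("%#x\n", upaid_to_hwmid(0x45));
	return 0;
}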


@@ -160,6 +160,7 @@ static void __init sun4c_init_timers(irq_handler_t counter_fn)
sun4c_timers = (void __iomem *) (unsigned long) addr[0];
irq = of_get_property(dp, "intr", NULL);
of_node_put(dp);
if (!irq) {
prom_printf("sun4c_init_timers: No intr property\n");
prom_halt();
@@ -200,6 +201,7 @@ void __init sun4c_init_IRQ(void)
}
addr = of_get_property(dp, "address", NULL);
of_node_put(dp);
if (!addr) {
prom_printf("sun4c_init_IRQ: No address property\n");
prom_halt();


@@ -40,6 +40,7 @@
#include <asm/cacheflush.h>
#include <asm/irq_regs.h>
#include "kernel.h"
#include "irq.h"
/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */
@@ -58,7 +59,6 @@ static struct sun4d_timer_regs __iomem *sun4d_timers;
#define TIMER_IRQ 10
#define MAX_STATIC_ALLOC 4
extern struct irqaction static_irqaction[MAX_STATIC_ALLOC];
extern int static_irq_count;
static unsigned char sbus_tid[32];
@@ -508,6 +508,7 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn)
* bootbus.
*/
reg = of_get_property(dp, "reg", NULL);
of_node_put(dp);
if (!reg) {
prom_printf("sun4d_init_timers: No reg property\n");
prom_halt();


@@ -374,6 +374,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
}
addr = of_get_property(dp, "address", &len);
of_node_put(dp);
if (!addr) {
printk(KERN_ERR "sun4m_init_timers: No 'address' prop.\n");
return;
@@ -437,6 +438,7 @@ void __init sun4m_init_IRQ(void)
}
addr = of_get_property(dp, "address", &len);
of_node_put(dp);
if (!addr) {
printk(KERN_ERR "sun4m_init_IRQ: No 'address' prop.\n");
return;
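
Every hunk above adds the same missing check: of_get_property() returns NULL when the node lacks the property, and the old code used the result unconditionally. Sketched as a standalone helper (hypothetical name; assuming the prom helpers these files already call):

#include <asm/prom.h>	/* of_get_property(), of_node_put() */
#include <asm/oplib.h>	/* prom_printf(), prom_halt() */

static void __iomem *example_map_regs(struct device_node *dp)
{
	const unsigned int *addr;

	addr = of_get_property(dp, "address", NULL);
	of_node_put(dp);
	if (!addr) {	/* property missing: fail loudly, as above */
		prom_printf("example_map_regs: No address property\n");
		prom_halt();
	}
	return (void __iomem *)(unsigned long)addr[0];
}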


@@ -0,0 +1,341 @@
/* sun4v_ivec.S: Sun4v interrupt vector handling.
*
* Copyright (C) 2006 <davem@davemloft.net>
*/
#include <asm/cpudata.h>
#include <asm/intr_queue.h>
#include <asm/pil.h>
.text
.align 32
sun4v_cpu_mondo:
/* Head offset in %g2, tail offset in %g4.
* If they are the same, no work.
*/
mov INTRQ_CPU_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_CPU_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_cpu_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g4. */
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get CPU mondo queue base phys address into %g7. */
ldx [%g4 + TRAP_PER_CPU_CPU_MONDO_PA], %g7
/* Now get the cross-call arguments and handler PC, same
* layout as sun4u:
*
* 1st 64-bit word: low half is 32-bit PC, put into %g3 and jmpl to it
* high half is context arg to MMU flushes, into %g5
* 2nd 64-bit word: 64-bit arg, load into %g1
* 3rd 64-bit word: 64-bit arg, load into %g7
*/
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g3
add %g2, 0x8, %g2
srlx %g3, 32, %g5
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
add %g2, 0x8, %g2
srl %g3, 0, %g3
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g7
add %g2, 0x40 - 0x8 - 0x8, %g2
/* Update queue head pointer. */
lduw [%g4 + TRAP_PER_CPU_CPU_MONDO_QMASK], %g4
and %g2, %g4, %g2
mov INTRQ_CPU_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
jmpl %g3, %g0
nop
sun4v_cpu_mondo_queue_empty:
retry
sun4v_dev_mondo:
/* Head offset in %g2, tail offset in %g4. */
mov INTRQ_DEVICE_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_DEVICE_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_dev_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g4. */
ldxa [%g0] ASI_SCRATCHPAD, %g4
sub %g4, TRAP_PER_CPU_FAULT_INFO, %g4
/* Get DEV mondo queue base phys address into %g5. */
ldx [%g4 + TRAP_PER_CPU_DEV_MONDO_PA], %g5
/* Load IVEC into %g3. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
add %g2, 0x40, %g2
/* XXX There can be a full 64-byte block of data here.
* XXX This is how we can get at MSI vector data.
* XXX Currently we do not capture this, but when we do we'll
* XXX need to add a 64-byte storage area in the struct ino_bucket
* XXX or the struct irq_desc.
*/
/* Update queue head pointer, this frees up some registers. */
lduw [%g4 + TRAP_PER_CPU_DEV_MONDO_QMASK], %g4
and %g2, %g4, %g2
mov INTRQ_DEVICE_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
TRAP_LOAD_IRQ_WORK_PA(%g1, %g4)
/* For VIRQs, cookie is encoded as ~bucket_phys_addr */
brlz,pt %g3, 1f
xnor %g3, %g0, %g4
/* Get __pa(&ivector_table[IVEC]) into %g4. */
sethi %hi(ivector_table_pa), %g4
ldx [%g4 + %lo(ivector_table_pa)], %g4
sllx %g3, 4, %g3
add %g4, %g3, %g4
1: ldx [%g1], %g2
stxa %g2, [%g4] ASI_PHYS_USE_EC
stx %g4, [%g1]
/* Signal the interrupt by setting (1 << pil) in %softint. */
wr %g0, 1 << PIL_DEVICE_IRQ, %set_softint
sun4v_dev_mondo_queue_empty:
retry
sun4v_res_mondo:
/* Head offset in %g2, tail offset in %g4. */
mov INTRQ_RESUM_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_RESUM_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_res_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g3. */
ldxa [%g0] ASI_SCRATCHPAD, %g3
sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
/* Get RES mondo queue base phys address into %g5. */
ldx [%g3 + TRAP_PER_CPU_RESUM_MONDO_PA], %g5
/* Get RES kernel buffer base phys address into %g7. */
ldx [%g3 + TRAP_PER_CPU_RESUM_KBUF_PA], %g7
/* If the first word is non-zero, queue is full. */
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
brnz,pn %g1, sun4v_res_mondo_queue_full
nop
lduw [%g3 + TRAP_PER_CPU_RESUM_QMASK], %g4
/* Remember this entry's offset in %g1. */
mov %g2, %g1
/* Copy 64-byte queue entry into kernel buffer. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
/* Update queue head pointer. */
and %g2, %g4, %g2
mov INTRQ_RESUM_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
/* Disable interrupts and save register state so we can call
* C code. The etrap handling will leave %g4 in %l4 for us
* when it's done.
*/
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
mov %g1, %g4
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
/* Log the event. */
add %sp, PTREGS_OFF, %o0
call sun4v_resum_error
mov %l4, %o1
/* Return from trap. */
ba,pt %xcc, rtrap_irq
nop
sun4v_res_mondo_queue_empty:
retry
sun4v_res_mondo_queue_full:
/* The queue is full, consolidate our damage by setting
* the head equal to the tail. We'll just trap again otherwise.
* Call C code to log the event.
*/
mov INTRQ_RESUM_MONDO_HEAD, %g2
stxa %g4, [%g2] ASI_QUEUE
membar #Sync
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
call sun4v_resum_overflow
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap_irq
nop
sun4v_nonres_mondo:
/* Head offset in %g2, tail offset in %g4. */
mov INTRQ_NONRESUM_MONDO_HEAD, %g2
ldxa [%g2] ASI_QUEUE, %g2
mov INTRQ_NONRESUM_MONDO_TAIL, %g4
ldxa [%g4] ASI_QUEUE, %g4
cmp %g2, %g4
be,pn %xcc, sun4v_nonres_mondo_queue_empty
nop
/* Get &trap_block[smp_processor_id()] into %g3. */
ldxa [%g0] ASI_SCRATCHPAD, %g3
sub %g3, TRAP_PER_CPU_FAULT_INFO, %g3
/* Get RES mondo queue base phys address into %g5. */
ldx [%g3 + TRAP_PER_CPU_NONRESUM_MONDO_PA], %g5
/* Get RES kernel buffer base phys address into %g7. */
ldx [%g3 + TRAP_PER_CPU_NONRESUM_KBUF_PA], %g7
/* If the first word is non-zero, queue is full. */
ldxa [%g7 + %g2] ASI_PHYS_USE_EC, %g1
brnz,pn %g1, sun4v_nonres_mondo_queue_full
nop
lduw [%g3 + TRAP_PER_CPU_NONRESUM_QMASK], %g4
/* Remember this entry's offset in %g1. */
mov %g2, %g1
/* Copy 64-byte queue entry into kernel buffer. */
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
ldxa [%g5 + %g2] ASI_PHYS_USE_EC, %g3
stxa %g3, [%g7 + %g2] ASI_PHYS_USE_EC
add %g2, 0x08, %g2
/* Update queue head pointer. */
and %g2, %g4, %g2
mov INTRQ_NONRESUM_MONDO_HEAD, %g4
stxa %g2, [%g4] ASI_QUEUE
membar #Sync
/* Disable interrupts and save register state so we can call
* C code. The etrap handling will leave %g4 in %l4 for us
* when it's done.
*/
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
mov %g1, %g4
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
/* Log the event. */
add %sp, PTREGS_OFF, %o0
call sun4v_nonresum_error
mov %l4, %o1
/* Return from trap. */
ba,pt %xcc, rtrap_irq
nop
sun4v_nonres_mondo_queue_empty:
retry
sun4v_nonres_mondo_queue_full:
/* The queue is full, consolidate our damage by setting
* the head equal to the tail. We'll just trap again otherwise.
* Call C code to log the event.
*/
mov INTRQ_NONRESUM_MONDO_HEAD, %g2
stxa %g4, [%g2] ASI_QUEUE
membar #Sync
rdpr %pil, %g2
wrpr %g0, PIL_NORMAL_MAX, %pil
ba,pt %xcc, etrap_irq
rd %pc, %g7
#ifdef CONFIG_TRACE_IRQFLAGS
call trace_hardirqs_off
nop
#endif
call sun4v_nonresum_overflow
add %sp, PTREGS_OFF, %o0
ba,pt %xcc, rtrap_irq
nop
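
All four handlers above follow the same ring discipline: compare the head offset against the tail, consume a 64-byte entry at the head, then store the head back with the per-queue size mask applied. The C-level shape of that logic, as a sketch with illustrative names (the real code works on physical addresses via ASI_PHYS_USE_EC):

/* head/tail are byte offsets into a power-of-two ring of 64-byte
 * entries; the hypervisor advances tail, the handler advances head.
 */
struct mondo_queue {
	unsigned long head;	/* INTRQ_*_HEAD register */
	unsigned long tail;	/* INTRQ_*_TAIL register */
	unsigned long qmask;	/* queue size in bytes, minus 1 */
	unsigned char *base;	/* queue base address */
};

static const void *mondo_dequeue(struct mondo_queue *q)
{
	const void *entry;

	if (q->head == q->tail)		/* empty: just retry */
		return NULL;
	entry   = q->base + q->head;	/* one 64-byte entry */
	q->head = (q->head + 0x40) & q->qmask;	/* advance and wrap */
	return entry;
}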


@@ -0,0 +1,428 @@
/* sun4v_tlb_miss.S: Sun4v TLB miss handlers.
*
* Copyright (C) 2006 <davem@davemloft.net>
*/
.text
.align 32
/* Load ITLB fault information into VADDR and CTX, using BASE. */
#define LOAD_ITLB_INFO(BASE, VADDR, CTX) \
ldx [BASE + HV_FAULT_I_ADDR_OFFSET], VADDR; \
ldx [BASE + HV_FAULT_I_CTX_OFFSET], CTX;
/* Load DTLB fault information into VADDR and CTX, using BASE. */
#define LOAD_DTLB_INFO(BASE, VADDR, CTX) \
ldx [BASE + HV_FAULT_D_ADDR_OFFSET], VADDR; \
ldx [BASE + HV_FAULT_D_CTX_OFFSET], CTX;
/* DEST = (VADDR >> 22)
*
* Branch to ZERO_CTX_LABEL if context is zero.
*/
#define COMPUTE_TAG_TARGET(DEST, VADDR, CTX, ZERO_CTX_LABEL) \
srlx VADDR, 22, DEST; \
brz,pn CTX, ZERO_CTX_LABEL; \
nop;
/* Create TSB pointer. This is something like:
*
* index_mask = (512 << (tsb_reg & 0x7UL)) - 1UL;
* tsb_base = tsb_reg & ~0x7UL;
* tsb_index = ((vaddr >> HASH_SHIFT) & tsb_mask);
* tsb_ptr = tsb_base + (tsb_index * 16);
*/
#define COMPUTE_TSB_PTR(TSB_PTR, VADDR, HASH_SHIFT, TMP1, TMP2) \
and TSB_PTR, 0x7, TMP1; \
mov 512, TMP2; \
andn TSB_PTR, 0x7, TSB_PTR; \
sllx TMP2, TMP1, TMP2; \
srlx VADDR, HASH_SHIFT, TMP1; \
sub TMP2, 1, TMP2; \
and TMP1, TMP2, TMP1; \
sllx TMP1, 4, TMP1; \
add TSB_PTR, TMP1, TSB_PTR;
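/* Worked instance of COMPUTE_TSB_PTR (a sketch; values illustrative):
 * a size field of 1 in the low bits of tsb_reg gives 512 << 1 = 1024
 * entries; for vaddr 0x123456000 and HASH_SHIFT = PAGE_SHIFT (13 for
 * 8K base pages), tsb_index = (0x123456000 >> 13) & 1023 = 0x22b, so
 * the entry sits at tsb_base + 0x22b * 16, each entry being a 16-byte
 * tag/TTE pair.
 */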
sun4v_itlb_miss:
/* Load MMU Miss base into %g2. */
ldxa [%g0] ASI_SCRATCHPAD, %g2
/* Load UTSB reg into %g1. */
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
LOAD_ITLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_itlb_4v)
COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
cmp %g2, %g6
bne,a,pn %xcc, tsb_miss_page_table_walk
mov FAULT_CODE_ITLB, %g3
andcc %g3, _PAGE_EXEC_4V, %g0
be,a,pn %xcc, tsb_do_fault
mov FAULT_CODE_ITLB, %g3
/* We have a valid entry, make hypervisor call to load
* I-TLB and return from trap.
*
* %g3: PTE
* %g4: vaddr
*/
sun4v_itlb_load:
ldxa [%g0] ASI_SCRATCHPAD, %g6
mov %o0, %g1 ! save %o0
mov %o1, %g2 ! save %o1
mov %o2, %g5 ! save %o2
mov %o3, %g7 ! save %o3
mov %g4, %o0 ! vaddr
ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1 ! ctx
mov %g3, %o2 ! PTE
mov HV_MMU_IMMU, %o3 ! flags
ta HV_MMU_MAP_ADDR_TRAP
brnz,pn %o0, sun4v_itlb_error
mov %g2, %o1 ! restore %o1
mov %g1, %o0 ! restore %o0
mov %g5, %o2 ! restore %o2
mov %g7, %o3 ! restore %o3
retry
sun4v_dtlb_miss:
/* Load MMU Miss base into %g2. */
ldxa [%g0] ASI_SCRATCHPAD, %g2
/* Load UTSB reg into %g1. */
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
LOAD_DTLB_INFO(%g2, %g4, %g5)
COMPUTE_TAG_TARGET(%g6, %g4, %g5, kvmap_dtlb_4v)
COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g3, %g7)
/* Load TSB tag/pte into %g2/%g3 and compare the tag. */
ldda [%g1] ASI_QUAD_LDD_PHYS_4V, %g2
cmp %g2, %g6
bne,a,pn %xcc, tsb_miss_page_table_walk
mov FAULT_CODE_DTLB, %g3
/* We have a valid entry, make hypervisor call to load
* D-TLB and return from trap.
*
* %g3: PTE
* %g4: vaddr
*/
sun4v_dtlb_load:
ldxa [%g0] ASI_SCRATCHPAD, %g6
mov %o0, %g1 ! save %o0
mov %o1, %g2 ! save %o1
mov %o2, %g5 ! save %o2
mov %o3, %g7 ! save %o3
mov %g4, %o0 ! vaddr
ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1 ! ctx
mov %g3, %o2 ! PTE
mov HV_MMU_DMMU, %o3 ! flags
ta HV_MMU_MAP_ADDR_TRAP
brnz,pn %o0, sun4v_dtlb_error
mov %g2, %o1 ! restore %o1
mov %g1, %o0 ! restore %o0
mov %g5, %o2 ! restore %o2
mov %g7, %o3 ! restore %o3
retry
sun4v_dtlb_prot:
SET_GL(1)
/* Load MMU Miss base into %g5. */
ldxa [%g0] ASI_SCRATCHPAD, %g5
ldx [%g5 + HV_FAULT_D_ADDR_OFFSET], %g5
rdpr %tl, %g1
cmp %g1, 1
bgu,pn %xcc, winfix_trampoline
mov FAULT_CODE_DTLB | FAULT_CODE_WRITE, %g4
ba,pt %xcc, sparc64_realfault_common
nop
/* Called from trap table:
* %g4: vaddr
* %g5: context
* %g6: TAG TARGET
*/
sun4v_itsb_miss:
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
brz,pn %g5, kvmap_itlb_4v
mov FAULT_CODE_ITLB, %g3
ba,a,pt %xcc, sun4v_tsb_miss_common
/* Called from trap table:
* %g4: vaddr
* %g5: context
* %g6: TAG TARGET
*/
sun4v_dtsb_miss:
mov SCRATCHPAD_UTSBREG1, %g1
ldxa [%g1] ASI_SCRATCHPAD, %g1
brz,pn %g5, kvmap_dtlb_4v
mov FAULT_CODE_DTLB, %g3
/* fallthrough */
sun4v_tsb_miss_common:
COMPUTE_TSB_PTR(%g1, %g4, PAGE_SHIFT, %g5, %g7)
sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
#ifdef CONFIG_HUGETLB_PAGE
mov SCRATCHPAD_UTSBREG2, %g5
ldxa [%g5] ASI_SCRATCHPAD, %g5
cmp %g5, -1
be,pt %xcc, 80f
nop
COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7)
/* That clobbered %g2, reload it. */
ldxa [%g0] ASI_SCRATCHPAD, %g2
sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2
80: stx %g5, [%g2 + TRAP_PER_CPU_TSB_HUGE_TEMP]
#endif
ba,pt %xcc, tsb_miss_page_table_walk_sun4v_fastpath
ldx [%g2 + TRAP_PER_CPU_PGD_PADDR], %g7
sun4v_itlb_error:
sethi %hi(sun4v_err_itlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_itlb_vaddr)]
sethi %hi(sun4v_err_itlb_ctx), %g1
ldxa [%g0] ASI_SCRATCHPAD, %g6
ldx [%g6 + HV_FAULT_I_CTX_OFFSET], %o1
stx %o1, [%g1 + %lo(sun4v_err_itlb_ctx)]
sethi %hi(sun4v_err_itlb_pte), %g1
stx %g3, [%g1 + %lo(sun4v_err_itlb_pte)]
sethi %hi(sun4v_err_itlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_itlb_error)]
rdpr %tl, %g4
cmp %g4, 1
ble,pt %icc, 1f
sethi %hi(2f), %g7
ba,pt %xcc, etraptl1
or %g7, %lo(2f), %g7
1: ba,pt %xcc, etrap
2: or %g7, %lo(2b), %g7
mov %l4, %o1
call sun4v_itlb_error_report
add %sp, PTREGS_OFF, %o0
/* NOTREACHED */
sun4v_dtlb_error:
sethi %hi(sun4v_err_dtlb_vaddr), %g1
stx %g4, [%g1 + %lo(sun4v_err_dtlb_vaddr)]
sethi %hi(sun4v_err_dtlb_ctx), %g1
ldxa [%g0] ASI_SCRATCHPAD, %g6
ldx [%g6 + HV_FAULT_D_CTX_OFFSET], %o1
stx %o1, [%g1 + %lo(sun4v_err_dtlb_ctx)]
sethi %hi(sun4v_err_dtlb_pte), %g1
stx %g3, [%g1 + %lo(sun4v_err_dtlb_pte)]
sethi %hi(sun4v_err_dtlb_error), %g1
stx %o0, [%g1 + %lo(sun4v_err_dtlb_error)]
rdpr %tl, %g4
cmp %g4, 1
ble,pt %icc, 1f
sethi %hi(2f), %g7
ba,pt %xcc, etraptl1
or %g7, %lo(2f), %g7
1: ba,pt %xcc, etrap
2: or %g7, %lo(2b), %g7
mov %l4, %o1
call sun4v_dtlb_error_report
add %sp, PTREGS_OFF, %o0
/* NOTREACHED */
/* Instruction Access Exception, tl0. */
sun4v_iacc:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_insn_access_exception
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Instruction Access Exception, tl1. */
sun4v_iacc_tl1:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_I_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_I_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_I_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etraptl1
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_insn_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Data Access Exception, tl0. */
sun4v_dacc:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_data_access_exception
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Data Access Exception, tl1. */
sun4v_dacc_tl1:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etraptl1
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_data_access_exception_tl1
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Memory Address Unaligned. */
sun4v_mna:
/* Window fixup? */
rdpr %tl, %g2
cmp %g2, 1
ble,pt %icc, 1f
nop
SET_GL(1)
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g5
mov HV_FAULT_TYPE_UNALIGNED, %g3
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g4
sllx %g3, 16, %g3
or %g4, %g3, %g4
ba,pt %xcc, winfix_mna
rdpr %tpc, %g3
/* not reached */
1: ldxa [%g0] ASI_SCRATCHPAD, %g2
mov HV_FAULT_TYPE_UNALIGNED, %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call sun4v_do_mna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Privileged Action. */
sun4v_privact:
ba,pt %xcc, etrap
rd %pc, %g7
call do_privact
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Unaligned ldd float, tl0. */
sun4v_lddfmna:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call handle_lddfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
/* Unaligned std float, tl0. */
sun4v_stdfmna:
ldxa [%g0] ASI_SCRATCHPAD, %g2
ldx [%g2 + HV_FAULT_D_TYPE_OFFSET], %g3
ldx [%g2 + HV_FAULT_D_ADDR_OFFSET], %g4
ldx [%g2 + HV_FAULT_D_CTX_OFFSET], %g5
sllx %g3, 16, %g3
or %g5, %g3, %g5
ba,pt %xcc, etrap
rd %pc, %g7
mov %l4, %o1
mov %l5, %o2
call handle_stdfmna
add %sp, PTREGS_OFF, %o0
ba,a,pt %xcc, rtrap
#define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000
#define SUN4V_DO_PATCH(OLD, NEW) \
sethi %hi(NEW), %g1; \
or %g1, %lo(NEW), %g1; \
sethi %hi(OLD), %g2; \
or %g2, %lo(OLD), %g2; \
sub %g1, %g2, %g1; \
sethi %hi(BRANCH_ALWAYS), %g3; \
sll %g1, 11, %g1; \
srl %g1, 11 + 2, %g1; \
or %g3, %lo(BRANCH_ALWAYS), %g3; \
or %g3, %g1, %g3; \
stw %g3, [%g2]; \
sethi %hi(NOP), %g3; \
or %g3, %lo(NOP), %g3; \
stw %g3, [%g2 + 0x4]; \
flush %g2;
.globl sun4v_patch_tlb_handlers
.type sun4v_patch_tlb_handlers,#function
sun4v_patch_tlb_handlers:
SUN4V_DO_PATCH(tl0_iamiss, sun4v_itlb_miss)
SUN4V_DO_PATCH(tl1_iamiss, sun4v_itlb_miss)
SUN4V_DO_PATCH(tl0_damiss, sun4v_dtlb_miss)
SUN4V_DO_PATCH(tl1_damiss, sun4v_dtlb_miss)
SUN4V_DO_PATCH(tl0_daprot, sun4v_dtlb_prot)
SUN4V_DO_PATCH(tl1_daprot, sun4v_dtlb_prot)
SUN4V_DO_PATCH(tl0_iax, sun4v_iacc)
SUN4V_DO_PATCH(tl1_iax, sun4v_iacc_tl1)
SUN4V_DO_PATCH(tl0_dax, sun4v_dacc)
SUN4V_DO_PATCH(tl1_dax, sun4v_dacc_tl1)
SUN4V_DO_PATCH(tl0_mna, sun4v_mna)
SUN4V_DO_PATCH(tl1_mna, sun4v_mna)
SUN4V_DO_PATCH(tl0_lddfmna, sun4v_lddfmna)
SUN4V_DO_PATCH(tl0_stdfmna, sun4v_stdfmna)
SUN4V_DO_PATCH(tl0_privact, sun4v_privact)
retl
nop
.size sun4v_patch_tlb_handlers,.-sun4v_patch_tlb_handlers
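
What SUN4V_DO_PATCH writes, in C terms: the first two instructions of the old handler become "ba,pt %xcc, NEW; nop", with a 19-bit word displacement produced by the sll 11 / srl 13 pair. A sketch (hypothetical function name; the real macro also executes flush to synchronize the I-cache):

#define BRANCH_ALWAYS	0x10680000u	/* ba,pt %xcc, . */
#define NOP		0x01000000u

static void sun4v_patch_one(unsigned int *old_insn, unsigned int *new_tgt)
{
	unsigned long disp = (unsigned long)new_tgt - (unsigned long)old_insn;

	/* keep 19 bits of the word displacement, as sll 11/srl 13 does */
	old_insn[0] = BRANCH_ALWAYS | ((disp >> 2) & 0x7ffff);
	old_insn[1] = NOP;
}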

arch/sparc/kernel/sys32.S

@@ -0,0 +1,367 @@
/*
* sys32.S: I-cache tricks for 32-bit compatibility layer simple
* conversions.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz)
*/
#include <asm/errno.h>
/* NOTE: call as jump breaks return stack, we have to avoid that */
.text
#define SIGN1(STUB,SYSCALL,REG1) \
.align 32; \
.globl STUB; \
STUB: sethi %hi(SYSCALL), %g1; \
jmpl %g1 + %lo(SYSCALL), %g0; \
sra REG1, 0, REG1
#define SIGN2(STUB,SYSCALL,REG1,REG2) \
.align 32; \
.globl STUB; \
STUB: sethi %hi(SYSCALL), %g1; \
sra REG1, 0, REG1; \
jmpl %g1 + %lo(SYSCALL), %g0; \
sra REG2, 0, REG2
#define SIGN3(STUB,SYSCALL,REG1,REG2,REG3) \
.align 32; \
.globl STUB; \
STUB: sra REG1, 0, REG1; \
sethi %hi(SYSCALL), %g1; \
sra REG2, 0, REG2; \
jmpl %g1 + %lo(SYSCALL), %g0; \
sra REG3, 0, REG3
#define SIGN4(STUB,SYSCALL,REG1,REG2,REG3,REG4) \
.align 32; \
.globl STUB; \
STUB: sra REG1, 0, REG1; \
sethi %hi(SYSCALL), %g1; \
sra REG2, 0, REG2; \
sra REG3, 0, REG3; \
jmpl %g1 + %lo(SYSCALL), %g0; \
sra REG4, 0, REG4
SIGN1(sys32_exit, sparc_exit, %o0)
SIGN1(sys32_exit_group, sys_exit_group, %o0)
SIGN1(sys32_wait4, compat_sys_wait4, %o2)
SIGN1(sys32_creat, sys_creat, %o1)
SIGN1(sys32_mknod, sys_mknod, %o1)
SIGN1(sys32_perfctr, sys_perfctr, %o0)
SIGN1(sys32_umount, sys_umount, %o1)
SIGN1(sys32_signal, sys_signal, %o0)
SIGN1(sys32_access, sys_access, %o1)
SIGN1(sys32_msync, sys_msync, %o2)
SIGN2(sys32_reboot, sys_reboot, %o0, %o1)
SIGN1(sys32_setitimer, compat_sys_setitimer, %o0)
SIGN1(sys32_getitimer, compat_sys_getitimer, %o0)
SIGN1(sys32_sethostname, sys_sethostname, %o1)
SIGN1(sys32_swapon, sys_swapon, %o1)
SIGN1(sys32_sigaction, compat_sys_sigaction, %o0)
SIGN1(sys32_rt_sigaction, compat_sys_rt_sigaction, %o0)
SIGN1(sys32_sigprocmask, compat_sys_sigprocmask, %o0)
SIGN1(sys32_rt_sigprocmask, compat_sys_rt_sigprocmask, %o0)
SIGN2(sys32_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo, %o0, %o1)
SIGN1(sys32_getrusage, compat_sys_getrusage, %o0)
SIGN1(sys32_setxattr, sys_setxattr, %o4)
SIGN1(sys32_lsetxattr, sys_lsetxattr, %o4)
SIGN1(sys32_fsetxattr, sys_fsetxattr, %o4)
SIGN1(sys32_fgetxattr, sys_fgetxattr, %o0)
SIGN1(sys32_flistxattr, sys_flistxattr, %o0)
SIGN1(sys32_fremovexattr, sys_fremovexattr, %o0)
SIGN2(sys32_tkill, sys_tkill, %o0, %o1)
SIGN1(sys32_epoll_create, sys_epoll_create, %o0)
SIGN3(sys32_epoll_ctl, sys_epoll_ctl, %o0, %o1, %o2)
SIGN3(sys32_epoll_wait, sys_epoll_wait, %o0, %o2, %o3)
SIGN1(sys32_readahead, compat_sys_readahead, %o0)
SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4)
SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5)
SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1)
SIGN1(sys32_mlockall, sys_mlockall, %o0)
SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0)
SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1)
SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1)
SIGN1(sys32_io_submit, compat_sys_io_submit, %o1)
SIGN1(sys32_mq_open, compat_sys_mq_open, %o1)
SIGN1(sys32_select, compat_sys_select, %o0)
SIGN1(sys32_mkdir, sys_mkdir, %o1)
SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5)
SIGN1(sys32_sysfs, compat_sys_sysfs, %o0)
SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1)
SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1)
SIGN1(sys32_prctl, sys_prctl, %o0)
SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0)
SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2)
SIGN1(sys32_getgroups, sys_getgroups, %o0)
SIGN1(sys32_getpgid, sys_getpgid, %o0)
SIGN2(sys32_getpriority, sys_getpriority, %o0, %o1)
SIGN1(sys32_getsid, sys_getsid, %o0)
SIGN2(sys32_kill, sys_kill, %o0, %o1)
SIGN1(sys32_nice, sys_nice, %o0)
SIGN1(sys32_lseek, sys_lseek, %o1)
SIGN2(sys32_open, sparc32_open, %o1, %o2)
SIGN1(sys32_readlink, sys_readlink, %o2)
SIGN1(sys32_sched_get_priority_max, sys_sched_get_priority_max, %o0)
SIGN1(sys32_sched_get_priority_min, sys_sched_get_priority_min, %o0)
SIGN1(sys32_sched_getparam, sys_sched_getparam, %o0)
SIGN1(sys32_sched_getscheduler, sys_sched_getscheduler, %o0)
SIGN1(sys32_sched_setparam, sys_sched_setparam, %o0)
SIGN2(sys32_sched_setscheduler, sys_sched_setscheduler, %o0, %o1)
SIGN1(sys32_getdomainname, sys_getdomainname, %o1)
SIGN1(sys32_setdomainname, sys_setdomainname, %o1)
SIGN1(sys32_setgroups, sys_setgroups, %o0)
SIGN2(sys32_setpgid, sys_setpgid, %o0, %o1)
SIGN3(sys32_setpriority, sys_setpriority, %o0, %o1, %o2)
SIGN1(sys32_ssetmask, sys_ssetmask, %o0)
SIGN2(sys32_syslog, sys_syslog, %o0, %o2)
SIGN1(sys32_umask, sys_umask, %o0)
SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2)
SIGN1(sys32_sendto, sys_sendto, %o0)
SIGN1(sys32_recvfrom, sys_recvfrom, %o0)
SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2)
SIGN2(sys32_connect, sys_connect, %o0, %o2)
SIGN2(sys32_bind, sys_bind, %o0, %o2)
SIGN2(sys32_listen, sys_listen, %o0, %o1)
SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1)
SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2)
SIGN1(sys32_getpeername, sys_getpeername, %o0)
SIGN1(sys32_getsockname, sys_getsockname, %o0)
SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1)
SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2)
SIGN2(sys32_splice, sys_splice, %o0, %o1)
SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5)
SIGN2(sys32_tee, sys_tee, %o0, %o1)
SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0)
.globl sys32_mmap2
sys32_mmap2:
sethi %hi(sys_mmap), %g1
jmpl %g1 + %lo(sys_mmap), %g0
sllx %o5, 12, %o5
.align 32
.globl sys32_socketcall
sys32_socketcall: /* %o0=call, %o1=args */
cmp %o0, 1
bl,pn %xcc, do_einval
cmp %o0, 18
bg,pn %xcc, do_einval
sub %o0, 1, %o0
sllx %o0, 5, %o0
sethi %hi(__socketcall_table_begin), %g2
or %g2, %lo(__socketcall_table_begin), %g2
jmpl %g2 + %o0, %g0
nop
do_einval:
retl
mov -EINVAL, %o0
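
The dispatch arithmetic above relies on every stub in the table being exactly 32 bytes (eight instructions), which the nop padding below enforces: the jump target is simply table_begin + (call - 1) * 32. As a sketch:

static unsigned long socketcall_target(unsigned long table_begin, int call)
{
	if (call < 1 || call > 18)
		return 0;	/* caller returns -EINVAL instead */
	return table_begin + (unsigned long)(call - 1) * 32;
}
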
.align 32
__socketcall_table_begin:
/* Each entry is exactly 32 bytes. */
do_sys_socket: /* sys_socket(int, int, int) */
1: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socket), %g1
2: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_socket), %g0
3: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_bind: /* sys_bind(int fd, struct sockaddr *, int) */
4: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_bind), %g1
5: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_bind), %g0
6: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_connect: /* sys_connect(int, struct sockaddr *, int) */
7: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_connect), %g1
8: ldswa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_connect), %g0
9: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_listen: /* sys_listen(int, int) */
10: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_listen), %g1
jmpl %g1 + %lo(sys_listen), %g0
11: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_accept: /* sys_accept(int, struct sockaddr *, int *) */
12: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept), %g1
13: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_accept), %g0
14: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getsockname: /* sys_getsockname(int, struct sockaddr *, int *) */
15: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getsockname), %g1
16: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getsockname), %g0
17: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_getpeername: /* sys_getpeername(int, struct sockaddr *, int *) */
18: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_getpeername), %g1
19: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(sys_getpeername), %g0
20: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_socketpair: /* sys_socketpair(int, int, int, int *) */
21: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_socketpair), %g1
22: ldswa [%o1 + 0x8] %asi, %o2
23: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_socketpair), %g0
24: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_send: /* sys_send(int, void *, size_t, unsigned int) */
25: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_send), %g1
26: lduwa [%o1 + 0x8] %asi, %o2
27: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_send), %g0
28: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_recv: /* sys_recv(int, void *, size_t, unsigned int) */
29: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recv), %g1
30: lduwa [%o1 + 0x8] %asi, %o2
31: lduwa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_recv), %g0
32: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
do_sys_sendto: /* sys_sendto(int, u32, compat_size_t, unsigned int, u32, int) */
33: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_sendto), %g1
34: lduwa [%o1 + 0x8] %asi, %o2
35: lduwa [%o1 + 0xc] %asi, %o3
36: lduwa [%o1 + 0x10] %asi, %o4
37: ldswa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_sendto), %g0
38: lduwa [%o1 + 0x4] %asi, %o1
do_sys_recvfrom: /* sys_recvfrom(int, u32, compat_size_t, unsigned int, u32, u32) */
39: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_recvfrom), %g1
40: lduwa [%o1 + 0x8] %asi, %o2
41: lduwa [%o1 + 0xc] %asi, %o3
42: lduwa [%o1 + 0x10] %asi, %o4
43: lduwa [%o1 + 0x14] %asi, %o5
jmpl %g1 + %lo(sys_recvfrom), %g0
44: lduwa [%o1 + 0x4] %asi, %o1
do_sys_shutdown: /* sys_shutdown(int, int) */
45: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_shutdown), %g1
jmpl %g1 + %lo(sys_shutdown), %g0
46: ldswa [%o1 + 0x4] %asi, %o1
nop
nop
nop
nop
do_sys_setsockopt: /* compat_sys_setsockopt(int, int, int, char *, int) */
47: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_setsockopt), %g1
48: ldswa [%o1 + 0x8] %asi, %o2
49: lduwa [%o1 + 0xc] %asi, %o3
50: ldswa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_setsockopt), %g0
51: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_getsockopt: /* compat_sys_getsockopt(int, int, int, u32, u32) */
52: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_getsockopt), %g1
53: ldswa [%o1 + 0x8] %asi, %o2
54: lduwa [%o1 + 0xc] %asi, %o3
55: lduwa [%o1 + 0x10] %asi, %o4
jmpl %g1 + %lo(compat_sys_getsockopt), %g0
56: ldswa [%o1 + 0x4] %asi, %o1
nop
do_sys_sendmsg: /* compat_sys_sendmsg(int, struct compat_msghdr *, unsigned int) */
57: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_sendmsg), %g1
58: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_sendmsg), %g0
59: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_recvmsg: /* compat_sys_recvmsg(int, struct compat_msghdr *, unsigned int) */
60: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(compat_sys_recvmsg), %g1
61: lduwa [%o1 + 0x8] %asi, %o2
jmpl %g1 + %lo(compat_sys_recvmsg), %g0
62: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
nop
do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */
63: ldswa [%o1 + 0x0] %asi, %o0
sethi %hi(sys_accept4), %g1
64: lduwa [%o1 + 0x8] %asi, %o2
65: ldswa [%o1 + 0xc] %asi, %o3
jmpl %g1 + %lo(sys_accept4), %g0
66: lduwa [%o1 + 0x4] %asi, %o1
nop
nop
.section __ex_table,"a"
.align 4
.word 1b, __retl_efault, 2b, __retl_efault
.word 3b, __retl_efault, 4b, __retl_efault
.word 5b, __retl_efault, 6b, __retl_efault
.word 7b, __retl_efault, 8b, __retl_efault
.word 9b, __retl_efault, 10b, __retl_efault
.word 11b, __retl_efault, 12b, __retl_efault
.word 13b, __retl_efault, 14b, __retl_efault
.word 15b, __retl_efault, 16b, __retl_efault
.word 17b, __retl_efault, 18b, __retl_efault
.word 19b, __retl_efault, 20b, __retl_efault
.word 21b, __retl_efault, 22b, __retl_efault
.word 23b, __retl_efault, 24b, __retl_efault
.word 25b, __retl_efault, 26b, __retl_efault
.word 27b, __retl_efault, 28b, __retl_efault
.word 29b, __retl_efault, 30b, __retl_efault
.word 31b, __retl_efault, 32b, __retl_efault
.word 33b, __retl_efault, 34b, __retl_efault
.word 35b, __retl_efault, 36b, __retl_efault
.word 37b, __retl_efault, 38b, __retl_efault
.word 39b, __retl_efault, 40b, __retl_efault
.word 41b, __retl_efault, 42b, __retl_efault
.word 43b, __retl_efault, 44b, __retl_efault
.word 45b, __retl_efault, 46b, __retl_efault
.word 47b, __retl_efault, 48b, __retl_efault
.word 49b, __retl_efault, 50b, __retl_efault
.word 51b, __retl_efault, 52b, __retl_efault
.word 53b, __retl_efault, 54b, __retl_efault
.word 55b, __retl_efault, 56b, __retl_efault
.word 57b, __retl_efault, 58b, __retl_efault
.word 59b, __retl_efault, 60b, __retl_efault
.word 61b, __retl_efault, 62b, __retl_efault
.word 63b, __retl_efault, 64b, __retl_efault
.word 65b, __retl_efault, 66b, __retl_efault
.previous
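
The SIGN1..SIGN4 stubs exist because a compat task's int arguments arrive in 64-bit registers with the upper 32 bits in an unspecified state; sra REG, 0, REG sign-extends the low word in place before the jump into the 64-bit syscall. The C-visible effect, as a standalone sketch:

#include <stdio.h>

/* What "sra %oN, 0, %oN" does: reinterpret the low 32 bits as signed
 * and widen. Without it a 32-bit -1 could reach the 64-bit handler
 * as 0xffffffff (4294967295) rather than -1.
 */
static long sign_extend32(unsigned long reg)
{
	return (long)(int)(reg & 0xffffffffUL);
}

int main(void)
{
	unsigned long reg = 0xffffffffUL;	/* 32-bit -1 */
	printf("raw=%#lx  extended=%ld\n", reg, sign_extend32(reg));
	return 0;
}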


@@ -0,0 +1,682 @@
/* sys_sparc32.c: Conversion between 32bit and 64bit native syscalls.
*
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
*
* These routines maintain argument size conversion between 32bit and 64bit
* environments.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/resource.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/nfs_fs.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/cache.h>
#include <linux/nfsd/xdr.h>
#include <linux/nfsd/syscall.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/filter.h>
#include <linux/highmem.h>
#include <linux/highuid.h>
#include <linux/mman.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/icmpv6.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
#include <linux/binfmts.h>
#include <linux/dnotify.h>
#include <linux/security.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/ptrace.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <asm/fpumacro.h>
#include <asm/mmu_context.h>
#include <asm/compat_signal.h>
#ifdef CONFIG_SYSVIPC
asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth)
{
int version;
version = call >> 16; /* hack for backward compatibility */
call &= 0xffff;
switch (call) {
case SEMTIMEDOP:
if (fifth)
/* sign extend semid */
return compat_sys_semtimedop((int)first,
compat_ptr(ptr), second,
compat_ptr(fifth));
/* else fall through for normal semop() */
case SEMOP:
/* struct sembuf is the same on 32 and 64bit :)) */
/* sign extend semid */
return sys_semtimedop((int)first, compat_ptr(ptr), second,
NULL);
case SEMGET:
/* sign extend key, nsems */
return sys_semget((int)first, (int)second, third);
case SEMCTL:
/* sign extend semid, semnum */
return compat_sys_semctl((int)first, (int)second, third,
compat_ptr(ptr));
case MSGSND:
/* sign extend msqid */
return compat_sys_msgsnd((int)first, (int)second, third,
compat_ptr(ptr));
case MSGRCV:
/* sign extend msqid, msgtyp */
return compat_sys_msgrcv((int)first, second, (int)fifth,
third, version, compat_ptr(ptr));
case MSGGET:
/* sign extend key */
return sys_msgget((int)first, second);
case MSGCTL:
/* sign extend msqid */
return compat_sys_msgctl((int)first, second, compat_ptr(ptr));
case SHMAT:
/* sign extend shmid */
return compat_sys_shmat((int)first, second, third, version,
compat_ptr(ptr));
case SHMDT:
return sys_shmdt(compat_ptr(ptr));
case SHMGET:
/* sign extend key_t */
return sys_shmget((int)first, second, third);
case SHMCTL:
/* sign extend shmid */
return compat_sys_shmctl((int)first, second, compat_ptr(ptr));
default:
return -ENOSYS;
}
return -ENOSYS;
}
#endif
asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
{
if ((int)high < 0)
return -EINVAL;
else
return sys_truncate(path, (high << 32) | low);
}
asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
{
if ((int)high < 0)
return -EINVAL;
else
return sys_ftruncate(fd, (high << 32) | low);
}
static int cp_compat_stat64(struct kstat *stat,
struct compat_stat64 __user *statbuf)
{
int err;
err = put_user(huge_encode_dev(stat->dev), &statbuf->st_dev);
err |= put_user(stat->ino, &statbuf->st_ino);
err |= put_user(stat->mode, &statbuf->st_mode);
err |= put_user(stat->nlink, &statbuf->st_nlink);
err |= put_user(stat->uid, &statbuf->st_uid);
err |= put_user(stat->gid, &statbuf->st_gid);
err |= put_user(huge_encode_dev(stat->rdev), &statbuf->st_rdev);
err |= put_user(0, (unsigned long __user *) &statbuf->__pad3[0]);
err |= put_user(stat->size, &statbuf->st_size);
err |= put_user(stat->blksize, &statbuf->st_blksize);
err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[0]);
err |= put_user(0, (unsigned int __user *) &statbuf->__pad4[4]);
err |= put_user(stat->blocks, &statbuf->st_blocks);
err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
err |= put_user(0, &statbuf->__unused4);
err |= put_user(0, &statbuf->__unused5);
return err;
}
asmlinkage long compat_sys_stat64(char __user * filename,
struct compat_stat64 __user *statbuf)
{
struct kstat stat;
int error = vfs_stat(filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
asmlinkage long compat_sys_lstat64(char __user * filename,
struct compat_stat64 __user *statbuf)
{
struct kstat stat;
int error = vfs_lstat(filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
asmlinkage long compat_sys_fstat64(unsigned int fd,
struct compat_stat64 __user * statbuf)
{
struct kstat stat;
int error = vfs_fstat(fd, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
return error;
}
asmlinkage long compat_sys_fstatat64(unsigned int dfd, char __user *filename,
struct compat_stat64 __user * statbuf, int flag)
{
struct kstat stat;
int error = -EINVAL;
if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
goto out;
if (flag & AT_SYMLINK_NOFOLLOW)
error = vfs_lstat_fd(dfd, filename, &stat);
else
error = vfs_stat_fd(dfd, filename, &stat);
if (!error)
error = cp_compat_stat64(&stat, statbuf);
out:
return error;
}
asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2)
{
return sys_sysfs(option, arg1, arg2);
}
asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval)
{
struct timespec t;
int ret;
mm_segment_t old_fs = get_fs ();
set_fs (KERNEL_DS);
ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
set_fs (old_fs);
if (put_compat_timespec(&t, interval))
return -EFAULT;
return ret;
}
asmlinkage long compat_sys_rt_sigprocmask(int how,
compat_sigset_t __user *set,
compat_sigset_t __user *oset,
compat_size_t sigsetsize)
{
sigset_t s;
compat_sigset_t s32;
int ret;
mm_segment_t old_fs = get_fs();
if (set) {
if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
return -EFAULT;
switch (_NSIG_WORDS) {
case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
}
}
set_fs (KERNEL_DS);
ret = sys_rt_sigprocmask(how,
set ? (sigset_t __user *) &s : NULL,
oset ? (sigset_t __user *) &s : NULL,
sigsetsize);
set_fs (old_fs);
if (ret) return ret;
if (oset) {
switch (_NSIG_WORDS) {
case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
}
if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
return -EFAULT;
}
return 0;
}
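
The _NSIG_WORDS switches above use deliberate fallthrough to pack pairs of 32-bit sigset words into 64-bit kernel words on the way in, and split them again on the way out. The per-word conversion, as a standalone sketch (assuming a 64-bit unsigned long, as on sparc64):

#include <stdio.h>

static unsigned long pack_sig(unsigned int lo32, unsigned int hi32)
{
	return (unsigned long)lo32 | ((unsigned long)hi32 << 32);
}

static void unpack_sig(unsigned long w,
		       unsigned int *lo32, unsigned int *hi32)
{
	*lo32 = (unsigned int)w;
	*hi32 = (unsigned int)(w >> 32);
}

int main(void)
{
	unsigned int lo, hi;
	unsigned long w = pack_sig(0xdeadbeefu, 0x00c0ffeeu);

	unpack_sig(w, &lo, &hi);
	printf("%#lx -> lo=%#x hi=%#x\n", w, lo, hi);
	return 0;
}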
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
compat_size_t sigsetsize)
{
sigset_t s;
compat_sigset_t s32;
int ret;
mm_segment_t old_fs = get_fs();
set_fs (KERNEL_DS);
ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
set_fs (old_fs);
if (!ret) {
switch (_NSIG_WORDS) {
case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
}
if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
return -EFAULT;
}
return ret;
}
asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig,
struct compat_siginfo __user *uinfo)
{
siginfo_t info;
int ret;
mm_segment_t old_fs = get_fs();
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
set_fs (KERNEL_DS);
ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
set_fs (old_fs);
return ret;
}
asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act,
struct old_sigaction32 __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
WARN_ON_ONCE(sig >= 0);
sig = -sig;
if (act) {
compat_old_sigset_t mask;
u32 u_handler, u_restorer;
ret = get_user(u_handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(u_handler);
ret |= __get_user(u_restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(u_restorer);
ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
ret |= __get_user(mask, &act->sa_mask);
if (ret)
return ret;
new_ka.ka_restorer = NULL;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
}
return ret;
}
asmlinkage long compat_sys_rt_sigaction(int sig,
struct sigaction32 __user *act,
struct sigaction32 __user *oact,
void __user *restorer,
compat_size_t sigsetsize)
{
struct k_sigaction new_ka, old_ka;
int ret;
compat_sigset_t set32;
/* XXX: Don't preclude handling different sized sigset_t's. */
if (sigsetsize != sizeof(compat_sigset_t))
return -EINVAL;
if (act) {
u32 u_handler, u_restorer;
new_ka.ka_restorer = restorer;
ret = get_user(u_handler, &act->sa_handler);
new_ka.sa.sa_handler = compat_ptr(u_handler);
ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t));
switch (_NSIG_WORDS) {
case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32);
case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32);
case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32);
case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32);
}
ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
ret |= __get_user(u_restorer, &act->sa_restorer);
new_ka.sa.sa_restorer = compat_ptr(u_restorer);
if (ret)
return -EFAULT;
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
switch (_NSIG_WORDS) {
case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3];
case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2];
case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1];
case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0];
}
ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler);
ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t));
ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer);
if (ret)
ret = -EFAULT;
}
return ret;
}
/*
* sparc32_execve() executes a new program after the asm stub has set
* things up for us. This should basically do what I want it to.
*/
asmlinkage long sparc32_execve(struct pt_regs *regs)
{
int error, base = 0;
char *filename;
/* User register window flush is done by entry.S */
/* Check for indirect call. */
if ((u32)regs->u_regs[UREG_G1] == 0)
base = 1;
filename = getname(compat_ptr(regs->u_regs[base + UREG_I0]));
error = PTR_ERR(filename);
if (IS_ERR(filename))
goto out;
error = compat_do_execve(filename,
compat_ptr(regs->u_regs[base + UREG_I1]),
compat_ptr(regs->u_regs[base + UREG_I2]), regs);
putname(filename);
if (!error) {
fprs_write(0);
current_thread_info()->xfsr[0] = 0;
current_thread_info()->fpsaved[0] = 0;
regs->tstate &= ~TSTATE_PEF;
}
out:
return error;
}
#ifdef CONFIG_MODULES
asmlinkage long sys32_init_module(void __user *umod, u32 len,
const char __user *uargs)
{
return sys_init_module(umod, len, uargs);
}
asmlinkage long sys32_delete_module(const char __user *name_user,
unsigned int flags)
{
return sys_delete_module(name_user, flags);
}
#else /* CONFIG_MODULES */
asmlinkage long sys32_init_module(const char __user *name_user,
struct module __user *mod_user)
{
return -ENOSYS;
}
asmlinkage long sys32_delete_module(const char __user *name_user)
{
return -ENOSYS;
}
#endif /* CONFIG_MODULES */
asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
char __user *ubuf,
compat_size_t count,
unsigned long poshi,
unsigned long poslo)
{
return sys_pread64(fd, ubuf, count, (poshi << 32) | poslo);
}
asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
char __user *ubuf,
compat_size_t count,
unsigned long poshi,
unsigned long poslo)
{
return sys_pwrite64(fd, ubuf, count, (poshi << 32) | poslo);
}
asmlinkage long compat_sys_readahead(int fd,
unsigned long offhi,
unsigned long offlo,
compat_size_t count)
{
return sys_readahead(fd, (offhi << 32) | offlo, count);
}
long compat_sys_fadvise64(int fd,
unsigned long offhi,
unsigned long offlo,
compat_size_t len, int advice)
{
return sys_fadvise64_64(fd, (offhi << 32) | offlo, len, advice);
}
long compat_sys_fadvise64_64(int fd,
unsigned long offhi, unsigned long offlo,
unsigned long lenhi, unsigned long lenlo,
int advice)
{
return sys_fadvise64_64(fd,
(offhi << 32) | offlo,
(lenhi << 32) | lenlo,
advice);
}
asmlinkage long compat_sys_sendfile(int out_fd, int in_fd,
compat_off_t __user *offset,
compat_size_t count)
{
mm_segment_t old_fs = get_fs();
int ret;
off_t of;
if (offset && get_user(of, offset))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_sendfile(out_fd, in_fd,
offset ? (off_t __user *) &of : NULL,
count);
set_fs(old_fs);
if (offset && put_user(of, offset))
return -EFAULT;
return ret;
}
asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd,
compat_loff_t __user *offset,
compat_size_t count)
{
mm_segment_t old_fs = get_fs();
int ret;
loff_t lof;
if (offset && get_user(lof, offset))
return -EFAULT;
set_fs(KERNEL_DS);
ret = sys_sendfile64(out_fd, in_fd,
offset ? (loff_t __user *) &lof : NULL,
count);
set_fs(old_fs);
if (offset && put_user(lof, offset))
return -EFAULT;
return ret;
}
/* This is just a version for 32-bit applications which does
* not force O_LARGEFILE on.
*/
asmlinkage long sparc32_open(const char __user *filename,
int flags, int mode)
{
return do_sys_open(AT_FDCWD, filename, flags, mode);
}
extern unsigned long do_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, unsigned long new_addr);
asmlinkage unsigned long sys32_mremap(unsigned long addr,
unsigned long old_len, unsigned long new_len,
unsigned long flags, u32 __new_addr)
{
unsigned long ret = -EINVAL;
unsigned long new_addr = __new_addr;
if (unlikely(sparc_mmap_check(addr, old_len)))
goto out;
if (unlikely(sparc_mmap_check(new_addr, new_len)))
goto out;
down_write(&current->mm->mmap_sem);
ret = do_mremap(addr, old_len, new_len, flags, new_addr);
up_write(&current->mm->mmap_sem);
out:
return ret;
}
struct __sysctl_args32 {
u32 name;
int nlen;
u32 oldval;
u32 oldlenp;
u32 newval;
u32 newlen;
u32 __unused[4];
};
asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
{
#ifndef CONFIG_SYSCTL_SYSCALL
return -ENOSYS;
#else
struct __sysctl_args32 tmp;
int error;
size_t oldlen, __user *oldlenp = NULL;
unsigned long addr = (((unsigned long)&args->__unused[0]) + 7UL) & ~7UL;
if (copy_from_user(&tmp, args, sizeof(tmp)))
return -EFAULT;
if (tmp.oldval && tmp.oldlenp) {
/* Duh, this is ugly and might not work if sysctl_args
is in read-only memory, but do_sysctl does indirectly
a lot of uaccess in both directions and we'd have to
basically copy the whole sysctl.c here, and
glibc's __sysctl uses rw memory for the structure
anyway. */
if (get_user(oldlen, (u32 __user *)(unsigned long)tmp.oldlenp) ||
put_user(oldlen, (size_t __user *)addr))
return -EFAULT;
oldlenp = (size_t __user *)addr;
}
lock_kernel();
error = do_sysctl((int __user *)(unsigned long) tmp.name,
tmp.nlen,
(void __user *)(unsigned long) tmp.oldval,
oldlenp,
(void __user *)(unsigned long) tmp.newval,
tmp.newlen);
unlock_kernel();
if (oldlenp) {
if (!error) {
if (get_user(oldlen, (size_t __user *)addr) ||
put_user(oldlen, (u32 __user *)(unsigned long) tmp.oldlenp))
error = -EFAULT;
}
if (copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
error = -EFAULT;
}
return error;
#endif
}
long sys32_lookup_dcookie(unsigned long cookie_high,
unsigned long cookie_low,
char __user *buf, size_t len)
{
return sys_lookup_dcookie((cookie_high << 32) | cookie_low,
buf, len);
}
long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, int flags)
{
return sys_sync_file_range(fd,
(off_high << 32) | off_low,
(nb_high << 32) | nb_low,
flags);
}
asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
u32 lenhi, u32 lenlo)
{
return sys_fallocate(fd, mode, ((loff_t)offhi << 32) | offlo,
((loff_t)lenhi << 32) | lenlo);
}
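
A recurring pattern in this file: a 64-bit offset arrives from 32-bit userland as two registers and is reassembled with (hi << 32) | lo. The fallocate variant above must cast its u32 halves up first, since shifting a 32-bit value left by 32 is undefined. A standalone sketch:

#include <stdio.h>

static long long join64(unsigned int hi, unsigned int lo)
{
	return ((long long)hi << 32) | lo;	/* widen before shifting */
}

int main(void)
{
	printf("%#llx\n", join64(0x1, 0x80000000u));	/* 0x180000000 */
	return 0;
}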

Some files were not shown because too many files have changed in this diff.