powerpc/4xx: Create 4xx pseudo-platform in platforms/4xx

We have a lot of code in sysdev for supporting 4xx, i.e. either 40x or
44x. Instead it would be cleaner if it was all in platforms/4xx.

This is slightly odd in that we don't actually define any machines in
the 4xx platform, as is usual for a platform directory. But still it
seems like a better result to have all this related code in a directory
by itself.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
This commit is contained in:
Michael Ellerman
2017-08-08 16:39:20 +10:00
parent 5b4e28577b
commit bfa9a2eb92
12 changed files with 10 additions and 11 deletions

View file

@@ -36,19 +36,9 @@ obj-$(CONFIG_AXON_RAM) += axonram.o
 obj-$(CONFIG_PPC_INDIRECT_PCI) += indirect_pci.o
 obj-$(CONFIG_PPC_I8259) += i8259.o
 obj-$(CONFIG_IPIC) += ipic.o
-obj-$(CONFIG_4xx) += uic.o
-obj-$(CONFIG_PPC4xx_OCM) += ppc4xx_ocm.o
-obj-$(CONFIG_4xx_SOC) += ppc4xx_soc.o
 obj-$(CONFIG_XILINX_VIRTEX) += xilinx_intc.o
 obj-$(CONFIG_XILINX_PCI) += xilinx_pci.o
 obj-$(CONFIG_OF_RTC) += of_rtc.o
-ifeq ($(CONFIG_PCI),y)
-obj-$(CONFIG_4xx) += ppc4xx_pci.o
-endif
-obj-$(CONFIG_PPC4xx_HSTA_MSI) += ppc4xx_hsta_msi.o
-obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o
-obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o
-obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o
 obj-$(CONFIG_CPM) += cpm_common.o
 obj-$(CONFIG_CPM2) += cpm2.o cpm2_pic.o

View file

@@ -1,346 +0,0 @@
/*
* PowerPC 4xx Clock and Power Management
*
* Copyright (C) 2010, Applied Micro Circuits Corporation
* Victor Gallardo (vgallardo@apm.com)
*
* Based on arch/powerpc/platforms/44x/idle.c:
* Jerone Young <jyoung5@us.ibm.com>
* Copyright 2008 IBM Corp.
*
* Based on arch/powerpc/sysdev/fsl_pmc.c:
* Anton Vorontsov <avorontsov@ru.mvista.com>
* Copyright 2009 MontaVista Software, Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/of_platform.h>
#include <linux/sysfs.h>
#include <linux/cpu.h>
#include <linux/suspend.h>
#include <asm/dcr.h>
#include <asm/dcr-native.h>
#include <asm/machdep.h>
#define CPM_ER 0
#define CPM_FR 1
#define CPM_SR 2
#define CPM_IDLE_WAIT 0
#define CPM_IDLE_DOZE 1
struct cpm {
dcr_host_t dcr_host;
unsigned int dcr_offset[3];
unsigned int powersave_off;
unsigned int unused;
unsigned int idle_doze;
unsigned int standby;
unsigned int suspend;
};
static struct cpm cpm;
struct cpm_idle_mode {
unsigned int enabled;
const char *name;
};
static struct cpm_idle_mode idle_mode[] = {
[CPM_IDLE_WAIT] = { 1, "wait" }, /* default */
[CPM_IDLE_DOZE] = { 0, "doze" },
};
static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask)
{
unsigned int value;
/* CPM controller supports 3 different types of sleep interface
* known as class 1, 2 and 3. For class 1 units, they are
* unconditionally put to sleep when the corresponding CPM bit is
set. For class 2 and 3 units this is not the case; if they can be
put to sleep, they will. Here we do not verify, we just
* set them and expect them to eventually go off when they can.
*/
value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]);
dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask);
/* return old state, to restore later if needed */
return value;
}
static void cpm_idle_wait(void)
{
unsigned long msr_save;
/* save off initial state */
msr_save = mfmsr();
/* sync required when CPM0_ER[CPU] is set */
mb();
/* set wait state MSR */
mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE);
isync();
/* return to initial state */
mtmsr(msr_save);
isync();
}
static void cpm_idle_sleep(unsigned int mask)
{
unsigned int er_save;
/* update CPM_ER state */
er_save = cpm_set(CPM_ER, mask);
/* go to wait state so that CPM0_ER[CPU] can take effect */
cpm_idle_wait();
/* restore CPM_ER state */
dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save);
}
static void cpm_idle_doze(void)
{
cpm_idle_sleep(cpm.idle_doze);
}
static void cpm_idle_config(int mode)
{
int i;
if (idle_mode[mode].enabled)
return;
for (i = 0; i < ARRAY_SIZE(idle_mode); i++)
idle_mode[i].enabled = 0;
idle_mode[mode].enabled = 1;
}
static ssize_t cpm_idle_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
char *s = buf;
int i;
for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
if (idle_mode[i].enabled)
s += sprintf(s, "[%s] ", idle_mode[i].name);
else
s += sprintf(s, "%s ", idle_mode[i].name);
}
*(s-1) = '\n'; /* convert the last space to a newline */
return s - buf;
}
static ssize_t cpm_idle_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t n)
{
int i;
char *p;
int len;
p = memchr(buf, '\n', n);
len = p ? p - buf : n;
for (i = 0; i < ARRAY_SIZE(idle_mode); i++) {
if (strncmp(buf, idle_mode[i].name, len) == 0) {
cpm_idle_config(i);
return n;
}
}
return -EINVAL;
}
static struct kobj_attribute cpm_idle_attr =
__ATTR(idle, 0644, cpm_idle_show, cpm_idle_store);
static void cpm_idle_config_sysfs(void)
{
struct device *dev;
unsigned long ret;
dev = get_cpu_device(0);
ret = sysfs_create_file(&dev->kobj,
&cpm_idle_attr.attr);
if (ret)
printk(KERN_WARNING
"cpm: failed to create idle sysfs entry\n");
}
static void cpm_idle(void)
{
if (idle_mode[CPM_IDLE_DOZE].enabled)
cpm_idle_doze();
else
cpm_idle_wait();
}
static int cpm_suspend_valid(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
return !!cpm.standby;
case PM_SUSPEND_MEM:
return !!cpm.suspend;
default:
return 0;
}
}
static void cpm_suspend_standby(unsigned int mask)
{
unsigned long tcr_save;
/* disable decrement interrupt */
tcr_save = mfspr(SPRN_TCR);
mtspr(SPRN_TCR, tcr_save & ~TCR_DIE);
/* go to sleep state */
cpm_idle_sleep(mask);
/* restore decrement interrupt */
mtspr(SPRN_TCR, tcr_save);
}
static int cpm_suspend_enter(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
cpm_suspend_standby(cpm.standby);
break;
case PM_SUSPEND_MEM:
cpm_suspend_standby(cpm.suspend);
break;
}
return 0;
}
static struct platform_suspend_ops cpm_suspend_ops = {
.valid = cpm_suspend_valid,
.enter = cpm_suspend_enter,
};
static int cpm_get_uint_property(struct device_node *np,
const char *name)
{
int len;
const unsigned int *prop = of_get_property(np, name, &len);
if (prop == NULL || len < sizeof(u32))
return 0;
return *prop;
}
static int __init cpm_init(void)
{
struct device_node *np;
int dcr_base, dcr_len;
int ret = 0;
if (!cpm.powersave_off) {
cpm_idle_config(CPM_IDLE_WAIT);
ppc_md.power_save = &cpm_idle;
}
np = of_find_compatible_node(NULL, NULL, "ibm,cpm");
if (!np) {
ret = -EINVAL;
goto out;
}
dcr_base = dcr_resource_start(np, 0);
dcr_len = dcr_resource_len(np, 0);
if (dcr_base == 0 || dcr_len == 0) {
printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
np->full_name);
ret = -EINVAL;
goto node_put;
}
cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
if (!DCR_MAP_OK(cpm.dcr_host)) {
printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
np->full_name);
ret = -EINVAL;
goto node_put;
}
/* All 4xx SoCs with a CPM controller have one of two
different orders for the CPM registers. Some have the
* CPM registers in the following order (ER,FR,SR). The
* others have them in the following order (SR,ER,FR).
*/
if (cpm_get_uint_property(np, "er-offset") == 0) {
cpm.dcr_offset[CPM_ER] = 0;
cpm.dcr_offset[CPM_FR] = 1;
cpm.dcr_offset[CPM_SR] = 2;
} else {
cpm.dcr_offset[CPM_ER] = 1;
cpm.dcr_offset[CPM_FR] = 2;
cpm.dcr_offset[CPM_SR] = 0;
}
/* Now let's see what IPs to turn off for the following modes */
cpm.unused = cpm_get_uint_property(np, "unused-units");
cpm.idle_doze = cpm_get_uint_property(np, "idle-doze");
cpm.standby = cpm_get_uint_property(np, "standby");
cpm.suspend = cpm_get_uint_property(np, "suspend");
/* If some IPs are unused let's turn them off now */
if (cpm.unused) {
cpm_set(CPM_ER, cpm.unused);
cpm_set(CPM_FR, cpm.unused);
}
/* Now let's export interfaces */
if (!cpm.powersave_off && cpm.idle_doze)
cpm_idle_config_sysfs();
if (cpm.standby || cpm.suspend)
suspend_set_ops(&cpm_suspend_ops);
node_put:
of_node_put(np);
out:
return ret;
}
late_initcall(cpm_init);
static int __init cpm_powersave_off(char *arg)
{
cpm.powersave_off = 1;
return 0;
}
__setup("powersave=off", cpm_powersave_off);

View file

@@ -1,208 +0,0 @@
/*
* PPC4xx gpio driver
*
* Copyright (c) 2008 Harris Corporation
* Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
* Copyright (c) MontaVista Software, Inc. 2008.
*
* Author: Steve Falco <sfalco@harris.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/gpio/driver.h>
#include <linux/types.h>
#include <linux/slab.h>
#define GPIO_MASK(gpio) (0x80000000 >> (gpio))
#define GPIO_MASK2(gpio) (0xc0000000 >> ((gpio) * 2))
/* Physical GPIO register layout */
struct ppc4xx_gpio {
__be32 or;
__be32 tcr;
__be32 osrl;
__be32 osrh;
__be32 tsrl;
__be32 tsrh;
__be32 odr;
__be32 ir;
__be32 rr1;
__be32 rr2;
__be32 rr3;
__be32 reserved1;
__be32 isr1l;
__be32 isr1h;
__be32 isr2l;
__be32 isr2h;
__be32 isr3l;
__be32 isr3h;
};
struct ppc4xx_gpio_chip {
struct of_mm_gpio_chip mm_gc;
spinlock_t lock;
};
/*
* GPIO LIB API implementation for GPIOs
*
* There are a maximum of 32 gpios in each gpio controller.
*/
static int ppc4xx_gpio_get(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
return !!(in_be32(&regs->ir) & GPIO_MASK(gpio));
}
static inline void
__ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
if (val)
setbits32(&regs->or, GPIO_MASK(gpio));
else
clrbits32(&regs->or, GPIO_MASK(gpio));
}
static void
ppc4xx_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
__ppc4xx_gpio_set(gc, gpio, val);
spin_unlock_irqrestore(&chip->lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
}
static int ppc4xx_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
/* Disable open-drain function */
clrbits32(&regs->odr, GPIO_MASK(gpio));
/* Float the pin */
clrbits32(&regs->tcr, GPIO_MASK(gpio));
/* Bits 0-15 use TSRL/OSRL, bits 16-31 use TSRH/OSRH */
if (gpio < 16) {
clrbits32(&regs->osrl, GPIO_MASK2(gpio));
clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
} else {
clrbits32(&regs->osrh, GPIO_MASK2(gpio));
clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
}
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
}
static int
ppc4xx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
struct ppc4xx_gpio_chip *chip = gpiochip_get_data(gc);
struct ppc4xx_gpio __iomem *regs = mm_gc->regs;
unsigned long flags;
spin_lock_irqsave(&chip->lock, flags);
/* First set initial value */
__ppc4xx_gpio_set(gc, gpio, val);
/* Disable open-drain function */
clrbits32(&regs->odr, GPIO_MASK(gpio));
/* Drive the pin */
setbits32(&regs->tcr, GPIO_MASK(gpio));
/* Bits 0-15 use TSRL, bits 16-31 use TSRH */
if (gpio < 16) {
clrbits32(&regs->osrl, GPIO_MASK2(gpio));
clrbits32(&regs->tsrl, GPIO_MASK2(gpio));
} else {
clrbits32(&regs->osrh, GPIO_MASK2(gpio));
clrbits32(&regs->tsrh, GPIO_MASK2(gpio));
}
spin_unlock_irqrestore(&chip->lock, flags);
pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val);
return 0;
}
static int __init ppc4xx_add_gpiochips(void)
{
struct device_node *np;
for_each_compatible_node(np, NULL, "ibm,ppc4xx-gpio") {
int ret;
struct ppc4xx_gpio_chip *ppc4xx_gc;
struct of_mm_gpio_chip *mm_gc;
struct gpio_chip *gc;
ppc4xx_gc = kzalloc(sizeof(*ppc4xx_gc), GFP_KERNEL);
if (!ppc4xx_gc) {
ret = -ENOMEM;
goto err;
}
spin_lock_init(&ppc4xx_gc->lock);
mm_gc = &ppc4xx_gc->mm_gc;
gc = &mm_gc->gc;
gc->ngpio = 32;
gc->direction_input = ppc4xx_gpio_dir_in;
gc->direction_output = ppc4xx_gpio_dir_out;
gc->get = ppc4xx_gpio_get;
gc->set = ppc4xx_gpio_set;
ret = of_mm_gpiochip_add_data(np, mm_gc, ppc4xx_gc);
if (ret)
goto err;
continue;
err:
pr_err("%s: registration failed with status %d\n",
np->full_name, ret);
kfree(ppc4xx_gc);
/* try others anyway */
}
return 0;
}
arch_initcall(ppc4xx_add_gpiochips);

View file

@@ -1,212 +0,0 @@
/*
* MSI support for PPC4xx SoCs using High Speed Transfer Assist (HSTA) for
* generation of the interrupt.
*
* Copyright © 2013 Alistair Popple <alistair@popple.id.au> IBM Corporation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/semaphore.h>
#include <asm/msi_bitmap.h>
#include <asm/ppc-pci.h>
struct ppc4xx_hsta_msi {
struct device *dev;
/* The ioremapped HSTA MSI IO space */
u32 __iomem *data;
/* Physical address of HSTA MSI IO space */
u64 address;
struct msi_bitmap bmp;
/* An array mapping offsets to hardware IRQs */
int *irq_map;
/* Number of hwirqs supported */
int irq_count;
};
static struct ppc4xx_hsta_msi ppc4xx_hsta_msi;
static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
struct msi_msg msg;
struct msi_desc *entry;
int irq, hwirq;
u64 addr;
/* We don't support MSI-X */
if (type == PCI_CAP_ID_MSIX) {
pr_debug("%s: MSI-X not supported.\n", __func__);
return -EINVAL;
}
for_each_pci_msi_entry(entry, dev) {
irq = msi_bitmap_alloc_hwirqs(&ppc4xx_hsta_msi.bmp, 1);
if (irq < 0) {
pr_debug("%s: Failed to allocate msi interrupt\n",
__func__);
return irq;
}
hwirq = ppc4xx_hsta_msi.irq_map[irq];
if (!hwirq) {
pr_err("%s: Failed mapping irq %d\n", __func__, irq);
return -EINVAL;
}
/*
* HSTA generates interrupts on writes to 128-bit aligned
* addresses.
*/
addr = ppc4xx_hsta_msi.address + irq*0x10;
msg.address_hi = upper_32_bits(addr);
msg.address_lo = lower_32_bits(addr);
/* Data is not used by the HSTA. */
msg.data = 0;
pr_debug("%s: Setup irq %d (0x%0llx)\n", __func__, hwirq,
(((u64) msg.address_hi) << 32) | msg.address_lo);
if (irq_set_msi_desc(hwirq, entry)) {
pr_err(
"%s: Invalid hwirq %d specified in device tree\n",
__func__, hwirq);
msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1);
return -EINVAL;
}
pci_write_msi_msg(hwirq, &msg);
}
return 0;
}
static int hsta_find_hwirq_offset(int hwirq)
{
int irq;
/* Find the offset given the hwirq */
for (irq = 0; irq < ppc4xx_hsta_msi.irq_count; irq++)
if (ppc4xx_hsta_msi.irq_map[irq] == hwirq)
return irq;
return -EINVAL;
}
static void hsta_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
int irq;
for_each_pci_msi_entry(entry, dev) {
if (!entry->irq)
continue;
irq = hsta_find_hwirq_offset(entry->irq);
/* entry->irq should always be in irq_map */
BUG_ON(irq < 0);
irq_set_msi_desc(entry->irq, NULL);
msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1);
pr_debug("%s: Teardown IRQ %u (index %u)\n", __func__,
entry->irq, irq);
}
}
static int hsta_msi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *mem;
int irq, ret, irq_count;
struct pci_controller *phb;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem) {
dev_err(dev, "Unable to get mmio space\n");
return -EINVAL;
}
irq_count = of_irq_count(dev->of_node);
if (!irq_count) {
dev_err(dev, "Unable to find IRQ range\n");
return -EINVAL;
}
ppc4xx_hsta_msi.dev = dev;
ppc4xx_hsta_msi.address = mem->start;
ppc4xx_hsta_msi.data = ioremap(mem->start, resource_size(mem));
ppc4xx_hsta_msi.irq_count = irq_count;
if (!ppc4xx_hsta_msi.data) {
dev_err(dev, "Unable to map memory\n");
return -ENOMEM;
}
ret = msi_bitmap_alloc(&ppc4xx_hsta_msi.bmp, irq_count, dev->of_node);
if (ret)
goto out;
ppc4xx_hsta_msi.irq_map = kmalloc(sizeof(int) * irq_count, GFP_KERNEL);
if (!ppc4xx_hsta_msi.irq_map) {
ret = -ENOMEM;
goto out1;
}
/* Setup a mapping from irq offsets to hardware irq numbers */
for (irq = 0; irq < irq_count; irq++) {
ppc4xx_hsta_msi.irq_map[irq] =
irq_of_parse_and_map(dev->of_node, irq);
if (!ppc4xx_hsta_msi.irq_map[irq]) {
dev_err(dev, "Unable to map IRQ\n");
ret = -EINVAL;
goto out2;
}
}
list_for_each_entry(phb, &hose_list, list_node) {
phb->controller_ops.setup_msi_irqs = hsta_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = hsta_teardown_msi_irqs;
}
return 0;
out2:
kfree(ppc4xx_hsta_msi.irq_map);
out1:
msi_bitmap_free(&ppc4xx_hsta_msi.bmp);
out:
iounmap(ppc4xx_hsta_msi.data);
return ret;
}
static const struct of_device_id hsta_msi_ids[] = {
{
.compatible = "ibm,hsta-msi",
},
{}
};
static struct platform_driver hsta_msi_driver = {
.probe = hsta_msi_probe,
.driver = {
.name = "hsta-msi",
.of_match_table = hsta_msi_ids,
},
};
static int hsta_msi_init(void)
{
return platform_driver_register(&hsta_msi_driver);
}
subsys_initcall(hsta_msi_init);
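
For orientation (an editorial sketch, not part of the commit): the HSTA backend above only comes into play once a PCI driver asks the generic PCI core for an MSI. The caller side might look like the fragment below, where my_dev_handler, my_dev_setup_irq and the "my-dev" name are hypothetical; pci_enable_msi() and request_irq() are the standard kernel APIs that end up in hsta_setup_msi_irqs() via the controller ops installed in hsta_msi_probe().

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical driver interrupt handler */
static irqreturn_t my_dev_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct pci_dev *pdev)
{
	int err;

	/* The PCI core routes this request to hsta_setup_msi_irqs() */
	err = pci_enable_msi(pdev);
	if (err)
		return err;

	/* pdev->irq is now the hwirq that the HSTA backend programmed */
	return request_irq(pdev->irq, my_dev_handler, 0, "my-dev", pdev);
}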

View file

@@ -1,286 +0,0 @@
/*
* Adding PCI-E MSI support for PPC4XX SoCs.
*
* Copyright (c) 2010, Applied Micro Circuits Corporation
* Authors: Tirumala R Marri <tmarri@apm.com>
* Feng Kan <fkan@apm.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <asm/prom.h>
#include <asm/hw_irq.h>
#include <asm/ppc-pci.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/msi_bitmap.h>
#define PEIH_TERMADH 0x00
#define PEIH_TERMADL 0x08
#define PEIH_MSIED 0x10
#define PEIH_MSIMK 0x18
#define PEIH_MSIASS 0x20
#define PEIH_FLUSH0 0x30
#define PEIH_FLUSH1 0x38
#define PEIH_CNTRST 0x48
static int msi_irqs;
struct ppc4xx_msi {
u32 msi_addr_lo;
u32 msi_addr_hi;
void __iomem *msi_regs;
int *msi_virqs;
struct msi_bitmap bitmap;
struct device_node *msi_dev;
};
static struct ppc4xx_msi ppc4xx_msi;
static int ppc4xx_msi_init_allocator(struct platform_device *dev,
struct ppc4xx_msi *msi_data)
{
int err;
err = msi_bitmap_alloc(&msi_data->bitmap, msi_irqs,
dev->dev.of_node);
if (err)
return err;
err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap);
if (err < 0) {
msi_bitmap_free(&msi_data->bitmap);
return err;
}
return 0;
}
static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
int int_no = -ENOMEM;
unsigned int virq;
struct msi_msg msg;
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
dev_dbg(&dev->dev, "PCIE-MSI:%s called. vec %x type %d\n",
__func__, nvec, type);
if (type == PCI_CAP_ID_MSIX)
pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n");
msi_data->msi_virqs = kmalloc((msi_irqs) * sizeof(int), GFP_KERNEL);
if (!msi_data->msi_virqs)
return -ENOMEM;
for_each_pci_msi_entry(entry, dev) {
int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1);
if (int_no >= 0)
break;
if (int_no < 0) {
pr_debug("%s: fail allocating msi interrupt\n",
__func__);
}
virq = irq_of_parse_and_map(msi_data->msi_dev, int_no);
if (!virq) {
dev_err(&dev->dev, "%s: fail mapping irq\n", __func__);
msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1);
return -ENOSPC;
}
dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq);
/* Setup msi address space */
msg.address_hi = msi_data->msi_addr_hi;
msg.address_lo = msi_data->msi_addr_lo;
irq_set_msi_desc(virq, entry);
msg.data = int_no;
pci_write_msi_msg(virq, &msg);
}
return 0;
}
void ppc4xx_teardown_msi_irqs(struct pci_dev *dev)
{
struct msi_desc *entry;
struct ppc4xx_msi *msi_data = &ppc4xx_msi;
irq_hw_number_t hwirq;
dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n");
for_each_pci_msi_entry(entry, dev) {
if (!entry->irq)
continue;
hwirq = virq_to_hw(entry->irq);
irq_set_msi_desc(entry->irq, NULL);
irq_dispose_mapping(entry->irq);
msi_bitmap_free_hwirqs(&msi_data->bitmap, hwirq, 1);
}
}
static int ppc4xx_setup_pcieh_hw(struct platform_device *dev,
struct resource res, struct ppc4xx_msi *msi)
{
const u32 *msi_data;
const u32 *msi_mask;
const u32 *sdr_addr;
dma_addr_t msi_phys;
void *msi_virt;
sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL);
if (!sdr_addr)
return -1;
mtdcri(SDR0, *sdr_addr, upper_32_bits(res.start)); /*HIGH addr */
mtdcri(SDR0, *sdr_addr + 1, lower_32_bits(res.start)); /* Low addr */
msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi");
if (!msi->msi_dev)
return -ENODEV;
msi->msi_regs = of_iomap(msi->msi_dev, 0);
if (!msi->msi_regs) {
dev_err(&dev->dev, "of_iomap failed\n");
return -ENOMEM;
}
dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n",
(u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs));
msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL);
if (!msi_virt)
return -ENOMEM;
msi->msi_addr_hi = upper_32_bits(msi_phys);
msi->msi_addr_lo = lower_32_bits(msi_phys & 0xffffffff);
dev_dbg(&dev->dev, "PCIE-MSI: msi address high 0x%x, low 0x%x\n",
msi->msi_addr_hi, msi->msi_addr_lo);
/* Program the Interrupt handler Termination addr registers */
out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi);
out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo);
msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL);
if (!msi_data)
return -1;
msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL);
if (!msi_mask)
return -1;
/* Program MSI Expected data and Mask bits */
out_be32(msi->msi_regs + PEIH_MSIED, *msi_data);
out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask);
dma_free_coherent(&dev->dev, 64, msi_virt, msi_phys);
return 0;
}
static int ppc4xx_of_msi_remove(struct platform_device *dev)
{
struct ppc4xx_msi *msi = dev->dev.platform_data;
int i;
int virq;
for (i = 0; i < msi_irqs; i++) {
virq = msi->msi_virqs[i];
if (virq)
irq_dispose_mapping(virq);
}
if (msi->bitmap.bitmap)
msi_bitmap_free(&msi->bitmap);
iounmap(msi->msi_regs);
of_node_put(msi->msi_dev);
kfree(msi);
return 0;
}
static int ppc4xx_msi_probe(struct platform_device *dev)
{
struct ppc4xx_msi *msi;
struct resource res;
int err = 0;
struct pci_controller *phb;
dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n");
msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL);
if (!msi) {
dev_err(&dev->dev, "No memory for MSI structure\n");
return -ENOMEM;
}
dev->dev.platform_data = msi;
/* Get MSI ranges */
err = of_address_to_resource(dev->dev.of_node, 0, &res);
if (err) {
dev_err(&dev->dev, "%s resource error!\n",
dev->dev.of_node->full_name);
goto error_out;
}
msi_irqs = of_irq_count(dev->dev.of_node);
if (!msi_irqs)
return -ENODEV;
if (ppc4xx_setup_pcieh_hw(dev, res, msi))
goto error_out;
err = ppc4xx_msi_init_allocator(dev, msi);
if (err) {
dev_err(&dev->dev, "Error allocating MSI bitmap\n");
goto error_out;
}
ppc4xx_msi = *msi;
list_for_each_entry(phb, &hose_list, list_node) {
phb->controller_ops.setup_msi_irqs = ppc4xx_setup_msi_irqs;
phb->controller_ops.teardown_msi_irqs = ppc4xx_teardown_msi_irqs;
}
return err;
error_out:
ppc4xx_of_msi_remove(dev);
return err;
}
static const struct of_device_id ppc4xx_msi_ids[] = {
{
.compatible = "amcc,ppc4xx-msi",
},
{}
};
static struct platform_driver ppc4xx_msi_driver = {
.probe = ppc4xx_msi_probe,
.remove = ppc4xx_of_msi_remove,
.driver = {
.name = "ppc4xx-msi",
.of_match_table = ppc4xx_msi_ids,
},
};
static __init int ppc4xx_msi_init(void)
{
return platform_driver_register(&ppc4xx_msi_driver);
}
subsys_initcall(ppc4xx_msi_init);

View file

@@ -1,416 +0,0 @@
/*
* PowerPC 4xx OCM memory allocation support
*
* (C) Copyright 2009, Applied Micro Circuits Corporation
* Victor Gallardo (vgallardo@amcc.com)
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/rheap.h>
#include <asm/ppc4xx_ocm.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#define OCM_DISABLED 0
#define OCM_ENABLED 1
struct ocm_block {
struct list_head list;
void __iomem *addr;
int size;
const char *owner;
};
/* non-cached or cached region */
struct ocm_region {
phys_addr_t phys;
void __iomem *virt;
int memtotal;
int memfree;
rh_info_t *rh;
struct list_head list;
};
struct ocm_info {
int index;
int status;
int ready;
phys_addr_t phys;
int alignment;
int memtotal;
int cache_size;
struct ocm_region nc; /* non-cached region */
struct ocm_region c; /* cached region */
};
static struct ocm_info *ocm_nodes;
static int ocm_count;
static struct ocm_info *ocm_get_node(unsigned int index)
{
if (index >= ocm_count) {
printk(KERN_ERR "PPC4XX OCM: invalid index");
return NULL;
}
return &ocm_nodes[index];
}
static int ocm_free_region(struct ocm_region *ocm_reg, const void *addr)
{
struct ocm_block *blk, *tmp;
unsigned long offset;
if (!ocm_reg->virt)
return 0;
list_for_each_entry_safe(blk, tmp, &ocm_reg->list, list) {
if (blk->addr == addr) {
offset = addr - ocm_reg->virt;
ocm_reg->memfree += blk->size;
rh_free(ocm_reg->rh, offset);
list_del(&blk->list);
kfree(blk);
return 1;
}
}
return 0;
}
static void __init ocm_init_node(int count, struct device_node *node)
{
struct ocm_info *ocm;
const unsigned int *cell_index;
const unsigned int *cache_size;
int len;
struct resource rsrc;
int ioflags;
ocm = ocm_get_node(count);
cell_index = of_get_property(node, "cell-index", &len);
if (!cell_index) {
printk(KERN_ERR "PPC4XX OCM: missing cell-index property");
return;
}
ocm->index = *cell_index;
if (of_device_is_available(node))
ocm->status = OCM_ENABLED;
cache_size = of_get_property(node, "cached-region-size", &len);
if (cache_size)
ocm->cache_size = *cache_size;
if (of_address_to_resource(node, 0, &rsrc)) {
printk(KERN_ERR "PPC4XX OCM%d: could not get resource address\n",
ocm->index);
return;
}
ocm->phys = rsrc.start;
ocm->memtotal = (rsrc.end - rsrc.start + 1);
printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (%s)\n",
ocm->index, ocm->memtotal,
(ocm->status == OCM_DISABLED) ? "disabled" : "enabled");
if (ocm->status == OCM_DISABLED)
return;
/* request region */
if (!request_mem_region(ocm->phys, ocm->memtotal, "ppc4xx_ocm")) {
printk(KERN_ERR "PPC4XX OCM%d: could not request region\n",
ocm->index);
return;
}
/* Configure non-cached and cached regions */
ocm->nc.phys = ocm->phys;
ocm->nc.memtotal = ocm->memtotal - ocm->cache_size;
ocm->nc.memfree = ocm->nc.memtotal;
ocm->c.phys = ocm->phys + ocm->nc.memtotal;
ocm->c.memtotal = ocm->cache_size;
ocm->c.memfree = ocm->c.memtotal;
if (ocm->nc.memtotal == 0)
ocm->nc.phys = 0;
if (ocm->c.memtotal == 0)
ocm->c.phys = 0;
printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (non-cached)\n",
ocm->index, ocm->nc.memtotal);
printk(KERN_INFO "PPC4XX OCM%d: %d Bytes (cached)\n",
ocm->index, ocm->c.memtotal);
/* ioremap the non-cached region */
if (ocm->nc.memtotal) {
ioflags = _PAGE_NO_CACHE | _PAGE_GUARDED | _PAGE_EXEC;
ocm->nc.virt = __ioremap(ocm->nc.phys, ocm->nc.memtotal,
ioflags);
if (!ocm->nc.virt) {
printk(KERN_ERR
"PPC4XX OCM%d: failed to ioremap non-cached memory\n",
ocm->index);
ocm->nc.memfree = 0;
return;
}
}
/* ioremap the cached region */
if (ocm->c.memtotal) {
ioflags = _PAGE_EXEC;
ocm->c.virt = __ioremap(ocm->c.phys, ocm->c.memtotal,
ioflags);
if (!ocm->c.virt) {
printk(KERN_ERR
"PPC4XX OCM%d: failed to ioremap cached memory\n",
ocm->index);
ocm->c.memfree = 0;
return;
}
}
/* Create Remote Heaps */
ocm->alignment = 4; /* default 4 byte alignment */
if (ocm->nc.virt) {
ocm->nc.rh = rh_create(ocm->alignment);
rh_attach_region(ocm->nc.rh, 0, ocm->nc.memtotal);
}
if (ocm->c.virt) {
ocm->c.rh = rh_create(ocm->alignment);
rh_attach_region(ocm->c.rh, 0, ocm->c.memtotal);
}
INIT_LIST_HEAD(&ocm->nc.list);
INIT_LIST_HEAD(&ocm->c.list);
ocm->ready = 1;
return;
}
static int ocm_debugfs_show(struct seq_file *m, void *v)
{
struct ocm_block *blk, *tmp;
unsigned int i;
for (i = 0; i < ocm_count; i++) {
struct ocm_info *ocm = ocm_get_node(i);
if (!ocm || !ocm->ready)
continue;
seq_printf(m, "PPC4XX OCM : %d\n", ocm->index);
seq_printf(m, "PhysAddr : 0x%llx\n", ocm->phys);
seq_printf(m, "MemTotal : %d Bytes\n", ocm->memtotal);
seq_printf(m, "MemTotal(NC) : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "MemTotal(C) : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "\n");
seq_printf(m, "NC.PhysAddr : 0x%llx\n", ocm->nc.phys);
seq_printf(m, "NC.VirtAddr : 0x%p\n", ocm->nc.virt);
seq_printf(m, "NC.MemTotal : %d Bytes\n", ocm->nc.memtotal);
seq_printf(m, "NC.MemFree : %d Bytes\n", ocm->nc.memfree);
list_for_each_entry_safe(blk, tmp, &ocm->nc.list, list) {
seq_printf(m, "NC.MemUsed : %d Bytes (%s)\n",
blk->size, blk->owner);
}
seq_printf(m, "\n");
seq_printf(m, "C.PhysAddr : 0x%llx\n", ocm->c.phys);
seq_printf(m, "C.VirtAddr : 0x%p\n", ocm->c.virt);
seq_printf(m, "C.MemTotal : %d Bytes\n", ocm->c.memtotal);
seq_printf(m, "C.MemFree : %d Bytes\n", ocm->c.memfree);
list_for_each_entry_safe(blk, tmp, &ocm->c.list, list) {
seq_printf(m, "C.MemUsed : %d Bytes (%s)\n",
blk->size, blk->owner);
}
seq_printf(m, "\n");
}
return 0;
}
static int ocm_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, ocm_debugfs_show, NULL);
}
static const struct file_operations ocm_debugfs_fops = {
.open = ocm_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int ocm_debugfs_init(void)
{
struct dentry *junk;
junk = debugfs_create_dir("ppc4xx_ocm", 0);
if (!junk) {
printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create dir\n");
return -1;
}
if (debugfs_create_file("info", 0644, junk, NULL, &ocm_debugfs_fops)) {
printk(KERN_ALERT "debugfs ppc4xx ocm: failed to create file\n");
return -1;
}
return 0;
}
void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
int flags, const char *owner)
{
void __iomem *addr = NULL;
unsigned long offset;
struct ocm_info *ocm;
struct ocm_region *ocm_reg;
struct ocm_block *ocm_blk;
int i;
for (i = 0; i < ocm_count; i++) {
ocm = ocm_get_node(i);
if (!ocm || !ocm->ready)
continue;
if (flags == PPC4XX_OCM_NON_CACHED)
ocm_reg = &ocm->nc;
else
ocm_reg = &ocm->c;
if (!ocm_reg->virt)
continue;
if (align < ocm->alignment)
align = ocm->alignment;
offset = rh_alloc_align(ocm_reg->rh, size, align, NULL);
if (IS_ERR_VALUE(offset))
continue;
ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL);
if (!ocm_blk) {
printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
rh_free(ocm_reg->rh, offset);
break;
}
*phys = ocm_reg->phys + offset;
addr = ocm_reg->virt + offset;
size = ALIGN(size, align);
ocm_blk->addr = addr;
ocm_blk->size = size;
ocm_blk->owner = owner;
list_add_tail(&ocm_blk->list, &ocm_reg->list);
ocm_reg->memfree -= size;
break;
}
return addr;
}
void ppc4xx_ocm_free(const void *addr)
{
int i;
if (!addr)
return;
for (i = 0; i < ocm_count; i++) {
struct ocm_info *ocm = ocm_get_node(i);
if (!ocm || !ocm->ready)
continue;
if (ocm_free_region(&ocm->nc, addr) ||
ocm_free_region(&ocm->c, addr))
return;
}
}
static int __init ppc4xx_ocm_init(void)
{
struct device_node *np;
int count;
count = 0;
for_each_compatible_node(np, NULL, "ibm,ocm")
count++;
if (!count)
return 0;
ocm_nodes = kzalloc((count * sizeof(struct ocm_info)), GFP_KERNEL);
if (!ocm_nodes) {
printk(KERN_ERR "PPC4XX OCM: failed to allocate OCM nodes!\n");
return -ENOMEM;
}
ocm_count = count;
count = 0;
for_each_compatible_node(np, NULL, "ibm,ocm") {
ocm_init_node(count, np);
count++;
}
ocm_debugfs_init();
return 0;
}
arch_initcall(ppc4xx_ocm_init);
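
As a usage note (editorial, not part of the commit): ppc4xx_ocm_alloc(), ppc4xx_ocm_free() and PPC4XX_OCM_NON_CACHED are the interfaces exported by the file above (declared in asm/ppc4xx_ocm.h); the 256-byte size, the alignment and the "example" owner string in the sketch below are made-up values for illustration.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/ppc4xx_ocm.h>

static int example_ocm_user(void)
{
	phys_addr_t phys;
	void *buf;

	/* Carve 256 bytes, 4-byte aligned, out of the non-cached OCM region */
	buf = ppc4xx_ocm_alloc(&phys, 256, 4, PPC4XX_OCM_NON_CACHED, "example");
	if (!buf)
		return -ENOMEM;

	/* ... hand phys to the hardware, access the buffer through buf ... */

	ppc4xx_ocm_free(buf);
	return 0;
}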

The diff for this file was suppressed because it is too large.

View file

@@ -1,505 +0,0 @@
/*
* PCI / PCI-X / PCI-Express support for 4xx parts
*
* Copyright 2007 Ben. Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
*
* Bits and pieces extracted from arch/ppc support by
*
* Matt Porter <mporter@kernel.crashing.org>
*
* Copyright 2002-2005 MontaVista Software Inc.
*/
#ifndef __PPC4XX_PCI_H__
#define __PPC4XX_PCI_H__
/*
* 4xx PCI-X bridge register definitions
*/
#define PCIX0_VENDID 0x000
#define PCIX0_DEVID 0x002
#define PCIX0_COMMAND 0x004
#define PCIX0_STATUS 0x006
#define PCIX0_REVID 0x008
#define PCIX0_CLS 0x009
#define PCIX0_CACHELS 0x00c
#define PCIX0_LATTIM 0x00d
#define PCIX0_HDTYPE 0x00e
#define PCIX0_BIST 0x00f
#define PCIX0_BAR0L 0x010
#define PCIX0_BAR0H 0x014
#define PCIX0_BAR1 0x018
#define PCIX0_BAR2L 0x01c
#define PCIX0_BAR2H 0x020
#define PCIX0_BAR3 0x024
#define PCIX0_CISPTR 0x028
#define PCIX0_SBSYSVID 0x02c
#define PCIX0_SBSYSID 0x02e
#define PCIX0_EROMBA 0x030
#define PCIX0_CAP 0x034
#define PCIX0_RES0 0x035
#define PCIX0_RES1 0x036
#define PCIX0_RES2 0x038
#define PCIX0_INTLN 0x03c
#define PCIX0_INTPN 0x03d
#define PCIX0_MINGNT 0x03e
#define PCIX0_MAXLTNCY 0x03f
#define PCIX0_BRDGOPT1 0x040
#define PCIX0_BRDGOPT2 0x044
#define PCIX0_ERREN 0x050
#define PCIX0_ERRSTS 0x054
#define PCIX0_PLBBESR 0x058
#define PCIX0_PLBBEARL 0x05c
#define PCIX0_PLBBEARH 0x060
#define PCIX0_POM0LAL 0x068
#define PCIX0_POM0LAH 0x06c
#define PCIX0_POM0SA 0x070
#define PCIX0_POM0PCIAL 0x074
#define PCIX0_POM0PCIAH 0x078
#define PCIX0_POM1LAL 0x07c
#define PCIX0_POM1LAH 0x080
#define PCIX0_POM1SA 0x084
#define PCIX0_POM1PCIAL 0x088
#define PCIX0_POM1PCIAH 0x08c
#define PCIX0_POM2SA 0x090
#define PCIX0_PIM0SAL 0x098
#define PCIX0_PIM0SA PCIX0_PIM0SAL
#define PCIX0_PIM0LAL 0x09c
#define PCIX0_PIM0LAH 0x0a0
#define PCIX0_PIM1SA 0x0a4
#define PCIX0_PIM1LAL 0x0a8
#define PCIX0_PIM1LAH 0x0ac
#define PCIX0_PIM2SAL 0x0b0
#define PCIX0_PIM2SA PCIX0_PIM2SAL
#define PCIX0_PIM2LAL 0x0b4
#define PCIX0_PIM2LAH 0x0b8
#define PCIX0_OMCAPID 0x0c0
#define PCIX0_OMNIPTR 0x0c1
#define PCIX0_OMMC 0x0c2
#define PCIX0_OMMA 0x0c4
#define PCIX0_OMMUA 0x0c8
#define PCIX0_OMMDATA 0x0cc
#define PCIX0_OMMEOI 0x0ce
#define PCIX0_PMCAPID 0x0d0
#define PCIX0_PMNIPTR 0x0d1
#define PCIX0_PMC 0x0d2
#define PCIX0_PMCSR 0x0d4
#define PCIX0_PMCSRBSE 0x0d6
#define PCIX0_PMDATA 0x0d7
#define PCIX0_PMSCRR 0x0d8
#define PCIX0_CAPID 0x0dc
#define PCIX0_NIPTR 0x0dd
#define PCIX0_CMD 0x0de
#define PCIX0_STS 0x0e0
#define PCIX0_IDR 0x0e4
#define PCIX0_CID 0x0e8
#define PCIX0_RID 0x0ec
#define PCIX0_PIM0SAH 0x0f8
#define PCIX0_PIM2SAH 0x0fc
#define PCIX0_MSGIL 0x100
#define PCIX0_MSGIH 0x104
#define PCIX0_MSGOL 0x108
#define PCIX0_MSGOH 0x10c
#define PCIX0_IM 0x1f8
/*
* 4xx PCI bridge register definitions
*/
#define PCIL0_PMM0LA 0x00
#define PCIL0_PMM0MA 0x04
#define PCIL0_PMM0PCILA 0x08
#define PCIL0_PMM0PCIHA 0x0c
#define PCIL0_PMM1LA 0x10
#define PCIL0_PMM1MA 0x14
#define PCIL0_PMM1PCILA 0x18
#define PCIL0_PMM1PCIHA 0x1c
#define PCIL0_PMM2LA 0x20
#define PCIL0_PMM2MA 0x24
#define PCIL0_PMM2PCILA 0x28
#define PCIL0_PMM2PCIHA 0x2c
#define PCIL0_PTM1MS 0x30
#define PCIL0_PTM1LA 0x34
#define PCIL0_PTM2MS 0x38
#define PCIL0_PTM2LA 0x3c
/*
* 4xx PCIe bridge register definitions
*/
/* DCR offsets */
#define DCRO_PEGPL_CFGBAH 0x00
#define DCRO_PEGPL_CFGBAL 0x01
#define DCRO_PEGPL_CFGMSK 0x02
#define DCRO_PEGPL_MSGBAH 0x03
#define DCRO_PEGPL_MSGBAL 0x04
#define DCRO_PEGPL_MSGMSK 0x05
#define DCRO_PEGPL_OMR1BAH 0x06
#define DCRO_PEGPL_OMR1BAL 0x07
#define DCRO_PEGPL_OMR1MSKH 0x08
#define DCRO_PEGPL_OMR1MSKL 0x09
#define DCRO_PEGPL_OMR2BAH 0x0a
#define DCRO_PEGPL_OMR2BAL 0x0b
#define DCRO_PEGPL_OMR2MSKH 0x0c
#define DCRO_PEGPL_OMR2MSKL 0x0d
#define DCRO_PEGPL_OMR3BAH 0x0e
#define DCRO_PEGPL_OMR3BAL 0x0f
#define DCRO_PEGPL_OMR3MSKH 0x10
#define DCRO_PEGPL_OMR3MSKL 0x11
#define DCRO_PEGPL_REGBAH 0x12
#define DCRO_PEGPL_REGBAL 0x13
#define DCRO_PEGPL_REGMSK 0x14
#define DCRO_PEGPL_SPECIAL 0x15
#define DCRO_PEGPL_CFG 0x16
#define DCRO_PEGPL_ESR 0x17
#define DCRO_PEGPL_EARH 0x18
#define DCRO_PEGPL_EARL 0x19
#define DCRO_PEGPL_EATR 0x1a
/* DMER mask */
#define GPL_DMER_MASK_DISA 0x02000000
/*
* System DCRs (SDRs)
*/
#define PESDR0_PLLLCT1 0x03a0
#define PESDR0_PLLLCT2 0x03a1
#define PESDR0_PLLLCT3 0x03a2
/*
* 440SPe additional DCRs
*/
#define PESDR0_440SPE_UTLSET1 0x0300
#define PESDR0_440SPE_UTLSET2 0x0301
#define PESDR0_440SPE_DLPSET 0x0302
#define PESDR0_440SPE_LOOP 0x0303
#define PESDR0_440SPE_RCSSET 0x0304
#define PESDR0_440SPE_RCSSTS 0x0305
#define PESDR0_440SPE_HSSL0SET1 0x0306
#define PESDR0_440SPE_HSSL0SET2 0x0307
#define PESDR0_440SPE_HSSL0STS 0x0308
#define PESDR0_440SPE_HSSL1SET1 0x0309
#define PESDR0_440SPE_HSSL1SET2 0x030a
#define PESDR0_440SPE_HSSL1STS 0x030b
#define PESDR0_440SPE_HSSL2SET1 0x030c
#define PESDR0_440SPE_HSSL2SET2 0x030d
#define PESDR0_440SPE_HSSL2STS 0x030e
#define PESDR0_440SPE_HSSL3SET1 0x030f
#define PESDR0_440SPE_HSSL3SET2 0x0310
#define PESDR0_440SPE_HSSL3STS 0x0311
#define PESDR0_440SPE_HSSL4SET1 0x0312
#define PESDR0_440SPE_HSSL4SET2 0x0313
#define PESDR0_440SPE_HSSL4STS 0x0314
#define PESDR0_440SPE_HSSL5SET1 0x0315
#define PESDR0_440SPE_HSSL5SET2 0x0316
#define PESDR0_440SPE_HSSL5STS 0x0317
#define PESDR0_440SPE_HSSL6SET1 0x0318
#define PESDR0_440SPE_HSSL6SET2 0x0319
#define PESDR0_440SPE_HSSL6STS 0x031a
#define PESDR0_440SPE_HSSL7SET1 0x031b
#define PESDR0_440SPE_HSSL7SET2 0x031c
#define PESDR0_440SPE_HSSL7STS 0x031d
#define PESDR0_440SPE_HSSCTLSET 0x031e
#define PESDR0_440SPE_LANE_ABCD 0x031f
#define PESDR0_440SPE_LANE_EFGH 0x0320
#define PESDR1_440SPE_UTLSET1 0x0340
#define PESDR1_440SPE_UTLSET2 0x0341
#define PESDR1_440SPE_DLPSET 0x0342
#define PESDR1_440SPE_LOOP 0x0343
#define PESDR1_440SPE_RCSSET 0x0344
#define PESDR1_440SPE_RCSSTS 0x0345
#define PESDR1_440SPE_HSSL0SET1 0x0346
#define PESDR1_440SPE_HSSL0SET2 0x0347
#define PESDR1_440SPE_HSSL0STS 0x0348
#define PESDR1_440SPE_HSSL1SET1 0x0349
#define PESDR1_440SPE_HSSL1SET2 0x034a
#define PESDR1_440SPE_HSSL1STS 0x034b
#define PESDR1_440SPE_HSSL2SET1 0x034c
#define PESDR1_440SPE_HSSL2SET2 0x034d
#define PESDR1_440SPE_HSSL2STS 0x034e
#define PESDR1_440SPE_HSSL3SET1 0x034f
#define PESDR1_440SPE_HSSL3SET2 0x0350
#define PESDR1_440SPE_HSSL3STS 0x0351
#define PESDR1_440SPE_HSSCTLSET 0x0352
#define PESDR1_440SPE_LANE_ABCD 0x0353
#define PESDR2_440SPE_UTLSET1 0x0370
#define PESDR2_440SPE_UTLSET2 0x0371
#define PESDR2_440SPE_DLPSET 0x0372
#define PESDR2_440SPE_LOOP 0x0373
#define PESDR2_440SPE_RCSSET 0x0374
#define PESDR2_440SPE_RCSSTS 0x0375
#define PESDR2_440SPE_HSSL0SET1 0x0376
#define PESDR2_440SPE_HSSL0SET2 0x0377
#define PESDR2_440SPE_HSSL0STS 0x0378
#define PESDR2_440SPE_HSSL1SET1 0x0379
#define PESDR2_440SPE_HSSL1SET2 0x037a
#define PESDR2_440SPE_HSSL1STS 0x037b
#define PESDR2_440SPE_HSSL2SET1 0x037c
#define PESDR2_440SPE_HSSL2SET2 0x037d
#define PESDR2_440SPE_HSSL2STS 0x037e
#define PESDR2_440SPE_HSSL3SET1 0x037f
#define PESDR2_440SPE_HSSL3SET2 0x0380
#define PESDR2_440SPE_HSSL3STS 0x0381
#define PESDR2_440SPE_HSSCTLSET 0x0382
#define PESDR2_440SPE_LANE_ABCD 0x0383
/*
* 405EX additional DCRs
*/
#define PESDR0_405EX_UTLSET1 0x0400
#define PESDR0_405EX_UTLSET2 0x0401
#define PESDR0_405EX_DLPSET 0x0402
#define PESDR0_405EX_LOOP 0x0403
#define PESDR0_405EX_RCSSET 0x0404
#define PESDR0_405EX_RCSSTS 0x0405
#define PESDR0_405EX_PHYSET1 0x0406
#define PESDR0_405EX_PHYSET2 0x0407
#define PESDR0_405EX_BIST 0x0408
#define PESDR0_405EX_LPB 0x040B
#define PESDR0_405EX_PHYSTA 0x040C
#define PESDR1_405EX_UTLSET1 0x0440
#define PESDR1_405EX_UTLSET2 0x0441
#define PESDR1_405EX_DLPSET 0x0442
#define PESDR1_405EX_LOOP 0x0443
#define PESDR1_405EX_RCSSET 0x0444
#define PESDR1_405EX_RCSSTS 0x0445
#define PESDR1_405EX_PHYSET1 0x0446
#define PESDR1_405EX_PHYSET2 0x0447
#define PESDR1_405EX_BIST 0x0448
#define PESDR1_405EX_LPB 0x044B
#define PESDR1_405EX_PHYSTA 0x044C
/*
* 460EX additional DCRs
*/
#define PESDR0_460EX_L0BIST 0x0308
#define PESDR0_460EX_L0BISTSTS 0x0309
#define PESDR0_460EX_L0CDRCTL 0x030A
#define PESDR0_460EX_L0DRV 0x030B
#define PESDR0_460EX_L0REC 0x030C
#define PESDR0_460EX_L0LPB 0x030D
#define PESDR0_460EX_L0CLK 0x030E
#define PESDR0_460EX_PHY_CTL_RST 0x030F
#define PESDR0_460EX_RSTSTA 0x0310
#define PESDR0_460EX_OBS 0x0311
#define PESDR0_460EX_L0ERRC 0x0320
#define PESDR1_460EX_L0BIST 0x0348
#define PESDR1_460EX_L1BIST 0x0349
#define PESDR1_460EX_L2BIST 0x034A
#define PESDR1_460EX_L3BIST 0x034B
#define PESDR1_460EX_L0BISTSTS 0x034C
#define PESDR1_460EX_L1BISTSTS 0x034D
#define PESDR1_460EX_L2BISTSTS 0x034E
#define PESDR1_460EX_L3BISTSTS 0x034F
#define PESDR1_460EX_L0CDRCTL 0x0350
#define PESDR1_460EX_L1CDRCTL 0x0351
#define PESDR1_460EX_L2CDRCTL 0x0352
#define PESDR1_460EX_L3CDRCTL 0x0353
#define PESDR1_460EX_L0DRV 0x0354
#define PESDR1_460EX_L1DRV 0x0355
#define PESDR1_460EX_L2DRV 0x0356
#define PESDR1_460EX_L3DRV 0x0357
#define PESDR1_460EX_L0REC 0x0358
#define PESDR1_460EX_L1REC 0x0359
#define PESDR1_460EX_L2REC 0x035A
#define PESDR1_460EX_L3REC 0x035B
#define PESDR1_460EX_L0LPB 0x035C
#define PESDR1_460EX_L1LPB 0x035D
#define PESDR1_460EX_L2LPB 0x035E
#define PESDR1_460EX_L3LPB 0x035F
#define PESDR1_460EX_L0CLK 0x0360
#define PESDR1_460EX_L1CLK 0x0361
#define PESDR1_460EX_L2CLK 0x0362
#define PESDR1_460EX_L3CLK 0x0363
#define PESDR1_460EX_PHY_CTL_RST 0x0364
#define PESDR1_460EX_RSTSTA 0x0365
#define PESDR1_460EX_OBS 0x0366
#define PESDR1_460EX_L0ERRC 0x0368
#define PESDR1_460EX_L1ERRC 0x0369
#define PESDR1_460EX_L2ERRC 0x036A
#define PESDR1_460EX_L3ERRC 0x036B
#define PESDR0_460EX_IHS1 0x036C
#define PESDR0_460EX_IHS2 0x036D
/*
* 460SX additional DCRs
*/
#define PESDRn_460SX_RCEI 0x02
#define PESDR0_460SX_HSSL0DAMP 0x320
#define PESDR0_460SX_HSSL1DAMP 0x321
#define PESDR0_460SX_HSSL2DAMP 0x322
#define PESDR0_460SX_HSSL3DAMP 0x323
#define PESDR0_460SX_HSSL4DAMP 0x324
#define PESDR0_460SX_HSSL5DAMP 0x325
#define PESDR0_460SX_HSSL6DAMP 0x326
#define PESDR0_460SX_HSSL7DAMP 0x327
#define PESDR1_460SX_HSSL0DAMP 0x354
#define PESDR1_460SX_HSSL1DAMP 0x355
#define PESDR1_460SX_HSSL2DAMP 0x356
#define PESDR1_460SX_HSSL3DAMP 0x357
#define PESDR2_460SX_HSSL0DAMP 0x384
#define PESDR2_460SX_HSSL1DAMP 0x385
#define PESDR2_460SX_HSSL2DAMP 0x386
#define PESDR2_460SX_HSSL3DAMP 0x387
#define PESDR0_460SX_HSSL0COEFA 0x328
#define PESDR0_460SX_HSSL1COEFA 0x329
#define PESDR0_460SX_HSSL2COEFA 0x32A
#define PESDR0_460SX_HSSL3COEFA 0x32B
#define PESDR0_460SX_HSSL4COEFA 0x32C
#define PESDR0_460SX_HSSL5COEFA 0x32D
#define PESDR0_460SX_HSSL6COEFA 0x32E
#define PESDR0_460SX_HSSL7COEFA 0x32F
#define PESDR1_460SX_HSSL0COEFA 0x358
#define PESDR1_460SX_HSSL1COEFA 0x359
#define PESDR1_460SX_HSSL2COEFA 0x35A
#define PESDR1_460SX_HSSL3COEFA 0x35B
#define PESDR2_460SX_HSSL0COEFA 0x388
#define PESDR2_460SX_HSSL1COEFA 0x389
#define PESDR2_460SX_HSSL2COEFA 0x38A
#define PESDR2_460SX_HSSL3COEFA 0x38B
#define PESDR0_460SX_HSSL1CALDRV 0x339
#define PESDR1_460SX_HSSL1CALDRV 0x361
#define PESDR2_460SX_HSSL1CALDRV 0x391
#define PESDR0_460SX_HSSSLEW 0x338
#define PESDR1_460SX_HSSSLEW 0x360
#define PESDR2_460SX_HSSSLEW 0x390
#define PESDR0_460SX_HSSCTLSET 0x31E
#define PESDR1_460SX_HSSCTLSET 0x352
#define PESDR2_460SX_HSSCTLSET 0x382
#define PESDR0_460SX_RCSSET 0x304
#define PESDR1_460SX_RCSSET 0x344
#define PESDR2_460SX_RCSSET 0x374
/*
* Of the above, some are common offsets from the base
*/
#define PESDRn_UTLSET1 0x00
#define PESDRn_UTLSET2 0x01
#define PESDRn_DLPSET 0x02
#define PESDRn_LOOP 0x03
#define PESDRn_RCSSET 0x04
#define PESDRn_RCSSTS 0x05
/* 440spe only */
#define PESDRn_440SPE_HSSL0SET1 0x06
#define PESDRn_440SPE_HSSL0SET2 0x07
#define PESDRn_440SPE_HSSL0STS 0x08
#define PESDRn_440SPE_HSSL1SET1 0x09
#define PESDRn_440SPE_HSSL1SET2 0x0a
#define PESDRn_440SPE_HSSL1STS 0x0b
#define PESDRn_440SPE_HSSL2SET1 0x0c
#define PESDRn_440SPE_HSSL2SET2 0x0d
#define PESDRn_440SPE_HSSL2STS 0x0e
#define PESDRn_440SPE_HSSL3SET1 0x0f
#define PESDRn_440SPE_HSSL3SET2 0x10
#define PESDRn_440SPE_HSSL3STS 0x11
/* 440spe port 0 only */
#define PESDRn_440SPE_HSSL4SET1 0x12
#define PESDRn_440SPE_HSSL4SET2 0x13
#define PESDRn_440SPE_HSSL4STS 0x14
#define PESDRn_440SPE_HSSL5SET1 0x15
#define PESDRn_440SPE_HSSL5SET2 0x16
#define PESDRn_440SPE_HSSL5STS 0x17
#define PESDRn_440SPE_HSSL6SET1 0x18
#define PESDRn_440SPE_HSSL6SET2 0x19
#define PESDRn_440SPE_HSSL6STS 0x1a
#define PESDRn_440SPE_HSSL7SET1 0x1b
#define PESDRn_440SPE_HSSL7SET2 0x1c
#define PESDRn_440SPE_HSSL7STS 0x1d
/* 405ex only */
#define PESDRn_405EX_PHYSET1 0x06
#define PESDRn_405EX_PHYSET2 0x07
#define PESDRn_405EX_PHYSTA 0x0c
/*
* UTL register offsets
*/
#define PEUTL_PBCTL 0x00
#define PEUTL_PBBSZ 0x20
#define PEUTL_OPDBSZ 0x68
#define PEUTL_IPHBSZ 0x70
#define PEUTL_IPDBSZ 0x78
#define PEUTL_OUTTR 0x90
#define PEUTL_INTR 0x98
#define PEUTL_PCTL 0xa0
#define PEUTL_RCSTA 0xB0
#define PEUTL_RCIRQEN 0xb8
/*
* Config space register offsets
*/
#define PECFG_ECRTCTL 0x074
#define PECFG_BAR0LMPA 0x210
#define PECFG_BAR0HMPA 0x214
#define PECFG_BAR1MPA 0x218
#define PECFG_BAR2LMPA 0x220
#define PECFG_BAR2HMPA 0x224
#define PECFG_PIMEN 0x33c
#define PECFG_PIM0LAL 0x340
#define PECFG_PIM0LAH 0x344
#define PECFG_PIM1LAL 0x348
#define PECFG_PIM1LAH 0x34c
#define PECFG_PIM01SAL 0x350
#define PECFG_PIM01SAH 0x354
#define PECFG_POM0LAL 0x380
#define PECFG_POM0LAH 0x384
#define PECFG_POM1LAL 0x388
#define PECFG_POM1LAH 0x38c
#define PECFG_POM2LAL 0x390
#define PECFG_POM2LAH 0x394
/* 460sx only */
#define PECFG_460SX_DLLSTA 0x3f8
/* 460sx Bit Mappings */
#define PECFG_460SX_DLLSTA_LINKUP 0x00000010
#define DCRO_PEGPL_460SX_OMR1MSKL_UOT 0x00000004
/* PEGPL Bit Mappings */
#define DCRO_PEGPL_OMRxMSKL_VAL 0x00000001
#define DCRO_PEGPL_OMR1MSKL_UOT 0x00000002
#define DCRO_PEGPL_OMR3MSKL_IO 0x00000002
/* 476FPE */
#define PCCFG_LCPA 0x270
#define PECFG_TLDLP 0x3F8
#define PECFG_TLDLP_LNKUP 0x00000008
#define PECFG_TLDLP_PRESENT 0x00000010
#define DCRO_PEGPL_476FPE_OMR1MSKL_UOT 0x00000004
/* SDR Bit Mappings */
#define PESDRx_RCSSET_HLDPLB 0x10000000
#define PESDRx_RCSSET_RSTGU 0x01000000
#define PESDRx_RCSSET_RDY 0x00100000
#define PESDRx_RCSSET_RSTDL 0x00010000
#define PESDRx_RCSSET_RSTPYN 0x00001000
enum
{
PTYPE_ENDPOINT = 0x0,
PTYPE_LEGACY_ENDPOINT = 0x1,
PTYPE_ROOT_PORT = 0x4,
LNKW_X1 = 0x1,
LNKW_X4 = 0x4,
LNKW_X8 = 0x8
};
#endif /* __PPC4XX_PCI_H__ */

View file

@@ -1,222 +0,0 @@
/*
* IBM/AMCC PPC4xx SoC setup code
*
* Copyright 2008 DENX Software Engineering, Stefan Roese <sr@denx.de>
*
* L2 cache routines cloned from arch/ppc/syslib/ibm440gx_common.c which is:
* Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net>
* Copyright (c) 2003 - 2006 Zultys Technologies
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <asm/dcr.h>
#include <asm/dcr-regs.h>
#include <asm/reg.h>
static u32 dcrbase_l2c;
/*
* L2-cache
*/
/* Issue L2C diagnostic command */
static inline u32 l2c_diag(u32 addr)
{
mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, addr);
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_DIAG);
while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC))
;
return mfdcr(dcrbase_l2c + DCRN_L2C0_DATA);
}
static irqreturn_t l2c_error_handler(int irq, void *dev)
{
u32 sr = mfdcr(dcrbase_l2c + DCRN_L2C0_SR);
if (sr & L2C_SR_CPE) {
/* Read cache trapped address */
u32 addr = l2c_diag(0x42000000);
printk(KERN_EMERG "L2C: Cache Parity Error, addr[16:26] = 0x%08x\n",
addr);
}
if (sr & L2C_SR_TPE) {
/* Read tag trapped address */
u32 addr = l2c_diag(0x82000000) >> 16;
printk(KERN_EMERG "L2C: Tag Parity Error, addr[16:26] = 0x%08x\n",
addr);
}
/* Clear parity errors */
if (sr & (L2C_SR_CPE | L2C_SR_TPE)){
mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0);
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE);
} else {
printk(KERN_EMERG "L2C: LRU error\n");
}
return IRQ_HANDLED;
}
static int __init ppc4xx_l2c_probe(void)
{
struct device_node *np;
u32 r;
unsigned long flags;
int irq;
const u32 *dcrreg;
u32 dcrbase_isram;
int len;
const u32 *prop;
u32 l2_size;
np = of_find_compatible_node(NULL, NULL, "ibm,l2-cache");
if (!np)
return 0;
/* Get l2 cache size */
prop = of_get_property(np, "cache-size", NULL);
if (prop == NULL) {
printk(KERN_ERR "%s: Can't get cache-size!\n", np->full_name);
of_node_put(np);
return -ENODEV;
}
l2_size = prop[0];
/* Map DCRs */
dcrreg = of_get_property(np, "dcr-reg", &len);
if (!dcrreg || (len != 4 * sizeof(u32))) {
printk(KERN_ERR "%s: Can't get DCR register base !",
np->full_name);
of_node_put(np);
return -ENODEV;
}
dcrbase_isram = dcrreg[0];
dcrbase_l2c = dcrreg[2];
/* Get and map irq number from device tree */
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
printk(KERN_ERR "irq_of_parse_and_map failed\n");
of_node_put(np);
return -ENODEV;
}
/* Install error handler */
if (request_irq(irq, l2c_error_handler, 0, "L2C", 0) < 0) {
printk(KERN_ERR "Cannot install L2C error handler"
", cache is not enabled\n");
of_node_put(np);
return -ENODEV;
}
local_irq_save(flags);
asm volatile ("sync" ::: "memory");
/* Disable SRAM */
mtdcr(dcrbase_isram + DCRN_SRAM0_DPC,
mfdcr(dcrbase_isram + DCRN_SRAM0_DPC) & ~SRAM_DPC_ENABLE);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB0CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB0CR) & ~SRAM_SBCR_BU_MASK);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB1CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB1CR) & ~SRAM_SBCR_BU_MASK);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB2CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB2CR) & ~SRAM_SBCR_BU_MASK);
mtdcr(dcrbase_isram + DCRN_SRAM0_SB3CR,
mfdcr(dcrbase_isram + DCRN_SRAM0_SB3CR) & ~SRAM_SBCR_BU_MASK);
/* Enable L2_MODE without ICU/DCU */
r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG) &
~(L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_SS_MASK);
r |= L2C_CFG_L2M | L2C_CFG_SS_256;
mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r);
mtdcr(dcrbase_l2c + DCRN_L2C0_ADDR, 0);
/* Hardware Clear Command */
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_HCC);
while (!(mfdcr(dcrbase_l2c + DCRN_L2C0_SR) & L2C_SR_CC))
;
/* Clear Cache Parity and Tag Errors */
mtdcr(dcrbase_l2c + DCRN_L2C0_CMD, L2C_CMD_CCP | L2C_CMD_CTE);
/* Enable 64G snoop region starting at 0 */
r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP0) &
~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
r |= L2C_SNP_SSR_32G | L2C_SNP_ESR;
mtdcr(dcrbase_l2c + DCRN_L2C0_SNP0, r);
r = mfdcr(dcrbase_l2c + DCRN_L2C0_SNP1) &
~(L2C_SNP_BA_MASK | L2C_SNP_SSR_MASK);
r |= 0x80000000 | L2C_SNP_SSR_32G | L2C_SNP_ESR;
mtdcr(dcrbase_l2c + DCRN_L2C0_SNP1, r);
asm volatile ("sync" ::: "memory");
/* Enable ICU/DCU ports */
r = mfdcr(dcrbase_l2c + DCRN_L2C0_CFG);
r &= ~(L2C_CFG_DCW_MASK | L2C_CFG_PMUX_MASK | L2C_CFG_PMIM
| L2C_CFG_TPEI | L2C_CFG_CPEI | L2C_CFG_NAM | L2C_CFG_NBRM);
r |= L2C_CFG_ICU | L2C_CFG_DCU | L2C_CFG_TPC | L2C_CFG_CPC | L2C_CFG_FRAN
| L2C_CFG_CPIM | L2C_CFG_TPIM | L2C_CFG_LIM | L2C_CFG_SMCM;
/* Check for 460EX/GT special handling */
if (of_device_is_compatible(np, "ibm,l2-cache-460ex") ||
of_device_is_compatible(np, "ibm,l2-cache-460gt"))
r |= L2C_CFG_RDBW;
mtdcr(dcrbase_l2c + DCRN_L2C0_CFG, r);
asm volatile ("sync; isync" ::: "memory");
local_irq_restore(flags);
printk(KERN_INFO "%dk L2-cache enabled\n", l2_size >> 10);
of_node_put(np);
return 0;
}
arch_initcall(ppc4xx_l2c_probe);
/*
* Apply a system reset. Alternatively a board specific value may be
* provided via the "reset-type" property in the cpu node.
*/
void ppc4xx_reset_system(char *cmd)
{
struct device_node *np;
u32 reset_type = DBCR0_RST_SYSTEM;
const u32 *prop;
np = of_find_node_by_type(NULL, "cpu");
if (np) {
prop = of_get_property(np, "reset-type", NULL);
/*
* Check if property exists and if it is in range:
* 1 - PPC4xx core reset
* 2 - PPC4xx chip reset
* 3 - PPC4xx system reset (default)
*/
if ((prop) && ((prop[0] >= 1) && (prop[0] <= 3)))
reset_type = prop[0] << 28;
}
mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | reset_type);
while (1)
; /* Just in case the reset doesn't work */
}

View file

@@ -1,334 +0,0 @@
/*
* arch/powerpc/sysdev/uic.c
*
* IBM PowerPC 4xx Universal Interrupt Controller
*
* Copyright 2007 David Gibson <dwg@au1.ibm.com>, IBM Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/dcr.h>
#define NR_UIC_INTS 32
#define UIC_SR 0x0
#define UIC_ER 0x2
#define UIC_CR 0x3
#define UIC_PR 0x4
#define UIC_TR 0x5
#define UIC_MSR 0x6
#define UIC_VR 0x7
#define UIC_VCR 0x8
struct uic *primary_uic;
struct uic {
int index;
int dcrbase;
raw_spinlock_t lock;
/* The remapper for this UIC */
struct irq_domain *irqhost;
};
static void uic_unmask_irq(struct irq_data *d)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 er, sr;
sr = 1 << (31-src);
raw_spin_lock_irqsave(&uic->lock, flags);
/* ack level-triggered interrupts here */
if (irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
er = mfdcr(uic->dcrbase + UIC_ER);
er |= sr;
mtdcr(uic->dcrbase + UIC_ER, er);
raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static void uic_mask_irq(struct irq_data *d)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 er;
raw_spin_lock_irqsave(&uic->lock, flags);
er = mfdcr(uic->dcrbase + UIC_ER);
er &= ~(1 << (31 - src));
mtdcr(uic->dcrbase + UIC_ER, er);
raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static void uic_ack_irq(struct irq_data *d)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
raw_spin_lock_irqsave(&uic->lock, flags);
mtdcr(uic->dcrbase + UIC_SR, 1 << (31-src));
raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static void uic_mask_ack_irq(struct irq_data *d)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 er, sr;
sr = 1 << (31-src);
raw_spin_lock_irqsave(&uic->lock, flags);
er = mfdcr(uic->dcrbase + UIC_ER);
er &= ~sr;
mtdcr(uic->dcrbase + UIC_ER, er);
/* On the UIC, acking (i.e. clearing the SR bit)
* a level irq will have no effect if the interrupt
* is still asserted by the device, even if
* the interrupt is already masked. Therefore
we only ack the edge interrupts here, while
* level interrupts are ack'ed after the actual
* isr call in the uic_unmask_irq()
*/
if (!irqd_is_level_type(d))
mtdcr(uic->dcrbase + UIC_SR, sr);
raw_spin_unlock_irqrestore(&uic->lock, flags);
}
static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
struct uic *uic = irq_data_get_irq_chip_data(d);
unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
int trigger, polarity;
u32 tr, pr, mask;
switch (flow_type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_NONE:
uic_mask_irq(d);
return 0;
case IRQ_TYPE_EDGE_RISING:
trigger = 1; polarity = 1;
break;
case IRQ_TYPE_EDGE_FALLING:
trigger = 1; polarity = 0;
break;
case IRQ_TYPE_LEVEL_HIGH:
trigger = 0; polarity = 1;
break;
case IRQ_TYPE_LEVEL_LOW:
trigger = 0; polarity = 0;
break;
default:
return -EINVAL;
}
mask = ~(1 << (31 - src));
raw_spin_lock_irqsave(&uic->lock, flags);
tr = mfdcr(uic->dcrbase + UIC_TR);
pr = mfdcr(uic->dcrbase + UIC_PR);
tr = (tr & mask) | (trigger << (31-src));
pr = (pr & mask) | (polarity << (31-src));
mtdcr(uic->dcrbase + UIC_PR, pr);
mtdcr(uic->dcrbase + UIC_TR, tr);
raw_spin_unlock_irqrestore(&uic->lock, flags);
return 0;
}
static struct irq_chip uic_irq_chip = {
.name = "UIC",
.irq_unmask = uic_unmask_irq,
.irq_mask = uic_mask_irq,
.irq_mask_ack = uic_mask_ack_irq,
.irq_ack = uic_ack_irq,
.irq_set_type = uic_set_irq_type,
};
static int uic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct uic *uic = h->host_data;
irq_set_chip_data(virq, uic);
/* Despite the name, handle_level_irq() works for both level
* and edge irqs on UIC. FIXME: check this is correct */
irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq);
/* Set default irq type */
irq_set_irq_type(virq, IRQ_TYPE_NONE);
return 0;
}
static const struct irq_domain_ops uic_host_ops = {
.map = uic_host_map,
.xlate = irq_domain_xlate_twocell,
};
static void uic_irq_cascade(struct irq_desc *desc)
{
struct irq_chip *chip = irq_desc_get_chip(desc);
struct irq_data *idata = irq_desc_get_irq_data(desc);
struct uic *uic = irq_desc_get_handler_data(desc);
u32 msr;
int src;
int subvirq;
raw_spin_lock(&desc->lock);
if (irqd_is_level_type(idata))
chip->irq_mask(idata);
else
chip->irq_mask_ack(idata);
raw_spin_unlock(&desc->lock);
msr = mfdcr(uic->dcrbase + UIC_MSR);
if (!msr) /* spurious interrupt */
goto uic_irq_ret;
src = 32 - ffs(msr);
subvirq = irq_linear_revmap(uic->irqhost, src);
generic_handle_irq(subvirq);
uic_irq_ret:
raw_spin_lock(&desc->lock);
if (irqd_is_level_type(idata))
chip->irq_ack(idata);
if (!irqd_irq_disabled(idata) && chip->irq_unmask)
chip->irq_unmask(idata);
raw_spin_unlock(&desc->lock);
}
static struct uic * __init uic_init_one(struct device_node *node)
{
struct uic *uic;
const u32 *indexp, *dcrreg;
int len;
BUG_ON(! of_device_is_compatible(node, "ibm,uic"));
uic = kzalloc(sizeof(*uic), GFP_KERNEL);
if (! uic)
return NULL; /* FIXME: panic? */
raw_spin_lock_init(&uic->lock);
indexp = of_get_property(node, "cell-index", &len);
if (!indexp || (len != sizeof(u32))) {
printk(KERN_ERR "uic: Device node %s has missing or invalid "
"cell-index property\n", node->full_name);
return NULL;
}
uic->index = *indexp;
dcrreg = of_get_property(node, "dcr-reg", &len);
if (!dcrreg || (len != 2*sizeof(u32))) {
printk(KERN_ERR "uic: Device node %s has missing or invalid "
"dcr-reg property\n", node->full_name);
return NULL;
}
uic->dcrbase = *dcrreg;
uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
uic);
if (! uic->irqhost)
return NULL; /* FIXME: panic? */
/* Start with all interrupts disabled, level and non-critical */
mtdcr(uic->dcrbase + UIC_ER, 0);
mtdcr(uic->dcrbase + UIC_CR, 0);
mtdcr(uic->dcrbase + UIC_TR, 0);
/* Clear any pending interrupts, in case the firmware left some */
mtdcr(uic->dcrbase + UIC_SR, 0xffffffff);
printk ("UIC%d (%d IRQ sources) at DCR 0x%x\n", uic->index,
NR_UIC_INTS, uic->dcrbase);
return uic;
}
void __init uic_init_tree(void)
{
struct device_node *np;
struct uic *uic;
const u32 *interrupts;
/* First locate and initialize the top-level UIC */
for_each_compatible_node(np, NULL, "ibm,uic") {
interrupts = of_get_property(np, "interrupts", NULL);
if (!interrupts)
break;
}
BUG_ON(!np); /* uic_init_tree() assumes there's a UIC as the
* top-level interrupt controller */
primary_uic = uic_init_one(np);
if (!primary_uic)
panic("Unable to initialize primary UIC %s\n", np->full_name);
irq_set_default_host(primary_uic->irqhost);
of_node_put(np);
/* Then scan again for cascaded UICs */
for_each_compatible_node(np, NULL, "ibm,uic") {
interrupts = of_get_property(np, "interrupts", NULL);
if (interrupts) {
/* Secondary UIC */
int cascade_virq;
uic = uic_init_one(np);
if (! uic)
panic("Unable to initialize a secondary UIC %s\n",
np->full_name);
cascade_virq = irq_of_parse_and_map(np, 0);
irq_set_handler_data(cascade_virq, uic);
irq_set_chained_handler(cascade_virq, uic_irq_cascade);
/* FIXME: setup critical cascade?? */
}
}
}
/* Return an interrupt vector or 0 if no interrupt is pending. */
unsigned int uic_get_irq(void)
{
u32 msr;
int src;
BUG_ON(! primary_uic);
msr = mfdcr(primary_uic->dcrbase + UIC_MSR);
src = 32 - ffs(msr);
return irq_linear_revmap(primary_uic->irqhost, src);
}