QE: Move QE from arch/powerpc to drivers/soc
The LS1 SoC has a QE (QUICC Engine) but an ARM core, so move the QE code from arch/powerpc to drivers/soc/fsl so that it can be used on both PowerPC and ARM platforms.

Signed-off-by: Zhao Qiang <qiang.zhao@freescale.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
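For context only (not part of the commit), a minimal consumer sketch against the relocated headers might look like the following. The function name and probe flow are hypothetical, and qe_get_num_of_snums() is assumed to be declared in <soc/fsl/qe/qe.h>, as the moved code below suggests.

#include <linux/errno.h>
#include <soc/fsl/qe/qe.h>	/* qe_get_num_of_snums(), qe_setbrg(), ... */

static int example_qe_consumer_probe(void)
{
	/* the same call now builds the same way for PowerPC and ARM users of the QE */
	if (!qe_get_num_of_snums())
		return -ENODEV;
	return 0;
}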
6
drivers/soc/fsl/Makefile
Normal file
@@ -0,0 +1,6 @@
#
# Makefile for the Linux Kernel SOC fsl specific device drivers
#

obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
38
drivers/soc/fsl/qe/Kconfig
Normal file
@@ -0,0 +1,38 @@
#
# QE Communication options
#

config QUICC_ENGINE
	bool "Freescale QUICC Engine (QE) Support"
	depends on FSL_SOC && PPC32
	select GENERIC_ALLOCATOR
	select CRC32
	help
	  The QUICC Engine (QE) is a new generation of communications
	  coprocessors on Freescale embedded CPUs (akin to CPM in older chips).
	  Selecting this option means that you wish to build a kernel
	  for a machine with a QE coprocessor.

config UCC_SLOW
	bool
	default y if SERIAL_QE
	help
	  This option provides qe_lib support to UCC slow
	  protocols: UART, BISYNC, QMC

config UCC_FAST
	bool
	default y if UCC_GETH
	help
	  This option provides qe_lib support to UCC fast
	  protocols: HDLC, Ethernet, ATM, transparent

config UCC
	bool
	default y if UCC_FAST || UCC_SLOW

config QE_USB
	bool
	default y if USB_FSL_QE
	help
	  QE USB Controller support
10
drivers/soc/fsl/qe/Makefile
Normal file
@@ -0,0 +1,10 @@
#
# Makefile for the linux ppc-specific parts of QE
#
obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_common.o qe_ic.o qe_io.o
obj-$(CONFIG_CPM) += qe_common.o
obj-$(CONFIG_UCC) += ucc.o
obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
obj-$(CONFIG_UCC_FAST) += ucc_fast.o
obj-$(CONFIG_QE_USB) += usb.o
obj-$(CONFIG_QE_GPIO) += gpio.o
317
drivers/soc/fsl/qe/gpio.c
Normal file
@@ -0,0 +1,317 @@
|
||||
/*
|
||||
* QUICC Engine GPIOs
|
||||
*
|
||||
* Copyright (c) MontaVista Software, Inc. 2008.
|
||||
*
|
||||
* Author: Anton Vorontsov <avorontsov@ru.mvista.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/export.h>
|
||||
#include <soc/fsl/qe/qe.h>
|
||||
|
||||
struct qe_gpio_chip {
|
||||
struct of_mm_gpio_chip mm_gc;
|
||||
spinlock_t lock;
|
||||
|
||||
unsigned long pin_flags[QE_PIO_PINS];
|
||||
#define QE_PIN_REQUESTED 0
|
||||
|
||||
/* shadowed data register to clear/set bits safely */
|
||||
u32 cpdata;
|
||||
|
||||
/* saved_regs used to restore dedicated functions */
|
||||
struct qe_pio_regs saved_regs;
|
||||
};
|
||||
|
||||
static inline struct qe_gpio_chip *
|
||||
to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc)
|
||||
{
|
||||
return container_of(mm_gc, struct qe_gpio_chip, mm_gc);
|
||||
}
|
||||
|
||||
static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
|
||||
{
|
||||
struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
|
||||
struct qe_pio_regs __iomem *regs = mm_gc->regs;
|
||||
|
||||
qe_gc->cpdata = in_be32(&regs->cpdata);
|
||||
qe_gc->saved_regs.cpdata = qe_gc->cpdata;
|
||||
qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
|
||||
qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
|
||||
qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
|
||||
qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
|
||||
qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
|
||||
}
|
||||
|
||||
static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
|
||||
{
|
||||
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
|
||||
struct qe_pio_regs __iomem *regs = mm_gc->regs;
|
||||
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
|
||||
|
||||
return in_be32(&regs->cpdata) & pin_mask;
|
||||
}
|
||||
|
||||
static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
|
||||
{
|
||||
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
|
||||
struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
|
||||
struct qe_pio_regs __iomem *regs = mm_gc->regs;
|
||||
unsigned long flags;
|
||||
u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
|
||||
|
||||
spin_lock_irqsave(&qe_gc->lock, flags);
|
||||
|
||||
if (val)
|
||||
qe_gc->cpdata |= pin_mask;
|
||||
else
|
||||
qe_gc->cpdata &= ~pin_mask;
|
||||
|
||||
out_be32(&regs->cpdata, qe_gc->cpdata);
|
||||
|
||||
spin_unlock_irqrestore(&qe_gc->lock, flags);
|
||||
}
|
||||
|
||||
static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
|
||||
{
|
||||
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
|
||||
struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qe_gc->lock, flags);
|
||||
|
||||
__par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
|
||||
|
||||
spin_unlock_irqrestore(&qe_gc->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
|
||||
{
|
||||
struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
|
||||
struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
|
||||
unsigned long flags;
|
||||
|
||||
qe_gpio_set(gc, gpio, val);
|
||||
|
||||
spin_lock_irqsave(&qe_gc->lock, flags);
|
||||
|
||||
__par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
|
||||
|
||||
spin_unlock_irqrestore(&qe_gc->lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct qe_pin {
|
||||
/*
|
||||
* The qe_gpio_chip name is unfortunate, we should change that to
|
||||
* something like qe_pio_controller. Someday.
|
||||
*/
|
||||
struct qe_gpio_chip *controller;
|
||||
int num;
|
||||
};
|
||||
|
||||
/**
|
||||
* qe_pin_request - Request a QE pin
|
||||
* @np: device node to get a pin from
|
||||
* @index: index of a pin in the device tree
|
||||
* Context: non-atomic
|
||||
*
|
||||
* This function returns a qe_pin so that you can use it with the rest of
|
||||
* the QE Pin Multiplexing API.
|
||||
*/
|
||||
struct qe_pin *qe_pin_request(struct device_node *np, int index)
|
||||
{
|
||||
struct qe_pin *qe_pin;
|
||||
struct gpio_chip *gc;
|
||||
struct of_mm_gpio_chip *mm_gc;
|
||||
struct qe_gpio_chip *qe_gc;
|
||||
int err;
|
||||
unsigned long flags;
|
||||
|
||||
qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
|
||||
if (!qe_pin) {
|
||||
pr_debug("%s: can't allocate memory\n", __func__);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
err = of_get_gpio(np, index);
|
||||
if (err < 0)
|
||||
goto err0;
|
||||
gc = gpio_to_chip(err);
|
||||
if (WARN_ON(!gc))
|
||||
goto err0;
|
||||
|
||||
if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
|
||||
pr_debug("%s: tried to get a non-qe pin\n", __func__);
|
||||
err = -EINVAL;
|
||||
goto err0;
|
||||
}
|
||||
|
||||
mm_gc = to_of_mm_gpio_chip(gc);
|
||||
qe_gc = to_qe_gpio_chip(mm_gc);
|
||||
|
||||
spin_lock_irqsave(&qe_gc->lock, flags);
|
||||
|
||||
err -= gc->base;
|
||||
if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
|
||||
qe_pin->controller = qe_gc;
|
||||
qe_pin->num = err;
|
||||
err = 0;
|
||||
} else {
|
||||
err = -EBUSY;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&qe_gc->lock, flags);
|
||||
|
||||
if (!err)
|
||||
return qe_pin;
|
||||
err0:
|
||||
kfree(qe_pin);
|
||||
pr_debug("%s failed with status %d\n", __func__, err);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
EXPORT_SYMBOL(qe_pin_request);
|
||||
|
||||
/**
|
||||
* qe_pin_free - Free a pin
|
||||
* @qe_pin: pointer to the qe_pin structure
|
||||
* Context: any
|
||||
*
|
||||
* This function frees the qe_pin structure and makes a pin available
|
||||
* for further qe_pin_request() calls.
|
||||
*/
|
||||
void qe_pin_free(struct qe_pin *qe_pin)
|
||||
{
|
||||
struct qe_gpio_chip *qe_gc = qe_pin->controller;
|
||||
unsigned long flags;
|
||||
const int pin = qe_pin->num;
|
||||
|
||||
spin_lock_irqsave(&qe_gc->lock, flags);
|
||||
test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
|
||||
spin_unlock_irqrestore(&qe_gc->lock, flags);
|
||||
|
||||
kfree(qe_pin);
|
||||
}
|
||||
EXPORT_SYMBOL(qe_pin_free);
|
||||
|
||||
/**
|
||||
* qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
|
||||
* @qe_pin: pointer to the qe_pin structure
|
||||
* Context: any
|
||||
*
|
||||
* This function resets a pin to a dedicated peripheral function that
|
||||
* has been set up by the firmware.
|
||||
*/
|
||||
void qe_pin_set_dedicated(struct qe_pin *qe_pin)
|
||||
{
|
||||
struct qe_gpio_chip *qe_gc = qe_pin->controller;
|
||||
struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
|
||||
struct qe_pio_regs *sregs = &qe_gc->saved_regs;
|
||||
int pin = qe_pin->num;
|
||||
u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
|
||||
u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
|
||||
bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qe_gc->lock, flags);
|
||||
|
||||
if (second_reg) {
|
||||
clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
|
||||
clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
|
||||
} else {
|
||||
clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
|
||||
clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
|
||||
}
|
||||
|
||||
if (sregs->cpdata & mask1)
|
||||
qe_gc->cpdata |= mask1;
|
||||
else
|
||||
qe_gc->cpdata &= ~mask1;
|
||||
|
||||
out_be32(&regs->cpdata, qe_gc->cpdata);
|
||||
clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
|
||||
|
||||
spin_unlock_irqrestore(&qe_gc->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(qe_pin_set_dedicated);
|
||||
|
||||
/**
|
||||
* qe_pin_set_gpio - Set a pin to the GPIO mode
|
||||
* @qe_pin: pointer to the qe_pin structure
|
||||
* Context: any
|
||||
*
|
||||
* This function sets a pin to the GPIO mode.
|
||||
*/
|
||||
void qe_pin_set_gpio(struct qe_pin *qe_pin)
|
||||
{
|
||||
struct qe_gpio_chip *qe_gc = qe_pin->controller;
|
||||
struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&qe_gc->lock, flags);
|
||||
|
||||
/* Let's make it input by default, GPIO API is able to change that. */
|
||||
__par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
|
||||
|
||||
spin_unlock_irqrestore(&qe_gc->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(qe_pin_set_gpio);
|
||||
|
||||
static int __init qe_add_gpiochips(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
|
||||
for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
|
||||
int ret;
|
||||
struct qe_gpio_chip *qe_gc;
|
||||
struct of_mm_gpio_chip *mm_gc;
|
||||
struct gpio_chip *gc;
|
||||
|
||||
qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
|
||||
if (!qe_gc) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
|
||||
spin_lock_init(&qe_gc->lock);
|
||||
|
||||
mm_gc = &qe_gc->mm_gc;
|
||||
gc = &mm_gc->gc;
|
||||
|
||||
mm_gc->save_regs = qe_gpio_save_regs;
|
||||
gc->ngpio = QE_PIO_PINS;
|
||||
gc->direction_input = qe_gpio_dir_in;
|
||||
gc->direction_output = qe_gpio_dir_out;
|
||||
gc->get = qe_gpio_get;
|
||||
gc->set = qe_gpio_set;
|
||||
|
||||
ret = of_mm_gpiochip_add(np, mm_gc);
|
||||
if (ret)
|
||||
goto err;
|
||||
continue;
|
||||
err:
|
||||
pr_err("%s: registration failed with status %d\n",
|
||||
np->full_name, ret);
|
||||
kfree(qe_gc);
|
||||
/* try others anyway */
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
arch_initcall(qe_add_gpiochips);
|
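The qe_pin_* helpers above form a small pin-multiplexing API. A hedged usage sketch follows; the consumer function is hypothetical, index 0 of the node's "gpios" property is assumed to name a QE par_io pin, and the qe_pin prototypes are assumed to come from <soc/fsl/qe/qe.h>.

#include <linux/err.h>
#include <linux/of.h>
#include <soc/fsl/qe/qe.h>

static int example_borrow_qe_pin(struct device_node *np)
{
	/* index 0 of the node's "gpios" property is assumed to point at a QE pin */
	struct qe_pin *pin = qe_pin_request(np, 0);

	if (IS_ERR(pin))
		return PTR_ERR(pin);

	qe_pin_set_gpio(pin);		/* drive it as a plain GPIO for a while */
	/* ... toggle the corresponding GPIO here ... */
	qe_pin_set_dedicated(pin);	/* restore the dedicated peripheral function */
	qe_pin_free(pin);
	return 0;
}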
719
drivers/soc/fsl/qe/qe.c
Normal file
@@ -0,0 +1,719 @@
|
||||
/*
|
||||
* Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* Authors: Shlomi Gridish <gridish@freescale.com>
|
||||
* Li Yang <leoli@freescale.com>
|
||||
* Based on cpm2_common.c from Dan Malek (dmalek@jlc.net)
|
||||
*
|
||||
* Description:
|
||||
* General Purpose functions for the global management of the
|
||||
* QUICC Engine (QE).
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/param.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/crc32.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <soc/fsl/qe/immap_qe.h>
|
||||
#include <soc/fsl/qe/qe.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/rheap.h>
|
||||
|
||||
static void qe_snums_init(void);
|
||||
static int qe_sdma_init(void);
|
||||
|
||||
static DEFINE_SPINLOCK(qe_lock);
|
||||
DEFINE_SPINLOCK(cmxgcr_lock);
|
||||
EXPORT_SYMBOL(cmxgcr_lock);
|
||||
|
||||
/* QE snum state */
|
||||
enum qe_snum_state {
|
||||
QE_SNUM_STATE_USED,
|
||||
QE_SNUM_STATE_FREE
|
||||
};
|
||||
|
||||
/* QE snum */
|
||||
struct qe_snum {
|
||||
u8 num;
|
||||
enum qe_snum_state state;
|
||||
};
|
||||
|
||||
/* We allocate this here because it is used almost exclusively for
|
||||
* the communication processor devices.
|
||||
*/
|
||||
struct qe_immap __iomem *qe_immr;
|
||||
EXPORT_SYMBOL(qe_immr);
|
||||
|
||||
static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
|
||||
static unsigned int qe_num_of_snum;
|
||||
|
||||
static phys_addr_t qebase = -1;
|
||||
|
||||
phys_addr_t get_qe_base(void)
|
||||
{
|
||||
struct device_node *qe;
|
||||
int size;
|
||||
const u32 *prop;
|
||||
|
||||
if (qebase != -1)
|
||||
return qebase;
|
||||
|
||||
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
|
||||
if (!qe) {
|
||||
qe = of_find_node_by_type(NULL, "qe");
|
||||
if (!qe)
|
||||
return qebase;
|
||||
}
|
||||
|
||||
prop = of_get_property(qe, "reg", &size);
|
||||
if (prop && size >= sizeof(*prop))
|
||||
qebase = of_translate_address(qe, prop);
|
||||
of_node_put(qe);
|
||||
|
||||
return qebase;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(get_qe_base);
|
||||
|
||||
void qe_reset(void)
|
||||
{
|
||||
if (qe_immr == NULL)
|
||||
qe_immr = ioremap(get_qe_base(), QE_IMMAP_SIZE);
|
||||
|
||||
qe_snums_init();
|
||||
|
||||
qe_issue_cmd(QE_RESET, QE_CR_SUBBLOCK_INVALID,
|
||||
QE_CR_PROTOCOL_UNSPECIFIED, 0);
|
||||
|
||||
/* Reclaim the MURAM memory for our use. */
|
||||
qe_muram_init();
|
||||
|
||||
if (qe_sdma_init())
|
||||
panic("sdma init failed!");
|
||||
}
|
||||
|
||||
int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
|
||||
{
|
||||
unsigned long flags;
|
||||
u8 mcn_shift = 0, dev_shift = 0;
|
||||
u32 ret;
|
||||
|
||||
spin_lock_irqsave(&qe_lock, flags);
|
||||
if (cmd == QE_RESET) {
|
||||
out_be32(&qe_immr->cp.cecr, (u32) (cmd | QE_CR_FLG));
|
||||
} else {
|
||||
if (cmd == QE_ASSIGN_PAGE) {
|
||||
/* Here device is the SNUM, not sub-block */
|
||||
dev_shift = QE_CR_SNUM_SHIFT;
|
||||
} else if (cmd == QE_ASSIGN_RISC) {
|
||||
/* Here device is the SNUM, and mcnProtocol is
|
||||
* e_QeCmdRiscAssignment value */
|
||||
dev_shift = QE_CR_SNUM_SHIFT;
|
||||
mcn_shift = QE_CR_MCN_RISC_ASSIGN_SHIFT;
|
||||
} else {
|
||||
if (device == QE_CR_SUBBLOCK_USB)
|
||||
mcn_shift = QE_CR_MCN_USB_SHIFT;
|
||||
else
|
||||
mcn_shift = QE_CR_MCN_NORMAL_SHIFT;
|
||||
}
|
||||
|
||||
out_be32(&qe_immr->cp.cecdr, cmd_input);
|
||||
out_be32(&qe_immr->cp.cecr,
|
||||
(cmd | QE_CR_FLG | ((u32) device << dev_shift) | (u32)
|
||||
mcn_protocol << mcn_shift));
|
||||
}
|
||||
|
||||
/* wait for the QE_CR_FLG to clear */
|
||||
ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
|
||||
100, 0);
|
||||
/* On timeout (e.g. failure), the expression will be false (ret == 0),
|
||||
otherwise it will be true (ret == 1). */
|
||||
spin_unlock_irqrestore(&qe_lock, flags);
|
||||
|
||||
return ret == 1;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_issue_cmd);
|
||||
|
||||
/* Set a baud rate generator. This needs lots of work. There are
|
||||
* 16 BRGs, which can be connected to the QE channels or output
|
||||
* as clocks. The BRGs are in two different block of internal
|
||||
* memory mapped space.
|
||||
* The BRG clock is the QE clock divided by 2.
|
||||
* It was set up long ago during the initial boot phase and is
|
||||
* given to us.
|
||||
* Baud rate clocks are zero-based in the driver code (as that maps
|
||||
* to port numbers). Documentation uses 1-based numbering.
|
||||
*/
|
||||
static unsigned int brg_clk = 0;
|
||||
|
||||
unsigned int qe_get_brg_clk(void)
|
||||
{
|
||||
struct device_node *qe;
|
||||
int size;
|
||||
const u32 *prop;
|
||||
|
||||
if (brg_clk)
|
||||
return brg_clk;
|
||||
|
||||
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
|
||||
if (!qe) {
|
||||
qe = of_find_node_by_type(NULL, "qe");
|
||||
if (!qe)
|
||||
return brg_clk;
|
||||
}
|
||||
|
||||
prop = of_get_property(qe, "brg-frequency", &size);
|
||||
if (prop && size == sizeof(*prop))
|
||||
brg_clk = *prop;
|
||||
|
||||
of_node_put(qe);
|
||||
|
||||
return brg_clk;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_get_brg_clk);
|
||||
|
||||
/* Program the BRG to the given sampling rate and multiplier
|
||||
*
|
||||
* @brg: the BRG, QE_BRG1 - QE_BRG16
|
||||
* @rate: the desired sampling rate
|
||||
* @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
|
||||
* GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
|
||||
* then 'multiplier' should be 8.
|
||||
*/
|
||||
int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
|
||||
{
|
||||
u32 divisor, tempval;
|
||||
u32 div16 = 0;
|
||||
|
||||
if ((brg < QE_BRG1) || (brg > QE_BRG16))
|
||||
return -EINVAL;
|
||||
|
||||
divisor = qe_get_brg_clk() / (rate * multiplier);
|
||||
|
||||
if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
|
||||
div16 = QE_BRGC_DIV16;
|
||||
divisor /= 16;
|
||||
}
|
||||
|
||||
/* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
|
||||
that the BRG divisor must be even if you're not using divide-by-16
|
||||
mode. */
|
||||
if (!div16 && (divisor & 1) && (divisor > 3))
|
||||
divisor++;
|
||||
|
||||
tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
|
||||
QE_BRGC_ENABLE | div16;
|
||||
|
||||
out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_setbrg);
|
||||
|
||||
/* Convert a string to a QE clock source enum
|
||||
*
|
||||
* This function takes a string, typically from a property in the device
|
||||
* tree, and returns the corresponding "enum qe_clock" value.
|
||||
*/
|
||||
enum qe_clock qe_clock_source(const char *source)
|
||||
{
|
||||
unsigned int i;
|
||||
|
||||
if (strcasecmp(source, "none") == 0)
|
||||
return QE_CLK_NONE;
|
||||
|
||||
if (strncasecmp(source, "brg", 3) == 0) {
|
||||
i = simple_strtoul(source + 3, NULL, 10);
|
||||
if ((i >= 1) && (i <= 16))
|
||||
return (QE_BRG1 - 1) + i;
|
||||
else
|
||||
return QE_CLK_DUMMY;
|
||||
}
|
||||
|
||||
if (strncasecmp(source, "clk", 3) == 0) {
|
||||
i = simple_strtoul(source + 3, NULL, 10);
|
||||
if ((i >= 1) && (i <= 24))
|
||||
return (QE_CLK1 - 1) + i;
|
||||
else
|
||||
return QE_CLK_DUMMY;
|
||||
}
|
||||
|
||||
return QE_CLK_DUMMY;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_clock_source);
|
||||
|
||||
/* Initialize SNUMs (thread serial numbers) according to
|
||||
* QE Module Control chapter, SNUM table
|
||||
*/
|
||||
static void qe_snums_init(void)
|
||||
{
|
||||
int i;
|
||||
static const u8 snum_init_76[] = {
|
||||
0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
|
||||
0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
|
||||
0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
|
||||
0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
|
||||
0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
|
||||
0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
|
||||
0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
|
||||
0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
|
||||
0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
|
||||
0xF4, 0xF5, 0xFC, 0xFD,
|
||||
};
|
||||
static const u8 snum_init_46[] = {
|
||||
0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
|
||||
0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
|
||||
0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
|
||||
0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
|
||||
0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
|
||||
0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
|
||||
};
|
||||
static const u8 *snum_init;
|
||||
|
||||
qe_num_of_snum = qe_get_num_of_snums();
|
||||
|
||||
if (qe_num_of_snum == 76)
|
||||
snum_init = snum_init_76;
|
||||
else
|
||||
snum_init = snum_init_46;
|
||||
|
||||
for (i = 0; i < qe_num_of_snum; i++) {
|
||||
snums[i].num = snum_init[i];
|
||||
snums[i].state = QE_SNUM_STATE_FREE;
|
||||
}
|
||||
}
|
||||
|
||||
int qe_get_snum(void)
|
||||
{
|
||||
unsigned long flags;
|
||||
int snum = -EBUSY;
|
||||
int i;
|
||||
|
||||
spin_lock_irqsave(&qe_lock, flags);
|
||||
for (i = 0; i < qe_num_of_snum; i++) {
|
||||
if (snums[i].state == QE_SNUM_STATE_FREE) {
|
||||
snums[i].state = QE_SNUM_STATE_USED;
|
||||
snum = snums[i].num;
|
||||
break;
|
||||
}
|
||||
}
|
||||
spin_unlock_irqrestore(&qe_lock, flags);
|
||||
|
||||
return snum;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_get_snum);
|
||||
|
||||
void qe_put_snum(u8 snum)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < qe_num_of_snum; i++) {
|
||||
if (snums[i].num == snum) {
|
||||
snums[i].state = QE_SNUM_STATE_FREE;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(qe_put_snum);
|
||||
|
||||
static int qe_sdma_init(void)
|
||||
{
|
||||
struct sdma __iomem *sdma = &qe_immr->sdma;
|
||||
static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
|
||||
|
||||
if (!sdma)
|
||||
return -ENODEV;
|
||||
|
||||
/* allocate 2 internal temporary buffers (512 bytes size each) for
|
||||
* the SDMA */
|
||||
if (IS_ERR_VALUE(sdma_buf_offset)) {
|
||||
sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
|
||||
if (IS_ERR_VALUE(sdma_buf_offset))
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
|
||||
out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
|
||||
(0x1 << QE_SDMR_CEN_SHIFT)));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* The maximum number of RISCs we support */
|
||||
#define MAX_QE_RISC 4
|
||||
|
||||
/* Firmware information stored here for qe_get_firmware_info() */
|
||||
static struct qe_firmware_info qe_firmware_info;
|
||||
|
||||
/*
|
||||
* Set to 1 if QE firmware has been uploaded, and therefore
|
||||
* qe_firmware_info contains valid data.
|
||||
*/
|
||||
static int qe_firmware_uploaded;
|
||||
|
||||
/*
|
||||
* Upload a QE microcode
|
||||
*
|
||||
* This function is a worker function for qe_upload_firmware(). It does
|
||||
* the actual uploading of the microcode.
|
||||
*/
|
||||
static void qe_upload_microcode(const void *base,
|
||||
const struct qe_microcode *ucode)
|
||||
{
|
||||
const __be32 *code = base + be32_to_cpu(ucode->code_offset);
|
||||
unsigned int i;
|
||||
|
||||
if (ucode->major || ucode->minor || ucode->revision)
|
||||
printk(KERN_INFO "qe-firmware: "
|
||||
"uploading microcode '%s' version %u.%u.%u\n",
|
||||
ucode->id, ucode->major, ucode->minor, ucode->revision);
|
||||
else
|
||||
printk(KERN_INFO "qe-firmware: "
|
||||
"uploading microcode '%s'\n", ucode->id);
|
||||
|
||||
/* Use auto-increment */
|
||||
out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
|
||||
QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
|
||||
|
||||
for (i = 0; i < be32_to_cpu(ucode->count); i++)
|
||||
out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
|
||||
|
||||
/* Set I-RAM Ready Register */
|
||||
out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
|
||||
}
|
||||
|
||||
/*
|
||||
* Upload a microcode to the I-RAM at a specific address.
|
||||
*
|
||||
* See Documentation/powerpc/qe_firmware.txt for information on QE microcode
|
||||
* uploading.
|
||||
*
|
||||
* Currently, only version 1 is supported, so the 'version' field must be
|
||||
* set to 1.
|
||||
*
|
||||
* The SOC model and revision are not validated, they are only displayed for
|
||||
* informational purposes.
|
||||
*
|
||||
* 'calc_size' is the calculated size, in bytes, of the firmware structure and
|
||||
* all of the microcode structures, minus the CRC.
|
||||
*
|
||||
* 'length' is the size that the structure says it is, including the CRC.
|
||||
*/
|
||||
int qe_upload_firmware(const struct qe_firmware *firmware)
|
||||
{
|
||||
unsigned int i;
|
||||
unsigned int j;
|
||||
u32 crc;
|
||||
size_t calc_size = sizeof(struct qe_firmware);
|
||||
size_t length;
|
||||
const struct qe_header *hdr;
|
||||
|
||||
if (!firmware) {
|
||||
printk(KERN_ERR "qe-firmware: invalid pointer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
hdr = &firmware->header;
|
||||
length = be32_to_cpu(hdr->length);
|
||||
|
||||
/* Check the magic */
|
||||
if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
|
||||
(hdr->magic[2] != 'F')) {
|
||||
printk(KERN_ERR "qe-firmware: not a microcode\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
/* Check the version */
|
||||
if (hdr->version != 1) {
|
||||
printk(KERN_ERR "qe-firmware: unsupported version\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
/* Validate some of the fields */
|
||||
if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
|
||||
printk(KERN_ERR "qe-firmware: invalid data\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Validate the length and check if there's a CRC */
|
||||
calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
|
||||
|
||||
for (i = 0; i < firmware->count; i++)
|
||||
/*
|
||||
* For situations where the second RISC uses the same microcode
|
||||
* as the first, the 'code_offset' and 'count' fields will be
|
||||
* zero, so it's okay to add those.
|
||||
*/
|
||||
calc_size += sizeof(__be32) *
|
||||
be32_to_cpu(firmware->microcode[i].count);
|
||||
|
||||
/* Validate the length */
|
||||
if (length != calc_size + sizeof(__be32)) {
|
||||
printk(KERN_ERR "qe-firmware: invalid length\n");
|
||||
return -EPERM;
|
||||
}
|
||||
|
||||
/* Validate the CRC */
|
||||
crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
|
||||
if (crc != crc32(0, firmware, calc_size)) {
|
||||
printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/*
|
||||
* If the microcode calls for it, split the I-RAM.
|
||||
*/
|
||||
if (!firmware->split)
|
||||
setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
|
||||
|
||||
if (firmware->soc.model)
|
||||
printk(KERN_INFO
|
||||
"qe-firmware: firmware '%s' for %u V%u.%u\n",
|
||||
firmware->id, be16_to_cpu(firmware->soc.model),
|
||||
firmware->soc.major, firmware->soc.minor);
|
||||
else
|
||||
printk(KERN_INFO "qe-firmware: firmware '%s'\n",
|
||||
firmware->id);
|
||||
|
||||
/*
|
||||
* The QE only supports one microcode per RISC, so clear out all the
|
||||
* saved microcode information and put in the new.
|
||||
*/
|
||||
memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
|
||||
strlcpy(qe_firmware_info.id, firmware->id, sizeof(qe_firmware_info.id));
|
||||
qe_firmware_info.extended_modes = firmware->extended_modes;
|
||||
memcpy(qe_firmware_info.vtraps, firmware->vtraps,
|
||||
sizeof(firmware->vtraps));
|
||||
|
||||
/* Loop through each microcode. */
|
||||
for (i = 0; i < firmware->count; i++) {
|
||||
const struct qe_microcode *ucode = &firmware->microcode[i];
|
||||
|
||||
/* Upload a microcode if it's present */
|
||||
if (ucode->code_offset)
|
||||
qe_upload_microcode(firmware, ucode);
|
||||
|
||||
/* Program the traps for this processor */
|
||||
for (j = 0; j < 16; j++) {
|
||||
u32 trap = be32_to_cpu(ucode->traps[j]);
|
||||
|
||||
if (trap)
|
||||
out_be32(&qe_immr->rsp[i].tibcr[j], trap);
|
||||
}
|
||||
|
||||
/* Enable traps */
|
||||
out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
|
||||
}
|
||||
|
||||
qe_firmware_uploaded = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_upload_firmware);
|
||||
|
||||
/*
|
||||
* Get info on the currently-loaded firmware
|
||||
*
|
||||
* This function also checks the device tree to see if the boot loader has
|
||||
* uploaded a firmware already.
|
||||
*/
|
||||
struct qe_firmware_info *qe_get_firmware_info(void)
|
||||
{
|
||||
static int initialized;
|
||||
struct property *prop;
|
||||
struct device_node *qe;
|
||||
struct device_node *fw = NULL;
|
||||
const char *sprop;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* If we haven't checked yet, and a driver hasn't uploaded a firmware
|
||||
* yet, then check the device tree for information.
|
||||
*/
|
||||
if (qe_firmware_uploaded)
|
||||
return &qe_firmware_info;
|
||||
|
||||
if (initialized)
|
||||
return NULL;
|
||||
|
||||
initialized = 1;
|
||||
|
||||
/*
|
||||
* Newer device trees have an "fsl,qe" compatible property for the QE
|
||||
* node, but we still need to support older device trees.
|
||||
*/
|
||||
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
|
||||
if (!qe) {
|
||||
qe = of_find_node_by_type(NULL, "qe");
|
||||
if (!qe)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* Find the 'firmware' child node */
|
||||
for_each_child_of_node(qe, fw) {
|
||||
if (strcmp(fw->name, "firmware") == 0)
|
||||
break;
|
||||
}
|
||||
|
||||
of_node_put(qe);
|
||||
|
||||
/* Did we find the 'firmware' node? */
|
||||
if (!fw)
|
||||
return NULL;
|
||||
|
||||
qe_firmware_uploaded = 1;
|
||||
|
||||
/* Copy the data into qe_firmware_info*/
|
||||
sprop = of_get_property(fw, "id", NULL);
|
||||
if (sprop)
|
||||
strlcpy(qe_firmware_info.id, sprop,
|
||||
sizeof(qe_firmware_info.id));
|
||||
|
||||
prop = of_find_property(fw, "extended-modes", NULL);
|
||||
if (prop && (prop->length == sizeof(u64))) {
|
||||
const u64 *iprop = prop->value;
|
||||
|
||||
qe_firmware_info.extended_modes = *iprop;
|
||||
}
|
||||
|
||||
prop = of_find_property(fw, "virtual-traps", NULL);
|
||||
if (prop && (prop->length == 32)) {
|
||||
const u32 *iprop = prop->value;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
|
||||
qe_firmware_info.vtraps[i] = iprop[i];
|
||||
}
|
||||
|
||||
of_node_put(fw);
|
||||
|
||||
return &qe_firmware_info;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_get_firmware_info);
|
||||
|
||||
unsigned int qe_get_num_of_risc(void)
|
||||
{
|
||||
struct device_node *qe;
|
||||
int size;
|
||||
unsigned int num_of_risc = 0;
|
||||
const u32 *prop;
|
||||
|
||||
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
|
||||
if (!qe) {
|
||||
/* Older device trees did not have an "fsl,qe"
|
||||
* compatible property, so we need to look for
|
||||
* the QE node by name.
|
||||
*/
|
||||
qe = of_find_node_by_type(NULL, "qe");
|
||||
if (!qe)
|
||||
return num_of_risc;
|
||||
}
|
||||
|
||||
prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
|
||||
if (prop && size == sizeof(*prop))
|
||||
num_of_risc = *prop;
|
||||
|
||||
of_node_put(qe);
|
||||
|
||||
return num_of_risc;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_get_num_of_risc);
|
||||
|
||||
unsigned int qe_get_num_of_snums(void)
|
||||
{
|
||||
struct device_node *qe;
|
||||
int size;
|
||||
unsigned int num_of_snums;
|
||||
const u32 *prop;
|
||||
|
||||
num_of_snums = 28; /* The default number of snum for threads is 28 */
|
||||
qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
|
||||
if (!qe) {
|
||||
/* Older device trees did not have an "fsl,qe"
|
||||
* compatible property, so we need to look for
|
||||
* the QE node by name.
|
||||
*/
|
||||
qe = of_find_node_by_type(NULL, "qe");
|
||||
if (!qe)
|
||||
return num_of_snums;
|
||||
}
|
||||
|
||||
prop = of_get_property(qe, "fsl,qe-num-snums", &size);
|
||||
if (prop && size == sizeof(*prop)) {
|
||||
num_of_snums = *prop;
|
||||
if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
|
||||
/* No QE ever has fewer than 28 SNUMs */
|
||||
pr_err("QE: number of snum is invalid\n");
|
||||
of_node_put(qe);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
of_node_put(qe);
|
||||
|
||||
return num_of_snums;
|
||||
}
|
||||
EXPORT_SYMBOL(qe_get_num_of_snums);
|
||||
|
||||
static int __init qe_init(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
|
||||
np = of_find_compatible_node(NULL, NULL, "fsl,qe");
|
||||
if (!np)
|
||||
return -ENODEV;
|
||||
qe_reset();
|
||||
of_node_put(np);
|
||||
return 0;
|
||||
}
|
||||
subsys_initcall(qe_init);
|
||||
|
||||
#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
|
||||
static int qe_resume(struct platform_device *ofdev)
|
||||
{
|
||||
if (!qe_alive_during_sleep())
|
||||
qe_reset();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int qe_probe(struct platform_device *ofdev)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id qe_ids[] = {
|
||||
{ .compatible = "fsl,qe", },
|
||||
{ },
|
||||
};
|
||||
|
||||
static struct platform_driver qe_driver = {
|
||||
.driver = {
|
||||
.name = "fsl-qe",
|
||||
.of_match_table = qe_ids,
|
||||
},
|
||||
.probe = qe_probe,
|
||||
.resume = qe_resume,
|
||||
};
|
||||
|
||||
static int __init qe_drv_init(void)
|
||||
{
|
||||
return platform_driver_register(&qe_driver);
|
||||
}
|
||||
device_initcall(qe_drv_init);
|
||||
#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
|
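qe_clock_source() and qe_setbrg() above are the pieces a UCC serial-style driver would combine to clock a port. A minimal sketch under that assumption follows; the helper name is hypothetical, and the 16x multiplier mirrors the GUMR_L[RDCR]/[TDCR] convention described in the qe_setbrg() comment above.

#include <soc/fsl/qe/qe.h>

static int example_program_rx_brg(const char *clk_name, unsigned int baud)
{
	/* e.g. "brg2" maps to QE_BRG2; unknown names yield QE_CLK_DUMMY */
	enum qe_clock clk = qe_clock_source(clk_name);

	/* qe_setbrg() itself rejects anything outside QE_BRG1..QE_BRG16 */
	return qe_setbrg(clk, baud, 16);
}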
235
drivers/soc/fsl/qe/qe_common.c
Normal file
@@ -0,0 +1,235 @@
|
||||
/*
|
||||
* Common CPM code
|
||||
*
|
||||
* Author: Scott Wood <scottwood@freescale.com>
|
||||
*
|
||||
* Copyright 2007-2008,2010 Freescale Semiconductor, Inc.
|
||||
*
|
||||
* Some parts derived from commproc.c/cpm2_common.c, which is:
|
||||
* Copyright (c) 1997 Dan error_act (dmalek@jlc.net)
|
||||
* Copyright (c) 1999-2001 Dan Malek <dan@embeddedalley.com>
|
||||
* Copyright (c) 2000 MontaVista Software, Inc (source@mvista.com)
|
||||
* 2006 (c) MontaVista Software, Inc.
|
||||
* Vitaly Bordug <vbordug@ru.mvista.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of version 2 of the GNU General Public License as
|
||||
* published by the Free Software Foundation.
|
||||
*/
|
||||
#include <linux/genalloc.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
#include <soc/fsl/qe/qe.h>
|
||||
|
||||
static struct gen_pool *muram_pool;
|
||||
static spinlock_t cpm_muram_lock;
|
||||
static u8 __iomem *muram_vbase;
|
||||
static phys_addr_t muram_pbase;
|
||||
|
||||
struct muram_block {
|
||||
struct list_head head;
|
||||
unsigned long start;
|
||||
int size;
|
||||
};
|
||||
|
||||
static LIST_HEAD(muram_block_list);
|
||||
|
||||
/* max address size we deal with */
|
||||
#define OF_MAX_ADDR_CELLS 4
|
||||
#define GENPOOL_OFFSET (4096 * 8)
|
||||
|
||||
int cpm_muram_init(void)
|
||||
{
|
||||
struct device_node *np;
|
||||
struct resource r;
|
||||
u32 zero[OF_MAX_ADDR_CELLS] = {};
|
||||
resource_size_t max = 0;
|
||||
int i = 0;
|
||||
int ret = 0;
|
||||
|
||||
if (muram_pbase)
|
||||
return 0;
|
||||
|
||||
spin_lock_init(&cpm_muram_lock);
|
||||
np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
|
||||
if (!np) {
|
||||
/* try legacy bindings */
|
||||
np = of_find_node_by_name(NULL, "data-only");
|
||||
if (!np) {
|
||||
pr_err("Cannot find CPM muram data node");
|
||||
ret = -ENODEV;
|
||||
goto out_muram;
|
||||
}
|
||||
}
|
||||
|
||||
muram_pool = gen_pool_create(0, -1);
|
||||
muram_pbase = of_translate_address(np, zero);
|
||||
if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
|
||||
pr_err("Cannot translate zero through CPM muram node");
|
||||
ret = -ENODEV;
|
||||
goto out_pool;
|
||||
}
|
||||
|
||||
while (of_address_to_resource(np, i++, &r) == 0) {
|
||||
if (r.end > max)
|
||||
max = r.end;
|
||||
ret = gen_pool_add(muram_pool, r.start - muram_pbase +
|
||||
GENPOOL_OFFSET, resource_size(&r), -1);
|
||||
if (ret) {
|
||||
pr_err("QE: couldn't add muram to pool!\n");
|
||||
goto out_pool;
|
||||
}
|
||||
}
|
||||
|
||||
muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
|
||||
if (!muram_vbase) {
|
||||
pr_err("Cannot map QE muram");
|
||||
ret = -ENOMEM;
|
||||
goto out_pool;
|
||||
}
|
||||
goto out_muram;
|
||||
out_pool:
|
||||
gen_pool_destroy(muram_pool);
|
||||
out_muram:
|
||||
of_node_put(np);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* cpm_muram_alloc - allocate the requested size worth of multi-user ram
|
||||
* @size: number of bytes to allocate
|
||||
* @align: requested alignment, in bytes
|
||||
*
|
||||
* This function returns an offset into the muram area.
|
||||
* Use cpm_dpram_addr() to get the virtual address of the area.
|
||||
* Use cpm_muram_free() to free the allocation.
|
||||
*/
|
||||
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
|
||||
{
|
||||
unsigned long start;
|
||||
unsigned long flags;
|
||||
struct genpool_data_align muram_pool_data;
|
||||
|
||||
spin_lock_irqsave(&cpm_muram_lock, flags);
|
||||
muram_pool_data.align = align;
|
||||
start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
|
||||
&muram_pool_data);
|
||||
spin_unlock_irqrestore(&cpm_muram_lock, flags);
|
||||
return start;
|
||||
}
|
||||
EXPORT_SYMBOL(cpm_muram_alloc);
|
||||
|
||||
/**
|
||||
* cpm_muram_free - free a chunk of multi-user ram
|
||||
* @offset: The beginning of the chunk as returned by cpm_muram_alloc().
|
||||
*/
|
||||
int cpm_muram_free(unsigned long offset)
|
||||
{
|
||||
unsigned long flags;
|
||||
int size;
|
||||
struct muram_block *tmp;
|
||||
|
||||
size = 0;
|
||||
spin_lock_irqsave(&cpm_muram_lock, flags);
|
||||
list_for_each_entry(tmp, &muram_block_list, head) {
|
||||
if (tmp->start == offset) {
|
||||
size = tmp->size;
|
||||
list_del(&tmp->head);
|
||||
kfree(tmp);
|
||||
break;
|
||||
}
|
||||
}
|
||||
gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
|
||||
spin_unlock_irqrestore(&cpm_muram_lock, flags);
|
||||
return size;
|
||||
}
|
||||
EXPORT_SYMBOL(cpm_muram_free);
|
||||
|
||||
/*
|
||||
* cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
|
||||
* @offset: offset of allocation start address
|
||||
* @size: number of bytes to allocate
|
||||
* This function returns an offset into the muram area
|
||||
* Use cpm_dpram_addr() to get the virtual address of the area.
|
||||
* Use cpm_muram_free() to free the allocation.
|
||||
*/
|
||||
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
|
||||
{
|
||||
unsigned long start;
|
||||
unsigned long flags;
|
||||
struct genpool_data_fixed muram_pool_data_fixed;
|
||||
|
||||
spin_lock_irqsave(&cpm_muram_lock, flags);
|
||||
muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
|
||||
start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
|
||||
&muram_pool_data_fixed);
|
||||
spin_unlock_irqrestore(&cpm_muram_lock, flags);
|
||||
return start;
|
||||
}
|
||||
EXPORT_SYMBOL(cpm_muram_alloc_fixed);
|
||||
|
||||
/*
|
||||
* cpm_muram_alloc_common - cpm_muram_alloc common code
|
||||
* @size: number of bytes to allocate
|
||||
* @algo: algorithm for alloc.
|
||||
* @data: data for genalloc's algorithm.
|
||||
*
|
||||
* This function returns an offset into the muram area.
|
||||
*/
|
||||
unsigned long cpm_muram_alloc_common(unsigned long size, genpool_algo_t algo,
|
||||
void *data)
|
||||
{
|
||||
struct muram_block *entry;
|
||||
unsigned long start;
|
||||
|
||||
start = gen_pool_alloc_algo(muram_pool, size, algo, data);
|
||||
if (!start)
|
||||
goto out2;
|
||||
start = start - GENPOOL_OFFSET;
|
||||
memset_io(cpm_muram_addr(start), 0, size);
|
||||
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
|
||||
if (!entry)
|
||||
goto out1;
|
||||
entry->start = start;
|
||||
entry->size = size;
|
||||
list_add(&entry->head, &muram_block_list);
|
||||
|
||||
return start;
|
||||
out1:
|
||||
gen_pool_free(muram_pool, start, size);
|
||||
out2:
|
||||
return (unsigned long)-ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpm_muram_addr - turn a muram offset into a virtual address
|
||||
* @offset: muram offset to convert
|
||||
*/
|
||||
void __iomem *cpm_muram_addr(unsigned long offset)
|
||||
{
|
||||
return muram_vbase + offset;
|
||||
}
|
||||
EXPORT_SYMBOL(cpm_muram_addr);
|
||||
|
||||
unsigned long cpm_muram_offset(void __iomem *addr)
|
||||
{
|
||||
return addr - (void __iomem *)muram_vbase;
|
||||
}
|
||||
EXPORT_SYMBOL(cpm_muram_offset);
|
||||
|
||||
/**
|
||||
* cpm_muram_dma - turn a muram virtual address into a DMA address
|
||||
* @offset: virtual address from cpm_muram_addr() to convert
|
||||
*/
|
||||
dma_addr_t cpm_muram_dma(void __iomem *addr)
|
||||
{
|
||||
return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
|
||||
}
|
||||
EXPORT_SYMBOL(cpm_muram_dma);
|
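The cpm_muram_* helpers above are how drivers carve parameter RAM out of MURAM. A hedged sketch of a hypothetical consumer; the size and alignment are illustrative only.

#include <linux/err.h>
#include <linux/io.h>
#include <soc/fsl/qe/qe.h>

static int example_use_muram(void)
{
	/* 64 bytes of MURAM, 32-byte aligned */
	unsigned long off = cpm_muram_alloc(64, 32);
	void __iomem *va;

	if (IS_ERR_VALUE(off))
		return -ENOMEM;

	va = cpm_muram_addr(off);	/* CPU view of the block */
	/* cpm_muram_dma(va) would give the address the QE sees */
	memset_io(va, 0, 64);		/* already zeroed by the allocator; shown for illustration */

	cpm_muram_free(off);
	return 0;
}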
503
drivers/soc/fsl/qe/qe_ic.c
Normal file
@@ -0,0 +1,503 @@
|
||||
/*
|
||||
* arch/powerpc/sysdev/qe_lib/qe_ic.c
|
||||
*
|
||||
* Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* Author: Li Yang <leoli@freescale.com>
|
||||
* Based on code from Shlomi Gridish <gridish@freescale.com>
|
||||
*
|
||||
* QUICC ENGINE Interrupt Controller
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/io.h>
|
||||
#include <soc/fsl/qe/qe_ic.h>
|
||||
|
||||
#include "qe_ic.h"
|
||||
|
||||
static DEFINE_RAW_SPINLOCK(qe_ic_lock);
|
||||
|
||||
static struct qe_ic_info qe_ic_info[] = {
|
||||
[1] = {
|
||||
.mask = 0x00008000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 0,
|
||||
.pri_reg = QEIC_CIPWCC,
|
||||
},
|
||||
[2] = {
|
||||
.mask = 0x00004000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 1,
|
||||
.pri_reg = QEIC_CIPWCC,
|
||||
},
|
||||
[3] = {
|
||||
.mask = 0x00002000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 2,
|
||||
.pri_reg = QEIC_CIPWCC,
|
||||
},
|
||||
[10] = {
|
||||
.mask = 0x00000040,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 1,
|
||||
.pri_reg = QEIC_CIPZCC,
|
||||
},
|
||||
[11] = {
|
||||
.mask = 0x00000020,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 2,
|
||||
.pri_reg = QEIC_CIPZCC,
|
||||
},
|
||||
[12] = {
|
||||
.mask = 0x00000010,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 3,
|
||||
.pri_reg = QEIC_CIPZCC,
|
||||
},
|
||||
[13] = {
|
||||
.mask = 0x00000008,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 4,
|
||||
.pri_reg = QEIC_CIPZCC,
|
||||
},
|
||||
[14] = {
|
||||
.mask = 0x00000004,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 5,
|
||||
.pri_reg = QEIC_CIPZCC,
|
||||
},
|
||||
[15] = {
|
||||
.mask = 0x00000002,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 6,
|
||||
.pri_reg = QEIC_CIPZCC,
|
||||
},
|
||||
[20] = {
|
||||
.mask = 0x10000000,
|
||||
.mask_reg = QEIC_CRIMR,
|
||||
.pri_code = 3,
|
||||
.pri_reg = QEIC_CIPRTA,
|
||||
},
|
||||
[25] = {
|
||||
.mask = 0x00800000,
|
||||
.mask_reg = QEIC_CRIMR,
|
||||
.pri_code = 0,
|
||||
.pri_reg = QEIC_CIPRTB,
|
||||
},
|
||||
[26] = {
|
||||
.mask = 0x00400000,
|
||||
.mask_reg = QEIC_CRIMR,
|
||||
.pri_code = 1,
|
||||
.pri_reg = QEIC_CIPRTB,
|
||||
},
|
||||
[27] = {
|
||||
.mask = 0x00200000,
|
||||
.mask_reg = QEIC_CRIMR,
|
||||
.pri_code = 2,
|
||||
.pri_reg = QEIC_CIPRTB,
|
||||
},
|
||||
[28] = {
|
||||
.mask = 0x00100000,
|
||||
.mask_reg = QEIC_CRIMR,
|
||||
.pri_code = 3,
|
||||
.pri_reg = QEIC_CIPRTB,
|
||||
},
|
||||
[32] = {
|
||||
.mask = 0x80000000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 0,
|
||||
.pri_reg = QEIC_CIPXCC,
|
||||
},
|
||||
[33] = {
|
||||
.mask = 0x40000000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 1,
|
||||
.pri_reg = QEIC_CIPXCC,
|
||||
},
|
||||
[34] = {
|
||||
.mask = 0x20000000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 2,
|
||||
.pri_reg = QEIC_CIPXCC,
|
||||
},
|
||||
[35] = {
|
||||
.mask = 0x10000000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 3,
|
||||
.pri_reg = QEIC_CIPXCC,
|
||||
},
|
||||
[36] = {
|
||||
.mask = 0x08000000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 4,
|
||||
.pri_reg = QEIC_CIPXCC,
|
||||
},
|
||||
[40] = {
|
||||
.mask = 0x00800000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 0,
|
||||
.pri_reg = QEIC_CIPYCC,
|
||||
},
|
||||
[41] = {
|
||||
.mask = 0x00400000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 1,
|
||||
.pri_reg = QEIC_CIPYCC,
|
||||
},
|
||||
[42] = {
|
||||
.mask = 0x00200000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 2,
|
||||
.pri_reg = QEIC_CIPYCC,
|
||||
},
|
||||
[43] = {
|
||||
.mask = 0x00100000,
|
||||
.mask_reg = QEIC_CIMR,
|
||||
.pri_code = 3,
|
||||
.pri_reg = QEIC_CIPYCC,
|
||||
},
|
||||
};
|
||||
|
||||
static inline u32 qe_ic_read(volatile __be32 __iomem * base, unsigned int reg)
|
||||
{
|
||||
return in_be32(base + (reg >> 2));
|
||||
}
|
||||
|
||||
static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg,
|
||||
u32 value)
|
||||
{
|
||||
out_be32(base + (reg >> 2), value);
|
||||
}
|
||||
|
||||
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
|
||||
{
|
||||
return irq_get_chip_data(virq);
|
||||
}
|
||||
|
||||
static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
|
||||
{
|
||||
return irq_data_get_irq_chip_data(d);
|
||||
}
|
||||
|
||||
static void qe_ic_unmask_irq(struct irq_data *d)
|
||||
{
|
||||
struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
|
||||
unsigned int src = irqd_to_hwirq(d);
|
||||
unsigned long flags;
|
||||
u32 temp;
|
||||
|
||||
raw_spin_lock_irqsave(&qe_ic_lock, flags);
|
||||
|
||||
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
|
||||
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
|
||||
temp | qe_ic_info[src].mask);
|
||||
|
||||
raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
|
||||
}
|
||||
|
||||
static void qe_ic_mask_irq(struct irq_data *d)
|
||||
{
|
||||
struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
|
||||
unsigned int src = irqd_to_hwirq(d);
|
||||
unsigned long flags;
|
||||
u32 temp;
|
||||
|
||||
raw_spin_lock_irqsave(&qe_ic_lock, flags);
|
||||
|
||||
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
|
||||
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
|
||||
temp & ~qe_ic_info[src].mask);
|
||||
|
||||
/* Flush the above write before enabling interrupts; otherwise,
|
||||
* spurious interrupts will sometimes happen. To be 100% sure
|
||||
* that the write has reached the device before interrupts are
|
||||
* enabled, the mask register would have to be read back; however,
|
||||
* this is not required for correctness, only to avoid wasting
|
||||
* time on a large number of spurious interrupts. In testing,
|
||||
* a sync reduced the observed spurious interrupts to zero.
|
||||
*/
|
||||
mb();
|
||||
|
||||
raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
|
||||
}
|
||||
|
||||
static struct irq_chip qe_ic_irq_chip = {
|
||||
.name = "QEIC",
|
||||
.irq_unmask = qe_ic_unmask_irq,
|
||||
.irq_mask = qe_ic_mask_irq,
|
||||
.irq_mask_ack = qe_ic_mask_irq,
|
||||
};
|
||||
|
||||
static int qe_ic_host_match(struct irq_domain *h, struct device_node *node,
|
||||
enum irq_domain_bus_token bus_token)
|
||||
{
|
||||
/* Exact match, unless qe_ic node is NULL */
|
||||
struct device_node *of_node = irq_domain_get_of_node(h);
|
||||
return of_node == NULL || of_node == node;
|
||||
}
|
||||
|
||||
static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
struct qe_ic *qe_ic = h->host_data;
|
||||
struct irq_chip *chip;
|
||||
|
||||
if (qe_ic_info[hw].mask == 0) {
|
||||
printk(KERN_ERR "Can't map reserved IRQ\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Default chip */
|
||||
chip = &qe_ic->hc_irq;
|
||||
|
||||
irq_set_chip_data(virq, qe_ic);
|
||||
irq_set_status_flags(virq, IRQ_LEVEL);
|
||||
|
||||
irq_set_chip_and_handler(virq, chip, handle_level_irq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops qe_ic_host_ops = {
|
||||
.match = qe_ic_host_match,
|
||||
.map = qe_ic_host_map,
|
||||
.xlate = irq_domain_xlate_onetwocell,
|
||||
};
|
||||
|
||||
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
|
||||
unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
|
||||
{
|
||||
int irq;
|
||||
|
||||
BUG_ON(qe_ic == NULL);
|
||||
|
||||
/* get the interrupt source vector. */
|
||||
irq = qe_ic_read(qe_ic->regs, QEIC_CIVEC) >> 26;
|
||||
|
||||
if (irq == 0)
|
||||
return NO_IRQ;
|
||||
|
||||
return irq_linear_revmap(qe_ic->irqhost, irq);
|
||||
}
|
||||
|
||||
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
|
||||
unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
|
||||
{
|
||||
int irq;
|
||||
|
||||
BUG_ON(qe_ic == NULL);
|
||||
|
||||
/* get the interrupt source vector. */
|
||||
irq = qe_ic_read(qe_ic->regs, QEIC_CHIVEC) >> 26;
|
||||
|
||||
if (irq == 0)
|
||||
return NO_IRQ;
|
||||
|
||||
return irq_linear_revmap(qe_ic->irqhost, irq);
|
||||
}
|
||||
|
||||
void __init qe_ic_init(struct device_node *node, unsigned int flags,
|
||||
void (*low_handler)(struct irq_desc *desc),
|
||||
void (*high_handler)(struct irq_desc *desc))
|
||||
{
|
||||
struct qe_ic *qe_ic;
|
||||
struct resource res;
|
||||
u32 temp = 0, ret, high_active = 0;
|
||||
|
||||
ret = of_address_to_resource(node, 0, &res);
|
||||
if (ret)
|
||||
return;
|
||||
|
||||
qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
|
||||
if (qe_ic == NULL)
|
||||
return;
|
||||
|
||||
qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
|
||||
&qe_ic_host_ops, qe_ic);
|
||||
if (qe_ic->irqhost == NULL) {
|
||||
kfree(qe_ic);
|
||||
return;
|
||||
}
|
||||
|
||||
qe_ic->regs = ioremap(res.start, resource_size(&res));
|
||||
|
||||
qe_ic->hc_irq = qe_ic_irq_chip;
|
||||
|
||||
qe_ic->virq_high = irq_of_parse_and_map(node, 0);
|
||||
qe_ic->virq_low = irq_of_parse_and_map(node, 1);
|
||||
|
||||
if (qe_ic->virq_low == NO_IRQ) {
|
||||
printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
|
||||
kfree(qe_ic);
|
||||
return;
|
||||
}
|
||||
|
||||
/* default priority scheme is grouped. If spread mode is */
|
||||
/* required, configure cicr accordingly. */
|
||||
if (flags & QE_IC_SPREADMODE_GRP_W)
|
||||
temp |= CICR_GWCC;
|
||||
if (flags & QE_IC_SPREADMODE_GRP_X)
|
||||
temp |= CICR_GXCC;
|
||||
if (flags & QE_IC_SPREADMODE_GRP_Y)
|
||||
temp |= CICR_GYCC;
|
||||
if (flags & QE_IC_SPREADMODE_GRP_Z)
|
||||
temp |= CICR_GZCC;
|
||||
if (flags & QE_IC_SPREADMODE_GRP_RISCA)
|
||||
temp |= CICR_GRTA;
|
||||
if (flags & QE_IC_SPREADMODE_GRP_RISCB)
|
||||
temp |= CICR_GRTB;
|
||||
|
||||
/* choose destination signal for highest priority interrupt */
|
||||
if (flags & QE_IC_HIGH_SIGNAL) {
|
||||
temp |= (SIGNAL_HIGH << CICR_HPIT_SHIFT);
|
||||
high_active = 1;
|
||||
}
|
||||
|
||||
qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
|
||||
|
||||
irq_set_handler_data(qe_ic->virq_low, qe_ic);
|
||||
irq_set_chained_handler(qe_ic->virq_low, low_handler);
|
||||
|
||||
if (qe_ic->virq_high != NO_IRQ &&
|
||||
qe_ic->virq_high != qe_ic->virq_low) {
|
||||
irq_set_handler_data(qe_ic->virq_high, qe_ic);
|
||||
irq_set_chained_handler(qe_ic->virq_high, high_handler);
|
||||
}
|
||||
}
|
||||
|
||||
void qe_ic_set_highest_priority(unsigned int virq, int high)
|
||||
{
|
||||
struct qe_ic *qe_ic = qe_ic_from_irq(virq);
|
||||
unsigned int src = virq_to_hw(virq);
|
||||
u32 temp = 0;
|
||||
|
||||
temp = qe_ic_read(qe_ic->regs, QEIC_CICR);
|
||||
|
||||
temp &= ~CICR_HP_MASK;
|
||||
temp |= src << CICR_HP_SHIFT;
|
||||
|
||||
temp &= ~CICR_HPIT_MASK;
|
||||
temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << CICR_HPIT_SHIFT;
|
||||
|
||||
qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
|
||||
}
|
||||
|
||||
/* Set Priority level within its group, from 1 to 8 */
|
||||
int qe_ic_set_priority(unsigned int virq, unsigned int priority)
|
||||
{
|
||||
struct qe_ic *qe_ic = qe_ic_from_irq(virq);
|
||||
unsigned int src = virq_to_hw(virq);
|
||||
u32 temp;
|
||||
|
||||
if (priority > 8 || priority == 0)
|
||||
return -EINVAL;
|
||||
if (src > 127)
|
||||
return -EINVAL;
|
||||
if (qe_ic_info[src].pri_reg == 0)
|
||||
return -EINVAL;
|
||||
|
||||
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].pri_reg);
|
||||
|
||||
if (priority < 4) {
|
||||
temp &= ~(0x7 << (32 - priority * 3));
|
||||
temp |= qe_ic_info[src].pri_code << (32 - priority * 3);
|
||||
} else {
|
||||
temp &= ~(0x7 << (24 - priority * 3));
|
||||
temp |= qe_ic_info[src].pri_code << (24 - priority * 3);
|
||||
}
|
||||
|
||||
qe_ic_write(qe_ic->regs, qe_ic_info[src].pri_reg, temp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Set a QE priority to use high irq, only priority 1~2 can use high irq */
|
||||
int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
|
||||
{
|
||||
struct qe_ic *qe_ic = qe_ic_from_irq(virq);
|
||||
unsigned int src = virq_to_hw(virq);
|
||||
u32 temp, control_reg = QEIC_CICNR, shift = 0;
|
||||
|
||||
if (priority > 2 || priority == 0)
|
||||
return -EINVAL;
|
||||
|
||||
switch (qe_ic_info[src].pri_reg) {
|
||||
case QEIC_CIPZCC:
|
||||
shift = CICNR_ZCC1T_SHIFT;
|
||||
break;
|
||||
case QEIC_CIPWCC:
|
||||
shift = CICNR_WCC1T_SHIFT;
|
||||
break;
|
||||
case QEIC_CIPYCC:
|
||||
shift = CICNR_YCC1T_SHIFT;
|
||||
break;
|
||||
case QEIC_CIPXCC:
|
||||
shift = CICNR_XCC1T_SHIFT;
|
||||
break;
|
||||
case QEIC_CIPRTA:
|
||||
shift = CRICR_RTA1T_SHIFT;
|
||||
control_reg = QEIC_CRICR;
|
||||
break;
|
||||
case QEIC_CIPRTB:
|
||||
shift = CRICR_RTB1T_SHIFT;
|
||||
control_reg = QEIC_CRICR;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
shift += (2 - priority) * 2;
|
||||
temp = qe_ic_read(qe_ic->regs, control_reg);
|
||||
temp &= ~(SIGNAL_MASK << shift);
|
||||
temp |= (high ? SIGNAL_HIGH : SIGNAL_LOW) << shift;
|
||||
qe_ic_write(qe_ic->regs, control_reg, temp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct bus_type qe_ic_subsys = {
|
||||
.name = "qe_ic",
|
||||
.dev_name = "qe_ic",
|
||||
};
|
||||
|
||||
static struct device device_qe_ic = {
|
||||
.id = 0,
|
||||
.bus = &qe_ic_subsys,
|
||||
};
|
||||
|
||||
static int __init init_qe_ic_sysfs(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
|
||||
|
||||
rc = subsys_system_register(&qe_ic_subsys, NULL);
|
||||
if (rc) {
|
||||
printk(KERN_ERR "Failed registering qe_ic sys class\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
rc = device_register(&device_qe_ic);
|
||||
if (rc) {
|
||||
printk(KERN_ERR "Failed registering qe_ic sys device\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
subsys_initcall(init_qe_ic_sysfs);
|
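qe_ic_init() above expects the platform to supply low/high cascade handlers; the real ones live in board code, not in this commit. A hedged sketch of a low-priority handler, assuming qe_ic_get_low_irq() and NO_IRQ are visible through <soc/fsl/qe/qe_ic.h> and the generic irq headers:

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <soc/fsl/qe/qe_ic.h>

static void example_qe_ic_cascade_low(struct irq_desc *desc)
{
	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

	/* NO_IRQ means nothing is pending on the QE side right now */
	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
}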
103
drivers/soc/fsl/qe/qe_ic.h
Normal file
@@ -0,0 +1,103 @@
/*
 * drivers/soc/fsl/qe/qe_ic.h
 *
 * QUICC ENGINE Interrupt Controller Header
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Li Yang <leoli@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#ifndef _POWERPC_SYSDEV_QE_IC_H
#define _POWERPC_SYSDEV_QE_IC_H

#include <soc/fsl/qe/qe_ic.h>

#define NR_QE_IC_INTS           64

/* QE IC registers offset */
#define QEIC_CICR               0x00
#define QEIC_CIVEC              0x04
#define QEIC_CRIPNR             0x08
#define QEIC_CIPNR              0x0c
#define QEIC_CIPXCC             0x10
#define QEIC_CIPYCC             0x14
#define QEIC_CIPWCC             0x18
#define QEIC_CIPZCC             0x1c
#define QEIC_CIMR               0x20
#define QEIC_CRIMR              0x24
#define QEIC_CICNR              0x28
#define QEIC_CIPRTA             0x30
#define QEIC_CIPRTB             0x34
#define QEIC_CRICR              0x3c
#define QEIC_CHIVEC             0x60

/* Interrupt priority registers */
#define CIPCC_SHIFT_PRI0        29
#define CIPCC_SHIFT_PRI1        26
#define CIPCC_SHIFT_PRI2        23
#define CIPCC_SHIFT_PRI3        20
#define CIPCC_SHIFT_PRI4        13
#define CIPCC_SHIFT_PRI5        10
#define CIPCC_SHIFT_PRI6        7
#define CIPCC_SHIFT_PRI7        4

/* CICR priority modes */
#define CICR_GWCC               0x00040000
#define CICR_GXCC               0x00020000
#define CICR_GYCC               0x00010000
#define CICR_GZCC               0x00080000
#define CICR_GRTA               0x00200000
#define CICR_GRTB               0x00400000
#define CICR_HPIT_SHIFT         8
#define CICR_HPIT_MASK          0x00000300
#define CICR_HP_SHIFT           24
#define CICR_HP_MASK            0x3f000000

/* CICNR */
#define CICNR_WCC1T_SHIFT       20
#define CICNR_ZCC1T_SHIFT       28
#define CICNR_YCC1T_SHIFT       12
#define CICNR_XCC1T_SHIFT       4

/* CRICR */
#define CRICR_RTA1T_SHIFT       20
#define CRICR_RTB1T_SHIFT       28

/* Signal indicator */
#define SIGNAL_MASK             3
#define SIGNAL_HIGH             2
#define SIGNAL_LOW              0

struct qe_ic {
        /* Control registers offset */
        volatile u32 __iomem *regs;

        /* The remapper for this QEIC */
        struct irq_domain *irqhost;

        /* The "linux" controller struct */
        struct irq_chip hc_irq;

        /* VIRQ numbers of QE high/low irqs */
        unsigned int virq_high;
        unsigned int virq_low;
};

/*
 * QE interrupt controller internal structure
 */
struct qe_ic_info {
        u32 mask;       /* location of this source at the QIMR register. */
        u32 mask_reg;   /* Mask register offset */
        u8 pri_code;    /* for grouped interrupts sources - the interrupt
                           code as appears at the group priority register */
        u32 pri_reg;    /* Group priority register offset */
};

#endif /* _POWERPC_SYSDEV_QE_IC_H */
192
drivers/soc/fsl/qe/qe_io.c
Normal file
@@ -0,0 +1,192 @@
/*
 * arch/powerpc/sysdev/qe_lib/qe_io.c
 *
 * QE Parallel I/O ports configuration routines
 *
 * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Li Yang <LeoLi@freescale.com>
 * Based on code from Shlomi Gridish <gridish@freescale.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/ioport.h>

#include <asm/io.h>
#include <soc/fsl/qe/qe.h>
#include <asm/prom.h>
#include <sysdev/fsl_soc.h>

#undef DEBUG

static struct qe_pio_regs __iomem *par_io;
static int num_par_io_ports = 0;

int par_io_init(struct device_node *np)
{
        struct resource res;
        int ret;
        const u32 *num_ports;

        /* Map Parallel I/O ports registers */
        ret = of_address_to_resource(np, 0, &res);
        if (ret)
                return ret;
        par_io = ioremap(res.start, resource_size(&res));

        num_ports = of_get_property(np, "num-ports", NULL);
        if (num_ports)
                num_par_io_ports = *num_ports;

        return 0;
}

void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
                         int open_drain, int assignment, int has_irq)
{
        u32 pin_mask1bit;
        u32 pin_mask2bits;
        u32 new_mask2bits;
        u32 tmp_val;

        /* calculate pin location for single and 2 bits information */
        pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));

        /* Set open drain, if required */
        tmp_val = in_be32(&par_io->cpodr);
        if (open_drain)
                out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
        else
                out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);

        /* define direction */
        tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
                in_be32(&par_io->cpdir2) :
                in_be32(&par_io->cpdir1);

        /* get all bits mask for 2 bit per port */
        pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
                                (pin % (QE_PIO_PINS / 2) + 1) * 2));

        /* Get the final mask we need for the right definition */
        new_mask2bits = (u32) (dir << (QE_PIO_PINS -
                                (pin % (QE_PIO_PINS / 2) + 1) * 2));

        /* clear and set 2 bits mask */
        if (pin > (QE_PIO_PINS / 2) - 1) {
                out_be32(&par_io->cpdir2,
                         ~pin_mask2bits & tmp_val);
                tmp_val &= ~pin_mask2bits;
                out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
        } else {
                out_be32(&par_io->cpdir1,
                         ~pin_mask2bits & tmp_val);
                tmp_val &= ~pin_mask2bits;
                out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
        }
        /* define pin assignment */
        tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
                in_be32(&par_io->cppar2) :
                in_be32(&par_io->cppar1);

        new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
                        (pin % (QE_PIO_PINS / 2) + 1) * 2));
        /* clear and set 2 bits mask */
        if (pin > (QE_PIO_PINS / 2) - 1) {
                out_be32(&par_io->cppar2,
                         ~pin_mask2bits & tmp_val);
                tmp_val &= ~pin_mask2bits;
                out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
        } else {
                out_be32(&par_io->cppar1,
                         ~pin_mask2bits & tmp_val);
                tmp_val &= ~pin_mask2bits;
                out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
        }
}
EXPORT_SYMBOL(__par_io_config_pin);

int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
                      int assignment, int has_irq)
{
        if (!par_io || port >= num_par_io_ports)
                return -EINVAL;

        __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
                            has_irq);
        return 0;
}
EXPORT_SYMBOL(par_io_config_pin);

int par_io_data_set(u8 port, u8 pin, u8 val)
{
        u32 pin_mask, tmp_val;

        if (port >= num_par_io_ports)
                return -EINVAL;
        if (pin >= QE_PIO_PINS)
                return -EINVAL;
        /* calculate pin location */
        pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));

        tmp_val = in_be32(&par_io[port].cpdata);

        if (val == 0)   /* clear */
                out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val);
        else            /* set */
                out_be32(&par_io[port].cpdata, pin_mask | tmp_val);

        return 0;
}
EXPORT_SYMBOL(par_io_data_set);

int par_io_of_config(struct device_node *np)
{
        struct device_node *pio;
        const phandle *ph;
        int pio_map_len;
        const unsigned int *pio_map;

        if (par_io == NULL) {
                printk(KERN_ERR "par_io not initialized\n");
                return -1;
        }

        ph = of_get_property(np, "pio-handle", NULL);
        if (ph == NULL) {
                printk(KERN_ERR "pio-handle not available\n");
                return -1;
        }

        pio = of_find_node_by_phandle(*ph);

        pio_map = of_get_property(pio, "pio-map", &pio_map_len);
        if (pio_map == NULL) {
                printk(KERN_ERR "pio-map is not set!\n");
                return -1;
        }
        pio_map_len /= sizeof(unsigned int);
        if ((pio_map_len % 6) != 0) {
                printk(KERN_ERR "pio-map format wrong!\n");
                return -1;
        }

        while (pio_map_len > 0) {
                par_io_config_pin((u8) pio_map[0], (u8) pio_map[1],
                                (int) pio_map[2], (int) pio_map[3],
                                (int) pio_map[4], (int) pio_map[5]);
                pio_map += 6;
                pio_map_len -= 6;
        }
        of_node_put(pio);
        return 0;
}
EXPORT_SYMBOL(par_io_of_config);
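
par_io_of_config() walks the "pio-map" property six cells at a time, <port pin dir open_drain assignment has_irq>, handing each entry to par_io_config_pin(). The sketch below shows the hand-written equivalent of one such entry; it is illustrative only, and the numbers are placeholders rather than values from any real board.

/* Illustrative only: configure port 1, pin 3 as an output (dir = 1),
 * no open drain, dedicated function 2, no interrupt - the same effect
 * as a <1 3 1 0 2 0> entry in a pio-map, assuming par_io_init() has
 * already mapped the controller. */
static int example_config_one_pin(void)
{
        return par_io_config_pin(1, 3, 1, 0, 2, 0);
}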
212
drivers/soc/fsl/qe/ucc.c
Normal file
@@ -0,0 +1,212 @@
/*
 * arch/powerpc/sysdev/qe_lib/ucc.c
 *
 * QE UCC API Set - UCC specific routines implementations.
 *
 * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Authors: Shlomi Gridish <gridish@freescale.com>
 *          Li Yang <leoli@freescale.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stddef.h>
#include <linux/spinlock.h>
#include <linux/export.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>
#include <soc/fsl/qe/ucc.h>

int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
{
        unsigned long flags;

        if (ucc_num > UCC_MAX_NUM - 1)
                return -EINVAL;

        spin_lock_irqsave(&cmxgcr_lock, flags);
        clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
                ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
        spin_unlock_irqrestore(&cmxgcr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);

/* Configure the UCC to either Slow or Fast.
 *
 * A given UCC can be configured to support either "slow" devices (e.g. UART)
 * or "fast" devices (e.g. Ethernet).
 *
 * 'ucc_num' is the UCC number, from 0 - 7.
 *
 * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
 * must always be set to 1.
 */
int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
{
        u8 __iomem *guemr;

        /* The GUEMR register is at the same location for both slow and fast
           devices, so we just use uccX.slow.guemr. */
        switch (ucc_num) {
        case 0: guemr = &qe_immr->ucc1.slow.guemr;
                break;
        case 1: guemr = &qe_immr->ucc2.slow.guemr;
                break;
        case 2: guemr = &qe_immr->ucc3.slow.guemr;
                break;
        case 3: guemr = &qe_immr->ucc4.slow.guemr;
                break;
        case 4: guemr = &qe_immr->ucc5.slow.guemr;
                break;
        case 5: guemr = &qe_immr->ucc6.slow.guemr;
                break;
        case 6: guemr = &qe_immr->ucc7.slow.guemr;
                break;
        case 7: guemr = &qe_immr->ucc8.slow.guemr;
                break;
        default:
                return -EINVAL;
        }

        clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
                UCC_GUEMR_SET_RESERVED3 | speed);

        return 0;
}

static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
        unsigned int *reg_num, unsigned int *shift)
{
        unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);

        *reg_num = cmx + 1;
        *cmxucr = &qe_immr->qmx.cmxucr[cmx];
        *shift = 16 - 8 * (ucc_num & 2);
}
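
The two expressions in get_cmxucr_reg() encode a fixed lookup; spelled out (derived directly from the arithmetic above and added here only as a reference comment, not as part of the patch):

/*
 * ucc_num is zero-based (0 = UCC1). get_cmxucr_reg() resolves it as:
 *
 *   UCC1 -> CMXUCR1, shift 16    UCC3 -> CMXUCR1, shift 0
 *   UCC5 -> CMXUCR2, shift 16    UCC7 -> CMXUCR2, shift 0
 *   UCC2 -> CMXUCR3, shift 16    UCC4 -> CMXUCR3, shift 0
 *   UCC6 -> CMXUCR4, shift 16    UCC8 -> CMXUCR4, shift 0
 *
 * Each CMXUCR register therefore serves two UCCs; the Tx clock field
 * starts at the returned shift and the Rx field sits 4 bits above it
 * (see ucc_set_qe_mux_rxtx() below).
 */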

int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
{
        __be32 __iomem *cmxucr;
        unsigned int reg_num;
        unsigned int shift;

        /* check if the UCC number is in range. */
        if (ucc_num > UCC_MAX_NUM - 1)
                return -EINVAL;

        get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);

        if (set)
                setbits32(cmxucr, mask << shift);
        else
                clrbits32(cmxucr, mask << shift);

        return 0;
}

int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
        enum comm_dir mode)
{
        __be32 __iomem *cmxucr;
        unsigned int reg_num;
        unsigned int shift;
        u32 clock_bits = 0;

        /* check if the UCC number is in range. */
        if (ucc_num > UCC_MAX_NUM - 1)
                return -EINVAL;

        /* The communications direction must be RX or TX */
        if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
                return -EINVAL;

        get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);

        switch (reg_num) {
        case 1:
                switch (clock) {
                case QE_BRG1:   clock_bits = 1;  break;
                case QE_BRG2:   clock_bits = 2;  break;
                case QE_BRG7:   clock_bits = 3;  break;
                case QE_BRG8:   clock_bits = 4;  break;
                case QE_CLK9:   clock_bits = 5;  break;
                case QE_CLK10:  clock_bits = 6;  break;
                case QE_CLK11:  clock_bits = 7;  break;
                case QE_CLK12:  clock_bits = 8;  break;
                case QE_CLK15:  clock_bits = 9;  break;
                case QE_CLK16:  clock_bits = 10; break;
                default:        break;
                }
                break;
        case 2:
                switch (clock) {
                case QE_BRG5:   clock_bits = 1;  break;
                case QE_BRG6:   clock_bits = 2;  break;
                case QE_BRG7:   clock_bits = 3;  break;
                case QE_BRG8:   clock_bits = 4;  break;
                case QE_CLK13:  clock_bits = 5;  break;
                case QE_CLK14:  clock_bits = 6;  break;
                case QE_CLK19:  clock_bits = 7;  break;
                case QE_CLK20:  clock_bits = 8;  break;
                case QE_CLK15:  clock_bits = 9;  break;
                case QE_CLK16:  clock_bits = 10; break;
                default:        break;
                }
                break;
        case 3:
                switch (clock) {
                case QE_BRG9:   clock_bits = 1;  break;
                case QE_BRG10:  clock_bits = 2;  break;
                case QE_BRG15:  clock_bits = 3;  break;
                case QE_BRG16:  clock_bits = 4;  break;
                case QE_CLK3:   clock_bits = 5;  break;
                case QE_CLK4:   clock_bits = 6;  break;
                case QE_CLK17:  clock_bits = 7;  break;
                case QE_CLK18:  clock_bits = 8;  break;
                case QE_CLK7:   clock_bits = 9;  break;
                case QE_CLK8:   clock_bits = 10; break;
                case QE_CLK16:  clock_bits = 11; break;
                default:        break;
                }
                break;
        case 4:
                switch (clock) {
                case QE_BRG13:  clock_bits = 1;  break;
                case QE_BRG14:  clock_bits = 2;  break;
                case QE_BRG15:  clock_bits = 3;  break;
                case QE_BRG16:  clock_bits = 4;  break;
                case QE_CLK5:   clock_bits = 5;  break;
                case QE_CLK6:   clock_bits = 6;  break;
                case QE_CLK21:  clock_bits = 7;  break;
                case QE_CLK22:  clock_bits = 8;  break;
                case QE_CLK7:   clock_bits = 9;  break;
                case QE_CLK8:   clock_bits = 10; break;
                case QE_CLK16:  clock_bits = 11; break;
                default:        break;
                }
                break;
        default: break;
        }

        /* Check for invalid combination of clock and UCC number */
        if (!clock_bits)
                return -ENOENT;

        if (mode == COMM_DIR_RX)
                shift += 4;

        clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
                clock_bits << shift);

        return 0;
}
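
Only the clock and BRG inputs listed in the per-register switch above are routable to a given UCC; anything else falls through to -ENOENT. A caller sketch follows, illustrative only, with clock choices that simply happen to be legal for CMXUCR1.

/* Illustrative only: route QE_BRG1 as the Tx clock and QE_CLK9 as the
 * Rx clock of UCC1 (ucc_num 0). */
static int example_route_ucc1_clocks(void)
{
        int ret;

        ret = ucc_set_qe_mux_rxtx(0, QE_BRG1, COMM_DIR_TX);
        if (ret)
                return ret;

        return ucc_set_qe_mux_rxtx(0, QE_CLK9, COMM_DIR_RX);
}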
363
drivers/soc/fsl/qe/ucc_fast.c
Normal file
@@ -0,0 +1,363 @@
|
||||
/*
|
||||
* Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* Authors: Shlomi Gridish <gridish@freescale.com>
|
||||
* Li Yang <leoli@freescale.com>
|
||||
*
|
||||
* Description:
|
||||
* QE UCC Fast API Set - UCC Fast specific routines implementations.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <soc/fsl/qe/immap_qe.h>
|
||||
#include <soc/fsl/qe/qe.h>
|
||||
|
||||
#include <soc/fsl/qe/ucc.h>
|
||||
#include <soc/fsl/qe/ucc_fast.h>
|
||||
|
||||
void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
|
||||
{
|
||||
printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
|
||||
printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
|
||||
|
||||
printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
|
||||
&uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
|
||||
printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
|
||||
&uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
|
||||
printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
|
||||
printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
|
||||
printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
|
||||
&uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
|
||||
printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
|
||||
&uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
|
||||
printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
|
||||
&uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
|
||||
printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
|
||||
&uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
|
||||
printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
|
||||
printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
|
||||
printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
|
||||
printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
|
||||
&uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
|
||||
printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
|
||||
printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
|
||||
printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
|
||||
printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
|
||||
&uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
|
||||
printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
|
||||
&uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
|
||||
printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
|
||||
&uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_fast_dump_regs);
|
||||
|
||||
u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
|
||||
{
|
||||
switch (uccf_num) {
|
||||
case 0: return QE_CR_SUBBLOCK_UCCFAST1;
|
||||
case 1: return QE_CR_SUBBLOCK_UCCFAST2;
|
||||
case 2: return QE_CR_SUBBLOCK_UCCFAST3;
|
||||
case 3: return QE_CR_SUBBLOCK_UCCFAST4;
|
||||
case 4: return QE_CR_SUBBLOCK_UCCFAST5;
|
||||
case 5: return QE_CR_SUBBLOCK_UCCFAST6;
|
||||
case 6: return QE_CR_SUBBLOCK_UCCFAST7;
|
||||
case 7: return QE_CR_SUBBLOCK_UCCFAST8;
|
||||
default: return QE_CR_SUBBLOCK_INVALID;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
|
||||
|
||||
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
|
||||
{
|
||||
out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
|
||||
|
||||
void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
|
||||
{
|
||||
struct ucc_fast __iomem *uf_regs;
|
||||
u32 gumr;
|
||||
|
||||
uf_regs = uccf->uf_regs;
|
||||
|
||||
/* Enable reception and/or transmission on this UCC. */
|
||||
gumr = in_be32(&uf_regs->gumr);
|
||||
if (mode & COMM_DIR_TX) {
|
||||
gumr |= UCC_FAST_GUMR_ENT;
|
||||
uccf->enabled_tx = 1;
|
||||
}
|
||||
if (mode & COMM_DIR_RX) {
|
||||
gumr |= UCC_FAST_GUMR_ENR;
|
||||
uccf->enabled_rx = 1;
|
||||
}
|
||||
out_be32(&uf_regs->gumr, gumr);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_fast_enable);
|
||||
|
||||
void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
|
||||
{
|
||||
struct ucc_fast __iomem *uf_regs;
|
||||
u32 gumr;
|
||||
|
||||
uf_regs = uccf->uf_regs;
|
||||
|
||||
/* Disable reception and/or transmission on this UCC. */
|
||||
gumr = in_be32(&uf_regs->gumr);
|
||||
if (mode & COMM_DIR_TX) {
|
||||
gumr &= ~UCC_FAST_GUMR_ENT;
|
||||
uccf->enabled_tx = 0;
|
||||
}
|
||||
if (mode & COMM_DIR_RX) {
|
||||
gumr &= ~UCC_FAST_GUMR_ENR;
|
||||
uccf->enabled_rx = 0;
|
||||
}
|
||||
out_be32(&uf_regs->gumr, gumr);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_fast_disable);
|
||||
|
||||
int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
|
||||
{
|
||||
struct ucc_fast_private *uccf;
|
||||
struct ucc_fast __iomem *uf_regs;
|
||||
u32 gumr;
|
||||
int ret;
|
||||
|
||||
if (!uf_info)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if the UCC port number is in range. */
|
||||
if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
|
||||
printk(KERN_ERR "%s: illegal UCC number\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check that 'max_rx_buf_length' is properly aligned (4). */
|
||||
if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
|
||||
printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
|
||||
__func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Validate Virtual Fifo register values */
|
||||
if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
|
||||
printk(KERN_ERR "%s: urfs is too small\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
|
||||
printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
|
||||
printk(KERN_ERR "%s: urfet is not aligned.\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
|
||||
printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
|
||||
printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
|
||||
printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
|
||||
printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
|
||||
if (!uccf) {
|
||||
printk(KERN_ERR "%s: Cannot allocate private data\n",
|
||||
__func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Fill fast UCC structure */
|
||||
uccf->uf_info = uf_info;
|
||||
/* Set the PHY base address */
|
||||
uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
|
||||
if (uccf->uf_regs == NULL) {
|
||||
printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
|
||||
kfree(uccf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
uccf->enabled_tx = 0;
|
||||
uccf->enabled_rx = 0;
|
||||
uccf->stopped_tx = 0;
|
||||
uccf->stopped_rx = 0;
|
||||
uf_regs = uccf->uf_regs;
|
||||
uccf->p_ucce = &uf_regs->ucce;
|
||||
uccf->p_uccm = &uf_regs->uccm;
|
||||
#ifdef CONFIG_UGETH_TX_ON_DEMAND
|
||||
uccf->p_utodr = &uf_regs->utodr;
|
||||
#endif
|
||||
#ifdef STATISTICS
|
||||
uccf->tx_frames = 0;
|
||||
uccf->rx_frames = 0;
|
||||
uccf->rx_discarded = 0;
|
||||
#endif /* STATISTICS */
|
||||
|
||||
/* Set UCC to fast type */
|
||||
ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
|
||||
ucc_fast_free(uccf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uccf->mrblr = uf_info->max_rx_buf_length;
|
||||
|
||||
/* Set GUMR */
|
||||
/* For more details see the hardware spec. */
|
||||
gumr = uf_info->ttx_trx;
|
||||
if (uf_info->tci)
|
||||
gumr |= UCC_FAST_GUMR_TCI;
|
||||
if (uf_info->cdp)
|
||||
gumr |= UCC_FAST_GUMR_CDP;
|
||||
if (uf_info->ctsp)
|
||||
gumr |= UCC_FAST_GUMR_CTSP;
|
||||
if (uf_info->cds)
|
||||
gumr |= UCC_FAST_GUMR_CDS;
|
||||
if (uf_info->ctss)
|
||||
gumr |= UCC_FAST_GUMR_CTSS;
|
||||
if (uf_info->txsy)
|
||||
gumr |= UCC_FAST_GUMR_TXSY;
|
||||
if (uf_info->rsyn)
|
||||
gumr |= UCC_FAST_GUMR_RSYN;
|
||||
gumr |= uf_info->synl;
|
||||
if (uf_info->rtsm)
|
||||
gumr |= UCC_FAST_GUMR_RTSM;
|
||||
gumr |= uf_info->renc;
|
||||
if (uf_info->revd)
|
||||
gumr |= UCC_FAST_GUMR_REVD;
|
||||
gumr |= uf_info->tenc;
|
||||
gumr |= uf_info->tcrc;
|
||||
gumr |= uf_info->mode;
|
||||
out_be32(&uf_regs->gumr, gumr);
|
||||
|
||||
/* Allocate memory for Tx Virtual Fifo */
|
||||
uccf->ucc_fast_tx_virtual_fifo_base_offset =
|
||||
qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
|
||||
if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
|
||||
printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
|
||||
__func__);
|
||||
uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
|
||||
ucc_fast_free(uccf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Allocate memory for Rx Virtual Fifo */
|
||||
uccf->ucc_fast_rx_virtual_fifo_base_offset =
|
||||
qe_muram_alloc(uf_info->urfs +
|
||||
UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
|
||||
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
|
||||
if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
|
||||
printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
|
||||
__func__);
|
||||
uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
|
||||
ucc_fast_free(uccf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Set Virtual Fifo registers */
|
||||
out_be16(&uf_regs->urfs, uf_info->urfs);
|
||||
out_be16(&uf_regs->urfet, uf_info->urfet);
|
||||
out_be16(&uf_regs->urfset, uf_info->urfset);
|
||||
out_be16(&uf_regs->utfs, uf_info->utfs);
|
||||
out_be16(&uf_regs->utfet, uf_info->utfet);
|
||||
out_be16(&uf_regs->utftt, uf_info->utftt);
|
||||
/* utfb, urfb are offsets from MURAM base */
|
||||
out_be32(&uf_regs->utfb, uccf->ucc_fast_tx_virtual_fifo_base_offset);
|
||||
out_be32(&uf_regs->urfb, uccf->ucc_fast_rx_virtual_fifo_base_offset);
|
||||
|
||||
/* Mux clocking */
|
||||
/* Grant Support */
|
||||
ucc_set_qe_mux_grant(uf_info->ucc_num, uf_info->grant_support);
|
||||
/* Breakpoint Support */
|
||||
ucc_set_qe_mux_bkpt(uf_info->ucc_num, uf_info->brkpt_support);
|
||||
/* Set Tsa or NMSI mode. */
|
||||
ucc_set_qe_mux_tsa(uf_info->ucc_num, uf_info->tsa);
|
||||
/* If NMSI (not Tsa), set Tx and Rx clock. */
|
||||
if (!uf_info->tsa) {
|
||||
/* Rx clock routing */
|
||||
if ((uf_info->rx_clock != QE_CLK_NONE) &&
|
||||
ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
|
||||
COMM_DIR_RX)) {
|
||||
printk(KERN_ERR "%s: illegal value for RX clock\n",
|
||||
__func__);
|
||||
ucc_fast_free(uccf);
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Tx clock routing */
|
||||
if ((uf_info->tx_clock != QE_CLK_NONE) &&
|
||||
ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
|
||||
COMM_DIR_TX)) {
|
||||
printk(KERN_ERR "%s: illegal value for TX clock\n",
|
||||
__func__);
|
||||
ucc_fast_free(uccf);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set interrupt mask register at UCC level. */
|
||||
out_be32(&uf_regs->uccm, uf_info->uccm_mask);
|
||||
|
||||
/* First, clear anything pending at UCC level,
|
||||
* otherwise, old garbage may come through
|
||||
* as soon as the dam is opened. */
|
||||
|
||||
/* Writing '1' clears */
|
||||
out_be32(&uf_regs->ucce, 0xffffffff);
|
||||
|
||||
*uccf_ret = uccf;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_fast_init);
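
ucc_fast_init() expects the caller to have filled in struct ucc_fast_info beforehand: the UCC number, the register block address, an aligned max_rx_buf_length, the virtual FIFO sizes and thresholds, and, for NMSI operation, the Rx/Tx clock routing. The sketch below shows that flow; every value in it is a placeholder (real ones come from the device tree and the SoC reference manual), and example_fast_setup() is a hypothetical caller, not part of the patch.

/* Illustrative only: minimal bring-up of one fast UCC. */
static int example_fast_setup(struct ucc_fast_info *uf_info)
{
        struct ucc_fast_private *uccf;
        int ret;

        uf_info->ucc_num = 0;                   /* UCC1 */
        uf_info->max_rx_buf_length = 1536;      /* must be a multiple of 4 */
        uf_info->urfs = UCC_FAST_URFS_MIN_VAL;  /* Rx virtual FIFO size */
        /* urfet, urfset, utfs, utfet, utftt etc. assumed filled elsewhere */
        uf_info->tsa = 0;                       /* NMSI: route clocks below */
        uf_info->rx_clock = QE_CLK9;
        uf_info->tx_clock = QE_BRG1;

        ret = ucc_fast_init(uf_info, &uccf);
        if (ret)
                return ret;

        ucc_fast_enable(uccf, COMM_DIR_RX | COMM_DIR_TX);
        return 0;
}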
|
||||
|
||||
void ucc_fast_free(struct ucc_fast_private * uccf)
|
||||
{
|
||||
if (!uccf)
|
||||
return;
|
||||
|
||||
if (uccf->ucc_fast_tx_virtual_fifo_base_offset)
|
||||
qe_muram_free(uccf->ucc_fast_tx_virtual_fifo_base_offset);
|
||||
|
||||
if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
|
||||
qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
|
||||
|
||||
if (uccf->uf_regs)
|
||||
iounmap(uccf->uf_regs);
|
||||
|
||||
kfree(uccf);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_fast_free);
374
drivers/soc/fsl/qe/ucc_slow.c
Normal file
@@ -0,0 +1,374 @@
|
||||
/*
|
||||
* Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
|
||||
*
|
||||
* Authors: Shlomi Gridish <gridish@freescale.com>
|
||||
* Li Yang <leoli@freescale.com>
|
||||
*
|
||||
* Description:
|
||||
* QE UCC Slow API Set - UCC Slow specific routines implementations.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the
|
||||
* Free Software Foundation; either version 2 of the License, or (at your
|
||||
* option) any later version.
|
||||
*/
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <soc/fsl/qe/immap_qe.h>
|
||||
#include <soc/fsl/qe/qe.h>
|
||||
|
||||
#include <soc/fsl/qe/ucc.h>
|
||||
#include <soc/fsl/qe/ucc_slow.h>
|
||||
|
||||
u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
|
||||
{
|
||||
switch (uccs_num) {
|
||||
case 0: return QE_CR_SUBBLOCK_UCCSLOW1;
|
||||
case 1: return QE_CR_SUBBLOCK_UCCSLOW2;
|
||||
case 2: return QE_CR_SUBBLOCK_UCCSLOW3;
|
||||
case 3: return QE_CR_SUBBLOCK_UCCSLOW4;
|
||||
case 4: return QE_CR_SUBBLOCK_UCCSLOW5;
|
||||
case 5: return QE_CR_SUBBLOCK_UCCSLOW6;
|
||||
case 6: return QE_CR_SUBBLOCK_UCCSLOW7;
|
||||
case 7: return QE_CR_SUBBLOCK_UCCSLOW8;
|
||||
default: return QE_CR_SUBBLOCK_INVALID;
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
|
||||
|
||||
void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
|
||||
{
|
||||
struct ucc_slow_info *us_info = uccs->us_info;
|
||||
u32 id;
|
||||
|
||||
id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
|
||||
qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
|
||||
QE_CR_PROTOCOL_UNSPECIFIED, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
|
||||
|
||||
void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
|
||||
{
|
||||
struct ucc_slow_info *us_info = uccs->us_info;
|
||||
u32 id;
|
||||
|
||||
id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
|
||||
qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_stop_tx);
|
||||
|
||||
void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
|
||||
{
|
||||
struct ucc_slow_info *us_info = uccs->us_info;
|
||||
u32 id;
|
||||
|
||||
id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
|
||||
qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_restart_tx);
|
||||
|
||||
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
|
||||
{
|
||||
struct ucc_slow *us_regs;
|
||||
u32 gumr_l;
|
||||
|
||||
us_regs = uccs->us_regs;
|
||||
|
||||
/* Enable reception and/or transmission on this UCC. */
|
||||
gumr_l = in_be32(&us_regs->gumr_l);
|
||||
if (mode & COMM_DIR_TX) {
|
||||
gumr_l |= UCC_SLOW_GUMR_L_ENT;
|
||||
uccs->enabled_tx = 1;
|
||||
}
|
||||
if (mode & COMM_DIR_RX) {
|
||||
gumr_l |= UCC_SLOW_GUMR_L_ENR;
|
||||
uccs->enabled_rx = 1;
|
||||
}
|
||||
out_be32(&us_regs->gumr_l, gumr_l);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_enable);
|
||||
|
||||
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
|
||||
{
|
||||
struct ucc_slow *us_regs;
|
||||
u32 gumr_l;
|
||||
|
||||
us_regs = uccs->us_regs;
|
||||
|
||||
/* Disable reception and/or transmission on this UCC. */
|
||||
gumr_l = in_be32(&us_regs->gumr_l);
|
||||
if (mode & COMM_DIR_TX) {
|
||||
gumr_l &= ~UCC_SLOW_GUMR_L_ENT;
|
||||
uccs->enabled_tx = 0;
|
||||
}
|
||||
if (mode & COMM_DIR_RX) {
|
||||
gumr_l &= ~UCC_SLOW_GUMR_L_ENR;
|
||||
uccs->enabled_rx = 0;
|
||||
}
|
||||
out_be32(&us_regs->gumr_l, gumr_l);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_disable);
|
||||
|
||||
/* Initialize the UCC for Slow operations
|
||||
*
|
||||
* The caller should initialize the following us_info
|
||||
*/
|
||||
int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
|
||||
{
|
||||
struct ucc_slow_private *uccs;
|
||||
u32 i;
|
||||
struct ucc_slow __iomem *us_regs;
|
||||
u32 gumr;
|
||||
struct qe_bd *bd;
|
||||
u32 id;
|
||||
u32 command;
|
||||
int ret = 0;
|
||||
|
||||
if (!us_info)
|
||||
return -EINVAL;
|
||||
|
||||
/* check if the UCC port number is in range. */
|
||||
if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
|
||||
printk(KERN_ERR "%s: illegal UCC number\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set mrblr
|
||||
* Check that 'max_rx_buf_length' is properly aligned (4), unless
|
||||
* rfw is 1, meaning that QE accepts one byte at a time, unlike normal
|
||||
* case when QE accepts 32 bits at a time.
|
||||
*/
|
||||
if ((!us_info->rfw) &&
|
||||
(us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
|
||||
printk(KERN_ERR "max_rx_buf_length not aligned.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
|
||||
if (!uccs) {
|
||||
printk(KERN_ERR "%s: Cannot allocate private data\n",
|
||||
__func__);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Fill slow UCC structure */
|
||||
uccs->us_info = us_info;
|
||||
/* Set the PHY base address */
|
||||
uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
|
||||
if (uccs->us_regs == NULL) {
|
||||
printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
|
||||
kfree(uccs);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
uccs->saved_uccm = 0;
|
||||
uccs->p_rx_frame = 0;
|
||||
us_regs = uccs->us_regs;
|
||||
uccs->p_ucce = (u16 *) & (us_regs->ucce);
|
||||
uccs->p_uccm = (u16 *) & (us_regs->uccm);
|
||||
#ifdef STATISTICS
|
||||
uccs->rx_frames = 0;
|
||||
uccs->tx_frames = 0;
|
||||
uccs->rx_discarded = 0;
|
||||
#endif /* STATISTICS */
|
||||
|
||||
/* Get PRAM base */
|
||||
uccs->us_pram_offset =
|
||||
qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
|
||||
if (IS_ERR_VALUE(uccs->us_pram_offset)) {
|
||||
printk(KERN_ERR "%s: cannot allocate MURAM for PRAM", __func__);
|
||||
ucc_slow_free(uccs);
|
||||
return -ENOMEM;
|
||||
}
|
||||
id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
|
||||
qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
|
||||
uccs->us_pram_offset);
|
||||
|
||||
uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
|
||||
|
||||
/* Set UCC to slow type */
|
||||
ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
|
||||
if (ret) {
|
||||
printk(KERN_ERR "%s: cannot set UCC type", __func__);
|
||||
ucc_slow_free(uccs);
|
||||
return ret;
|
||||
}
|
||||
|
||||
out_be16(&uccs->us_pram->mrblr, us_info->max_rx_buf_length);
|
||||
|
||||
INIT_LIST_HEAD(&uccs->confQ);
|
||||
|
||||
/* Allocate BDs. */
|
||||
uccs->rx_base_offset =
|
||||
qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
|
||||
QE_ALIGNMENT_OF_BD);
|
||||
if (IS_ERR_VALUE(uccs->rx_base_offset)) {
|
||||
printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
|
||||
us_info->rx_bd_ring_len);
|
||||
uccs->rx_base_offset = 0;
|
||||
ucc_slow_free(uccs);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
uccs->tx_base_offset =
|
||||
qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
|
||||
QE_ALIGNMENT_OF_BD);
|
||||
if (IS_ERR_VALUE(uccs->tx_base_offset)) {
|
||||
printk(KERN_ERR "%s: cannot allocate TX BDs", __func__);
|
||||
uccs->tx_base_offset = 0;
|
||||
ucc_slow_free(uccs);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/* Init Tx bds */
|
||||
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
|
||||
for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
|
||||
/* clear bd buffer */
|
||||
out_be32(&bd->buf, 0);
|
||||
/* set bd status and length */
|
||||
out_be32((u32 *) bd, 0);
|
||||
bd++;
|
||||
}
|
||||
/* for last BD set Wrap bit */
|
||||
out_be32(&bd->buf, 0);
|
||||
out_be32((u32 *) bd, cpu_to_be32(T_W));
|
||||
|
||||
/* Init Rx bds */
|
||||
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
|
||||
for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
|
||||
/* set bd status and length */
|
||||
out_be32((u32*)bd, 0);
|
||||
/* clear bd buffer */
|
||||
out_be32(&bd->buf, 0);
|
||||
bd++;
|
||||
}
|
||||
/* for last BD set Wrap bit */
|
||||
out_be32((u32*)bd, cpu_to_be32(R_W));
|
||||
out_be32(&bd->buf, 0);
|
||||
|
||||
/* Set GUMR (For more details see the hardware spec.). */
|
||||
/* gumr_h */
|
||||
gumr = us_info->tcrc;
|
||||
if (us_info->cdp)
|
||||
gumr |= UCC_SLOW_GUMR_H_CDP;
|
||||
if (us_info->ctsp)
|
||||
gumr |= UCC_SLOW_GUMR_H_CTSP;
|
||||
if (us_info->cds)
|
||||
gumr |= UCC_SLOW_GUMR_H_CDS;
|
||||
if (us_info->ctss)
|
||||
gumr |= UCC_SLOW_GUMR_H_CTSS;
|
||||
if (us_info->tfl)
|
||||
gumr |= UCC_SLOW_GUMR_H_TFL;
|
||||
if (us_info->rfw)
|
||||
gumr |= UCC_SLOW_GUMR_H_RFW;
|
||||
if (us_info->txsy)
|
||||
gumr |= UCC_SLOW_GUMR_H_TXSY;
|
||||
if (us_info->rtsm)
|
||||
gumr |= UCC_SLOW_GUMR_H_RTSM;
|
||||
out_be32(&us_regs->gumr_h, gumr);
|
||||
|
||||
/* gumr_l */
|
||||
gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
|
||||
us_info->diag | us_info->mode;
|
||||
if (us_info->tci)
|
||||
gumr |= UCC_SLOW_GUMR_L_TCI;
|
||||
if (us_info->rinv)
|
||||
gumr |= UCC_SLOW_GUMR_L_RINV;
|
||||
if (us_info->tinv)
|
||||
gumr |= UCC_SLOW_GUMR_L_TINV;
|
||||
if (us_info->tend)
|
||||
gumr |= UCC_SLOW_GUMR_L_TEND;
|
||||
out_be32(&us_regs->gumr_l, gumr);
|
||||
|
||||
/* Function code registers */
|
||||
|
||||
/* if the data is in cachable memory, the 'global' */
|
||||
/* in the function code should be set. */
|
||||
uccs->us_pram->tbmr = UCC_BMR_BO_BE;
|
||||
uccs->us_pram->rbmr = UCC_BMR_BO_BE;
|
||||
|
||||
/* rbase, tbase are offsets from MURAM base */
|
||||
out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
|
||||
out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
|
||||
|
||||
/* Mux clocking */
|
||||
/* Grant Support */
|
||||
ucc_set_qe_mux_grant(us_info->ucc_num, us_info->grant_support);
|
||||
/* Breakpoint Support */
|
||||
ucc_set_qe_mux_bkpt(us_info->ucc_num, us_info->brkpt_support);
|
||||
/* Set Tsa or NMSI mode. */
|
||||
ucc_set_qe_mux_tsa(us_info->ucc_num, us_info->tsa);
|
||||
/* If NMSI (not Tsa), set Tx and Rx clock. */
|
||||
if (!us_info->tsa) {
|
||||
/* Rx clock routing */
|
||||
if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
|
||||
COMM_DIR_RX)) {
|
||||
printk(KERN_ERR "%s: illegal value for RX clock\n",
|
||||
__func__);
|
||||
ucc_slow_free(uccs);
|
||||
return -EINVAL;
|
||||
}
|
||||
/* Tx clock routing */
|
||||
if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
|
||||
COMM_DIR_TX)) {
|
||||
printk(KERN_ERR "%s: illegal value for TX clock\n",
|
||||
__func__);
|
||||
ucc_slow_free(uccs);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set interrupt mask register at UCC level. */
|
||||
out_be16(&us_regs->uccm, us_info->uccm_mask);
|
||||
|
||||
/* First, clear anything pending at UCC level,
|
||||
* otherwise, old garbage may come through
|
||||
* as soon as the dam is opened. */
|
||||
|
||||
/* Writing '1' clears */
|
||||
out_be16(&us_regs->ucce, 0xffff);
|
||||
|
||||
/* Issue QE Init command */
|
||||
if (us_info->init_tx && us_info->init_rx)
|
||||
command = QE_INIT_TX_RX;
|
||||
else if (us_info->init_tx)
|
||||
command = QE_INIT_TX;
|
||||
else
|
||||
command = QE_INIT_RX; /* We know at least one is TRUE */
|
||||
|
||||
qe_issue_cmd(command, id, us_info->protocol, 0);
|
||||
|
||||
*uccs_ret = uccs;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_init);
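
ucc_slow_init() has the same shape as the fast variant but additionally needs the Tx/Rx buffer-descriptor ring lengths, the init_tx/init_rx flags and a QE protocol for the init command. Again a hedged sketch with placeholder values; example_slow_setup() is hypothetical and not part of the patch.

/* Illustrative only: minimal bring-up of one slow UCC. */
static int example_slow_setup(struct ucc_slow_info *us_info)
{
        struct ucc_slow_private *uccs;
        int ret;

        us_info->ucc_num = 1;                   /* UCC2 */
        us_info->max_rx_buf_length = 32;        /* multiple of 4 unless rfw */
        us_info->rx_bd_ring_len = 8;            /* BD rings live in MURAM */
        us_info->tx_bd_ring_len = 8;
        us_info->init_rx = 1;                   /* at least one must be set */
        us_info->init_tx = 1;
        us_info->protocol = QE_CR_PROTOCOL_UNSPECIFIED;

        ret = ucc_slow_init(us_info, &uccs);
        if (ret)
                return ret;

        ucc_slow_enable(uccs, COMM_DIR_RX | COMM_DIR_TX);
        return 0;
}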
|
||||
|
||||
void ucc_slow_free(struct ucc_slow_private * uccs)
|
||||
{
|
||||
if (!uccs)
|
||||
return;
|
||||
|
||||
if (uccs->rx_base_offset)
|
||||
qe_muram_free(uccs->rx_base_offset);
|
||||
|
||||
if (uccs->tx_base_offset)
|
||||
qe_muram_free(uccs->tx_base_offset);
|
||||
|
||||
if (uccs->us_pram)
|
||||
qe_muram_free(uccs->us_pram_offset);
|
||||
|
||||
if (uccs->us_regs)
|
||||
iounmap(uccs->us_regs);
|
||||
|
||||
kfree(uccs);
|
||||
}
|
||||
EXPORT_SYMBOL(ucc_slow_free);
56
drivers/soc/fsl/qe/usb.c
Normal file
@@ -0,0 +1,56 @@
/*
 * QE USB routines
 *
 * Copyright 2006 Freescale Semiconductor, Inc.
 *                Shlomi Gridish <gridish@freescale.com>
 *                Jerry Huang <Chang-Ming.Huang@freescale.com>
 * Copyright (c) MontaVista Software, Inc. 2008.
 *                Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/io.h>
#include <soc/fsl/qe/immap_qe.h>
#include <soc/fsl/qe/qe.h>

int qe_usb_clock_set(enum qe_clock clk, int rate)
{
        struct qe_mux __iomem *mux = &qe_immr->qmx;
        unsigned long flags;
        u32 val;

        switch (clk) {
        case QE_CLK3:   val = QE_CMXGCR_USBCS_CLK3;     break;
        case QE_CLK5:   val = QE_CMXGCR_USBCS_CLK5;     break;
        case QE_CLK7:   val = QE_CMXGCR_USBCS_CLK7;     break;
        case QE_CLK9:   val = QE_CMXGCR_USBCS_CLK9;     break;
        case QE_CLK13:  val = QE_CMXGCR_USBCS_CLK13;    break;
        case QE_CLK17:  val = QE_CMXGCR_USBCS_CLK17;    break;
        case QE_CLK19:  val = QE_CMXGCR_USBCS_CLK19;    break;
        case QE_CLK21:  val = QE_CMXGCR_USBCS_CLK21;    break;
        case QE_BRG9:   val = QE_CMXGCR_USBCS_BRG9;     break;
        case QE_BRG10:  val = QE_CMXGCR_USBCS_BRG10;    break;
        default:
                pr_err("%s: requested unknown clock %d\n", __func__, clk);
                return -EINVAL;
        }

        if (qe_clock_is_brg(clk))
                qe_setbrg(clk, rate, 1);

        spin_lock_irqsave(&cmxgcr_lock, flags);

        clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);

        spin_unlock_irqrestore(&cmxgcr_lock, flags);

        return 0;
}
EXPORT_SYMBOL(qe_usb_clock_set);
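
qe_usb_clock_set() takes either a raw CLKn input or a BRG; for a BRG it also programs the divider to the requested rate, while for a fixed clock the rate argument is unused. A short illustrative caller follows (48 MHz is the usual USB reference frequency, but the choice here is only an example and not part of the patch).

/* Illustrative only: clock the QE USB controller. */
static int example_usb_clock(bool use_brg)
{
        if (use_brg)
                return qe_usb_clock_set(QE_BRG9, 48000000);

        /* rate is ignored for non-BRG inputs */
        return qe_usb_clock_set(QE_CLK19, 0);
}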