staging: fsl-mc: Move DPIO from staging to drivers/soc/fsl

Move the NXP DPIO (Datapath I/O Driver) out of the
drivers/staging directory and into the drivers/soc/fsl directory.

The DPIO driver enables access to Queue and Buffer Manager (QBMAN)
hardware on NXP DPAA2 devices. This is a prerequisite to moving the
DPAA2 Ethernet driver out of staging.

Signed-off-by: Roy Pledge <roy.pledge@nxp.com>
Reviewed-by: Horia Geantă <horia.geanta@nxp.com>
Reviewed-by: Ioana Radulescu <ruxandra.radulescu@nxp.com>
Signed-off-by: Li Yang <leoyang.li@nxp.com>
Roy Pledge
2018-07-24 09:21:29 -05:00
committed by Li Yang
parent 58ad0d0263
commit c89105c9b3
20 changed files with 20 additions and 20 deletions


@@ -16,3 +16,13 @@ config FSL_GUTS
Initially only reading SVR and registering soc device are supported.
Other guts accesses, such as reading RCW, should eventually be moved
into this driver as well.
config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
buffer management facilities for software to interact with
other DPAA2 objects. This driver does not expose the DPIO
objects individually, but groups them under a service layer
API.


@@ -6,3 +6,4 @@ obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
obj-$(CONFIG_FSL_GUTS) += guts.o
obj-$(CONFIG_FSL_MC_DPIO) += dpio/


@@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
#
# QorIQ DPAA2 DPIO driver
#
obj-$(CONFIG_FSL_MC_DPIO) += fsl-mc-dpio.o
fsl-mc-dpio-objs := dpio.o qbman-portal.o dpio-service.o dpio-driver.o


@@ -0,0 +1,49 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
*/
#ifndef _FSL_DPIO_CMD_H
#define _FSL_DPIO_CMD_H
/* DPIO Version */
#define DPIO_VER_MAJOR 4
#define DPIO_VER_MINOR 2
/* Command Versioning */
#define DPIO_CMD_ID_OFFSET 4
#define DPIO_CMD_BASE_VERSION 1
#define DPIO_CMD(id) (((id) << DPIO_CMD_ID_OFFSET) | DPIO_CMD_BASE_VERSION)
/* Command IDs */
#define DPIO_CMDID_CLOSE DPIO_CMD(0x800)
#define DPIO_CMDID_OPEN DPIO_CMD(0x803)
#define DPIO_CMDID_GET_API_VERSION DPIO_CMD(0xa03)
#define DPIO_CMDID_ENABLE DPIO_CMD(0x002)
#define DPIO_CMDID_DISABLE DPIO_CMD(0x003)
#define DPIO_CMDID_GET_ATTR DPIO_CMD(0x004)
struct dpio_cmd_open {
__le32 dpio_id;
};
#define DPIO_CHANNEL_MODE_MASK 0x3
struct dpio_rsp_get_attr {
/* cmd word 0 */
__le32 id;
__le16 qbman_portal_id;
u8 num_priorities;
u8 channel_mode;
/* cmd word 1 */
__le64 qbman_portal_ce_addr;
/* cmd word 2 */
__le64 qbman_portal_ci_addr;
/* cmd word 3 */
__le32 qbman_version;
};
#endif /* _FSL_DPIO_CMD_H */


@@ -0,0 +1,281 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright NXP 2016
*
*/
#include <linux/types.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/msi.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include "qbman-portal.h"
#include "dpio.h"
#include "dpio-cmd.h"
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("DPIO Driver");
struct dpio_priv {
struct dpaa2_io *io;
};
static irqreturn_t dpio_irq_handler(int irq_num, void *arg)
{
struct device *dev = (struct device *)arg;
struct dpio_priv *priv = dev_get_drvdata(dev);
return dpaa2_io_irq(priv->io);
}
static void unregister_dpio_irq_handlers(struct fsl_mc_device *dpio_dev)
{
struct fsl_mc_device_irq *irq;
irq = dpio_dev->irqs[0];
/* clear the affinity hint */
irq_set_affinity_hint(irq->msi_desc->irq, NULL);
}
static int register_dpio_irq_handlers(struct fsl_mc_device *dpio_dev, int cpu)
{
struct dpio_priv *priv;
int error;
struct fsl_mc_device_irq *irq;
cpumask_t mask;
priv = dev_get_drvdata(&dpio_dev->dev);
irq = dpio_dev->irqs[0];
error = devm_request_irq(&dpio_dev->dev,
irq->msi_desc->irq,
dpio_irq_handler,
0,
dev_name(&dpio_dev->dev),
&dpio_dev->dev);
if (error < 0) {
dev_err(&dpio_dev->dev,
"devm_request_irq() failed: %d\n",
error);
return error;
}
/* set the affinity hint */
cpumask_clear(&mask);
cpumask_set_cpu(cpu, &mask);
if (irq_set_affinity_hint(irq->msi_desc->irq, &mask))
dev_err(&dpio_dev->dev,
"irq_set_affinity failed irq %d cpu %d\n",
irq->msi_desc->irq, cpu);
return 0;
}
static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
{
struct dpio_attr dpio_attrs;
struct dpaa2_io_desc desc;
struct dpio_priv *priv;
int err = -ENOMEM;
struct device *dev = &dpio_dev->dev;
static int next_cpu = -1;
priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
goto err_priv_alloc;
dev_set_drvdata(dev, priv);
err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
if (err) {
dev_dbg(dev, "MC portal allocation failed\n");
err = -EPROBE_DEFER;
goto err_priv_alloc;
}
err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
&dpio_dev->mc_handle);
if (err) {
dev_err(dev, "dpio_open() failed\n");
goto err_open;
}
err = dpio_get_attributes(dpio_dev->mc_io, 0, dpio_dev->mc_handle,
&dpio_attrs);
if (err) {
dev_err(dev, "dpio_get_attributes() failed %d\n", err);
goto err_get_attr;
}
desc.qman_version = dpio_attrs.qbman_version;
err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
if (err) {
dev_err(dev, "dpio_enable() failed %d\n", err);
goto err_get_attr;
}
/* initialize DPIO descriptor */
desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0;
desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0;
desc.dpio_id = dpio_dev->obj_desc.id;
/* get the cpu to use for the affinity hint */
if (next_cpu == -1)
next_cpu = cpumask_first(cpu_online_mask);
else
next_cpu = cpumask_next(next_cpu, cpu_online_mask);
if (!cpu_possible(next_cpu)) {
dev_err(dev, "probe failed. Number of DPIOs exceeds NR_CPUS.\n");
err = -ERANGE;
goto err_allocate_irqs;
}
desc.cpu = next_cpu;
/*
* Set the CENA regs to be the cache inhibited area of the portal to
* avoid coherency issues if a user migrates to another core.
*/
desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
resource_size(&dpio_dev->regions[1]),
MEMREMAP_WC);
if (IS_ERR(desc.regs_cena)) {
dev_err(dev, "devm_memremap failed\n");
err = PTR_ERR(desc.regs_cena);
goto err_allocate_irqs;
}
desc.regs_cinh = devm_ioremap(dev, dpio_dev->regions[1].start,
resource_size(&dpio_dev->regions[1]));
if (!desc.regs_cinh) {
err = -ENOMEM;
dev_err(dev, "devm_ioremap failed\n");
goto err_allocate_irqs;
}
err = fsl_mc_allocate_irqs(dpio_dev);
if (err) {
dev_err(dev, "fsl_mc_allocate_irqs failed. err=%d\n", err);
goto err_allocate_irqs;
}
err = register_dpio_irq_handlers(dpio_dev, desc.cpu);
if (err)
goto err_register_dpio_irq;
priv->io = dpaa2_io_create(&desc);
if (!priv->io) {
dev_err(dev, "dpaa2_io_create failed\n");
err = -ENOMEM;
goto err_dpaa2_io_create;
}
dev_info(dev, "probed\n");
dev_dbg(dev, " receives_notifications = %d\n",
desc.receives_notifications);
dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
fsl_mc_portal_free(dpio_dev->mc_io);
return 0;
err_dpaa2_io_create:
unregister_dpio_irq_handlers(dpio_dev);
err_register_dpio_irq:
fsl_mc_free_irqs(dpio_dev);
err_allocate_irqs:
dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
err_get_attr:
dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
err_open:
fsl_mc_portal_free(dpio_dev->mc_io);
err_priv_alloc:
return err;
}
/* Tear down interrupts for a given DPIO object */
static void dpio_teardown_irqs(struct fsl_mc_device *dpio_dev)
{
unregister_dpio_irq_handlers(dpio_dev);
fsl_mc_free_irqs(dpio_dev);
}
static int dpaa2_dpio_remove(struct fsl_mc_device *dpio_dev)
{
struct device *dev;
struct dpio_priv *priv;
int err;
dev = &dpio_dev->dev;
priv = dev_get_drvdata(dev);
dpaa2_io_down(priv->io);
dpio_teardown_irqs(dpio_dev);
err = fsl_mc_portal_allocate(dpio_dev, 0, &dpio_dev->mc_io);
if (err) {
dev_err(dev, "MC portal allocation failed\n");
goto err_mcportal;
}
err = dpio_open(dpio_dev->mc_io, 0, dpio_dev->obj_desc.id,
&dpio_dev->mc_handle);
if (err) {
dev_err(dev, "dpio_open() failed\n");
goto err_open;
}
dpio_disable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
dpio_close(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
fsl_mc_portal_free(dpio_dev->mc_io);
return 0;
err_open:
fsl_mc_portal_free(dpio_dev->mc_io);
err_mcportal:
return err;
}
static const struct fsl_mc_device_id dpaa2_dpio_match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpio",
},
{ .vendor = 0x0 }
};
static struct fsl_mc_driver dpaa2_dpio_driver = {
.driver = {
.name = KBUILD_MODNAME,
.owner = THIS_MODULE,
},
.probe = dpaa2_dpio_probe,
.remove = dpaa2_dpio_remove,
.match_id_table = dpaa2_dpio_match_id_table
};
static int dpio_driver_init(void)
{
return fsl_mc_driver_register(&dpaa2_dpio_driver);
}
static void dpio_driver_exit(void)
{
fsl_mc_driver_unregister(&dpaa2_dpio_driver);
}
module_init(dpio_driver_init);
module_exit(dpio_driver_exit);


@@ -0,0 +1,135 @@
Copyright 2016 NXP
Introduction
------------
A DPAA2 DPIO (Data Path I/O) is a hardware object that provides
interfaces to enqueue and dequeue frames to/from network interfaces
and other accelerators. A DPIO also provides hardware buffer
pool management for network interfaces.
This document provides an overview of the Linux DPIO driver, its
subcomponents, and its APIs.
See Documentation/networking/dpaa2/overview.rst for a general overview of DPAA2
and the general DPAA2 driver architecture in Linux.
Driver Overview
---------------
The DPIO driver is bound to DPIO objects discovered on the fsl-mc bus and
provides services that:
A) allow other drivers, such as the Ethernet driver, to enqueue and dequeue
frames for their respective objects
B) allow drivers to register callbacks for data availability notifications
when data becomes available on a queue or channel
C) allow drivers to manage hardware buffer pools
The Linux DPIO driver consists of 3 primary components--
DPIO object driver-- fsl-mc driver that manages the DPIO object
DPIO service-- provides APIs to other Linux drivers for services
QBman portal interface-- sends portal commands, gets responses
          fsl-mc          other
           bus           drivers
            |               |
        +---+----+   +------+-----+
        |DPIO obj|   |DPIO service|
        | driver |---|   (DPIO)   |
        +--------+   +------+-----+
                            |
                     +------+-----+
                     |    QBman   |
                     | portal i/f |
                     +------------+
                            |
                         hardware
The diagram below shows how the DPIO driver components fit with the other
DPAA2 Linux driver components:
                                          +------------+
                                          | OS Network |
                                          |   Stack    |
         +------------+                   +------------+
         | Allocator  |. . . . . . . . . .|  Ethernet  |
         |(DPMCP,DPBP)|                   |   (DPNI)   |
         +-.----------+                   +---+---+----+
          .   .                               ^   |
         .       .               <data avail, |   |<enqueue,
        .           .             tx confirm> |   | dequeue>
   +-------------+     .                      |   |
   | DPRC driver |        .    +--------+ +------------+
   |   (DPRC)    |         . . |DPIO obj| |DPIO service|
   +----------+--+             | driver |-|   (DPIO)   |
              |                +--------+ +------+-----+
              |<dev add/remove>           +------|-----+
              |                           |    QBman   |
         +----+--------------+            | portal i/f |
         |   MC-bus driver   |            +------------+
         |                   |                   |
         | /soc/fsl-mc       |                   |
         +-------------------+                   |
                                                 |
=======================================|=========|==========================
                                     +-+--DPIO---|-----------+
                                     |           |           |
                                     |        QBman Portal   |
                                     +-----------------------+
============================================================================
DPIO Object Driver (dpio-driver.c)
----------------------------------
The dpio-driver component registers with the fsl-mc bus to handle objects of
type "dpio". The implementation of probe() handles basic initialization
of the DPIO including mapping of the DPIO regions (the QBman SW portal)
and initializing interrupts and registering irq handlers. The dpio-driver
registers the probed DPIO with dpio-service.
DPIO service (dpio-service.c, dpaa2-io.h)
------------------------------------------
The dpio service component provides queuing, notification, and buffer
management services to DPAA2 drivers, such as the Ethernet driver. A system
will typically allocate 1 DPIO object per CPU to allow queuing operations
to happen simultaneously across all CPUs. A usage sketch follows the API
summary below.
Notification handling
dpaa2_io_service_register()
dpaa2_io_service_deregister()
dpaa2_io_service_rearm()
Queuing
dpaa2_io_service_pull_fq()
dpaa2_io_service_pull_channel()
dpaa2_io_service_enqueue_fq()
dpaa2_io_service_enqueue_qd()
dpaa2_io_store_create()
dpaa2_io_store_destroy()
dpaa2_io_store_next()
Buffer pool management
dpaa2_io_service_release()
dpaa2_io_service_acquire()
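As a hedged illustration of how a consumer driver might combine the queuing and
buffer-pool calls above: send a frame and, on failure, return its buffer to the
pool. The helper name, the tx_fqid/bpid values, and the already-built frame
descriptor and DMA-mapped buffer address are assumptions of this sketch, not
part of this driver:

    /* illustrative consumer-side helper, not from this commit */
    static int example_tx(u32 tx_fqid, u32 bpid, const struct dpaa2_fd *fd,
                          u64 buf_addr)
    {
            struct dpaa2_io *io = dpaa2_io_service_select(DPAA2_IO_ANY_CPU);
            int err;

            if (!io)
                    return -ENODEV;

            /* retry while the enqueue ring is busy */
            do {
                    err = dpaa2_io_service_enqueue_fq(io, tx_fqid, fd);
            } while (err == -EBUSY);

            /* on any other failure, recycle the buffer to its pool */
            if (err)
                    dpaa2_io_service_release(io, bpid, &buf_addr, 1);

            return err;
    }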
QBman portal interface (qbman-portal.c)
---------------------------------------
The qbman-portal component provides APIs to do the low level hardware
bit twiddling for operations such as:
-initializing Qman software portals
-building and sending portal commands
-portal interrupt configuration and processing
The qbman-portal APIs are not public to other drivers, and are
only used by dpio-service.
Other (dpaa2-fd.h, dpaa2-global.h)
----------------------------------
Frame descriptor and scatter-gather definitions and the APIs used to
manipulate them are defined in dpaa2-fd.h.
Dequeue result struct and parsing APIs are defined in dpaa2-global.h.
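For instance, a dequeue result handed back by dpaa2_io_store_next() can be
unpacked with those accessors. The sketch below assumes dpaa2-fd.h,
dpaa2-global.h and linux/dma-mapping.h are included and that the buffer was
originally mapped with buf_size; the hand-off of the frame data is left to the
hypothetical caller:

    /* illustrative helper, not from this commit */
    static void example_consume(struct device *dev, const struct dpaa2_dq *dq,
                                size_t buf_size)
    {
            const struct dpaa2_fd *fd = dpaa2_dq_fd(dq);
            dma_addr_t addr = dpaa2_fd_get_addr(fd);
            u32 len = dpaa2_fd_get_len(fd);

            /* unmap with the size used when the buffer was mapped, then hand
             * 'len' bytes of frame data off to whatever consumes it
             */
            dma_unmap_single(dev, addr, buf_size, DMA_FROM_DEVICE);
            pr_debug("rx frame: %u bytes at %pad\n", len, &addr);
    }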


@@ -0,0 +1,545 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2014-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
*/
#include <linux/types.h>
#include <linux/fsl/mc.h>
#include <soc/fsl/dpaa2-io.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include "dpio.h"
#include "qbman-portal.h"
struct dpaa2_io {
struct dpaa2_io_desc dpio_desc;
struct qbman_swp_desc swp_desc;
struct qbman_swp *swp;
struct list_head node;
/* protect against multiple management commands */
spinlock_t lock_mgmt_cmd;
/* protect notifications list */
spinlock_t lock_notifications;
struct list_head notifications;
};
struct dpaa2_io_store {
unsigned int max;
dma_addr_t paddr;
struct dpaa2_dq *vaddr;
void *alloced_addr; /* unaligned value from kmalloc() */
unsigned int idx; /* position of the next-to-be-returned entry */
struct qbman_swp *swp; /* portal used to issue VDQCR */
struct device *dev; /* device used for DMA mapping */
};
/* keep a per cpu array of DPIOs for fast access */
static struct dpaa2_io *dpio_by_cpu[NR_CPUS];
static struct list_head dpio_list = LIST_HEAD_INIT(dpio_list);
static DEFINE_SPINLOCK(dpio_list_lock);
static inline struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d,
int cpu)
{
if (d)
return d;
if (cpu != DPAA2_IO_ANY_CPU && cpu >= num_possible_cpus())
return NULL;
/*
* If cpu == -1, choose the current cpu, with no guarantees about
* potentially being migrated away.
*/
if (unlikely(cpu < 0))
cpu = smp_processor_id();
/* If a specific cpu was requested, pick it up immediately */
return dpio_by_cpu[cpu];
}
static inline struct dpaa2_io *service_select(struct dpaa2_io *d)
{
if (d)
return d;
spin_lock(&dpio_list_lock);
d = list_entry(dpio_list.next, struct dpaa2_io, node);
list_del(&d->node);
list_add_tail(&d->node, &dpio_list);
spin_unlock(&dpio_list_lock);
return d;
}
/**
* dpaa2_io_service_select() - return a dpaa2_io service affined to this cpu
* @cpu: the cpu id
*
* Return the affine dpaa2_io service, or NULL if there is no service affined
* to the specified cpu. If DPAA2_IO_ANY_CPU is used, return the next available
* service.
*/
struct dpaa2_io *dpaa2_io_service_select(int cpu)
{
if (cpu == DPAA2_IO_ANY_CPU)
return service_select(NULL);
return service_select_by_cpu(NULL, cpu);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
*
* Activates a "struct dpaa2_io" corresponding to the given config of an actual
* DPIO object.
*
* Return a valid dpaa2_io object for success, or NULL for failure.
*/
struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
/* check if CPU is out of range (-1 means any cpu) */
if (desc->cpu != DPAA2_IO_ANY_CPU && desc->cpu >= num_possible_cpus()) {
kfree(obj);
return NULL;
}
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) {
kfree(obj);
return NULL;
}
INIT_LIST_HEAD(&obj->node);
spin_lock_init(&obj->lock_mgmt_cmd);
spin_lock_init(&obj->lock_notifications);
INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */
qbman_swp_interrupt_set_trigger(obj->swp,
QBMAN_SWP_INTERRUPT_DQRI);
qbman_swp_interrupt_clear_status(obj->swp, 0xffffffff);
if (obj->dpio_desc.receives_notifications)
qbman_swp_push_set(obj->swp, 0, 1);
spin_lock(&dpio_list_lock);
list_add_tail(&obj->node, &dpio_list);
if (desc->cpu >= 0 && !dpio_by_cpu[desc->cpu])
dpio_by_cpu[desc->cpu] = obj;
spin_unlock(&dpio_list_lock);
return obj;
}
/**
* dpaa2_io_down() - release the dpaa2_io object.
* @d: the dpaa2_io object to be released.
*
* The "struct dpaa2_io" type can represent an individual DPIO object (as
* described by "struct dpaa2_io_desc") or an instance of a "DPIO service",
* which can be used to group/encapsulate multiple DPIO objects. In all cases,
* each handle obtained should be released using this function.
*/
void dpaa2_io_down(struct dpaa2_io *d)
{
kfree(d);
}
#define DPAA_POLL_MAX 32
/**
* dpaa2_io_irq() - ISR for DPIO interrupts
*
* @obj: the given DPIO object.
*
* Return IRQ_HANDLED for success or IRQ_NONE if there
* were no pending interrupts.
*/
irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
{
const struct dpaa2_dq *dq;
int max = 0;
struct qbman_swp *swp;
u32 status;
swp = obj->swp;
status = qbman_swp_interrupt_read_status(swp);
if (!status)
return IRQ_NONE;
dq = qbman_swp_dqrr_next(swp);
while (dq) {
if (qbman_result_is_SCN(dq)) {
struct dpaa2_io_notification_ctx *ctx;
u64 q64;
q64 = qbman_result_SCN_ctx(dq);
ctx = (void *)(uintptr_t)q64;
ctx->cb(ctx);
} else {
pr_crit("fsl-mc-dpio: Unrecognised/ignored DQRR entry\n");
}
qbman_swp_dqrr_consume(swp, dq);
++max;
if (max > DPAA_POLL_MAX)
goto done;
dq = qbman_swp_dqrr_next(swp);
}
done:
qbman_swp_interrupt_clear_status(swp, status);
qbman_swp_interrupt_set_inhibit(swp, 0);
return IRQ_HANDLED;
}
/**
* dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN
* notifications on the given DPIO service.
* @d: the given DPIO service.
* @ctx: the notification context.
*
* The caller should make the MC command to attach a DPAA2 object to
* a DPIO after this function completes successfully. In that way:
* (a) The DPIO service is "ready" to handle a notification arrival
* (which might happen before the "attach" command to MC has
* returned control of execution back to the caller)
* (b) The DPIO service can provide back to the caller the 'dpio_id' and
* 'qman64' parameters that it should pass along in the MC command
* in order for the object to be configured to produce the right
* notification fields to the DPIO service.
*
* Return 0 for success, or -ENODEV for failure.
*/
int dpaa2_io_service_register(struct dpaa2_io *d,
struct dpaa2_io_notification_ctx *ctx)
{
unsigned long irqflags;
d = service_select_by_cpu(d, ctx->desired_cpu);
if (!d)
return -ENODEV;
ctx->dpio_id = d->dpio_desc.dpio_id;
ctx->qman64 = (u64)(uintptr_t)ctx;
ctx->dpio_private = d;
spin_lock_irqsave(&d->lock_notifications, irqflags);
list_add(&ctx->node, &d->notifications);
spin_unlock_irqrestore(&d->lock_notifications, irqflags);
/* Enable the generation of CDAN notifications */
if (ctx->is_cdan)
return qbman_swp_CDAN_set_context_enable(d->swp,
(u16)ctx->id,
ctx->qman64);
return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_register);
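As a consumer-side illustration of the ordering described in the comment above:
register the context first, then issue the MC attach command. The priv
structure, the callback and the attach helper below are hypothetical names for
a consumer driver's own code, not APIs from this commit.

static int example_setup_cdan(struct example_priv *priv, int channel_id)
{
        struct dpaa2_io_notification_ctx *nctx = &priv->nctx;
        int err;

        nctx->is_cdan = 1;
        nctx->id = channel_id;
        nctx->desired_cpu = DPAA2_IO_ANY_CPU;
        nctx->cb = example_cdan_cb;     /* called from dpaa2_io_irq() */

        /* (a) make the DPIO service ready for the notification ... */
        err = dpaa2_io_service_register(NULL, nctx);
        if (err)
                return err;

        /* (b) ... then tell the MC to route notifications to
         * nctx->dpio_id with context nctx->qman64 (the exact MC command
         * depends on the object type being attached)
         */
        err = example_attach_to_dpio(priv, nctx->dpio_id, nctx->qman64);
        if (err)
                dpaa2_io_service_deregister(NULL, nctx);

        return err;
}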
/**
* dpaa2_io_service_deregister - The opposite of 'register'.
* @service: the given DPIO service.
* @ctx: the notification context.
*
* This function should be called only after sending the MC command to
* detach the notification-producing device from the DPIO.
*/
void dpaa2_io_service_deregister(struct dpaa2_io *service,
struct dpaa2_io_notification_ctx *ctx)
{
struct dpaa2_io *d = ctx->dpio_private;
unsigned long irqflags;
if (ctx->is_cdan)
qbman_swp_CDAN_disable(d->swp, (u16)ctx->id);
spin_lock_irqsave(&d->lock_notifications, irqflags);
list_del(&ctx->node);
spin_unlock_irqrestore(&d->lock_notifications, irqflags);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_deregister);
/**
* dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service.
* @d: the given DPIO service.
* @ctx: the notification context.
*
* Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is
* considered "disarmed". Ie. the user can issue pull dequeue operations on that
* traffic source for as long as it likes. Eventually it may wish to "rearm"
* that source to allow it to produce another FQDAN/CDAN, that's what this
* function achieves.
*
* Return 0 for success.
*/
int dpaa2_io_service_rearm(struct dpaa2_io *d,
struct dpaa2_io_notification_ctx *ctx)
{
unsigned long irqflags;
int err;
d = service_select_by_cpu(d, ctx->desired_cpu);
if (unlikely(!d))
return -ENODEV;
spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
if (ctx->is_cdan)
err = qbman_swp_CDAN_enable(d->swp, (u16)ctx->id);
else
err = qbman_swp_fq_schedule(d->swp, ctx->id);
spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_rearm);
/**
* dpaa2_io_service_pull_channel() - pull dequeue functions from a channel.
* @d: the given DPIO service.
* @channelid: the given channel id.
* @s: the dpaa2_io_store object for the result.
*
* Return 0 for success, or error code for failure.
*/
int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
struct dpaa2_io_store *s)
{
struct qbman_pull_desc pd;
int err;
qbman_pull_desc_clear(&pd);
qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1);
qbman_pull_desc_set_numframes(&pd, (u8)s->max);
qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio);
d = service_select(d);
if (!d)
return -ENODEV;
s->swp = d->swp;
err = qbman_swp_pull(d->swp, &pd);
if (err)
s->swp = NULL;
return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_pull_channel);
/**
* dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD.
* @d: the given DPIO service.
* @qdid: the given queuing destination id.
* @prio: the given queuing priority.
* @qdbin: the given queuing destination bin.
* @fd: the frame descriptor which is enqueued.
*
* Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
* or -ENODEV if there is no dpio service.
*/
int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d,
u32 qdid, u8 prio, u16 qdbin,
const struct dpaa2_fd *fd)
{
struct qbman_eq_desc ed;
d = service_select(d);
if (!d)
return -ENODEV;
qbman_eq_desc_clear(&ed);
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
return qbman_swp_enqueue(d->swp, &ed, fd);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_qd);
/**
* dpaa2_io_service_release() - Release buffers to a buffer pool.
* @d: the given DPIO object.
* @bpid: the buffer pool id.
* @buffers: the buffers to be released.
* @num_buffers: the number of the buffers to be released.
*
* Return 0 for success, and negative error code for failure.
*/
int dpaa2_io_service_release(struct dpaa2_io *d,
u32 bpid,
const u64 *buffers,
unsigned int num_buffers)
{
struct qbman_release_desc rd;
d = service_select(d);
if (!d)
return -ENODEV;
qbman_release_desc_clear(&rd);
qbman_release_desc_set_bpid(&rd, bpid);
return qbman_swp_release(d->swp, &rd, buffers, num_buffers);
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_release);
/**
* dpaa2_io_service_acquire() - Acquire buffers from a buffer pool.
* @d: the given DPIO object.
* @bpid: the buffer pool id.
* @buffers: the buffer addresses for acquired buffers.
* @num_buffers: the expected number of the buffers to acquire.
*
* Return a negative error code if the command failed, otherwise it returns
* the number of buffers acquired, which may be less than the number requested.
* Eg. if the buffer pool is empty, this will return zero.
*/
int dpaa2_io_service_acquire(struct dpaa2_io *d,
u32 bpid,
u64 *buffers,
unsigned int num_buffers)
{
unsigned long irqflags;
int err;
d = service_select(d);
if (!d)
return -ENODEV;
spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
err = qbman_swp_acquire(d->swp, bpid, buffers, num_buffers);
spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
return err;
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_acquire);
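A rough seed/drain sketch for the two buffer-pool calls above; the pool id and
the origin of the (already DMA-mapped) addresses are assumptions. A single
release or acquire command moves at most 7 buffers, matching the 7-entry buf[]
array in struct qbman_release_desc.

/* hypothetical helper: seed a pool, then drain whatever is left in it */
static void example_pool_cycle(u32 bpid, u64 *bufs, unsigned int count)
{
        int n;

        /* seed: 'count' must be <= 7 for a single release command */
        if (dpaa2_io_service_release(NULL, bpid, bufs, count))
                return;

        /* drain: acquire() reports how many buffers it actually got,
         * possibly fewer than requested and 0 once the pool is empty
         */
        do {
                n = dpaa2_io_service_acquire(NULL, bpid, bufs, count);
                /* unmap/free the n returned addresses here */
        } while (n > 0);
}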
/*
* 'Stores' are reusable memory blocks for holding dequeue results, and to
* assist with parsing those results.
*/
/**
* dpaa2_io_store_create() - Create the dma memory storage for dequeue result.
* @max_frames: the maximum number of dequeue results to be stored; must be <= 16.
* @dev: the device to allow mapping/unmapping the DMAable region.
*
* The size of the storage is "max_frames*sizeof(struct dpaa2_dq)".
* The 'dpaa2_io_store' returned is a DPIO service managed object.
*
* Return pointer to dpaa2_io_store struct for successfully created storage
* memory, or NULL on error.
*/
struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
struct device *dev)
{
struct dpaa2_io_store *ret;
size_t size;
if (!max_frames || (max_frames > 16))
return NULL;
ret = kmalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->max = max_frames;
size = max_frames * sizeof(struct dpaa2_dq) + 64;
ret->alloced_addr = kzalloc(size, GFP_KERNEL);
if (!ret->alloced_addr) {
kfree(ret);
return NULL;
}
ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64);
ret->paddr = dma_map_single(dev, ret->vaddr,
sizeof(struct dpaa2_dq) * max_frames,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, ret->paddr)) {
kfree(ret->alloced_addr);
kfree(ret);
return NULL;
}
ret->idx = 0;
ret->dev = dev;
return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_create);
/**
* dpaa2_io_store_destroy() - Frees the dma memory storage for dequeue
* result.
* @s: the storage memory to be destroyed.
*/
void dpaa2_io_store_destroy(struct dpaa2_io_store *s)
{
dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max,
DMA_FROM_DEVICE);
kfree(s->alloced_addr);
kfree(s);
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_destroy);
/**
* dpaa2_io_store_next() - Determine when the next dequeue result is available.
* @s: the dpaa2_io_store object.
* @is_last: indicate whether this is the last frame in the pull command.
*
* When an object driver performs dequeues to a dpaa2_io_store, this function
* can be used to determine when the next frame result is available. Once
* this function returns non-NULL, a subsequent call to it will try to find
* the next dequeue result.
*
* Note that if a pull-dequeue has a NULL result because the target FQ/channel
* was empty, then this function will also return NULL (rather than expecting
* the caller to always check for this). As such, "is_last" can be used to
* differentiate between "end-of-empty-dequeue" and "still-waiting".
*
* Return dequeue result for a valid dequeue result, or NULL for empty dequeue.
*/
struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last)
{
int match;
struct dpaa2_dq *ret = &s->vaddr[s->idx];
match = qbman_result_has_new_result(s->swp, ret);
if (!match) {
*is_last = 0;
return NULL;
}
s->idx++;
if (dpaa2_dq_is_pull_complete(ret)) {
*is_last = 1;
s->idx = 0;
/*
* If we get an empty dequeue result to terminate a zero-results
* vdqcr, return NULL to the caller rather than expecting him to
* check non-NULL results every time.
*/
if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME))
ret = NULL;
} else {
*is_last = 0;
}
return ret;
}
EXPORT_SYMBOL_GPL(dpaa2_io_store_next);
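Putting dpaa2_io_store_next() together with a pull command, a hedged sketch of
the polling loop a consumer might run; the channel id, the store (created
earlier with dpaa2_io_store_create()) and the frame handling are placeholders.

static int example_pull_and_drain(u32 ch_id, struct dpaa2_io_store *s)
{
        struct dpaa2_dq *dq;
        int is_last = 0;
        int err;

        err = dpaa2_io_service_pull_channel(NULL, ch_id, s);
        if (err)
                return err;

        while (!is_last) {
                dq = dpaa2_io_store_next(s, &is_last);
                if (!dq) {
                        /* still waiting for hardware to write the entry */
                        cpu_relax();
                        continue;
                }
                /* a valid result: process dpaa2_dq_fd(dq) here */
        }

        return 0;
}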

drivers/soc/fsl/dpio/dpio.c (new file, 198 additions)

@@ -0,0 +1,198 @@
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
*/
#include <linux/kernel.h>
#include <linux/fsl/mc.h>
#include "dpio.h"
#include "dpio-cmd.h"
/*
* Data Path I/O Portal API
* Contains initialization APIs and runtime control APIs for DPIO
*/
/**
* dpio_open() - Open a control session for the specified object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @dpio_id: DPIO unique ID
* @token: Returned token; use in subsequent API calls
*
* This function can be used to open a control session for an
* already created object; an object may have been declared in
* the DPL or by calling the dpio_create() function.
* This function returns a unique authentication token,
* associated with the specific object ID and the specific MC
* portal; this token must be used in all subsequent commands for
* this specific object.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpio_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int dpio_id,
u16 *token)
{
struct fsl_mc_command cmd = { 0 };
struct dpio_cmd_open *dpio_cmd;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN,
cmd_flags,
0);
dpio_cmd = (struct dpio_cmd_open *)cmd.params;
dpio_cmd->dpio_id = cpu_to_le32(dpio_id);
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
/**
* dpio_close() - Close the control session of the object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPIO object
*
* Return: '0' on Success; Error code otherwise.
*/
int dpio_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE,
cmd_flags,
token);
return mc_send_command(mc_io, &cmd);
}
/**
* dpio_enable() - Enable the DPIO, allow I/O portal operations.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPIO object
*
* Return: '0' on Success; Error code otherwise
*/
int dpio_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE,
cmd_flags,
token);
return mc_send_command(mc_io, &cmd);
}
/**
* dpio_disable() - Disable the DPIO, stop any I/O portal operation.
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPIO object
*
* Return: '0' on Success; Error code otherwise
*/
int dpio_disable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token)
{
struct fsl_mc_command cmd = { 0 };
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE,
cmd_flags,
token);
return mc_send_command(mc_io, &cmd);
}
/**
* dpio_get_attributes() - Retrieve DPIO attributes
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPIO object
* @attr: Returned object's attributes
*
* Return: '0' on Success; Error code otherwise
*/
int dpio_get_attributes(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
struct dpio_attr *attr)
{
struct fsl_mc_command cmd = { 0 };
struct dpio_rsp_get_attr *dpio_rsp;
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR,
cmd_flags,
token);
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
dpio_rsp = (struct dpio_rsp_get_attr *)cmd.params;
attr->id = le32_to_cpu(dpio_rsp->id);
attr->qbman_portal_id = le16_to_cpu(dpio_rsp->qbman_portal_id);
attr->num_priorities = dpio_rsp->num_priorities;
attr->channel_mode = dpio_rsp->channel_mode & DPIO_CHANNEL_MODE_MASK;
attr->qbman_portal_ce_offset =
le64_to_cpu(dpio_rsp->qbman_portal_ce_addr);
attr->qbman_portal_ci_offset =
le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
return 0;
}
/**
* dpio_get_api_version - Get Data Path I/O API version
* @mc_io: Pointer to MC portal's DPIO object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @major_ver: Major version of DPIO API
* @minor_ver: Minor version of DPIO API
*
* Return: '0' on Success; Error code otherwise
*/
int dpio_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
u16 *minor_ver)
{
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_API_VERSION,
cmd_flags, 0);
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
mc_cmd_read_api_version(&cmd, major_ver, minor_ver);
return 0;
}
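One plausible use of dpio_get_api_version(), e.g. early in a probe path, is to
refuse to run against MC firmware older than the DPIO_VER_* values defined in
dpio-cmd.h; the rejection policy and helper name are assumptions of this
sketch.

/* illustrative check, assuming 'mc_io' is an open MC portal */
static int example_check_dpio_api(struct fsl_mc_io *mc_io)
{
        u16 major, minor;
        int err;

        err = dpio_get_api_version(mc_io, 0, &major, &minor);
        if (err)
                return err;

        if (major < DPIO_VER_MAJOR ||
            (major == DPIO_VER_MAJOR && minor < DPIO_VER_MINOR))
                return -EOPNOTSUPP;     /* firmware too old for this driver */

        return 0;
}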


@@ -0,0 +1,83 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright 2013-2016 Freescale Semiconductor Inc.
* Copyright 2016 NXP
*
*/
#ifndef __FSL_DPIO_H
#define __FSL_DPIO_H
struct fsl_mc_io;
int dpio_open(struct fsl_mc_io *mc_io,
u32 cmd_flags,
int dpio_id,
u16 *token);
int dpio_close(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
/**
* enum dpio_channel_mode - DPIO notification channel mode
* @DPIO_NO_CHANNEL: No support for notification channel
* @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a
* dedicated channel in the DPIO; user should point the queue's
* destination in the relevant interface to this DPIO
*/
enum dpio_channel_mode {
DPIO_NO_CHANNEL = 0,
DPIO_LOCAL_CHANNEL = 1,
};
/**
* struct dpio_cfg - Structure representing DPIO configuration
* @channel_mode: Notification channel mode
* @num_priorities: Number of priorities for the notification channel (1-8);
* relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
*/
struct dpio_cfg {
enum dpio_channel_mode channel_mode;
u8 num_priorities;
};
int dpio_enable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
int dpio_disable(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
/**
* struct dpio_attr - Structure representing DPIO attributes
* @id: DPIO object ID
* @qbman_portal_ce_offset: offset of the software portal cache-enabled area
* @qbman_portal_ci_offset: offset of the software portal cache-inhibited area
* @qbman_portal_id: Software portal ID
* @channel_mode: Notification channel mode
* @num_priorities: Number of priorities for the notification channel (1-8);
* relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
* @qbman_version: QBMAN version
*/
struct dpio_attr {
int id;
u64 qbman_portal_ce_offset;
u64 qbman_portal_ci_offset;
u16 qbman_portal_id;
enum dpio_channel_mode channel_mode;
u8 num_priorities;
u32 qbman_version;
};
int dpio_get_attributes(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
struct dpio_attr *attr);
int dpio_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
u16 *minor_ver);
#endif /* __FSL_DPIO_H */

File diff suppressed because it is too large


@@ -0,0 +1,444 @@
/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
/*
* Copyright (C) 2014-2016 Freescale Semiconductor, Inc.
* Copyright 2016 NXP
*
*/
#ifndef __FSL_QBMAN_PORTAL_H
#define __FSL_QBMAN_PORTAL_H
#include <soc/fsl/dpaa2-fd.h>
struct dpaa2_dq;
struct qbman_swp;
/* qbman software portal descriptor structure */
struct qbman_swp_desc {
void *cena_bar; /* Cache-enabled portal base address */
void __iomem *cinh_bar; /* Cache-inhibited portal base address */
u32 qman_version;
};
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
#define QBMAN_SWP_INTERRUPT_EQDI 0x02
#define QBMAN_SWP_INTERRUPT_DQRI 0x04
#define QBMAN_SWP_INTERRUPT_RCRI 0x08
#define QBMAN_SWP_INTERRUPT_RCDI 0x10
#define QBMAN_SWP_INTERRUPT_VDCI 0x20
/* the structure for pull dequeue descriptor */
struct qbman_pull_desc {
u8 verb;
u8 numf;
u8 tok;
u8 reserved;
__le32 dq_src;
__le64 rsp_addr;
u64 rsp_addr_virt;
u8 padding[40];
};
enum qbman_pull_type_e {
/* dequeue with priority precedence, respect intra-class scheduling */
qbman_pull_type_prio = 1,
/* dequeue with active FQ precedence, respect ICS */
qbman_pull_type_active,
/* dequeue with active FQ precedence, no ICS */
qbman_pull_type_active_noics
};
/* Definitions for parsing dequeue entries */
#define QBMAN_RESULT_MASK 0x7f
#define QBMAN_RESULT_DQ 0x60
#define QBMAN_RESULT_FQRN 0x21
#define QBMAN_RESULT_FQRNI 0x22
#define QBMAN_RESULT_FQPN 0x24
#define QBMAN_RESULT_FQDAN 0x25
#define QBMAN_RESULT_CDAN 0x26
#define QBMAN_RESULT_CSCN_MEM 0x27
#define QBMAN_RESULT_CGCU 0x28
#define QBMAN_RESULT_BPSCN 0x29
#define QBMAN_RESULT_CSCN_WQ 0x2a
/* QBMan FQ management command codes */
#define QBMAN_FQ_SCHEDULE 0x48
#define QBMAN_FQ_FORCE 0x49
#define QBMAN_FQ_XON 0x4d
#define QBMAN_FQ_XOFF 0x4e
/* structure of enqueue descriptor */
struct qbman_eq_desc {
u8 verb;
u8 dca;
__le16 seqnum;
__le16 orpid;
__le16 reserved1;
__le32 tgtid;
__le32 tag;
__le16 qdbin;
u8 qpri;
u8 reserved[3];
u8 wae;
u8 rspid;
__le64 rsp_addr;
u8 fd[32];
};
/* buffer release descriptor */
struct qbman_release_desc {
u8 verb;
u8 reserved;
__le16 bpid;
__le32 reserved2;
__le64 buf[7];
};
/* Management command result codes */
#define QBMAN_MC_RSLT_OK 0xf0
#define CODE_CDAN_WE_EN 0x1
#define CODE_CDAN_WE_CTX 0x4
/* portal data structure */
struct qbman_swp {
const struct qbman_swp_desc *desc;
void *addr_cena;
void __iomem *addr_cinh;
/* Management commands */
struct {
u32 valid_bit; /* 0x00 or 0x80 */
} mc;
/* Push dequeues */
u32 sdq;
/* Volatile dequeues */
struct {
atomic_t available; /* indicates if a command can be sent */
u32 valid_bit; /* 0x00 or 0x80 */
struct dpaa2_dq *storage; /* NULL if DQRR */
} vdq;
/* DQRR */
struct {
u32 next_idx;
u32 valid_bit;
u8 dqrr_size;
int reset_bug; /* indicates dqrr reset workaround is needed */
} dqrr;
};
struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d);
void qbman_swp_finish(struct qbman_swp *p);
u32 qbman_swp_interrupt_read_status(struct qbman_swp *p);
void qbman_swp_interrupt_clear_status(struct qbman_swp *p, u32 mask);
u32 qbman_swp_interrupt_get_trigger(struct qbman_swp *p);
void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, u32 mask);
int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p);
void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit);
void qbman_swp_push_get(struct qbman_swp *p, u8 channel_idx, int *enabled);
void qbman_swp_push_set(struct qbman_swp *p, u8 channel_idx, int enable);
void qbman_pull_desc_clear(struct qbman_pull_desc *d);
void qbman_pull_desc_set_storage(struct qbman_pull_desc *d,
struct dpaa2_dq *storage,
dma_addr_t storage_phys,
int stash);
void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, u8 numframes);
void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, u32 fqid);
void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, u32 wqid,
enum qbman_pull_type_e dct);
void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, u32 chid,
enum qbman_pull_type_e dct);
int qbman_swp_pull(struct qbman_swp *p, struct qbman_pull_desc *d);
const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s);
void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq);
int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
u32 qd_bin, u32 qd_prio);
int qbman_swp_enqueue(struct qbman_swp *p, const struct qbman_eq_desc *d,
const struct dpaa2_fd *fd);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable);
int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d,
const u64 *buffers, unsigned int num_buffers);
int qbman_swp_acquire(struct qbman_swp *s, u16 bpid, u64 *buffers,
unsigned int num_buffers);
int qbman_swp_alt_fq_state(struct qbman_swp *s, u32 fqid,
u8 alt_fq_verb);
int qbman_swp_CDAN_set(struct qbman_swp *s, u16 channelid,
u8 we_mask, u8 cdan_en,
u64 ctx);
void *qbman_swp_mc_start(struct qbman_swp *p);
void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb);
void *qbman_swp_mc_result(struct qbman_swp *p);
/**
* qbman_result_is_DQ() - check if the dequeue result is a dequeue response
* @dq: the dequeue result to be checked
*
* DQRR entries may contain non-dequeue results, ie. notifications
*/
static inline int qbman_result_is_DQ(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_DQ);
}
/**
* qbman_result_is_SCN() - Check whether the dequeue result is a notification
* @dq: the dequeue result to be checked
*
*/
static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq)
{
return !qbman_result_is_DQ(dq);
}
/* FQ Data Availability */
static inline int qbman_result_is_FQDAN(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQDAN);
}
/* Channel Data Availability */
static inline int qbman_result_is_CDAN(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CDAN);
}
/* Congestion State Change */
static inline int qbman_result_is_CSCN(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CSCN_WQ);
}
/* Buffer Pool State Change */
static inline int qbman_result_is_BPSCN(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_BPSCN);
}
/* Congestion Group Count Update */
static inline int qbman_result_is_CGCU(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_CGCU);
}
/* Retirement */
static inline int qbman_result_is_FQRN(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRN);
}
/* Retirement Immediate */
static inline int qbman_result_is_FQRNI(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQRNI);
}
/* Park */
static inline int qbman_result_is_FQPN(const struct dpaa2_dq *dq)
{
return ((dq->dq.verb & QBMAN_RESULT_MASK) == QBMAN_RESULT_FQPN);
}
/**
* qbman_result_SCN_state() - Get the state field in State-change notification
*/
static inline u8 qbman_result_SCN_state(const struct dpaa2_dq *scn)
{
return scn->scn.state;
}
#define SCN_RID_MASK 0x00FFFFFF
/**
* qbman_result_SCN_rid() - Get the resource id in State-change notification
*/
static inline u32 qbman_result_SCN_rid(const struct dpaa2_dq *scn)
{
return le32_to_cpu(scn->scn.rid_tok) & SCN_RID_MASK;
}
/**
* qbman_result_SCN_ctx() - Get the context data in State-change notification
*/
static inline u64 qbman_result_SCN_ctx(const struct dpaa2_dq *scn)
{
return le64_to_cpu(scn->scn.ctx);
}
/**
* qbman_swp_fq_schedule() - Move the fq to the scheduled state
* @s: the software portal object
* @fqid: the index of frame queue to be scheduled
*
* There are a couple of different ways that a FQ can end up in the parked
* state; this function moves it to the scheduled state.
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_fq_schedule(struct qbman_swp *s, u32 fqid)
{
return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE);
}
/**
* qbman_swp_fq_force() - Force the FQ to fully scheduled state
* @s: the software portal object
* @fqid: the index of frame queue to be forced
*
* Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
* and thus be available for selection by any channel-dequeuing behaviour (push
* or pull). If the FQ is subsequently "dequeued" from the channel and is still
* empty at the time this happens, the resulting dq_entry will have no FD.
* (qbman_result_DQ_fd() will return NULL.)
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_fq_force(struct qbman_swp *s, u32 fqid)
{
return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE);
}
/**
* qbman_swp_fq_xon() - sets FQ flow-control to XON
* @s: the software portal object
* @fqid: the index of frame queue
*
* This setting doesn't affect enqueues to the FQ, just dequeues.
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_fq_xon(struct qbman_swp *s, u32 fqid)
{
return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON);
}
/**
* qbman_swp_fq_xoff() - sets FQ flow-control to XOFF
* @s: the software portal object
* @fqid: the index of frame queue
*
* This setting doesn't affect enqueues to the FQ, just dequeues.
* XOFF FQs will remain in the tentatively-scheduled state, even when
* non-empty, meaning they won't be selected for scheduled dequeuing.
* If a FQ is changed to XOFF after it had already become truly-scheduled
* to a channel, and a pull dequeue of that channel occurs that selects
* that FQ for dequeuing, then the resulting dq_entry will have no FD.
* (qbman_result_DQ_fd() will return NULL.)
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_fq_xoff(struct qbman_swp *s, u32 fqid)
{
return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF);
}
/* If the user has been allocated a channel object that is going to generate
* CDANs to another channel, then the qbman_swp_CDAN* functions will be
* necessary.
*
* CDAN-enabled channels only generate a single CDAN notification, after which
* they need to be reenabled before they'll generate another. The idea is
* that pull dequeuing will occur in reaction to the CDAN, followed by a
* reenable step. Each function generates a distinct command to hardware, so a
* combination function is provided if the user wishes to modify the "context"
* (which shows up in each CDAN message) each time they reenable, as a single
* command to hardware.
*/
/**
* qbman_swp_CDAN_set_context() - Set CDAN context
* @s: the software portal object
* @channelid: the channel index
* @ctx: the context to be set in CDAN
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_CDAN_set_context(struct qbman_swp *s, u16 channelid,
u64 ctx)
{
return qbman_swp_CDAN_set(s, channelid,
CODE_CDAN_WE_CTX,
0, ctx);
}
/**
* qbman_swp_CDAN_enable() - Enable CDAN for the channel
* @s: the software portal object
* @channelid: the index of the channel to generate CDAN
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_CDAN_enable(struct qbman_swp *s, u16 channelid)
{
return qbman_swp_CDAN_set(s, channelid,
CODE_CDAN_WE_EN,
1, 0);
}
/**
* qbman_swp_CDAN_disable() - disable CDAN for the channel
* @s: the software portal object
* @channelid: the index of the channel to generate CDAN
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_CDAN_disable(struct qbman_swp *s, u16 channelid)
{
return qbman_swp_CDAN_set(s, channelid,
CODE_CDAN_WE_EN,
0, 0);
}
/**
* qbman_swp_CDAN_set_context_enable() - Set CDAN context and enable CDAN
* @s: the software portal object
* @channelid: the index of the channel to generate CDAN
* @ctx: the context to be set in CDAN
*
* Return 0 for success, or negative error code for failure.
*/
static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
u16 channelid,
u64 ctx)
{
return qbman_swp_CDAN_set(s, channelid,
CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX,
1, ctx);
}
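To illustrate the reenable flow described in the comment block above, a small
sketch of rearming a channel after its CDAN has been handled; the portal,
channel and context values are placeholders, and in-tree consumers normally go
through the dpio-service wrappers (e.g. dpaa2_io_service_rearm()) rather than
calling these directly.

/* illustrative: after servicing a CDAN for channel 'ch', rearm it */
static int example_rearm_cdan(struct qbman_swp *swp, u16 ch,
                              bool update_ctx, u64 new_ctx)
{
        if (update_ctx)
                /* one command: write the new context and re-enable CDAN */
                return qbman_swp_CDAN_set_context_enable(swp, ch, new_ctx);

        /* otherwise just re-enable; the previous context is kept */
        return qbman_swp_CDAN_enable(swp, ch);
}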
/* Wraps up submit + poll-for-result */
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
u8 cmd_verb)
{
int loopvar = 1000;
qbman_swp_mc_submit(swp, cmd, cmd_verb);
do {
cmd = qbman_swp_mc_result(swp);
} while (!cmd && loopvar--);
WARN_ON(!loopvar);
return cmd;
}
#endif /* __FSL_QBMAN_PORTAL_H */