Moving IPA and GSI driver code to dataipa techpack

Change-Id: I196832c62a6109178cb234f000d3db7c481e0ef5
Signed-off-by: Arnav Sharma <arnav_s@codeaurora.org>
This commit is contained in:
arnav_s
2019-07-02 14:46:28 -07:00
commit 835c8d87c6
119 changed files with 150390 additions and 7 deletions

Makefile

@@ -1,3 +1,23 @@
# SPDX-License-Identifier: GPL-2.0-only
ccflags-y := -Wno-unused-function
obj-y := dataipa.o
#
# Makefile for the MSM specific device drivers.
#
ifeq ($(CONFIG_ARCH_KONA), y)
include $(srctree)/techpack/dataipa/config/konadataipa.conf
endif
ifeq ($(CONFIG_ARCH_KONA), y)
LINUXINCLUDE += -include $(srctree)/techpack/dataipa/config/konadataipaconf.h
endif
ifeq ($(CONFIG_ARCH_LITO), y)
include $(srctree)/techpack/dataipa/config/litodataipa.conf
endif
ifeq ($(CONFIG_ARCH_LITO), y)
LINUXINCLUDE += -include $(srctree)/techpack/dataipa/config/litodataipaconf.h
endif
obj-$(CONFIG_GSI) += gsi/
obj-$(CONFIG_IPA3) += ipa/

11  config/konadataipa.conf  Normal file

@@ -0,0 +1,11 @@
export CONFIG_GSI=y
export CONFIG_GSI_REGISTER_VERSION_2=y
export CONFIG_IPA3=y
export CONFIG_IPA_WDI_UNIFIED_API=y
export CONFIG_RMNET_IPA3=y
export CONFIG_RNDIS_IPA=y
export CONFIG_IPA3_MHI_PRIME_MANAGER=y
export CONFIG_IPA_UT=y
export CONFIG_IPA3_REGDUMP=y
export CONFIG_IPA3_REGDUMP_IPA_4_5=y
export CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS=y

16  config/konadataipaconf.h  Normal file

@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#define CONFIG_GSI 1
#define CONFIG_GSI_REGISTER_VERSION_2 1
#define CONFIG_IPA3 1
#define CONFIG_IPA_WDI_UNIFIED_API 1
#define CONFIG_RMNET_IPA3 1
#define CONFIG_RNDIS_IPA 1
#define CONFIG_IPA3_MHI_PRIME_MANAGER 1
#define CONFIG_IPA_UT 1
#define CONFIG_IPA3_REGDUMP 1
#define CONFIG_IPA3_REGDUMP_IPA_4_5 1
#define CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS 0

11  config/litodataipa.conf  Normal file

@@ -0,0 +1,11 @@
export CONFIG_GSI=y
export CONFIG_GSI_REGISTER_VERSION_2=y
export CONFIG_IPA3=y
export CONFIG_IPA_WDI_UNIFIED_API=y
export CONFIG_RMNET_IPA3=y
export CONFIG_RNDIS_IPA=y
export CONFIG_IPA3_MHI_PRIME_MANAGER=y
export CONFIG_IPA_UT=y
export CONFIG_IPA3_REGDUMP=y
export CONFIG_IPA3_REGDUMP_IPA_4_5=y
export CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS=y

16  config/litodataipaconf.h  Normal file

@@ -0,0 +1,16 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#define CONFIG_GSI 1
#define CONFIG_GSI_REGISTER_VERSION_2 1
#define CONFIG_IPA3 1
#define CONFIG_IPA_WDI_UNIFIED_API 1
#define CONFIG_RMNET_IPA3 1
#define CONFIG_RNDIS_IPA 1
#define CONFIG_IPA3_MHI_PRIME_MANAGER 1
#define CONFIG_IPA_UT 1
#define CONFIG_IPA3_REGDUMP 1
#define CONFIG_IPA3_REGDUMP_IPA_4_5 1
#define CONFIG_IPA3_REGDUMP_NUM_EXTRA_ENDP_REGS 0

5  dataipa.c

@@ -1,5 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
static void _dataipa_techpack_stub(void)
{
}

6  gsi/Makefile  Normal file

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
gsidbg-$(CONFIG_DEBUG_FS) += gsi_dbg.o
obj-$(CONFIG_GSI) += gsi.o gsidbg.o
obj-$(CONFIG_IPA_EMULATION) += gsi_emulation.o

4389  gsi/gsi.c  Normal file

File diff suppressed because it is too large

345  gsi/gsi.h  Normal file

@@ -0,0 +1,345 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef GSI_H
#define GSI_H
#include <linux/device.h>
#include <linux/types.h>
#include <linux/completion.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/msm_gsi.h>
#include <linux/errno.h>
#include <linux/ipc_logging.h>
/*
* The following for adding code (ie. for EMULATION) not found on x86.
*/
#if defined(CONFIG_IPA_EMULATION)
# include "gsi_emulation_stubs.h"
#endif
#define GSI_ASSERT() \
BUG()
#define GSI_CHAN_MAX 31
#define GSI_EVT_RING_MAX 24
#define GSI_NO_EVT_ERINDEX 31
#define gsi_readl(c) (readl(c))
#define gsi_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
#define GSI_IPC_LOGGING(buf, fmt, args...) \
do { \
if (buf) \
ipc_log_string((buf), fmt, __func__, __LINE__, \
## args); \
} while (0)
#define GSIDBG(fmt, args...) \
do { \
dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
## args);\
if (gsi_ctx) { \
GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
"%s:%d " fmt, ## args); \
GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
"%s:%d " fmt, ## args); \
} \
} while (0)
#define GSIDBG_LOW(fmt, args...) \
do { \
dev_dbg(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
## args);\
if (gsi_ctx) { \
GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
"%s:%d " fmt, ## args); \
} \
} while (0)
#define GSIERR(fmt, args...) \
do { \
dev_err(gsi_ctx->dev, "%s:%d " fmt, __func__, __LINE__, \
## args);\
if (gsi_ctx) { \
GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf, \
"%s:%d " fmt, ## args); \
GSI_IPC_LOGGING(gsi_ctx->ipc_logbuf_low, \
"%s:%d " fmt, ## args); \
} \
} while (0)
#define GSI_IPC_LOG_PAGES 50
enum gsi_evt_ring_state {
GSI_EVT_RING_STATE_NOT_ALLOCATED = 0x0,
GSI_EVT_RING_STATE_ALLOCATED = 0x1,
GSI_EVT_RING_STATE_ERROR = 0xf
};
enum gsi_chan_state {
GSI_CHAN_STATE_NOT_ALLOCATED = 0x0,
GSI_CHAN_STATE_ALLOCATED = 0x1,
GSI_CHAN_STATE_STARTED = 0x2,
GSI_CHAN_STATE_STOPPED = 0x3,
GSI_CHAN_STATE_STOP_IN_PROC = 0x4,
GSI_CHAN_STATE_ERROR = 0xf
};
struct gsi_ring_ctx {
spinlock_t slock;
unsigned long base_va;
uint64_t base;
uint64_t wp;
uint64_t rp;
uint64_t wp_local;
uint64_t rp_local;
uint16_t len;
uint8_t elem_sz;
uint16_t max_num_elem;
uint64_t end;
};
struct gsi_chan_dp_stats {
unsigned long ch_below_lo;
unsigned long ch_below_hi;
unsigned long ch_above_hi;
unsigned long empty_time;
unsigned long last_timestamp;
};
struct gsi_chan_stats {
unsigned long queued;
unsigned long completed;
unsigned long callback_to_poll;
unsigned long poll_to_callback;
unsigned long poll_pending_irq;
unsigned long invalid_tre_error;
unsigned long poll_ok;
unsigned long poll_empty;
unsigned long userdata_in_use;
struct gsi_chan_dp_stats dp;
};
/**
* struct gsi_user_data - user_data element pointed to by the TRE
* @valid: true means the element is in use;
* false means it is free to overwrite
* @p: pointer to the user data array element
*/
struct gsi_user_data {
bool valid;
void *p;
};
struct gsi_chan_ctx {
struct gsi_chan_props props;
enum gsi_chan_state state;
struct gsi_ring_ctx ring;
struct gsi_user_data *user_data;
struct gsi_evt_ctx *evtr;
struct mutex mlock;
struct completion compl;
bool allocated;
atomic_t poll_mode;
union __packed gsi_channel_scratch scratch;
struct gsi_chan_stats stats;
bool enable_dp_stats;
bool print_dp_stats;
};
struct gsi_evt_stats {
unsigned long completed;
};
struct gsi_evt_ctx {
struct gsi_evt_ring_props props;
enum gsi_evt_ring_state state;
uint8_t id;
struct gsi_ring_ctx ring;
struct mutex mlock;
struct completion compl;
struct gsi_chan_ctx *chan;
atomic_t chan_ref_cnt;
union __packed gsi_evt_scratch scratch;
struct gsi_evt_stats stats;
};
struct gsi_ee_scratch {
union __packed {
struct {
uint32_t inter_ee_cmd_return_code:3;
uint32_t resvd1:2;
uint32_t generic_ee_cmd_return_code:3;
uint32_t resvd2:7;
uint32_t max_usb_pkt_size:1;
uint32_t resvd3:8;
uint32_t mhi_base_chan_idx:8;
} s;
uint32_t val;
} word0;
uint32_t word1;
};
struct ch_debug_stats {
unsigned long ch_allocate;
unsigned long ch_start;
unsigned long ch_stop;
unsigned long ch_reset;
unsigned long ch_de_alloc;
unsigned long ch_db_stop;
unsigned long cmd_completed;
};
struct gsi_generic_ee_cmd_debug_stats {
unsigned long halt_channel;
};
struct gsi_ctx {
void __iomem *base;
struct device *dev;
struct gsi_per_props per;
bool per_registered;
struct gsi_chan_ctx chan[GSI_CHAN_MAX];
struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
bool enabled;
atomic_t num_chan;
atomic_t num_evt_ring;
struct gsi_ee_scratch scratch;
int num_ch_dp_stats;
struct workqueue_struct *dp_stat_wq;
u32 max_ch;
u32 max_ev;
struct completion gen_ee_cmd_compl;
void *ipc_logbuf;
void *ipc_logbuf_low;
/*
* The following used only on emulation systems.
*/
void __iomem *intcntrlr_base;
u32 intcntrlr_mem_size;
irq_handler_t intcntrlr_gsi_isr;
irq_handler_t intcntrlr_client_isr;
};
enum gsi_re_type {
GSI_RE_XFER = 0x2,
GSI_RE_IMMD_CMD = 0x3,
GSI_RE_NOP = 0x4,
GSI_RE_COAL = 0x8,
};
struct __packed gsi_tre {
uint64_t buffer_ptr;
uint16_t buf_len;
uint16_t resvd1;
uint16_t chain:1;
uint16_t resvd4:7;
uint16_t ieob:1;
uint16_t ieot:1;
uint16_t bei:1;
uint16_t resvd3:5;
uint8_t re_type;
uint8_t resvd2;
};
struct __packed gsi_gci_tre {
uint64_t buffer_ptr:41;
uint64_t resvd1:7;
uint64_t buf_len:16;
uint64_t cookie:40;
uint64_t resvd2:8;
uint64_t re_type:8;
uint64_t resvd3:8;
};
#define GSI_XFER_COMPL_TYPE_GCI 0x28
struct __packed gsi_xfer_compl_evt {
union {
uint64_t xfer_ptr;
struct {
uint64_t cookie:40;
uint64_t resvd1:24;
};
};
uint16_t len;
uint8_t veid;
uint8_t code; /* see gsi_chan_evt */
uint16_t resvd;
uint8_t type;
uint8_t chid;
};
enum gsi_err_type {
GSI_ERR_TYPE_GLOB = 0x1,
GSI_ERR_TYPE_CHAN = 0x2,
GSI_ERR_TYPE_EVT = 0x3,
};
enum gsi_err_code {
GSI_INVALID_TRE_ERR = 0x1,
GSI_OUT_OF_BUFFERS_ERR = 0x2,
GSI_OUT_OF_RESOURCES_ERR = 0x3,
GSI_UNSUPPORTED_INTER_EE_OP_ERR = 0x4,
GSI_EVT_RING_EMPTY_ERR = 0x5,
GSI_NON_ALLOCATED_EVT_ACCESS_ERR = 0x6,
GSI_HWO_1_ERR = 0x8
};
struct __packed gsi_log_err {
uint32_t arg3:4;
uint32_t arg2:4;
uint32_t arg1:4;
uint32_t code:4;
uint32_t resvd:3;
uint32_t virt_idx:5;
uint32_t err_type:4;
uint32_t ee:4;
};
enum gsi_ch_cmd_opcode {
GSI_CH_ALLOCATE = 0x0,
GSI_CH_START = 0x1,
GSI_CH_STOP = 0x2,
GSI_CH_RESET = 0x9,
GSI_CH_DE_ALLOC = 0xa,
GSI_CH_DB_STOP = 0xb,
};
enum gsi_evt_ch_cmd_opcode {
GSI_EVT_ALLOCATE = 0x0,
GSI_EVT_RESET = 0x9,
GSI_EVT_DE_ALLOC = 0xa,
};
enum gsi_generic_ee_cmd_opcode {
GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
GSI_GEN_EE_CMD_ALLOC_CHANNEL = 0x2,
};
enum gsi_generic_ee_cmd_return_code {
GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
GSI_GEN_EE_CMD_RETURN_CODE_RETRY = 0x6,
GSI_GEN_EE_CMD_RETURN_CODE_OUT_OF_RESOURCES = 0x7,
};
extern struct gsi_ctx *gsi_ctx;
void gsi_debugfs_init(void);
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
void gsi_update_ch_dp_stats(struct gsi_chan_ctx *ctx, uint16_t used);
#endif
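gsi.h ends by declaring gsi_find_idx_from_addr(), which maps a ring element's device address back to its ring index. Its implementation lives in gsi.c, whose diff is suppressed above, so the following is only a plausible sketch based on the gsi_ring_ctx fields declared here:

/* Hedged sketch, not the committed implementation: with the
 * gsi_ring_ctx fields above, an element's index is its byte offset
 * from the ring base divided by the element size.
 */
static uint16_t gsi_find_idx_from_addr_sketch(struct gsi_ring_ctx *ctx,
	uint64_t addr)
{
	WARN_ON(addr < ctx->base || addr >= ctx->end);
	return (uint16_t)((addr - ctx->base) / ctx->elem_sz);
}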

744  gsi/gsi_dbg.c  Normal file

@@ -0,0 +1,744 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/msm_gsi.h>
#include "gsi_reg.h"
#include "gsi.h"
#define TERR(fmt, args...) \
pr_err("%s:%d " fmt, __func__, __LINE__, ## args)
#define TDBG(fmt, args...) \
pr_debug("%s:%d " fmt, __func__, __LINE__, ## args)
#define PRT_STAT(fmt, args...) \
pr_err(fmt, ## args)
static struct dentry *dent;
static char dbg_buff[4096];
static void *gsi_ipc_logbuf_low;
static void gsi_wq_print_dp_stats(struct work_struct *work);
static DECLARE_DELAYED_WORK(gsi_print_dp_stats_work, gsi_wq_print_dp_stats);
static void gsi_wq_update_dp_stats(struct work_struct *work);
static DECLARE_DELAYED_WORK(gsi_update_dp_stats_work, gsi_wq_update_dp_stats);
static ssize_t gsi_dump_evt(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
u32 arg1;
u32 arg2;
unsigned long missing;
char *sptr, *token;
uint32_t val;
struct gsi_evt_ctx *ctx;
uint16_t i;
if (count >= sizeof(dbg_buff))
return -EINVAL;
missing = copy_from_user(dbg_buff, buf, count);
if (missing)
return -EFAULT;
dbg_buff[count] = '\0';
sptr = dbg_buff;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &arg1))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &arg2))
return -EINVAL;
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
if (arg1 >= gsi_ctx->max_ev) {
TERR("invalid evt ring id %u\n", arg1);
return -EINVAL;
}
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX0 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX1 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX2 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX3 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX4 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX5 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX6 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX7 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_8_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX8 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_9_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX9 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_10_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX10 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_11_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX11 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_12_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX12 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_CNTXT_13_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d CTX13 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d SCR0 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_EV_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee));
TERR("EV%2d SCR1 0x%x\n", arg1, val);
if (arg2) {
ctx = &gsi_ctx->evtr[arg1];
if (ctx->props.ring_base_vaddr) {
for (i = 0; i < ctx->props.ring_len / 16; i++)
TERR("EV%2d (0x%08llx) %08x %08x %08x %08x\n",
arg1, ctx->props.ring_base_addr + i * 16,
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 0),
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 4),
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 8),
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 12));
} else {
TERR("No VA supplied for event ring id %u\n", arg1);
}
}
return count;
}
static ssize_t gsi_dump_ch(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
u32 arg1;
u32 arg2;
unsigned long missing;
char *sptr, *token;
uint32_t val;
struct gsi_chan_ctx *ctx;
uint16_t i;
if (count >= sizeof(dbg_buff))
return -EINVAL;
missing = copy_from_user(dbg_buff, buf, count);
if (missing)
return -EFAULT;
dbg_buff[count] = '\0';
sptr = dbg_buff;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &arg1))
return -EINVAL;
token = strsep(&sptr, " ");
if (!token)
return -EINVAL;
if (kstrtou32(token, 0, &arg2))
return -EINVAL;
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
if (arg1 >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", arg1);
return -EINVAL;
}
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_0_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX0 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_1_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX1 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_2_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX2 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_3_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX3 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX4 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX5 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX6 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d CTX7 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR_OFFS(arg1,
gsi_ctx->per.ee));
TERR("CH%2d REFRP 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR_OFFS(arg1,
gsi_ctx->per.ee));
TERR("CH%2d REFWP 0x%x\n", arg1, val);
if (gsi_ctx->per.ver >= GSI_VER_2_5) {
val = gsi_readl(gsi_ctx->base +
GSI_V2_5_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee));
} else {
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_QOS_OFFS(arg1, gsi_ctx->per.ee));
}
TERR("CH%2d QOS 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_SCRATCH_0_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d SCR0 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_SCRATCH_1_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d SCR1 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_SCRATCH_2_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d SCR2 0x%x\n", arg1, val);
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_SCRATCH_3_OFFS(arg1, gsi_ctx->per.ee));
TERR("CH%2d SCR3 0x%x\n", arg1, val);
if (arg2) {
ctx = &gsi_ctx->chan[arg1];
if (ctx->props.ring_base_vaddr) {
for (i = 0; i < ctx->props.ring_len / 16; i++)
TERR("CH%2d (0x%08llx) %08x %08x %08x %08x\n",
arg1, ctx->props.ring_base_addr + i * 16,
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 0),
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 4),
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 8),
*(u32 *)((u8 *)ctx->props.ring_base_vaddr +
i * 16 + 12));
} else {
TERR("No VA supplied for chan id %u\n", arg1);
}
}
return count;
}
static void gsi_dump_ch_stats(struct gsi_chan_ctx *ctx)
{
if (!ctx->allocated)
return;
PRT_STAT("CH%2d:\n", ctx->props.ch_id);
PRT_STAT("queued=%lu compl=%lu\n",
ctx->stats.queued,
ctx->stats.completed);
PRT_STAT("cb->poll=%lu poll->cb=%lu poll_pend_irq=%lu\n",
ctx->stats.callback_to_poll,
ctx->stats.poll_to_callback,
ctx->stats.poll_pending_irq);
PRT_STAT("invalid_tre_error=%lu\n",
ctx->stats.invalid_tre_error);
PRT_STAT("poll_ok=%lu poll_empty=%lu\n",
ctx->stats.poll_ok, ctx->stats.poll_empty);
if (ctx->evtr)
PRT_STAT("compl_evt=%lu\n",
ctx->evtr->stats.completed);
PRT_STAT("userdata_in_use=%lu\n", ctx->stats.userdata_in_use);
PRT_STAT("ch_below_lo=%lu\n", ctx->stats.dp.ch_below_lo);
PRT_STAT("ch_below_hi=%lu\n", ctx->stats.dp.ch_below_hi);
PRT_STAT("ch_above_hi=%lu\n", ctx->stats.dp.ch_above_hi);
PRT_STAT("time_empty=%lums\n", ctx->stats.dp.empty_time);
PRT_STAT("\n");
}
static ssize_t gsi_dump_stats(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
int ch_id;
int min, max, ret;
ret = kstrtos32_from_user(buf, count, 0, &ch_id);
if (ret)
return ret;
if (ch_id == -1) {
min = 0;
max = gsi_ctx->max_ch;
} else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
min = ch_id;
max = ch_id + 1;
}
for (ch_id = min; ch_id < max; ch_id++)
gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
return count;
error:
TERR("Usage: echo ch_id > stats. Use -1 for all\n");
return -EINVAL;
}
static int gsi_dbg_create_stats_wq(void)
{
gsi_ctx->dp_stat_wq =
create_singlethread_workqueue("gsi_stat");
if (!gsi_ctx->dp_stat_wq) {
TERR("failed create workqueue\n");
return -ENOMEM;
}
return 0;
}
static void gsi_dbg_destroy_stats_wq(void)
{
cancel_delayed_work_sync(&gsi_update_dp_stats_work);
cancel_delayed_work_sync(&gsi_print_dp_stats_work);
flush_workqueue(gsi_ctx->dp_stat_wq);
destroy_workqueue(gsi_ctx->dp_stat_wq);
gsi_ctx->dp_stat_wq = NULL;
}
static ssize_t gsi_enable_dp_stats(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
int ch_id;
bool enable;
int ret;
if (count >= sizeof(dbg_buff))
goto error;
if (copy_from_user(dbg_buff, buf, count))
goto error;
dbg_buff[count] = '\0';
if (dbg_buff[0] != '+' && dbg_buff[0] != '-')
goto error;
enable = (dbg_buff[0] == '+');
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
if (gsi_ctx->chan[ch_id].enable_dp_stats == enable) {
TERR("ch_%d: already enabled/disabled\n", ch_id);
return -EINVAL;
}
gsi_ctx->chan[ch_id].enable_dp_stats = enable;
if (enable)
gsi_ctx->num_ch_dp_stats++;
else
gsi_ctx->num_ch_dp_stats--;
if (enable) {
if (gsi_ctx->num_ch_dp_stats == 1) {
ret = gsi_dbg_create_stats_wq();
if (ret)
return ret;
}
cancel_delayed_work_sync(&gsi_update_dp_stats_work);
queue_delayed_work(gsi_ctx->dp_stat_wq,
&gsi_update_dp_stats_work, msecs_to_jiffies(10));
} else if (!enable && gsi_ctx->num_ch_dp_stats == 0) {
gsi_dbg_destroy_stats_wq();
}
return count;
error:
TERR("Usage: echo [+-]ch_id > enable_dp_stats\n");
return -EINVAL;
}
static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
u32 ch_id;
u32 max_elem;
unsigned long missing;
char *sptr, *token;
if (count >= sizeof(dbg_buff))
goto error;
missing = copy_from_user(dbg_buff, buf, count);
if (missing)
goto error;
dbg_buff[count] = '\0';
sptr = dbg_buff;
token = strsep(&sptr, " ");
if (!token) {
TERR("\n");
goto error;
}
if (kstrtou32(token, 0, &ch_id)) {
TERR("\n");
goto error;
}
token = strsep(&sptr, " ");
if (!token) {
/* get */
if (kstrtou32(dbg_buff, 0, &ch_id))
goto error;
if (ch_id >= gsi_ctx->max_ch)
goto error;
PRT_STAT("ch %d: max_re_expected=%d\n", ch_id,
gsi_ctx->chan[ch_id].props.max_re_expected);
return count;
}
if (kstrtou32(token, 0, &max_elem)) {
TERR("\n");
goto error;
}
TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem);
if (ch_id >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", ch_id);
goto error;
}
gsi_ctx->chan[ch_id].props.max_re_expected = max_elem;
return count;
error:
TERR("Usage: (set) echo <ch_id> <max_elem> > max_elem_dp_stats\n");
TERR("Usage: (get) echo <ch_id> > max_elem_dp_stats\n");
return -EINVAL;
}
static void gsi_wq_print_dp_stats(struct work_struct *work)
{
int ch_id;
for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].print_dp_stats)
gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
}
queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_print_dp_stats_work,
msecs_to_jiffies(1000));
}
static void gsi_dbg_update_ch_dp_stats(struct gsi_chan_ctx *ctx)
{
uint16_t start_hw;
uint16_t end_hw;
uint64_t rp_hw;
uint64_t wp_hw;
int ee = gsi_ctx->per.ee;
uint16_t used_hw;
rp_hw = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_4_OFFS(ctx->props.ch_id, ee));
rp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_5_OFFS(ctx->props.ch_id, ee)))
<< 32;
wp_hw = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_6_OFFS(ctx->props.ch_id, ee));
wp_hw |= ((uint64_t)gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_CH_k_CNTXT_7_OFFS(ctx->props.ch_id, ee)))
<< 32;
start_hw = gsi_find_idx_from_addr(&ctx->ring, rp_hw);
end_hw = gsi_find_idx_from_addr(&ctx->ring, wp_hw);
if (end_hw >= start_hw)
used_hw = end_hw - start_hw;
else
used_hw = ctx->ring.max_num_elem + 1 - (start_hw - end_hw);
TDBG("ch %d used %d\n", ctx->props.ch_id, used_hw);
gsi_update_ch_dp_stats(ctx, used_hw);
}
static void gsi_wq_update_dp_stats(struct work_struct *work)
{
int ch_id;
for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].allocated &&
gsi_ctx->chan[ch_id].enable_dp_stats)
gsi_dbg_update_ch_dp_stats(&gsi_ctx->chan[ch_id]);
}
queue_delayed_work(gsi_ctx->dp_stat_wq, &gsi_update_dp_stats_work,
msecs_to_jiffies(10));
}
static ssize_t gsi_rst_stats(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
int ch_id;
int min, max, ret;
ret = kstrtos32_from_user(buf, count, 0, &ch_id);
if (ret)
return ret;
if (ch_id == -1) {
min = 0;
max = gsi_ctx->max_ch;
} else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
min = ch_id;
max = ch_id + 1;
}
for (ch_id = min; ch_id < max; ch_id++)
memset(&gsi_ctx->chan[ch_id].stats, 0,
sizeof(gsi_ctx->chan[ch_id].stats));
return count;
error:
TERR("Usage: echo ch_id > rst_stats. Use -1 for all\n");
return -EINVAL;
}
static ssize_t gsi_print_dp_stats(struct file *file,
const char __user *buf, size_t count, loff_t *ppos)
{
int ch_id;
bool enable;
int ret;
if (count >= sizeof(dbg_buff))
goto error;
if (copy_from_user(dbg_buff, buf, count))
goto error;
dbg_buff[count] = '\0';
if (dbg_buff[0] != '+' && dbg_buff[0] != '-')
goto error;
enable = (dbg_buff[0] == '+');
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
if (gsi_ctx->chan[ch_id].print_dp_stats == enable) {
TERR("ch_%d: already enabled/disabled\n", ch_id);
return -EINVAL;
}
gsi_ctx->chan[ch_id].print_dp_stats = enable;
if (enable)
gsi_ctx->num_ch_dp_stats++;
else
gsi_ctx->num_ch_dp_stats--;
if (enable) {
if (gsi_ctx->num_ch_dp_stats == 1) {
ret = gsi_dbg_create_stats_wq();
if (ret)
return ret;
}
cancel_delayed_work_sync(&gsi_print_dp_stats_work);
queue_delayed_work(gsi_ctx->dp_stat_wq,
&gsi_print_dp_stats_work, msecs_to_jiffies(10));
} else if (!enable && gsi_ctx->num_ch_dp_stats == 0) {
gsi_dbg_destroy_stats_wq();
}
return count;
error:
TERR("Usage: echo [+-]ch_id > print_dp_stats\n");
return -EINVAL;
}
static ssize_t gsi_enable_ipc_low(struct file *file,
const char __user *ubuf, size_t count, loff_t *ppos)
{
s8 option = 0;
int ret;
ret = kstrtos8_from_user(ubuf, count, 0, &option);
if (ret)
return ret;
mutex_lock(&gsi_ctx->mlock);
if (option) {
if (!gsi_ipc_logbuf_low) {
gsi_ipc_logbuf_low =
ipc_log_context_create(GSI_IPC_LOG_PAGES,
"gsi_low", 0);
if (gsi_ipc_logbuf_low == NULL)
TERR("failed to get ipc_logbuf_low\n");
}
gsi_ctx->ipc_logbuf_low = gsi_ipc_logbuf_low;
} else {
gsi_ctx->ipc_logbuf_low = NULL;
}
mutex_unlock(&gsi_ctx->mlock);
return count;
}
const struct file_operations gsi_ev_dump_ops = {
.write = gsi_dump_evt,
};
const struct file_operations gsi_ch_dump_ops = {
.write = gsi_dump_ch,
};
const struct file_operations gsi_stats_ops = {
.write = gsi_dump_stats,
};
const struct file_operations gsi_enable_dp_stats_ops = {
.write = gsi_enable_dp_stats,
};
const struct file_operations gsi_max_elem_dp_stats_ops = {
.write = gsi_set_max_elem_dp_stats,
};
const struct file_operations gsi_rst_stats_ops = {
.write = gsi_rst_stats,
};
const struct file_operations gsi_print_dp_stats_ops = {
.write = gsi_print_dp_stats,
};
const struct file_operations gsi_ipc_low_ops = {
.write = gsi_enable_ipc_low,
};
void gsi_debugfs_init(void)
{
static struct dentry *dfile;
const mode_t write_only_mode = 0220;
dent = debugfs_create_dir("gsi", 0);
if (IS_ERR(dent)) {
TERR("fail to create dir\n");
return;
}
dfile = debugfs_create_file("ev_dump", write_only_mode,
dent, 0, &gsi_ev_dump_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("fail to create ev_dump file\n");
goto fail;
}
dfile = debugfs_create_file("ch_dump", write_only_mode,
dent, 0, &gsi_ch_dump_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("fail to create ch_dump file\n");
goto fail;
}
dfile = debugfs_create_file("stats", write_only_mode, dent,
0, &gsi_stats_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("fail to create stats file\n");
goto fail;
}
dfile = debugfs_create_file("enable_dp_stats", write_only_mode, dent,
0, &gsi_enable_dp_stats_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("fail to create stats file\n");
goto fail;
}
dfile = debugfs_create_file("max_elem_dp_stats", write_only_mode,
dent, 0, &gsi_max_elem_dp_stats_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("fail to create stats file\n");
goto fail;
}
dfile = debugfs_create_file("rst_stats", write_only_mode,
dent, 0, &gsi_rst_stats_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("fail to create stats file\n");
goto fail;
}
dfile = debugfs_create_file("print_dp_stats",
write_only_mode, dent, 0, &gsi_print_dp_stats_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("fail to create stats file\n");
goto fail;
}
dfile = debugfs_create_file("ipc_low", write_only_mode,
dent, 0, &gsi_ipc_low_ops);
if (!dfile || IS_ERR(dfile)) {
TERR("could not create ipc_low\n");
goto fail;
}
return;
fail:
debugfs_remove_recursive(dent);
}
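Every node registered above is a write-only debugfs file built from the same skeleton: parse the user string, act on it, and return count to consume the write. A stripped-down sketch of that skeleton (the names are illustrative, not part of the commit):

/* Minimal sketch of the write-only debugfs pattern used throughout
 * this file; names here are illustrative.
 */
static ssize_t gsi_example_write(struct file *file,
	const char __user *buf, size_t count, loff_t *ppos)
{
	s32 val;
	int ret;

	ret = kstrtos32_from_user(buf, count, 0, &val);
	if (ret)
		return ret;
	/* ... act on val, e.g. select a channel id ... */
	return count;	/* consume the entire write */
}

static const struct file_operations gsi_example_ops = {
	.write = gsi_example_write,
};

From userspace the nodes are driven with echo, per the usage strings above — for example writing -1 to the stats node under the gsi debugfs directory (typically /sys/kernel/debug/gsi) dumps statistics for every allocated channel.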

227  gsi/gsi_emulation.c  Normal file

@@ -0,0 +1,227 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include "gsi_emulation.h"
/*
* *****************************************************************************
* The following used to set up the EMULATION interrupt controller...
* *****************************************************************************
*/
int setup_emulator_cntrlr(
void __iomem *intcntrlr_base,
u32 intcntrlr_mem_size)
{
uint32_t val, ver, intrCnt, rangeCnt, range;
val = gsi_emu_readl(intcntrlr_base + GE_INT_CTL_VER_CNT);
intrCnt = val & 0xFFFF;
ver = (val >> 16) & 0xFFFF;
rangeCnt = intrCnt / 32;
GSIDBG(
"CTL_VER_CNT reg val(0x%x) intr cnt(%u) cntrlr ver(0x%x) rangeCnt(%u)\n",
val, intrCnt, ver, rangeCnt);
/*
* Verify the interrupt controller version
*/
if (ver == 0 || ver == 0xFFFF || ver < DEO_IC_INT_CTL_VER_MIN) {
GSIERR(
"Error: invalid interrupt controller version 0x%x\n",
ver);
return -GSI_STATUS_INVALID_PARAMS;
}
/*
* Verify the interrupt count
*
* NOTE: intrCnt must be at least one block and multiple of 32
*/
if ((intrCnt % 32) != 0) {
GSIERR(
"Invalid interrupt count read from HW 0x%04x\n",
intrCnt);
return -GSI_STATUS_ERROR;
}
/*
* Calculate number of ranges used, each range handles 32 int lines
*/
if (rangeCnt > DEO_IC_MAX_RANGE_CNT) {
GSIERR(
"SW interrupt limit(%u) passed, increase DEO_IC_MAX_RANGE_CNT(%u)\n",
rangeCnt,
DEO_IC_MAX_RANGE_CNT);
return -GSI_STATUS_ERROR;
}
/*
* Let's take the last register offset minus the first
* register offset (ie. range) and compare it to the interrupt
* controller's dtsi defined memory size. The range better
* fit within the size.
*/
val = GE_SOFT_INT_n(rangeCnt-1) - GE_INT_CTL_VER_CNT;
if (val > intcntrlr_mem_size) {
GSIERR(
"Interrupt controller register range (%u) exceeds dtsi provisioned size (%u)\n",
val, intcntrlr_mem_size);
return -GSI_STATUS_ERROR;
}
/*
* The following will disable the emulators interrupt controller,
* so that we can config it...
*/
GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
gsi_emu_writel(
0x0,
intcntrlr_base + GE_INT_MASTER_ENABLE);
/*
* Init register maps of all ranges
*/
for (range = 0; range < rangeCnt; range++) {
/*
* Disable all int sources by setting all enable clear bits
*/
GSIDBG("Writing GE_INT_ENABLE_CLEAR_n(%u)\n", range);
gsi_emu_writel(
0xFFFFFFFF,
intcntrlr_base + GE_INT_ENABLE_CLEAR_n(range));
/*
* Clear all raw statuses
*/
GSIDBG("Writing GE_INT_CLEAR_n(%u)\n", range);
gsi_emu_writel(
0xFFFFFFFF,
intcntrlr_base + GE_INT_CLEAR_n(range));
/*
* Init all int types
*/
GSIDBG("Writing GE_INT_TYPE_n(%u)\n", range);
gsi_emu_writel(
0x0,
intcntrlr_base + GE_INT_TYPE_n(range));
}
/*
* The following tells the interrupt controller to interrupt us
* when it sees interrupts from ipa and/or gsi.
*
* Interrupts:
* ===================================================================
* DUT0 [ 63 : 16 ]
* ipa_irq [ 3 : 0 ] <---HERE
* ipa_gsi_bam_irq [ 7 : 4 ] <---HERE
* ipa_bam_apu_sec_error_irq [ 8 ]
* ipa_bam_apu_non_sec_error_irq [ 9 ]
* ipa_bam_xpu2_msa_intr [ 10 ]
* ipa_vmidmt_nsgcfgirpt [ 11 ]
* ipa_vmidmt_nsgirpt [ 12 ]
* ipa_vmidmt_gcfgirpt [ 13 ]
* ipa_vmidmt_girpt [ 14 ]
* bam_xpu3_qad_non_secure_intr_sp [ 15 ]
*/
GSIDBG("Writing GE_INT_ENABLE_n(0)\n");
gsi_emu_writel(
0x00FF, /* See <---HERE above */
intcntrlr_base + GE_INT_ENABLE_n(0));
/*
* The following will enable the IC post config...
*/
GSIDBG("Writing GE_INT_MASTER_ENABLE\n");
gsi_emu_writel(
0x1,
intcntrlr_base + GE_INT_MASTER_ENABLE);
return 0;
}
/*
* *****************************************************************************
* The following for EMULATION hard irq...
* *****************************************************************************
*/
irqreturn_t emulator_hard_irq_isr(
int irq,
void *ctxt)
{
struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
uint32_t val;
val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_INT_MASTER_STATUS);
/*
* If bit zero is set, interrupt is for us, hence return IRQ_NONE
* when it's not set...
*/
if (!(val & 0x00000001))
return IRQ_NONE;
/*
* The following will mask (ie. turn off) future interrupts from
* the emulator's interrupt controller. It will stay this way until
* we turn back on...which will be done in the bottom half
* (ie. emulator_soft_irq_isr)...
*/
gsi_emu_writel(
0x0,
gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
return IRQ_WAKE_THREAD;
}
/*
* *****************************************************************************
* The following for EMULATION soft irq...
* *****************************************************************************
*/
irqreturn_t emulator_soft_irq_isr(
int irq,
void *ctxt)
{
struct gsi_ctx *gsi_ctx_ptr = (struct gsi_ctx *) ctxt;
irqreturn_t retVal = IRQ_HANDLED;
uint32_t val;
val = gsi_emu_readl(gsi_ctx_ptr->intcntrlr_base + GE_IRQ_STATUS_n(0));
GSIDBG("Got irq(%d) with status(0x%08X)\n", irq, val);
if (val & 0xF0 && gsi_ctx_ptr->intcntrlr_gsi_isr) {
GSIDBG("Got gsi interrupt\n");
retVal = gsi_ctx_ptr->intcntrlr_gsi_isr(irq, ctxt);
}
if (val & 0x0F && gsi_ctx_ptr->intcntrlr_client_isr) {
GSIDBG("Got ipa interrupt\n");
retVal = gsi_ctx_ptr->intcntrlr_client_isr(irq, 0);
}
/*
* The following will clear the interrupts...
*/
gsi_emu_writel(
0xFFFFFFFF,
gsi_ctx_ptr->intcntrlr_base + GE_INT_CLEAR_n(0));
/*
* The following will unmask (ie. turn on) future interrupts from
* the emulator's interrupt controller...
*/
gsi_emu_writel(
0x1,
gsi_ctx_ptr->intcntrlr_base + GE_INT_OUT_ENABLE);
return retVal;
}
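emulator_hard_irq_isr() returning IRQ_WAKE_THREAD only takes effect when the pair is registered as a threaded interrupt with the soft ISR as the thread function; the actual registration happens outside this file (in gsi.c, whose diff is suppressed). A hedged sketch of that registration, with placeholder flags and device name:

#include <linux/interrupt.h>

/* Hedged sketch: register the hard/soft ISR pair as a threaded
 * interrupt. IRQF_SHARED and the device name are placeholders.
 */
static int gsi_emu_request_irq_sketch(struct gsi_ctx *ctx, unsigned int irq)
{
	return request_threaded_irq(irq,
			emulator_hard_irq_isr,  /* top half: mask and wake */
			emulator_soft_irq_isr,  /* threaded half: dispatch */
			IRQF_SHARED,
			"gsi-emulation",
			ctx);
}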

186  gsi/gsi_emulation.h  Normal file

@@ -0,0 +1,186 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_GSI_EMULATION_H_)
# define _GSI_EMULATION_H_
# include <linux/interrupt.h>
# include "gsi.h"
# include "gsi_reg.h"
# include "gsi_emulation_stubs.h"
# define gsi_emu_readl(c) (readl(c))
# define gsi_emu_writel(v, c) ({ __iowmb(); writel_relaxed((v), (c)); })
# define CNTRLR_BASE 0
/*
* The following file contains definitions and declarations that are
* germane only to the IPA emulation system, which is run from an X86
* environment. Declarations for non-X86 (i.e. arm) are merely stubs
* to facilitate compile and link.
*
* Interrupt controller registers.
* Descriptions taken from the EMULATION interrupt controller SWI.
* - There is only one Master Enable register
* - Each group of 32 interrupt lines (range) is controlled by 8 registers,
* which are consecutive in memory:
* GE_INT_ENABLE_n
* GE_INT_ENABLE_CLEAR_n
* GE_INT_ENABLE_SET_n
* GE_INT_TYPE_n
* GE_IRQ_STATUS_n
* GE_RAW_STATUS_n
* GE_INT_CLEAR_n
* GE_SOFT_INT_n
* - After the above 8 registers, there are the registers of the next
* group (range) of 32 interrupt lines, and so on.
*/
/** @brief The interrupt controller version and interrupt count register.
* Specifies interrupt controller version (upper 16 bits) and the
* number of interrupt lines supported by HW (lower 16 bits).
*/
# define GE_INT_CTL_VER_CNT \
(CNTRLR_BASE + 0x0000)
/** @brief Enable or disable physical IRQ output signal to the system,
* not affecting any status registers.
*
* 0x0 : DISABLE IRQ output disabled
* 0x1 : ENABLE IRQ output enabled
*/
# define GE_INT_OUT_ENABLE \
(CNTRLR_BASE + 0x0004)
/** @brief The IRQ master enable register.
* Bit #0: IRQ_ENABLE, set 0 to disable, 1 to enable.
*/
# define GE_INT_MASTER_ENABLE \
(CNTRLR_BASE + 0x0008)
# define GE_INT_MASTER_STATUS \
(CNTRLR_BASE + 0x000C)
/** @brief Each bit disables (bit=0, default) or enables (bit=1) the
* corresponding interrupt source
*/
# define GE_INT_ENABLE_n(n) \
(CNTRLR_BASE + 0x0010 + 0x20 * (n))
/** @brief Write bit=1 to clear (to 0) the corresponding bit(s) in INT_ENABLE.
* Does nothing for bit=0
*/
# define GE_INT_ENABLE_CLEAR_n(n) \
(CNTRLR_BASE + 0x0014 + 0x20 * (n))
/** @brief Write bit=1 to set (to 1) the corresponding bit(s) in INT_ENABLE.
* Does nothing for bit=0
*/
# define GE_INT_ENABLE_SET_n(n) \
(CNTRLR_BASE + 0x0018 + 0x20 * (n))
/** @brief Select level (bit=0, default) or edge (bit=1) sensitive input
* detection logic for each corresponding interrupt source
*/
# define GE_INT_TYPE_n(n) \
(CNTRLR_BASE + 0x001C + 0x20 * (n))
/** @brief Shows the interrupt sources captured in RAW_STATUS that have been
* steered to irq_n by INT_SELECT. Interrupts must also be enabled by
* INT_ENABLE and MASTER_ENABLE. Read only register.
* Bit values: 1=active, 0=inactive
*/
# define GE_IRQ_STATUS_n(n) \
(CNTRLR_BASE + 0x0020 + 0x20 * (n))
/** @brief Shows the interrupt sources that have been latched by the input
* logic of the Interrupt Controller. Read only register.
* Bit values: 1=active, 0=inactive
*/
# define GE_RAW_STATUS_n(n) \
(CNTRLR_BASE + 0x0024 + 0x20 * (n))
/** @brief Write bit=1 to clear the corresponding bit(s) in RAW_STATUS.
* Does nothing for bit=0
*/
# define GE_INT_CLEAR_n(n) \
(CNTRLR_BASE + 0x0028 + 0x20 * (n))
/** @brief Write bit=1 to set the corresponding bit(s) in RAW_STATUS.
* Does nothing for bit=0.
* @note Only functional for edge detected interrupts
*/
# define GE_SOFT_INT_n(n) \
(CNTRLR_BASE + 0x002C + 0x20 * (n))
/** @brief Maximal number of ranges in SW. Each range supports 32 interrupt
* lines. If HW is extended considerably, increase this value
*/
# define DEO_IC_MAX_RANGE_CNT 8
/** @brief Size of the registers of one range in memory, in bytes */
# define DEO_IC_RANGE_MEM_SIZE 32 /* SWI: 8 registers, no gaps */
/** @brief Minimal Interrupt controller HW version */
# define DEO_IC_INT_CTL_VER_MIN 0x0102
#if defined(CONFIG_IPA_EMULATION) /* declarations to follow */
/*
* *****************************************************************************
* The following used to set up the EMULATION interrupt controller...
* *****************************************************************************
*/
int setup_emulator_cntrlr(
void __iomem *intcntrlr_base,
u32 intcntrlr_mem_size);
/*
* *****************************************************************************
* The following for EMULATION hard irq...
* *****************************************************************************
*/
irqreturn_t emulator_hard_irq_isr(
int irq,
void *ctxt);
/*
* *****************************************************************************
* The following for EMULATION soft irq...
* *****************************************************************************
*/
irqreturn_t emulator_soft_irq_isr(
int irq,
void *ctxt);
# else /* #if !defined(CONFIG_IPA_EMULATION) then definitions to follow */
static inline int setup_emulator_cntrlr(
void __iomem *intcntrlr_base,
u32 intcntrlr_mem_size)
{
return 0;
}
static inline irqreturn_t emulator_hard_irq_isr(
int irq,
void *ctxt)
{
return IRQ_NONE;
}
static inline irqreturn_t emulator_soft_irq_isr(
int irq,
void *ctxt)
{
return IRQ_HANDLED;
}
# endif /* #if defined(CONFIG_IPA_EMULATION) */
#endif /* #if !defined(_GSI_EMULATION_H_) */

12  gsi/gsi_emulation_stubs.h  Normal file

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_GSI_EMULATION_STUBS_H_)
# define _GSI_EMULATION_STUBS_H_
# include <asm/barrier.h>
# define __iowmb() wmb() /* used in gsi.h */
#endif /* #if !defined(_GSI_EMULATION_STUBS_H_) */

30  gsi/gsi_reg.h  Normal file

@@ -0,0 +1,30 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#ifndef __GSI_REG_H__
#define __GSI_REG_H__
enum gsi_register_ver {
GSI_REGISTER_VER_1 = 0,
GSI_REGISTER_VER_2 = 1,
GSI_REGISTER_MAX,
};
#ifdef GSI_REGISTER_VER_CURRENT
#error GSI_REGISTER_VER_CURRENT already defined
#endif
#ifdef CONFIG_GSI_REGISTER_VERSION_2
#include "gsi_reg_v2.h"
#define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_2
#endif
/* The default is V1 */
#ifndef GSI_REGISTER_VER_CURRENT
#include "gsi_reg_v1.h"
#define GSI_REGISTER_VER_CURRENT GSI_REGISTER_VER_1
#endif
#endif /* __GSI_REG_H__ */
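The register map is thus fixed at compile time: CONFIG_GSI_REGISTER_VERSION_2, which both the kona and lito confs set, selects gsi_reg_v2.h, and anything else falls back to gsi_reg_v1.h, with GSI_REGISTER_VER_CURRENT recording the choice. Because the value is a constant, sanity checks can be static; an illustrative sketch:

#include <linux/build_bug.h>
#include "gsi_reg.h"

/* Illustrative sketch: the selected register generation is a
 * compile-time constant and can be validated statically.
 */
static inline void gsi_reg_ver_sanity_sketch(void)
{
	BUILD_BUG_ON(GSI_REGISTER_VER_CURRENT >= GSI_REGISTER_MAX);
}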

1098  gsi/gsi_reg_v1.h  Normal file

File diff suppressed because it is too large

1099  gsi/gsi_reg_v2.h  Normal file

File diff suppressed because it is too large

6  ipa/Makefile  Normal file

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IPA3) += ipa_v3/ ipa_clients/ ipa_common
obj-$(CONFIG_IPA_UT) += test/
ipa_common += ipa_api.o ipa_rm.o ipa_rm_dependency_graph.o ipa_rm_peers_list.o ipa_rm_resource.o ipa_rm_inactivity_timer.o

3760  ipa/ipa_api.c  Normal file

File diff suppressed because it is too large

496  ipa/ipa_api.h  Normal file

@@ -0,0 +1,496 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa_mhi.h>
#include <linux/ipa_uc_offload.h>
#include <linux/ipa_wdi3.h>
#include "ipa_common_i.h"
#ifndef _IPA_API_H_
#define _IPA_API_H_
struct ipa_api_controller {
int (*ipa_reset_endpoint)(u32 clnt_hdl);
int (*ipa_clear_endpoint_delay)(u32 clnt_hdl);
int (*ipa_disable_endpoint)(u32 clnt_hdl);
int (*ipa_cfg_ep)(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg);
int (*ipa_cfg_ep_nat)(u32 clnt_hdl,
const struct ipa_ep_cfg_nat *ipa_ep_cfg);
int (*ipa_cfg_ep_conn_track)(u32 clnt_hdl,
const struct ipa_ep_cfg_conn_track *ipa_ep_cfg);
int (*ipa_cfg_ep_hdr)(u32 clnt_hdl,
const struct ipa_ep_cfg_hdr *ipa_ep_cfg);
int (*ipa_cfg_ep_hdr_ext)(u32 clnt_hdl,
const struct ipa_ep_cfg_hdr_ext *ipa_ep_cfg);
int (*ipa_cfg_ep_mode)(u32 clnt_hdl,
const struct ipa_ep_cfg_mode *ipa_ep_cfg);
int (*ipa_cfg_ep_aggr)(u32 clnt_hdl,
const struct ipa_ep_cfg_aggr *ipa_ep_cfg);
int (*ipa_cfg_ep_deaggr)(u32 clnt_hdl,
const struct ipa_ep_cfg_deaggr *ipa_ep_cfg);
int (*ipa_cfg_ep_route)(u32 clnt_hdl,
const struct ipa_ep_cfg_route *ipa_ep_cfg);
int (*ipa_cfg_ep_holb)(u32 clnt_hdl,
const struct ipa_ep_cfg_holb *ipa_ep_cfg);
int (*ipa_cfg_ep_cfg)(u32 clnt_hdl,
const struct ipa_ep_cfg_cfg *ipa_ep_cfg);
int (*ipa_cfg_ep_metadata_mask)(u32 clnt_hdl,
const struct ipa_ep_cfg_metadata_mask *ipa_ep_cfg);
int (*ipa_cfg_ep_holb_by_client)(enum ipa_client_type client,
const struct ipa_ep_cfg_holb *ipa_ep_cfg);
int (*ipa_cfg_ep_ctrl)(u32 clnt_hdl,
const struct ipa_ep_cfg_ctrl *ep_ctrl);
int (*ipa_add_hdr)(struct ipa_ioc_add_hdr *hdrs);
int (*ipa_add_hdr_usr)(struct ipa_ioc_add_hdr *hdrs, bool user_only);
int (*ipa_del_hdr)(struct ipa_ioc_del_hdr *hdls);
int (*ipa_commit_hdr)(void);
int (*ipa_reset_hdr)(bool user_only);
int (*ipa_get_hdr)(struct ipa_ioc_get_hdr *lookup);
int (*ipa_put_hdr)(u32 hdr_hdl);
int (*ipa_copy_hdr)(struct ipa_ioc_copy_hdr *copy);
int (*ipa_add_hdr_proc_ctx)(struct ipa_ioc_add_hdr_proc_ctx *proc_ctxs,
bool user_only);
int (*ipa_del_hdr_proc_ctx)(struct ipa_ioc_del_hdr_proc_ctx *hdls);
int (*ipa_add_rt_rule)(struct ipa_ioc_add_rt_rule *rules);
int (*ipa_add_rt_rule_v2)(struct ipa_ioc_add_rt_rule_v2 *rules);
int (*ipa_add_rt_rule_usr)(struct ipa_ioc_add_rt_rule *rules,
bool user_only);
int (*ipa_add_rt_rule_usr_v2)(struct ipa_ioc_add_rt_rule_v2 *rules,
bool user_only);
int (*ipa_del_rt_rule)(struct ipa_ioc_del_rt_rule *hdls);
int (*ipa_commit_rt)(enum ipa_ip_type ip);
int (*ipa_reset_rt)(enum ipa_ip_type ip, bool user_only);
int (*ipa_get_rt_tbl)(struct ipa_ioc_get_rt_tbl *lookup);
int (*ipa_put_rt_tbl)(u32 rt_tbl_hdl);
int (*ipa_query_rt_index)(struct ipa_ioc_get_rt_tbl_indx *in);
int (*ipa_mdfy_rt_rule)(struct ipa_ioc_mdfy_rt_rule *rules);
int (*ipa_mdfy_rt_rule_v2)(struct ipa_ioc_mdfy_rt_rule_v2 *rules);
int (*ipa_add_flt_rule)(struct ipa_ioc_add_flt_rule *rules);
int (*ipa_add_flt_rule_v2)(struct ipa_ioc_add_flt_rule_v2 *rules);
int (*ipa_add_flt_rule_usr)(struct ipa_ioc_add_flt_rule *rules,
bool user_only);
int (*ipa_add_flt_rule_usr_v2)
(struct ipa_ioc_add_flt_rule_v2 *rules, bool user_only);
int (*ipa_del_flt_rule)(struct ipa_ioc_del_flt_rule *hdls);
int (*ipa_mdfy_flt_rule)(struct ipa_ioc_mdfy_flt_rule *rules);
int (*ipa_mdfy_flt_rule_v2)(struct ipa_ioc_mdfy_flt_rule_v2 *rules);
int (*ipa_commit_flt)(enum ipa_ip_type ip);
int (*ipa_reset_flt)(enum ipa_ip_type ip, bool user_only);
int (*ipa_allocate_nat_device)(struct ipa_ioc_nat_alloc_mem *mem);
int (*ipa_allocate_nat_table)(
struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
int (*ipa_allocate_ipv6ct_table)(
struct ipa_ioc_nat_ipv6ct_table_alloc *table_alloc);
int (*ipa_nat_init_cmd)(struct ipa_ioc_v4_nat_init *init);
int (*ipa_ipv6ct_init_cmd)(struct ipa_ioc_ipv6ct_init *init);
int (*ipa_nat_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
int (*ipa_table_dma_cmd)(struct ipa_ioc_nat_dma_cmd *dma);
int (*ipa_nat_del_cmd)(struct ipa_ioc_v4_nat_del *del);
int (*ipa_del_nat_table)(struct ipa_ioc_nat_ipv6ct_table_del *del);
int (*ipa_del_ipv6ct_table)(struct ipa_ioc_nat_ipv6ct_table_del *del);
int (*ipa_nat_mdfy_pdn)(struct ipa_ioc_nat_pdn_entry *mdfy_pdn);
int (*ipa_send_msg)(struct ipa_msg_meta *meta, void *buff,
ipa_msg_free_fn callback);
int (*ipa_register_pull_msg)(struct ipa_msg_meta *meta,
ipa_msg_pull_fn callback);
int (*ipa_deregister_pull_msg)(struct ipa_msg_meta *meta);
int (*ipa_register_intf)(const char *name,
const struct ipa_tx_intf *tx,
const struct ipa_rx_intf *rx);
int (*ipa_register_intf_ext)(const char *name,
const struct ipa_tx_intf *tx,
const struct ipa_rx_intf *rx,
const struct ipa_ext_intf *ext);
int (*ipa_deregister_intf)(const char *name);
int (*ipa_set_aggr_mode)(enum ipa_aggr_mode mode);
int (*ipa_set_qcncm_ndp_sig)(char sig[3]);
int (*ipa_set_single_ndp_per_mbim)(bool enable);
int (*ipa_tx_dp)(enum ipa_client_type dst, struct sk_buff *skb,
struct ipa_tx_meta *metadata);
int (*ipa_tx_dp_mul)(enum ipa_client_type dst,
struct ipa_tx_data_desc *data_desc);
void (*ipa_free_skb)(struct ipa_rx_data *data);
int (*ipa_setup_sys_pipe)(struct ipa_sys_connect_params *sys_in,
u32 *clnt_hdl);
int (*ipa_teardown_sys_pipe)(u32 clnt_hdl);
int (*ipa_sys_setup)(struct ipa_sys_connect_params *sys_in,
unsigned long *ipa_bam_hdl,
u32 *ipa_pipe_num, u32 *clnt_hdl, bool en_status);
int (*ipa_sys_teardown)(u32 clnt_hdl);
int (*ipa_sys_update_gsi_hdls)(u32 clnt_hdl, unsigned long gsi_ch_hdl,
unsigned long gsi_ev_hdl);
int (*ipa_connect_wdi_pipe)(struct ipa_wdi_in_params *in,
struct ipa_wdi_out_params *out);
int (*ipa_disconnect_wdi_pipe)(u32 clnt_hdl);
int (*ipa_enable_wdi_pipe)(u32 clnt_hdl);
int (*ipa_disable_wdi_pipe)(u32 clnt_hdl);
int (*ipa_resume_wdi_pipe)(u32 clnt_hdl);
int (*ipa_suspend_wdi_pipe)(u32 clnt_hdl);
int (*ipa_get_wdi_stats)(struct IpaHwStatsWDIInfoData_t *stats);
u16 (*ipa_get_smem_restr_bytes)(void);
int (*ipa_broadcast_wdi_quota_reach_ind)(uint32_t fid,
uint64_t num_bytes);
int (*ipa_uc_wdi_get_dbpa)(struct ipa_wdi_db_params *out);
int (*ipa_uc_reg_rdyCB)(struct ipa_wdi_uc_ready_params *param);
int (*ipa_uc_dereg_rdyCB)(void);
int (*teth_bridge_init)(struct teth_bridge_init_params *params);
int (*teth_bridge_disconnect)(enum ipa_client_type client);
int (*teth_bridge_connect)(
struct teth_bridge_connect_params *connect_params);
void (*ipa_set_client)(
int index, enum ipacm_client_enum client, bool uplink);
enum ipacm_client_enum (*ipa_get_client)(int pipe_idx);
bool (*ipa_get_client_uplink)(int pipe_idx);
int (*ipa_dma_init)(void);
int (*ipa_dma_enable)(void);
int (*ipa_dma_disable)(void);
int (*ipa_dma_sync_memcpy)(u64 dest, u64 src, int len);
int (*ipa_dma_async_memcpy)(u64 dest, u64 src, int len,
void (*user_cb)(void *user1), void *user_param);
int (*ipa_dma_uc_memcpy)(phys_addr_t dest, phys_addr_t src, int len);
void (*ipa_dma_destroy)(void);
bool (*ipa_has_open_aggr_frame)(enum ipa_client_type client);
int (*ipa_generate_tag_process)(void);
int (*ipa_disable_sps_pipe)(enum ipa_client_type client);
void (*ipa_set_tag_process_before_gating)(bool val);
int (*ipa_mhi_init_engine)(struct ipa_mhi_init_engine *params);
int (*ipa_connect_mhi_pipe)(struct ipa_mhi_connect_params_internal *in,
u32 *clnt_hdl);
int (*ipa_disconnect_mhi_pipe)(u32 clnt_hdl);
bool (*ipa_mhi_stop_gsi_channel)(enum ipa_client_type client);
int (*ipa_qmi_disable_force_clear)(u32 request_id);
int (*ipa_qmi_enable_force_clear_datapath_send)(
struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
int (*ipa_qmi_disable_force_clear_datapath_send)(
struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
bool (*ipa_mhi_sps_channel_empty)(enum ipa_client_type client);
int (*ipa_mhi_reset_channel_internal)(enum ipa_client_type client);
int (*ipa_mhi_start_channel_internal)(enum ipa_client_type client);
void (*ipa_get_holb)(int ep_idx, struct ipa_ep_cfg_holb *holb);
int (*ipa_mhi_query_ch_info)(enum ipa_client_type client,
struct gsi_chan_info *ch_info);
int (*ipa_mhi_resume_channels_internal)(
enum ipa_client_type client,
bool LPTransitionRejected,
bool brstmode_enabled,
union __packed gsi_channel_scratch ch_scratch,
u8 index);
int (*ipa_mhi_destroy_channel)(enum ipa_client_type client);
int (*ipa_uc_mhi_send_dl_ul_sync_info)
(union IpaHwMhiDlUlSyncCmdData_t *cmd);
int (*ipa_uc_mhi_init)
(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
void (*ipa_uc_mhi_cleanup)(void);
int (*ipa_uc_mhi_print_stats)(char *dbg_buff, int size);
int (*ipa_uc_mhi_reset_channel)(int channelHandle);
int (*ipa_uc_mhi_suspend_channel)(int channelHandle);
int (*ipa_uc_mhi_stop_event_update_channel)(int channelHandle);
int (*ipa_uc_state_check)(void);
int (*ipa_write_qmap_id)(struct ipa_ioc_write_qmapid *param_in);
int (*ipa_add_interrupt_handler)(enum ipa_irq_type interrupt,
ipa_irq_handler_t handler,
bool deferred_flag,
void *private_data);
int (*ipa_remove_interrupt_handler)(enum ipa_irq_type interrupt);
int (*ipa_restore_suspend_handler)(void);
void (*ipa_bam_reg_dump)(void);
int (*ipa_get_ep_mapping)(enum ipa_client_type client);
bool (*ipa_is_ready)(void);
void (*ipa_proxy_clk_vote)(void);
void (*ipa_proxy_clk_unvote)(void);
bool (*ipa_is_client_handle_valid)(u32 clnt_hdl);
enum ipa_client_type (*ipa_get_client_mapping)(int pipe_idx);
enum ipa_rm_resource_name (*ipa_get_rm_resource_from_ep)(int pipe_idx);
bool (*ipa_get_modem_cfg_emb_pipe_flt)(void);
enum ipa_transport_type (*ipa_get_transport_type)(void);
int (*ipa_ap_suspend)(struct device *dev);
int (*ipa_ap_resume)(struct device *dev);
int (*ipa_stop_gsi_channel)(u32 clnt_hdl);
int (*ipa_start_gsi_channel)(u32 clnt_hdl);
struct iommu_domain *(*ipa_get_smmu_domain)(void);
int (*ipa_disable_apps_wan_cons_deaggr)(uint32_t agg_size,
uint32_t agg_count);
struct device *(*ipa_get_dma_dev)(void);
int (*ipa_release_wdi_mapping)(u32 num_buffers,
struct ipa_wdi_buffer_info *info);
int (*ipa_create_wdi_mapping)(u32 num_buffers,
struct ipa_wdi_buffer_info *info);
const struct ipa_gsi_ep_config *(*ipa_get_gsi_ep_info)
(enum ipa_client_type client);
int (*ipa_register_ipa_ready_cb)(void (*ipa_ready_cb)(void *user_data),
void *user_data);
void (*ipa_inc_client_enable_clks)(
struct ipa_active_client_logging_info *id);
void (*ipa_dec_client_disable_clks)(
struct ipa_active_client_logging_info *id);
int (*ipa_inc_client_enable_clks_no_block)(
struct ipa_active_client_logging_info *id);
int (*ipa_suspend_resource_no_block)(
enum ipa_rm_resource_name resource);
int (*ipa_resume_resource)(enum ipa_rm_resource_name name);
int (*ipa_suspend_resource_sync)(enum ipa_rm_resource_name resource);
int (*ipa_set_required_perf_profile)(
enum ipa_voltage_level floor_voltage, u32 bandwidth_mbps);
void *(*ipa_get_ipc_logbuf)(void);
void *(*ipa_get_ipc_logbuf_low)(void);
int (*ipa_rx_poll)(u32 clnt_hdl, int budget);
void (*ipa_recycle_wan_skb)(struct sk_buff *skb);
int (*ipa_setup_uc_ntn_pipes)(struct ipa_ntn_conn_in_params *in,
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
int (*ipa_tear_down_uc_offload_pipes)(int ipa_ep_idx_ul,
int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params);
struct device *(*ipa_get_pdev)(void);
int (*ipa_ntn_uc_reg_rdyCB)(void (*ipauc_ready_cb)(void *user_data),
void *user_data);
void (*ipa_ntn_uc_dereg_rdyCB)(void);
int (*ipa_conn_wdi_pipes)(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out,
ipa_wdi_meter_notifier_cb wdi_notify);
int (*ipa_disconn_wdi_pipes)(int ipa_ep_idx_tx,
int ipa_ep_idx_rx);
int (*ipa_enable_wdi_pipes)(int ipa_ep_idx_tx,
int ipa_ep_idx_rx);
int (*ipa_disable_wdi_pipes)(int ipa_ep_idx_tx,
int ipa_ep_idx_rx);
int (*ipa_tz_unlock_reg)(struct ipa_tz_unlock_reg_info *reg_info,
u16 num_regs);
int (*ipa_get_smmu_params)(struct ipa_smmu_in_params *in,
struct ipa_smmu_out_params *out);
int (*ipa_is_vlan_mode)(enum ipa_vlan_ifaces iface, bool *res);
bool (*ipa_pm_is_used)(void);
int (*ipa_wigig_uc_init)(
struct ipa_wdi_uc_ready_params *inout,
ipa_wigig_misc_int_cb int_notify,
phys_addr_t *uc_db_pa);
int (*ipa_conn_wigig_rx_pipe_i)(void *in,
struct ipa_wigig_conn_out_params *out);
int (*ipa_conn_wigig_client_i)(void *in,
struct ipa_wigig_conn_out_params *out);
int (*ipa_disconn_wigig_pipe_i)(enum ipa_client_type client,
struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
void *dbuff);
int (*ipa_wigig_uc_msi_init)(bool init,
phys_addr_t periph_baddr_pa,
phys_addr_t pseudo_cause_pa,
phys_addr_t int_gen_tx_pa,
phys_addr_t int_gen_rx_pa,
phys_addr_t dma_ep_misc_pa);
int (*ipa_enable_wigig_pipe_i)(enum ipa_client_type client);
int (*ipa_disable_wigig_pipe_i)(enum ipa_client_type client);
void (*ipa_register_client_callback)(
int (*client_cb)(bool is_lock),
bool (*teth_port_state)(void), enum ipa_client_type client);
void (*ipa_deregister_client_callback)(enum ipa_client_type client);
};
#ifdef CONFIG_IPA3
int ipa3_plat_drv_probe(struct platform_device *pdev_p,
struct ipa_api_controller *api_ctrl,
const struct of_device_id *pdrv_match);
int ipa3_pci_drv_probe(
struct pci_dev *pci_dev,
struct ipa_api_controller *api_ctrl,
const struct of_device_id *pdrv_match);
#else
static inline int ipa3_plat_drv_probe(struct platform_device *pdev_p,
struct ipa_api_controller *api_ctrl,
const struct of_device_id *pdrv_match)
{
return -ENODEV;
}
static inline int ipa3_pci_drv_probe(
struct pci_dev *pci_dev,
struct ipa_api_controller *api_ctrl,
const struct of_device_id *pdrv_match)
{
return -ENODEV;
}
#endif /* (CONFIG_IPA3) */
#endif /* _IPA_API_H_ */
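struct ipa_api_controller is a function table: ipa_api.c (its diff is suppressed above) exports the stable ipa_* entry points and forwards each one through whichever table the probed hardware driver populated via ipa3_plat_drv_probe() or ipa3_pci_drv_probe(). A hedged sketch of the dispatch shape — the module-level pointer and wrapper body are assumptions, not code from the commit:

/* Hedged sketch of the dispatch pattern the controller implies.
 * 'ipa_api_ctrl' stands in for the pointer ipa_api.c would hold
 * after a successful probe; the real wrappers are in ipa_api.c.
 */
static struct ipa_api_controller *ipa_api_ctrl;

int ipa_cfg_ep_sketch(u32 clnt_hdl, const struct ipa_ep_cfg *ipa_ep_cfg)
{
	if (!ipa_api_ctrl || !ipa_api_ctrl->ipa_cfg_ep)
		return -EPERM;
	return ipa_api_ctrl->ipa_cfg_ep(clnt_hdl, ipa_ep_cfg);
}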

6  ipa/ipa_clients/Makefile  Normal file

@@ -0,0 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IPA3) += ipa_usb.o odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o ipa_gsb.o ipa_wigig.o
obj-$(CONFIG_IPA) += odu_bridge.o ipa_mhi_client.o ipa_uc_offload.o ipa_wdi3.o
obj-$(CONFIG_ECM_IPA) += ecm_ipa.o
obj-$(CONFIG_RNDIS_IPA) += rndis_ipa.o

1630  ipa/ipa_clients/ecm_ipa.c  Normal file

File diff suppressed because it is too large

1228  ipa/ipa_clients/ipa_gsb.c  Normal file

File diff suppressed because it is too large

2896  ipa/ipa_clients/ipa_mhi_client.c  Normal file

File diff suppressed because it is too large

818  ipa/ipa_clients/ipa_uc_offload.c  Normal file

@@ -0,0 +1,818 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa_uc_offload.h>
#include <linux/msm_ipa.h>
#include "../ipa_common_i.h"
#include "../ipa_v3/ipa_pm.h"
#define IPA_NTN_DMA_POOL_ALIGNMENT 8
#define OFFLOAD_DRV_NAME "ipa_uc_offload"
#define IPA_UC_OFFLOAD_DBG(fmt, args...) \
do { \
pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_UC_OFFLOAD_LOW(fmt, args...) \
do { \
pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_UC_OFFLOAD_ERR(fmt, args...) \
do { \
pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_UC_OFFLOAD_INFO(fmt, args...) \
do { \
pr_info(OFFLOAD_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
enum ipa_uc_offload_state {
IPA_UC_OFFLOAD_STATE_INVALID,
IPA_UC_OFFLOAD_STATE_INITIALIZED,
IPA_UC_OFFLOAD_STATE_UP,
};
struct ipa_uc_offload_ctx {
enum ipa_uc_offload_proto proto;
enum ipa_uc_offload_state state;
void *priv;
u8 hdr_len;
u32 partial_hdr_hdl[IPA_IP_MAX];
char netdev_name[IPA_RESOURCE_NAME_MAX];
ipa_notify_cb notify;
struct completion ntn_completion;
u32 pm_hdl;
struct ipa_ntn_conn_in_params conn;
};
static struct ipa_uc_offload_ctx *ipa_uc_offload_ctx[IPA_UC_MAX_PROT_SIZE];
static int ipa_uc_ntn_cons_release(void);
static int ipa_uc_ntn_cons_request(void);
static void ipa_uc_offload_rm_notify(void *, enum ipa_rm_event, unsigned long);
static int ipa_commit_partial_hdr(
struct ipa_ioc_add_hdr *hdr,
const char *netdev_name,
struct ipa_hdr_info *hdr_info)
{
int i;
if (hdr == NULL || hdr_info == NULL) {
IPA_UC_OFFLOAD_ERR("Invalid input\n");
return -EINVAL;
}
hdr->commit = 1;
hdr->num_hdrs = 2;
snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
"%s_ipv4", netdev_name);
snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
"%s_ipv6", netdev_name);
for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
hdr->hdr[i].type = hdr_info[i].hdr_type;
hdr->hdr[i].is_partial = 1;
hdr->hdr[i].is_eth2_ofst_valid = 1;
hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
}
if (ipa_add_hdr(hdr)) {
IPA_UC_OFFLOAD_ERR("fail to add partial headers\n");
return -EFAULT;
}
return 0;
}
static void ipa_uc_offload_ntn_pm_cb(void *p, enum ipa_pm_cb_event event)
{
/* suspend/resume is not supported */
IPA_UC_OFFLOAD_DBG("event = %d\n", event);
}
static int ipa_uc_offload_ntn_register_pm_client(
struct ipa_uc_offload_ctx *ntn_ctx)
{
int res;
struct ipa_pm_register_params params;
memset(&params, 0, sizeof(params));
params.name = "ETH";
params.callback = ipa_uc_offload_ntn_pm_cb;
params.user_data = ntn_ctx;
params.group = IPA_PM_GROUP_DEFAULT;
res = ipa_pm_register(&params, &ntn_ctx->pm_hdl);
if (res) {
IPA_UC_OFFLOAD_ERR("fail to register with PM %d\n", res);
return res;
}
res = ipa_pm_associate_ipa_cons_to_client(ntn_ctx->pm_hdl,
IPA_CLIENT_ETHERNET_CONS);
if (res) {
IPA_UC_OFFLOAD_ERR("fail to associate cons with PM %d\n", res);
ipa_pm_deregister(ntn_ctx->pm_hdl);
ntn_ctx->pm_hdl = ~0;
return res;
}
return 0;
}
static void ipa_uc_offload_ntn_deregister_pm_client(
struct ipa_uc_offload_ctx *ntn_ctx)
{
ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
ipa_pm_deregister(ntn_ctx->pm_hdl);
}
static int ipa_uc_offload_ntn_create_rm_resources(
struct ipa_uc_offload_ctx *ntn_ctx)
{
int ret;
struct ipa_rm_create_params param;
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
param.reg_params.user_data = ntn_ctx;
param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
param.floor_voltage = IPA_VOLTAGE_SVS;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n");
return -EFAULT;
}
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_ETHERNET_CONS;
param.request_resource = ipa_uc_ntn_cons_request;
param.release_resource = ipa_uc_ntn_cons_release;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n");
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
return -EFAULT;
}
return 0;
}
static int ipa_uc_offload_ntn_reg_intf(
struct ipa_uc_offload_intf_params *inp,
struct ipa_uc_offload_out_params *outp,
struct ipa_uc_offload_ctx *ntn_ctx)
{
struct ipa_ioc_add_hdr *hdr = NULL;
struct ipa_tx_intf tx;
struct ipa_rx_intf rx;
struct ipa_ioc_tx_intf_prop tx_prop[2];
struct ipa_ioc_rx_intf_prop rx_prop[2];
int ret = 0;
u32 len;
IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
inp->netdev_name);
if (ipa_pm_is_used())
ret = ipa_uc_offload_ntn_register_pm_client(ntn_ctx);
else
ret = ipa_uc_offload_ntn_create_rm_resources(ntn_ctx);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to create rm resource\n");
return -EFAULT;
}
memcpy(ntn_ctx->netdev_name, inp->netdev_name, IPA_RESOURCE_NAME_MAX);
ntn_ctx->hdr_len = inp->hdr_info[0].hdr_len;
ntn_ctx->notify = inp->notify;
ntn_ctx->priv = inp->priv;
/* add partial header */
len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
hdr = kzalloc(len, GFP_KERNEL);
if (hdr == NULL) {
ret = -ENOMEM;
goto fail_alloc;
}
if (ipa_commit_partial_hdr(hdr, ntn_ctx->netdev_name, inp->hdr_info)) {
IPA_UC_OFFLOAD_ERR("fail to commit partial headers\n");
ret = -EFAULT;
goto fail;
}
/* populate tx prop */
tx.num_props = 2;
tx.prop = tx_prop;
memset(tx_prop, 0, sizeof(tx_prop));
tx_prop[0].ip = IPA_IP_v4;
tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
sizeof(tx_prop[0].hdr_name));
tx_prop[1].ip = IPA_IP_v6;
tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
sizeof(tx_prop[1].hdr_name));
/* populate rx prop */
rx.num_props = 2;
rx.prop = rx_prop;
memset(rx_prop, 0, sizeof(rx_prop));
rx_prop[0].ip = IPA_IP_v4;
rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD;
rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
if (inp->is_meta_data_valid) {
rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
rx_prop[0].attrib.meta_data = inp->meta_data;
rx_prop[0].attrib.meta_data_mask = inp->meta_data_mask;
}
rx_prop[1].ip = IPA_IP_v6;
rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD;
rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
if (inp->is_meta_data_valid) {
rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
rx_prop[1].attrib.meta_data = inp->meta_data;
rx_prop[1].attrib.meta_data_mask = inp->meta_data_mask;
}
if (ipa_register_intf(inp->netdev_name, &tx, &rx)) {
IPA_UC_OFFLOAD_ERR("fail to add interface prop\n");
memset(ntn_ctx, 0, sizeof(*ntn_ctx));
ret = -EFAULT;
goto fail;
}
ntn_ctx->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
ntn_ctx->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
init_completion(&ntn_ctx->ntn_completion);
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
kfree(hdr);
return ret;
fail:
kfree(hdr);
fail_alloc:
if (ipa_pm_is_used()) {
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
} else {
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
}
return ret;
}
int ipa_uc_offload_reg_intf(
struct ipa_uc_offload_intf_params *inp,
struct ipa_uc_offload_out_params *outp)
{
struct ipa_uc_offload_ctx *ctx;
int ret = 0;
if (inp == NULL || outp == NULL) {
IPA_UC_OFFLOAD_ERR("invalid params in=%pK out=%pK\n",
inp, outp);
return -EINVAL;
}
if (inp->proto <= IPA_UC_INVALID ||
inp->proto >= IPA_UC_MAX_PROT_SIZE) {
IPA_UC_OFFLOAD_ERR("invalid proto %d\n", inp->proto);
return -EINVAL;
}
if (!ipa_uc_offload_ctx[inp->proto]) {
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (ctx == NULL) {
IPA_UC_OFFLOAD_ERR("fail to alloc uc offload ctx\n");
return -ENOMEM;
}
ipa_uc_offload_ctx[inp->proto] = ctx;
ctx->proto = inp->proto;
} else
ctx = ipa_uc_offload_ctx[inp->proto];
if (ctx->state != IPA_UC_OFFLOAD_STATE_INVALID) {
IPA_UC_OFFLOAD_ERR("Already Initialized\n");
return -EINVAL;
}
if (ctx->proto == IPA_UC_NTN) {
ret = ipa_uc_offload_ntn_reg_intf(inp, outp, ctx);
if (!ret)
outp->clnt_hndl = IPA_UC_NTN;
}
return ret;
}
EXPORT_SYMBOL(ipa_uc_offload_reg_intf);
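/*
 * Illustrative sketch (not part of this file): how an Ethernet client
 * driver might fill ipa_uc_offload_intf_params and register an NTN
 * interface. The netdev name, the zeroed header template and the use of
 * ETH_HLEN (from <linux/if_ether.h>) are assumptions for the example.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_ntn_register(struct ipa_uc_offload_out_params *out)
{
	static u8 eth_hdr_template[ETH_HLEN];
	struct ipa_uc_offload_intf_params in;
	int i;

	memset(&in, 0, sizeof(in));
	in.proto = IPA_UC_NTN;
	strlcpy(in.netdev_name, "eth0", sizeof(in.netdev_name));
	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
		in.hdr_info[i].hdr = eth_hdr_template;
		in.hdr_info[i].hdr_len = ETH_HLEN;
		in.hdr_info[i].hdr_type = IPA_HDR_L2_ETHERNET_II;
		in.hdr_info[i].dst_mac_addr_offset = 0;
	}
	in.notify = NULL;	/* client's data-path callback */
	in.priv = NULL;

	return ipa_uc_offload_reg_intf(&in, out);
}
#endif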
static int ipa_uc_ntn_cons_release(void)
{
return 0;
}
static int ipa_uc_ntn_cons_request(void)
{
int ret = 0;
struct ipa_uc_offload_ctx *ntn_ctx;
ntn_ctx = ipa_uc_offload_ctx[IPA_UC_NTN];
if (!ntn_ctx) {
IPA_UC_OFFLOAD_ERR("NTN is not initialized\n");
ret = -EFAULT;
} else if (ntn_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", ntn_ctx->state);
ret = -EFAULT;
}
return ret;
}
static void ipa_uc_offload_rm_notify(void *user_data, enum ipa_rm_event event,
unsigned long data)
{
struct ipa_uc_offload_ctx *offload_ctx;
offload_ctx = (struct ipa_uc_offload_ctx *)user_data;
if (!(offload_ctx && offload_ctx->proto > IPA_UC_INVALID &&
offload_ctx->proto < IPA_UC_MAX_PROT_SIZE)) {
IPA_UC_OFFLOAD_ERR("Invalid user data\n");
return;
}
if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED)
IPA_UC_OFFLOAD_ERR("Invalid State: %d\n", offload_ctx->state);
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
complete_all(&offload_ctx->ntn_completion);
break;
case IPA_RM_RESOURCE_RELEASED:
break;
default:
IPA_UC_OFFLOAD_ERR("Invalid RM Evt: %d", event);
break;
}
}
static int ipa_uc_ntn_alloc_conn_smmu_info(struct ipa_ntn_setup_info *dest,
struct ipa_ntn_setup_info *source)
{
int result;
IPA_UC_OFFLOAD_DBG("Allocating smmu info\n");
memcpy(dest, source, sizeof(struct ipa_ntn_setup_info));
dest->data_buff_list =
kcalloc(dest->num_buffers, sizeof(struct ntn_buff_smmu_map),
GFP_KERNEL);
if (dest->data_buff_list == NULL) {
IPA_UC_OFFLOAD_ERR("failed to alloc smmu info\n");
return -ENOMEM;
}
memcpy(dest->data_buff_list, source->data_buff_list,
sizeof(struct ntn_buff_smmu_map) * dest->num_buffers);
result = ipa_smmu_store_sgt(&dest->buff_pool_base_sgt,
source->buff_pool_base_sgt);
if (result) {
kfree(dest->data_buff_list);
return result;
}
result = ipa_smmu_store_sgt(&dest->ring_base_sgt,
source->ring_base_sgt);
if (result) {
kfree(dest->data_buff_list);
ipa_smmu_free_sgt(&dest->buff_pool_base_sgt);
return result;
}
return 0;
}
static void ipa_uc_ntn_free_conn_smmu_info(struct ipa_ntn_setup_info *params)
{
kfree(params->data_buff_list);
ipa_smmu_free_sgt(&params->buff_pool_base_sgt);
ipa_smmu_free_sgt(&params->ring_base_sgt);
}
int ipa_uc_ntn_conn_pipes(struct ipa_ntn_conn_in_params *inp,
struct ipa_ntn_conn_out_params *outp,
struct ipa_uc_offload_ctx *ntn_ctx)
{
int result = 0;
enum ipa_uc_offload_state prev_state;
if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
return -EINVAL;
}
prev_state = ntn_ctx->state;
if (inp->dl.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
inp->dl.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
IPA_UC_OFFLOAD_ERR("alignment failure on TX\n");
return -EINVAL;
}
if (inp->ul.ring_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT ||
inp->ul.buff_pool_base_pa % IPA_NTN_DMA_POOL_ALIGNMENT) {
IPA_UC_OFFLOAD_ERR("alignment failure on RX\n");
return -EINVAL;
}
if (ipa_pm_is_used()) {
result = ipa_pm_activate_sync(ntn_ctx->pm_hdl);
if (result) {
IPA_UC_OFFLOAD_ERR("fail to activate: %d\n", result);
return result;
}
} else {
result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (result) {
IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n",
result);
return result;
}
result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (result == -EINPROGRESS) {
if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
	10 * HZ) == 0) {
IPA_UC_OFFLOAD_ERR("ETH_PROD req timeout\n");
result = -EFAULT;
goto fail;
}
} else if (result != 0) {
IPA_UC_OFFLOAD_ERR("fail to request resource\n");
result = -EFAULT;
goto fail;
}
}
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_UP;
result = ipa_setup_uc_ntn_pipes(inp, ntn_ctx->notify,
ntn_ctx->priv, ntn_ctx->hdr_len, outp);
if (result) {
IPA_UC_OFFLOAD_ERR("fail to setup uc offload pipes: %d\n",
result);
ntn_ctx->state = prev_state;
result = -EFAULT;
goto fail;
}
if (ntn_ctx->conn.dl.smmu_enabled) {
result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.dl,
&inp->dl);
if (result) {
IPA_UC_OFFLOAD_ERR("alloc failure on TX\n");
goto fail;
}
result = ipa_uc_ntn_alloc_conn_smmu_info(&ntn_ctx->conn.ul,
&inp->ul);
if (result) {
ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
IPA_UC_OFFLOAD_ERR("alloc failure on RX\n");
goto fail;
}
}
fail:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
return result;
}
int ipa_uc_offload_conn_pipes(struct ipa_uc_offload_conn_in_params *inp,
struct ipa_uc_offload_conn_out_params *outp)
{
int ret = 0;
struct ipa_uc_offload_ctx *offload_ctx;
if (!(inp && outp)) {
IPA_UC_OFFLOAD_ERR("bad parm. in=%pK out=%pK\n", inp, outp);
return -EINVAL;
}
if (inp->clnt_hndl <= IPA_UC_INVALID ||
inp->clnt_hndl >= IPA_UC_MAX_PROT_SIZE) {
IPA_UC_OFFLOAD_ERR("invalid client handle %d\n",
inp->clnt_hndl);
return -EINVAL;
}
offload_ctx = ipa_uc_offload_ctx[inp->clnt_hndl];
if (!offload_ctx) {
IPA_UC_OFFLOAD_ERR("Invalid Handle\n");
return -EINVAL;
}
if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
IPA_UC_OFFLOAD_ERR("Invalid state %d\n", offload_ctx->state);
return -EPERM;
}
switch (offload_ctx->proto) {
case IPA_UC_NTN:
ret = ipa_uc_ntn_conn_pipes(&inp->u.ntn, &outp->u.ntn,
offload_ctx);
break;
default:
IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", offload_ctx->proto);
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
int ipa_set_perf_profile(struct ipa_perf_profile *profile)
{
struct ipa_rm_perf_profile rm_profile;
enum ipa_rm_resource_name resource_name;
if (profile == NULL) {
IPA_UC_OFFLOAD_ERR("Invalid input\n");
return -EINVAL;
}
rm_profile.max_supported_bandwidth_mbps =
profile->max_supported_bw_mbps;
if (profile->client == IPA_CLIENT_ETHERNET_PROD) {
resource_name = IPA_RM_RESOURCE_ETHERNET_PROD;
} else if (profile->client == IPA_CLIENT_ETHERNET_CONS) {
resource_name = IPA_RM_RESOURCE_ETHERNET_CONS;
} else {
IPA_UC_OFFLOAD_ERR("not supported\n");
return -EINVAL;
}
if (ipa_pm_is_used())
return ipa_pm_set_throughput(
ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
profile->max_supported_bw_mbps);
if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
IPA_UC_OFFLOAD_ERR("fail to setup rm perf profile\n");
return -EFAULT;
}
return 0;
}
EXPORT_SYMBOL(ipa_set_perf_profile);
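/*
 * Illustrative sketch (not part of this file): voting bandwidth for the
 * NTN pipes through ipa_set_perf_profile(). The 800 Mbps figure is an
 * arbitrary example value.
 */
#if 0	/* illustrative sketch, not compiled */
	struct ipa_perf_profile profile;

	profile.client = IPA_CLIENT_ETHERNET_PROD;
	profile.max_supported_bw_mbps = 800;
	if (ipa_set_perf_profile(&profile))
		pr_err("bandwidth vote failed\n");
#endif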
static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
{
int ipa_ep_idx_ul, ipa_ep_idx_dl;
int ret = 0;
if (ntn_ctx->conn.dl.smmu_enabled != ntn_ctx->conn.ul.smmu_enabled) {
IPA_UC_OFFLOAD_ERR("ul and dl smmu enablement do not match\n");
return -EINVAL;
}
ntn_ctx->state = IPA_UC_OFFLOAD_STATE_INITIALIZED;
if (ipa_pm_is_used()) {
ret = ipa_pm_deactivate_sync(ntn_ctx->pm_hdl);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to deactivate res: %d\n",
ret);
return -EFAULT;
}
} else {
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail release ETHERNET_PROD: %d\n",
ret);
return -EFAULT;
}
ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
IPA_RM_RESOURCE_APPS_CONS);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail del dep ETH->APPS, %d\n", ret);
return -EFAULT;
}
}
ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl,
&ntn_ctx->conn);
if (ret) {
IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
ret);
return -EFAULT;
}
if (ntn_ctx->conn.dl.smmu_enabled) {
ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.dl);
ipa_uc_ntn_free_conn_smmu_info(&ntn_ctx->conn.ul);
}
return ret;
}
int ipa_uc_offload_disconn_pipes(u32 clnt_hdl)
{
struct ipa_uc_offload_ctx *offload_ctx;
int ret = 0;
if (clnt_hdl <= IPA_UC_INVALID ||
clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
return -EINVAL;
}
offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
if (!offload_ctx) {
IPA_UC_OFFLOAD_ERR("Invalid client Handle\n");
return -EINVAL;
}
if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_UP) {
IPA_UC_OFFLOAD_ERR("Invalid state\n");
return -EINVAL;
}
switch (offload_ctx->proto) {
case IPA_UC_NTN:
ret = ipa_uc_ntn_disconn_pipes(offload_ctx);
break;
default:
IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL(ipa_uc_offload_disconn_pipes);
static int ipa_uc_ntn_cleanup(struct ipa_uc_offload_ctx *ntn_ctx)
{
int len, result = 0;
struct ipa_ioc_del_hdr *hdr;
if (ipa_pm_is_used()) {
ipa_uc_offload_ntn_deregister_pm_client(ntn_ctx);
} else {
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD\n");
return -EFAULT;
}
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS\n");
return -EFAULT;
}
}
len = sizeof(struct ipa_ioc_del_hdr) + 2 * sizeof(struct ipa_hdr_del);
hdr = kzalloc(len, GFP_KERNEL);
if (hdr == NULL)
return -ENOMEM;
hdr->commit = 1;
hdr->num_hdls = 2;
hdr->hdl[0].hdl = ntn_ctx->partial_hdr_hdl[0];
hdr->hdl[1].hdl = ntn_ctx->partial_hdr_hdl[1];
if (ipa_del_hdr(hdr)) {
IPA_UC_OFFLOAD_ERR("fail to delete partial header\n");
result = -EFAULT;
goto fail;
}
if (ipa_deregister_intf(ntn_ctx->netdev_name)) {
IPA_UC_OFFLOAD_ERR("fail to delete interface prop\n");
result = -EFAULT;
goto fail;
}
fail:
kfree(hdr);
return result;
}
int ipa_uc_offload_cleanup(u32 clnt_hdl)
{
struct ipa_uc_offload_ctx *offload_ctx;
int ret = 0;
if (clnt_hdl <= IPA_UC_INVALID ||
clnt_hdl >= IPA_UC_MAX_PROT_SIZE) {
IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
return -EINVAL;
}
offload_ctx = ipa_uc_offload_ctx[clnt_hdl];
if (!offload_ctx) {
IPA_UC_OFFLOAD_ERR("Invalid client handle %d\n", clnt_hdl);
return -EINVAL;
}
if (offload_ctx->state != IPA_UC_OFFLOAD_STATE_INITIALIZED) {
IPA_UC_OFFLOAD_ERR("Invalid State %d\n", offload_ctx->state);
return -EINVAL;
}
switch (offload_ctx->proto) {
case IPA_UC_NTN:
ret = ipa_uc_ntn_cleanup(offload_ctx);
break;
default:
IPA_UC_OFFLOAD_ERR("Invalid Proto :%d\n", clnt_hdl);
ret = -EINVAL;
break;
}
if (!ret) {
kfree(offload_ctx);
offload_ctx = NULL;
ipa_uc_offload_ctx[clnt_hdl] = NULL;
}
return ret;
}
EXPORT_SYMBOL(ipa_uc_offload_cleanup);
/**
* ipa_uc_offload_reg_rdyCB() - register the uC ready CB if the uC is
* not yet ready
* @inp: [in/out] input/output parameters
* from/to the client
*
* Returns: 0 on success, negative on failure
*
*/
int ipa_uc_offload_reg_rdyCB(struct ipa_uc_ready_params *inp)
{
int ret = 0;
if (!inp) {
IPA_UC_OFFLOAD_ERR("Invalid input\n");
return -EINVAL;
}
if (inp->proto == IPA_UC_NTN)
ret = ipa_ntn_uc_reg_rdyCB(inp->notify, inp->priv);
if (ret == -EEXIST) {
inp->is_uC_ready = true;
ret = 0;
} else
inp->is_uC_ready = false;
return ret;
}
EXPORT_SYMBOL(ipa_uc_offload_reg_rdyCB);
void ipa_uc_offload_dereg_rdyCB(enum ipa_uc_offload_proto proto)
{
if (proto == IPA_UC_NTN)
ipa_ntn_uc_dereg_rdyCB();
}
EXPORT_SYMBOL(ipa_uc_offload_dereg_rdyCB);
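/*
 * Summary (added for clarity, not part of the original file): typical
 * client call order for this API:
 *
 *   ipa_uc_offload_reg_rdyCB()     - wait for the uC to become ready
 *   ipa_uc_offload_reg_intf()      - register netdev and partial headers
 *   ipa_uc_offload_conn_pipes()    - bring up the UL/DL NTN rings
 *   ipa_uc_offload_disconn_pipes() - tear the rings down
 *   ipa_uc_offload_cleanup()       - delete headers and interface props
 */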

3222
ipa/ipa_clients/ipa_usb.c Normal file

File diff suppressed because it is too large

905
ipa/ipa_clients/ipa_wdi3.c Normal file

@@ -0,0 +1,905 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa_wdi3.h>
#include <linux/msm_ipa.h>
#include <linux/string.h>
#include "../ipa_common_i.h"
#include "../ipa_v3/ipa_pm.h"
#include "../ipa_v3/ipa_i.h"
#define OFFLOAD_DRV_NAME "ipa_wdi"
#define IPA_WDI_DBG(fmt, args...) \
do { \
pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_WDI_DBG_LOW(fmt, args...) \
do { \
pr_debug(OFFLOAD_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_WDI_ERR(fmt, args...) \
do { \
pr_err(OFFLOAD_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
OFFLOAD_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
struct ipa_wdi_intf_info {
char netdev_name[IPA_RESOURCE_NAME_MAX];
u8 hdr_len;
u32 partial_hdr_hdl[IPA_IP_MAX];
struct list_head link;
};
struct ipa_wdi_context {
struct list_head head_intf_list;
struct completion wdi_completion;
struct mutex lock;
enum ipa_wdi_version wdi_version;
u8 is_smmu_enabled;
u32 tx_pipe_hdl;
u32 rx_pipe_hdl;
u8 num_sys_pipe_needed;
u32 sys_pipe_hdl[IPA_WDI_MAX_SUPPORTED_SYS_PIPE];
u32 ipa_pm_hdl;
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
ipa_wdi_meter_notifier_cb wdi_notify;
#endif
};
static struct ipa_wdi_context *ipa_wdi_ctx;
int ipa_wdi_init(struct ipa_wdi_init_in_params *in,
struct ipa_wdi_init_out_params *out)
{
struct ipa_wdi_uc_ready_params uc_ready_params;
struct ipa_smmu_in_params smmu_in;
struct ipa_smmu_out_params smmu_out;
if (ipa_wdi_ctx) {
IPA_WDI_ERR("ipa_wdi_ctx was initialized before\n");
return -EFAULT;
}
if (in->wdi_version > IPA_WDI_3 || in->wdi_version < IPA_WDI_1) {
IPA_WDI_ERR("wrong wdi version: %d\n", in->wdi_version);
return -EFAULT;
}
ipa_wdi_ctx = kzalloc(sizeof(*ipa_wdi_ctx), GFP_KERNEL);
if (ipa_wdi_ctx == NULL) {
IPA_WDI_ERR("fail to alloc wdi ctx\n");
return -ENOMEM;
}
mutex_init(&ipa_wdi_ctx->lock);
init_completion(&ipa_wdi_ctx->wdi_completion);
INIT_LIST_HEAD(&ipa_wdi_ctx->head_intf_list);
ipa_wdi_ctx->wdi_version = in->wdi_version;
uc_ready_params.notify = in->notify;
uc_ready_params.priv = in->priv;
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
ipa_wdi_ctx->wdi_notify = in->wdi_notify;
#endif
if (ipa_uc_reg_rdyCB(&uc_ready_params) != 0) {
mutex_destroy(&ipa_wdi_ctx->lock);
kfree(ipa_wdi_ctx);
ipa_wdi_ctx = NULL;
return -EFAULT;
}
out->is_uC_ready = uc_ready_params.is_uC_ready;
smmu_in.smmu_client = IPA_SMMU_WLAN_CLIENT;
if (ipa_get_smmu_params(&smmu_in, &smmu_out))
out->is_smmu_enabled = false;
else
out->is_smmu_enabled = smmu_out.smmu_enable;
ipa_wdi_ctx->is_smmu_enabled = out->is_smmu_enabled;
if (ipa3_ctx->ipa_wdi3_over_gsi)
out->is_over_gsi = true;
else
out->is_over_gsi = false;
return 0;
}
EXPORT_SYMBOL(ipa_wdi_init);
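/*
 * Illustrative sketch (not part of this file): one-time WDI context
 * initialization by a WLAN driver, assuming WDI-3 and no uC-ready
 * callback. Field values are example assumptions.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_wdi_init(void)
{
	struct ipa_wdi_init_in_params in;
	struct ipa_wdi_init_out_params out;

	memset(&in, 0, sizeof(in));
	in.wdi_version = IPA_WDI_3;
	in.notify = NULL;	/* called when the uC becomes ready */
	in.priv = NULL;
	if (ipa_wdi_init(&in, &out))
		return -EFAULT;
	/* out.is_uC_ready, out.is_smmu_enabled and out.is_over_gsi
	 * select the client's setup path.
	 */
	return 0;
}
#endif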
int ipa_wdi_cleanup(void)
{
struct ipa_wdi_intf_info *entry;
struct ipa_wdi_intf_info *next;

if (!ipa_wdi_ctx) {
	IPA_WDI_ERR("wdi ctx is not initialized\n");
	return -EPERM;
}
/* clear interface list */
list_for_each_entry_safe(entry, next,
&ipa_wdi_ctx->head_intf_list, link) {
list_del(&entry->link);
kfree(entry);
}
mutex_destroy(&ipa_wdi_ctx->lock);
kfree(ipa_wdi_ctx);
ipa_wdi_ctx = NULL;
return 0;
}
EXPORT_SYMBOL(ipa_wdi_cleanup);
static int ipa_wdi_commit_partial_hdr(
struct ipa_ioc_add_hdr *hdr,
const char *netdev_name,
struct ipa_wdi_hdr_info *hdr_info)
{
int i;
if (!hdr || !hdr_info || !netdev_name) {
IPA_WDI_ERR("Invalid input\n");
return -EINVAL;
}
hdr->commit = 1;
hdr->num_hdrs = 2;
snprintf(hdr->hdr[0].name, sizeof(hdr->hdr[0].name),
"%s_ipv4", netdev_name);
snprintf(hdr->hdr[1].name, sizeof(hdr->hdr[1].name),
"%s_ipv6", netdev_name);
for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
hdr->hdr[i].hdr_len = hdr_info[i].hdr_len;
memcpy(hdr->hdr[i].hdr, hdr_info[i].hdr, hdr->hdr[i].hdr_len);
hdr->hdr[i].type = hdr_info[i].hdr_type;
hdr->hdr[i].is_partial = 1;
hdr->hdr[i].is_eth2_ofst_valid = 1;
hdr->hdr[i].eth2_ofst = hdr_info[i].dst_mac_addr_offset;
}
if (ipa_add_hdr(hdr)) {
IPA_WDI_ERR("fail to add partial headers\n");
return -EFAULT;
}
return 0;
}
int ipa_wdi_reg_intf(struct ipa_wdi_reg_intf_in_params *in)
{
struct ipa_ioc_add_hdr *hdr;
struct ipa_wdi_intf_info *new_intf;
struct ipa_wdi_intf_info *entry;
struct ipa_tx_intf tx;
struct ipa_rx_intf rx;
struct ipa_ioc_tx_intf_prop tx_prop[2];
struct ipa_ioc_rx_intf_prop rx_prop[2];
u32 len;
int ret = 0;
if (in == NULL) {
IPA_WDI_ERR("invalid params in=%pK\n", in);
return -EINVAL;
}
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized\n");
return -EPERM;
}
IPA_WDI_DBG("register interface for netdev %s\n",
in->netdev_name);
mutex_lock(&ipa_wdi_ctx->lock);
list_for_each_entry(entry, &ipa_wdi_ctx->head_intf_list, link)
if (strcmp(entry->netdev_name, in->netdev_name) == 0) {
IPA_WDI_DBG("intf was added before.\n");
mutex_unlock(&ipa_wdi_ctx->lock);
return 0;
}
IPA_WDI_DBG("intf was not added before, proceed.\n");
new_intf = kzalloc(sizeof(*new_intf), GFP_KERNEL);
if (new_intf == NULL) {
IPA_WDI_ERR("fail to alloc new intf\n");
mutex_unlock(&ipa_wdi_ctx->lock);
return -ENOMEM;
}
INIT_LIST_HEAD(&new_intf->link);
strlcpy(new_intf->netdev_name, in->netdev_name,
sizeof(new_intf->netdev_name));
new_intf->hdr_len = in->hdr_info[0].hdr_len;
/* add partial header */
len = sizeof(struct ipa_ioc_add_hdr) + 2 * sizeof(struct ipa_hdr_add);
hdr = kzalloc(len, GFP_KERNEL);
if (hdr == NULL) {
IPA_WDI_ERR("fail to alloc %d bytes\n", len);
ret = -ENOMEM;
goto fail_alloc_hdr;
}
if (ipa_wdi_commit_partial_hdr(hdr, in->netdev_name, in->hdr_info)) {
IPA_WDI_ERR("fail to commit partial headers\n");
ret = -EFAULT;
goto fail_commit_hdr;
}
new_intf->partial_hdr_hdl[IPA_IP_v4] = hdr->hdr[IPA_IP_v4].hdr_hdl;
new_intf->partial_hdr_hdl[IPA_IP_v6] = hdr->hdr[IPA_IP_v6].hdr_hdl;
IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
hdr->hdr[IPA_IP_v4].hdr_hdl, hdr->hdr[IPA_IP_v6].hdr_hdl);
/* populate tx prop */
tx.num_props = 2;
tx.prop = tx_prop;
memset(tx_prop, 0, sizeof(tx_prop));
tx_prop[0].ip = IPA_IP_v4;
if (!ipa3_ctx->ipa_wdi3_over_gsi)
tx_prop[0].dst_pipe = IPA_CLIENT_WLAN1_CONS;
else
tx_prop[0].dst_pipe = IPA_CLIENT_WLAN2_CONS;
tx_prop[0].alt_dst_pipe = in->alt_dst_pipe;
tx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
strlcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
sizeof(tx_prop[0].hdr_name));
tx_prop[1].ip = IPA_IP_v6;
if (!ipa3_ctx->ipa_wdi3_over_gsi)
tx_prop[1].dst_pipe = IPA_CLIENT_WLAN1_CONS;
else
tx_prop[1].dst_pipe = IPA_CLIENT_WLAN2_CONS;
tx_prop[1].alt_dst_pipe = in->alt_dst_pipe;
tx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
strlcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
sizeof(tx_prop[1].hdr_name));
/* populate rx prop */
rx.num_props = 2;
rx.prop = rx_prop;
memset(rx_prop, 0, sizeof(rx_prop));
rx_prop[0].ip = IPA_IP_v4;
if (!ipa3_ctx->ipa_wdi3_over_gsi)
rx_prop[0].src_pipe = IPA_CLIENT_WLAN1_PROD;
else
rx_prop[0].src_pipe = IPA_CLIENT_WLAN2_PROD;
rx_prop[0].hdr_l2_type = in->hdr_info[0].hdr_type;
if (in->is_meta_data_valid) {
rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
rx_prop[0].attrib.meta_data = in->meta_data;
rx_prop[0].attrib.meta_data_mask = in->meta_data_mask;
}
rx_prop[1].ip = IPA_IP_v6;
if (!ipa3_ctx->ipa_wdi3_over_gsi)
rx_prop[1].src_pipe = IPA_CLIENT_WLAN1_PROD;
else
rx_prop[1].src_pipe = IPA_CLIENT_WLAN2_PROD;
rx_prop[1].hdr_l2_type = in->hdr_info[1].hdr_type;
if (in->is_meta_data_valid) {
rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
rx_prop[1].attrib.meta_data = in->meta_data;
rx_prop[1].attrib.meta_data_mask = in->meta_data_mask;
}
if (ipa_register_intf(in->netdev_name, &tx, &rx)) {
IPA_WDI_ERR("fail to add interface prop\n");
ret = -EFAULT;
goto fail_commit_hdr;
}
list_add(&new_intf->link, &ipa_wdi_ctx->head_intf_list);
init_completion(&ipa_wdi_ctx->wdi_completion);
kfree(hdr);
mutex_unlock(&ipa_wdi_ctx->lock);
return 0;
fail_commit_hdr:
kfree(hdr);
fail_alloc_hdr:
kfree(new_intf);
mutex_unlock(&ipa_wdi_ctx->lock);
return ret;
}
EXPORT_SYMBOL(ipa_wdi_reg_intf);
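/*
 * Illustrative sketch (not part of this file): registering a WLAN netdev
 * with partial Ethernet headers. The netdev name, the zeroed header
 * template and ETH_HLEN (from <linux/if_ether.h>) are assumptions.
 */
#if 0	/* illustrative sketch, not compiled */
static int example_wdi_reg_intf(void)
{
	static u8 eth_hdr_template[ETH_HLEN];
	struct ipa_wdi_reg_intf_in_params in;
	int i;

	memset(&in, 0, sizeof(in));
	in.netdev_name = "wlan0";
	for (i = IPA_IP_v4; i < IPA_IP_MAX; i++) {
		in.hdr_info[i].hdr = eth_hdr_template;
		in.hdr_info[i].hdr_len = ETH_HLEN;
		in.hdr_info[i].hdr_type = IPA_HDR_L2_ETHERNET_II;
		in.hdr_info[i].dst_mac_addr_offset = 0;
	}
	in.is_meta_data_valid = false;
	return ipa_wdi_reg_intf(&in);
}
#endif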
int ipa_wdi_dereg_intf(const char *netdev_name)
{
int len, ret = 0;
struct ipa_ioc_del_hdr *hdr = NULL;
struct ipa_wdi_intf_info *entry;
struct ipa_wdi_intf_info *next;
if (!netdev_name) {
IPA_WDI_ERR("no netdev name.\n");
return -EINVAL;
}
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized.\n");
return -EPERM;
}
mutex_lock(&ipa_wdi_ctx->lock);
list_for_each_entry_safe(entry, next, &ipa_wdi_ctx->head_intf_list,
link)
if (strcmp(entry->netdev_name, netdev_name) == 0) {
len = sizeof(struct ipa_ioc_del_hdr) +
2 * sizeof(struct ipa_hdr_del);
hdr = kzalloc(len, GFP_KERNEL);
if (hdr == NULL) {
IPA_WDI_ERR("fail to alloc %d bytes\n", len);
mutex_unlock(&ipa_wdi_ctx->lock);
return -ENOMEM;
}
hdr->commit = 1;
hdr->num_hdls = 2;
hdr->hdl[0].hdl = entry->partial_hdr_hdl[0];
hdr->hdl[1].hdl = entry->partial_hdr_hdl[1];
IPA_WDI_DBG("IPv4 hdr hdl: %d IPv6 hdr hdl: %d\n",
hdr->hdl[0].hdl, hdr->hdl[1].hdl);
if (ipa_del_hdr(hdr)) {
IPA_WDI_ERR("fail to delete partial header\n");
ret = -EFAULT;
goto fail;
}
if (ipa_deregister_intf(entry->netdev_name)) {
IPA_WDI_ERR("fail to del interface props\n");
ret = -EFAULT;
goto fail;
}
list_del(&entry->link);
kfree(entry);
break;
}
fail:
kfree(hdr);
mutex_unlock(&ipa_wdi_ctx->lock);
return ret;
}
EXPORT_SYMBOL(ipa_wdi_dereg_intf);
static void ipa_wdi_rm_notify(void *user_data, enum ipa_rm_event event,
unsigned long data)
{
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("Invalid context\n");
return;
}
switch (event) {
case IPA_RM_RESOURCE_GRANTED:
complete_all(&ipa_wdi_ctx->wdi_completion);
break;
case IPA_RM_RESOURCE_RELEASED:
break;
default:
IPA_WDI_ERR("Invalid RM Evt: %d", event);
break;
}
}
static int ipa_wdi_cons_release(void)
{
return 0;
}
static int ipa_wdi_cons_request(void)
{
int ret = 0;
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized\n");
ret = -EFAULT;
}
return ret;
}
static void ipa_wdi_pm_cb(void *p, enum ipa_pm_cb_event event)
{
IPA_WDI_DBG("received pm event %d\n", event);
}
int ipa_wdi_conn_pipes(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out)
{
int i, j, ret = 0;
struct ipa_rm_create_params param;
struct ipa_pm_register_params pm_params;
struct ipa_wdi_in_params in_tx;
struct ipa_wdi_in_params in_rx;
struct ipa_wdi_out_params out_tx;
struct ipa_wdi_out_params out_rx;
if (!(in && out)) {
IPA_WDI_ERR("empty parameters. in=%pK out=%pK\n", in, out);
return -EINVAL;
}
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized\n");
return -EPERM;
}
if (in->num_sys_pipe_needed > IPA_WDI_MAX_SUPPORTED_SYS_PIPE) {
IPA_WDI_ERR("ipa can only support up to %d sys pipe\n",
IPA_WDI_MAX_SUPPORTED_SYS_PIPE);
return -EINVAL;
}
ipa_wdi_ctx->num_sys_pipe_needed = in->num_sys_pipe_needed;
IPA_WDI_DBG("number of sys pipe %d\n", in->num_sys_pipe_needed);
/* setup sys pipe when needed */
for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) {
ret = ipa_setup_sys_pipe(&in->sys_in[i],
&ipa_wdi_ctx->sys_pipe_hdl[i]);
if (ret) {
IPA_WDI_ERR("fail to setup sys pipe %d\n", i);
ret = -EFAULT;
goto fail_setup_sys_pipe;
}
}
if (!ipa_pm_is_used()) {
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_WLAN_PROD;
param.reg_params.user_data = ipa_wdi_ctx;
param.reg_params.notify_cb = ipa_wdi_rm_notify;
param.floor_voltage = IPA_VOLTAGE_SVS;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_WDI_ERR("fail to create WLAN_PROD resource\n");
ret = -EFAULT;
goto fail_setup_sys_pipe;
}
memset(&param, 0, sizeof(param));
param.name = IPA_RM_RESOURCE_WLAN_CONS;
param.request_resource = ipa_wdi_cons_request;
param.release_resource = ipa_wdi_cons_release;
ret = ipa_rm_create_resource(&param);
if (ret) {
IPA_WDI_ERR("fail to create WLAN_CONS resource\n");
goto fail_create_rm_cons;
}
if (ipa_rm_add_dependency(IPA_RM_RESOURCE_WLAN_PROD,
IPA_RM_RESOURCE_APPS_CONS)) {
IPA_WDI_ERR("fail to add rm dependency\n");
ret = -EFAULT;
goto fail_add_dependency;
}
} else {
memset(&pm_params, 0, sizeof(pm_params));
pm_params.name = "wdi";
pm_params.callback = ipa_wdi_pm_cb;
pm_params.user_data = NULL;
pm_params.group = IPA_PM_GROUP_DEFAULT;
if (ipa_pm_register(&pm_params, &ipa_wdi_ctx->ipa_pm_hdl)) {
IPA_WDI_ERR("fail to register ipa pm\n");
ret = -EFAULT;
goto fail_setup_sys_pipe;
}
}
if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
if (ipa_conn_wdi_pipes(in, out, ipa_wdi_ctx->wdi_notify)) {
IPA_WDI_ERR("fail to setup wdi pipes\n");
ret = -EFAULT;
goto fail_connect_pipe;
}
} else {
memset(&in_tx, 0, sizeof(in_tx));
memset(&in_rx, 0, sizeof(in_rx));
memset(&out_tx, 0, sizeof(out_tx));
memset(&out_rx, 0, sizeof(out_rx));
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
in_rx.wdi_notify = ipa_wdi_ctx->wdi_notify;
#endif
if (in->is_smmu_enabled == false) {
/* first set up the rx pipe */
in_rx.sys.ipa_ep_cfg = in->u_rx.rx.ipa_ep_cfg;
in_rx.sys.client = in->u_rx.rx.client;
in_rx.sys.notify = in->notify;
in_rx.sys.priv = in->priv;
in_rx.smmu_enabled = in->is_smmu_enabled;
in_rx.u.ul.rdy_ring_base_pa =
in->u_rx.rx.transfer_ring_base_pa;
in_rx.u.ul.rdy_ring_size =
in->u_rx.rx.transfer_ring_size;
in_rx.u.ul.rdy_ring_rp_pa =
in->u_rx.rx.transfer_ring_doorbell_pa;
in_rx.u.ul.rdy_comp_ring_base_pa =
in->u_rx.rx.event_ring_base_pa;
in_rx.u.ul.rdy_comp_ring_wp_pa =
in->u_rx.rx.event_ring_doorbell_pa;
in_rx.u.ul.rdy_comp_ring_size =
in->u_rx.rx.event_ring_size;
if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) {
IPA_WDI_ERR("fail to setup rx pipe\n");
ret = -EFAULT;
goto fail_connect_pipe;
}
ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl;
out->rx_uc_db_pa = out_rx.uc_door_bell_pa;
IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa);
/* then setup tx pipe */
in_tx.sys.ipa_ep_cfg = in->u_tx.tx.ipa_ep_cfg;
in_tx.sys.client = in->u_tx.tx.client;
in_tx.smmu_enabled = in->is_smmu_enabled;
in_tx.u.dl.comp_ring_base_pa =
in->u_tx.tx.transfer_ring_base_pa;
in_tx.u.dl.comp_ring_size =
in->u_tx.tx.transfer_ring_size;
in_tx.u.dl.ce_ring_base_pa =
in->u_tx.tx.event_ring_base_pa;
in_tx.u.dl.ce_door_bell_pa =
in->u_tx.tx.event_ring_doorbell_pa;
in_tx.u.dl.ce_ring_size =
in->u_tx.tx.event_ring_size;
in_tx.u.dl.num_tx_buffers =
in->u_tx.tx.num_pkt_buffers;
if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) {
IPA_WDI_ERR("fail to setup tx pipe\n");
ret = -EFAULT;
goto fail;
}
ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl;
out->tx_uc_db_pa = out_tx.uc_door_bell_pa;
IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa);
} else { /* smmu is enabled */
/* first set up the rx pipe */
in_rx.sys.ipa_ep_cfg = in->u_rx.rx_smmu.ipa_ep_cfg;
in_rx.sys.client = in->u_rx.rx_smmu.client;
in_rx.sys.notify = in->notify;
in_rx.sys.priv = in->priv;
in_rx.smmu_enabled = in->is_smmu_enabled;
in_rx.u.ul_smmu.rdy_ring =
in->u_rx.rx_smmu.transfer_ring_base;
in_rx.u.ul_smmu.rdy_ring_size =
in->u_rx.rx_smmu.transfer_ring_size;
in_rx.u.ul_smmu.rdy_ring_rp_pa =
in->u_rx.rx_smmu.transfer_ring_doorbell_pa;
in_rx.u.ul_smmu.rdy_comp_ring =
in->u_rx.rx_smmu.event_ring_base;
in_rx.u.ul_smmu.rdy_comp_ring_wp_pa =
in->u_rx.rx_smmu.event_ring_doorbell_pa;
in_rx.u.ul_smmu.rdy_comp_ring_size =
in->u_rx.rx_smmu.event_ring_size;
if (ipa_connect_wdi_pipe(&in_rx, &out_rx)) {
IPA_WDI_ERR("fail to setup rx pipe\n");
ret = -EFAULT;
goto fail_connect_pipe;
}
ipa_wdi_ctx->rx_pipe_hdl = out_rx.clnt_hdl;
out->rx_uc_db_pa = out_rx.uc_door_bell_pa;
IPA_WDI_DBG("rx uc db pa: 0x%pad\n", &out->rx_uc_db_pa);
/* then setup tx pipe */
in_tx.sys.ipa_ep_cfg = in->u_tx.tx_smmu.ipa_ep_cfg;
in_tx.sys.client = in->u_tx.tx_smmu.client;
in_tx.smmu_enabled = in->is_smmu_enabled;
in_tx.u.dl_smmu.comp_ring =
in->u_tx.tx_smmu.transfer_ring_base;
in_tx.u.dl_smmu.comp_ring_size =
in->u_tx.tx_smmu.transfer_ring_size;
in_tx.u.dl_smmu.ce_ring =
in->u_tx.tx_smmu.event_ring_base;
in_tx.u.dl_smmu.ce_door_bell_pa =
in->u_tx.tx_smmu.event_ring_doorbell_pa;
in_tx.u.dl_smmu.ce_ring_size =
in->u_tx.tx_smmu.event_ring_size;
in_tx.u.dl_smmu.num_tx_buffers =
in->u_tx.tx_smmu.num_pkt_buffers;
if (ipa_connect_wdi_pipe(&in_tx, &out_tx)) {
IPA_WDI_ERR("fail to setup tx pipe\n");
ret = -EFAULT;
goto fail;
}
ipa_wdi_ctx->tx_pipe_hdl = out_tx.clnt_hdl;
out->tx_uc_db_pa = out_tx.uc_door_bell_pa;
IPA_WDI_DBG("tx uc db pa: 0x%pad\n", &out->tx_uc_db_pa);
}
}
return 0;
fail:
ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl);
fail_connect_pipe:
if (!ipa_pm_is_used())
ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD,
IPA_RM_RESOURCE_APPS_CONS);
else
ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl);
fail_add_dependency:
if (!ipa_pm_is_used())
ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS);
fail_create_rm_cons:
if (!ipa_pm_is_used())
ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD);
fail_setup_sys_pipe:
for (j = 0; j < i; j++)
ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[j]);
return ret;
}
EXPORT_SYMBOL(ipa_wdi_conn_pipes);
int ipa_wdi_disconn_pipes(void)
{
int i, ipa_ep_idx_rx, ipa_ep_idx_tx;
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized\n");
return -EPERM;
}
/* tear down sys pipe if needed */
for (i = 0; i < ipa_wdi_ctx->num_sys_pipe_needed; i++) {
if (ipa_teardown_sys_pipe(ipa_wdi_ctx->sys_pipe_hdl[i])) {
IPA_WDI_ERR("fail to tear down sys pipe %d\n", i);
return -EFAULT;
}
}
if (!ipa3_ctx->ipa_wdi3_over_gsi) {
ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
} else {
ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
}
if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
if (ipa_disconn_wdi_pipes(ipa_ep_idx_rx, ipa_ep_idx_tx)) {
IPA_WDI_ERR("fail to tear down wdi pipes\n");
return -EFAULT;
}
} else {
if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
IPA_WDI_ERR("fail to tear down wdi tx pipes\n");
return -EFAULT;
}
if (ipa_disconnect_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
IPA_WDI_ERR("fail to tear down wdi rx pipes\n");
return -EFAULT;
}
}
if (!ipa_pm_is_used()) {
if (ipa_rm_delete_dependency(IPA_RM_RESOURCE_WLAN_PROD,
IPA_RM_RESOURCE_APPS_CONS)) {
IPA_WDI_ERR("fail to delete rm dependency\n");
return -EFAULT;
}
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_PROD)) {
IPA_WDI_ERR("fail to delete WLAN_PROD resource\n");
return -EFAULT;
}
if (ipa_rm_delete_resource(IPA_RM_RESOURCE_WLAN_CONS)) {
IPA_WDI_ERR("fail to delete WLAN_CONS resource\n");
return -EFAULT;
}
} else {
if (ipa_pm_deregister(ipa_wdi_ctx->ipa_pm_hdl)) {
IPA_WDI_ERR("fail to deregister ipa pm\n");
return -EFAULT;
}
}
return 0;
}
EXPORT_SYMBOL(ipa_wdi_disconn_pipes);
int ipa_wdi_enable_pipes(void)
{
int ret;
int ipa_ep_idx_tx, ipa_ep_idx_rx;
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized.\n");
return -EPERM;
}
if (!ipa3_ctx->ipa_wdi3_over_gsi) {
ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
} else {
ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
}
if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
if (ipa_enable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) {
IPA_WDI_ERR("fail to enable wdi pipes\n");
return -EFAULT;
}
} else {
if (ipa_enable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
IPA_WDI_ERR("fail to enable wdi tx pipe\n");
return -EFAULT;
}
if (ipa_resume_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
IPA_WDI_ERR("fail to resume wdi tx pipe\n");
return -EFAULT;
}
if (ipa_enable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
IPA_WDI_ERR("fail to enable wdi rx pipe\n");
return -EFAULT;
}
if (ipa_resume_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
IPA_WDI_ERR("fail to resume wdi rx pipe\n");
return -EFAULT;
}
}
if (!ipa_pm_is_used()) {
ret = ipa_rm_request_resource(IPA_RM_RESOURCE_WLAN_PROD);
if (ret == -EINPROGRESS) {
if (wait_for_completion_timeout(
&ipa_wdi_ctx->wdi_completion, 10*HZ) == 0) {
IPA_WDI_ERR("WLAN_PROD res req time out\n");
return -EFAULT;
}
} else if (ret != 0) {
IPA_WDI_ERR("fail to request resource\n");
return -EFAULT;
}
} else {
ret = ipa_pm_activate_sync(ipa_wdi_ctx->ipa_pm_hdl);
if (ret) {
IPA_WDI_ERR("fail to activate ipa pm\n");
return -EFAULT;
}
}
return 0;
}
EXPORT_SYMBOL(ipa_wdi_enable_pipes);
int ipa_wdi_disable_pipes(void)
{
int ret;
int ipa_ep_idx_tx, ipa_ep_idx_rx;
if (!ipa_wdi_ctx) {
IPA_WDI_ERR("wdi ctx is not initialized.\n");
return -EPERM;
}
if (!ipa3_ctx->ipa_wdi3_over_gsi) {
ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_PROD);
ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN1_CONS);
} else {
ipa_ep_idx_rx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_PROD);
ipa_ep_idx_tx = ipa_get_ep_mapping(IPA_CLIENT_WLAN2_CONS);
}
if (ipa_wdi_ctx->wdi_version == IPA_WDI_3) {
if (ipa_disable_wdi_pipes(ipa_ep_idx_tx, ipa_ep_idx_rx)) {
IPA_WDI_ERR("fail to disable wdi pipes\n");
return -EFAULT;
}
} else {
if (ipa_suspend_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
IPA_WDI_ERR("fail to suspend wdi tx pipe\n");
return -EFAULT;
}
if (ipa_disable_wdi_pipe(ipa_wdi_ctx->tx_pipe_hdl)) {
IPA_WDI_ERR("fail to disable wdi tx pipe\n");
return -EFAULT;
}
if (ipa_suspend_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
IPA_WDI_ERR("fail to suspend wdi rx pipe\n");
return -EFAULT;
}
if (ipa_disable_wdi_pipe(ipa_wdi_ctx->rx_pipe_hdl)) {
IPA_WDI_ERR("fail to disable wdi rx pipe\n");
return -EFAULT;
}
}
if (!ipa_pm_is_used()) {
ret = ipa_rm_release_resource(IPA_RM_RESOURCE_WLAN_PROD);
if (ret != 0) {
IPA_WDI_ERR("fail to release resource\n");
return -EFAULT;
}
} else {
ret = ipa_pm_deactivate_sync(ipa_wdi_ctx->ipa_pm_hdl);
if (ret) {
IPA_WDI_ERR("fail to deactivate ipa pm\n");
return -EFAULT;
}
}
return 0;
}
EXPORT_SYMBOL(ipa_wdi_disable_pipes);
int ipa_wdi_set_perf_profile(struct ipa_wdi_perf_profile *profile)
{
struct ipa_rm_perf_profile rm_profile;
enum ipa_rm_resource_name resource_name;
if (profile == NULL) {
IPA_WDI_ERR("Invalid input\n");
return -EINVAL;
}
if (!ipa_pm_is_used()) {
rm_profile.max_supported_bandwidth_mbps =
profile->max_supported_bw_mbps;
if (profile->client == IPA_CLIENT_WLAN1_PROD ||
profile->client == IPA_CLIENT_WLAN2_PROD) {
resource_name = IPA_RM_RESOURCE_WLAN_PROD;
} else if (profile->client == IPA_CLIENT_WLAN1_CONS ||
profile->client == IPA_CLIENT_WLAN2_CONS) {
resource_name = IPA_RM_RESOURCE_WLAN_CONS;
} else {
IPA_WDI_ERR("not supported\n");
return -EINVAL;
}
if (ipa_rm_set_perf_profile(resource_name, &rm_profile)) {
IPA_WDI_ERR("fail to setup rm perf profile\n");
return -EFAULT;
}
} else {
if (ipa_pm_set_throughput(ipa_wdi_ctx->ipa_pm_hdl,
profile->max_supported_bw_mbps)) {
IPA_WDI_ERR("fail to setup pm perf profile\n");
return -EFAULT;
}
}
return 0;
}
EXPORT_SYMBOL(ipa_wdi_set_perf_profile);
int ipa_wdi_create_smmu_mapping(u32 num_buffers,
struct ipa_wdi_buffer_info *info)
{
return ipa_create_wdi_mapping(num_buffers, info);
}
EXPORT_SYMBOL(ipa_wdi_create_smmu_mapping);
int ipa_wdi_release_smmu_mapping(u32 num_buffers,
struct ipa_wdi_buffer_info *info)
{
return ipa_release_wdi_mapping(num_buffers, info);
}
EXPORT_SYMBOL(ipa_wdi_release_smmu_mapping);
int ipa_wdi_get_stats(struct IpaHwStatsWDIInfoData_t *stats)
{
return ipa_get_wdi_stats(stats);
}
EXPORT_SYMBOL(ipa_wdi_get_stats);
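/*
 * Summary (added for clarity, not part of the original file): typical
 * WLAN-driver call order for this API:
 *
 *   ipa_wdi_init()          - create the context, query uC/SMMU/GSI state
 *   ipa_wdi_reg_intf()      - register netdev and partial headers
 *   ipa_wdi_conn_pipes()    - connect TX/RX (WDI-2 or WDI-3 path)
 *   ipa_wdi_enable_pipes()  - enable pipes and take the power vote
 *   ipa_wdi_disable_pipes(), ipa_wdi_disconn_pipes(),
 *   ipa_wdi_dereg_intf(), ipa_wdi_cleanup() - teardown in reverse order
 */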

1905
ipa/ipa_clients/ipa_wigig.c Normal file

File diff suppressed because it is too large

1256
ipa/ipa_clients/odu_bridge.c Normal file

File diff suppressed because it is too large

2704
ipa/ipa_clients/rndis_ipa.c Normal file

File diff suppressed because it is too large

74
ipa/ipa_clients/rndis_ipa_trace.h Normal file

@@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rndis_ipa
#define TRACE_INCLUDE_FILE rndis_ipa_trace
#if !defined(_RNDIS_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _RNDIS_IPA_TRACE_H
#include <linux/tracepoint.h>
TRACE_EVENT(
rndis_netif_ni,
TP_PROTO(unsigned long proto),
TP_ARGS(proto),
TP_STRUCT__entry(
__field(unsigned long, proto)
),
TP_fast_assign(
__entry->proto = proto;
),
TP_printk("proto =%lu\n", __entry->proto)
);
TRACE_EVENT(
rndis_tx_dp,
TP_PROTO(unsigned long proto),
TP_ARGS(proto),
TP_STRUCT__entry(
__field(unsigned long, proto)
),
TP_fast_assign(
__entry->proto = proto;
),
TP_printk("proto =%lu\n", __entry->proto)
);
TRACE_EVENT(
rndis_status_rcvd,
TP_PROTO(unsigned long proto),
TP_ARGS(proto),
TP_STRUCT__entry(
__field(unsigned long, proto)
),
TP_fast_assign(
__entry->proto = proto;
),
TP_printk("proto =%lu\n", __entry->proto)
);
#endif /* _RNDIS_IPA_TRACE_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../techpack/dataipa/ipa/ipa_clients
#include <trace/define_trace.h>
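/*
 * Illustrative sketch (not part of this file): rndis_ipa.c emits the
 * tracepoints declared above through the generated trace_* helpers;
 * 'skb' here is a placeholder for the packet being transmitted.
 */
#if 0	/* illustrative sketch, not compiled */
	trace_rndis_tx_dp(skb->protocol);
#endif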

476
ipa/ipa_common_i.h Normal file

@@ -0,0 +1,476 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa_mhi.h>
#include <linux/ipa_qmi_service_v01.h>
#ifndef _IPA_COMMON_I_H_
#define _IPA_COMMON_I_H_
#include <linux/errno.h>
#include <linux/ipc_logging.h>
#include <linux/ipa.h>
#include <linux/ipa_uc_offload.h>
#include <linux/ipa_wdi3.h>
#include <linux/ipa_wigig.h>
#include <linux/ratelimit.h>
#define WARNON_RATELIMIT_BURST 1
#define IPA_RATELIMIT_BURST 1
#define __FILENAME__ \
(strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
#define IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = EP; \
log_info.id_string = (client < 0 || client >= IPA_CLIENT_MAX) \
? "Invalid Client" : ipa_clients_strings[client]
#define IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = SIMPLE; \
log_info.id_string = __func__
#define IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = RESOURCE; \
log_info.id_string = resource_name
#define IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str) \
log_info.file = __FILENAME__; \
log_info.line = __LINE__; \
log_info.type = SPECIAL; \
log_info.id_string = id_str
#define IPA_ACTIVE_CLIENTS_INC_EP(client) \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
ipa_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_EP(client) \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_EP(log_info, client); \
ipa_dec_client_disable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_INC_SIMPLE() \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
ipa_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_SIMPLE() \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info); \
ipa_dec_client_disable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_INC_RESOURCE(resource_name) \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
ipa_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_RESOURCE(resource_name) \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_RESOURCE(log_info, resource_name); \
ipa_dec_client_disable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_INC_SPECIAL(id_str) \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
ipa_inc_client_enable_clks(&log_info); \
} while (0)
#define IPA_ACTIVE_CLIENTS_DEC_SPECIAL(id_str) \
do { \
struct ipa_active_client_logging_info log_info; \
IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log_info, id_str); \
ipa_dec_client_disable_clks(&log_info); \
} while (0)
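/*
 * Illustrative sketch (not part of this file): the INC/DEC macros above
 * bracket any code path that touches IPA hardware, so the clock vote
 * and its entry in the active-clients log stay balanced.
 */
#if 0	/* illustrative sketch, not compiled */
	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	/* ... access IPA registers ... */
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
#endif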
/*
* Print at most one warning message every 5 seconds when multiple
* warnings arrive back to back.
*/
#define WARN_ON_RATELIMIT_IPA(condition) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
WARNON_RATELIMIT_BURST); \
int rtn = !!(condition); \
\
if (unlikely(rtn && __ratelimit(&_rs))) \
WARN_ON(rtn); \
})
/*
* Print at most one error message every 5 seconds when multiple
* errors arrive back to back.
*/
#define pr_err_ratelimited_ipa(fmt, args...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
DEFAULT_RATELIMIT_INTERVAL, \
IPA_RATELIMIT_BURST); \
\
if (__ratelimit(&_rs)) \
pr_err(fmt, ## args); \
})
#define ipa_assert_on(condition)\
do {\
if (unlikely(condition))\
ipa_assert();\
} while (0)
#define IPA_CLIENT_IS_PROD(x) \
(x < IPA_CLIENT_MAX && (x & 0x1) == 0)
#define IPA_CLIENT_IS_CONS(x) \
(x < IPA_CLIENT_MAX && (x & 0x1) == 1)
#define IPA_GSI_CHANNEL_STOP_SLEEP_MIN_USEC (1000)
#define IPA_GSI_CHANNEL_STOP_SLEEP_MAX_USEC (2000)
enum ipa_active_client_log_type {
EP,
SIMPLE,
RESOURCE,
SPECIAL,
INVALID
};
struct ipa_active_client_logging_info {
const char *id_string;
char *file;
int line;
enum ipa_active_client_log_type type;
};
/**
* struct ipa_mem_buffer - IPA memory buffer
* @base: base
* @phys_base: physical base address
* @size: size of memory buffer
*/
struct ipa_mem_buffer {
void *base;
dma_addr_t phys_base;
u32 size;
};
/**
* enum ipa3_mhi_burst_mode - MHI channel burst mode state
*
* Values are according to MHI specification
* @IPA_MHI_BURST_MODE_DEFAULT: burst mode enabled for HW channels,
* disabled for SW channels
* @IPA_MHI_BURST_MODE_RESERVED:
* @IPA_MHI_BURST_MODE_DISABLE: Burst mode is disabled for this channel
* @IPA_MHI_BURST_MODE_ENABLE: Burst mode is enabled for this channel
*
*/
enum ipa3_mhi_burst_mode {
IPA_MHI_BURST_MODE_DEFAULT,
IPA_MHI_BURST_MODE_RESERVED,
IPA_MHI_BURST_MODE_DISABLE,
IPA_MHI_BURST_MODE_ENABLE,
};
/**
* enum ipa_hw_mhi_channel_states - MHI channel state machine
*
* Values are according to MHI specification
* @IPA_HW_MHI_CHANNEL_STATE_DISABLE: Channel is disabled and not processed by
* the host or device.
* @IPA_HW_MHI_CHANNEL_STATE_ENABLE: A channel is enabled after being
* initialized and configured by host, including its channel context and
* associated transfer ring. While this state, the channel is not active
* and the device does not process transfer.
* @IPA_HW_MHI_CHANNEL_STATE_RUN: The device processes transfers and doorbell
* for channels.
* @IPA_HW_MHI_CHANNEL_STATE_SUSPEND: Used to halt operations on the channel.
* The device does not process transfers for the channel in this state.
* This state is typically used to synchronize the transition to low power
* modes.
* @IPA_HW_MHI_CHANNEL_STATE_STOP: Used to halt operations on the channel.
* The device does not process transfers for the channel in this state.
* @IPA_HW_MHI_CHANNEL_STATE_ERROR: The device detected an error in an element
* from the transfer ring associated with the channel.
* @IPA_HW_MHI_CHANNEL_STATE_INVALID: Invalid state. Shall not be in use in
* operational scenario.
*/
enum ipa_hw_mhi_channel_states {
IPA_HW_MHI_CHANNEL_STATE_DISABLE = 0,
IPA_HW_MHI_CHANNEL_STATE_ENABLE = 1,
IPA_HW_MHI_CHANNEL_STATE_RUN = 2,
IPA_HW_MHI_CHANNEL_STATE_SUSPEND = 3,
IPA_HW_MHI_CHANNEL_STATE_STOP = 4,
IPA_HW_MHI_CHANNEL_STATE_ERROR = 5,
IPA_HW_MHI_CHANNEL_STATE_INVALID = 0xFF
};
/**
* Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
* command. Parameters are sent as 32b immediate parameters.
* @isDlUlSyncEnabled: Flag to indicate if DL UL Synchronization is enabled
* @UlAccmVal: UL Timer Accumulation value (Period after which device will poll
* for UL data)
* @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
* @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
*/
union IpaHwMhiDlUlSyncCmdData_t {
struct IpaHwMhiDlUlSyncCmdParams_t {
u32 isDlUlSyncEnabled:8;
u32 UlAccmVal:8;
u32 ulMsiEventThreshold:8;
u32 dlMsiEventThreshold:8;
} params;
u32 raw32b;
};
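/*
 * Illustrative sketch (not part of this file): packing the DL/UL sync
 * parameters into the single 32-bit immediate word sent to the uC.
 * The threshold values are arbitrary examples.
 */
#if 0	/* illustrative sketch, not compiled */
	union IpaHwMhiDlUlSyncCmdData_t cmd;

	cmd.params.isDlUlSyncEnabled = 1;
	cmd.params.UlAccmVal = 8;
	cmd.params.ulMsiEventThreshold = 4;
	cmd.params.dlMsiEventThreshold = 4;
	ipa_uc_mhi_send_dl_ul_sync_info(&cmd);	/* declared below */
#endif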
struct ipa_mhi_ch_ctx {
u8 chstate;/*0-7*/
u8 brstmode:2;/*8-9*/
u8 pollcfg:6;/*10-15*/
u16 rsvd;/*16-31*/
u32 chtype;
u32 erindex;
u64 rbase;
u64 rlen;
u64 rp;
u64 wp;
} __packed;
struct ipa_mhi_ev_ctx {
u32 intmodc:16;
u32 intmodt:16;
u32 ertype;
u32 msivec;
u64 rbase;
u64 rlen;
u64 rp;
u64 wp;
} __packed;
struct ipa_mhi_init_uc_engine {
struct ipa_mhi_msi_info *msi;
u32 mmio_addr;
u32 host_ctrl_addr;
u32 host_data_addr;
u32 first_ch_idx;
u32 first_er_idx;
union IpaHwMhiDlUlSyncCmdData_t *ipa_cached_dl_ul_sync_info;
};
struct ipa_mhi_init_gsi_engine {
u32 first_ch_idx;
};
struct ipa_mhi_init_engine {
struct ipa_mhi_init_uc_engine uC;
struct ipa_mhi_init_gsi_engine gsi;
};
struct start_gsi_channel {
enum ipa_hw_mhi_channel_states state;
struct ipa_mhi_msi_info *msi;
struct ipa_mhi_ev_ctx *ev_ctx_host;
u64 event_context_addr;
struct ipa_mhi_ch_ctx *ch_ctx_host;
u64 channel_context_addr;
void (*ch_err_cb)(struct gsi_chan_err_notify *notify);
void (*ev_err_cb)(struct gsi_evt_err_notify *notify);
void *channel;
bool assert_bit40;
struct gsi_mhi_channel_scratch *mhi;
unsigned long *cached_gsi_evt_ring_hdl;
uint8_t evchid;
};
struct start_uc_channel {
enum ipa_hw_mhi_channel_states state;
u8 index;
u8 id;
};
struct start_mhi_channel {
struct start_uc_channel uC;
struct start_gsi_channel gsi;
};
struct ipa_mhi_connect_params_internal {
struct ipa_sys_connect_params *sys;
u8 channel_id;
struct start_mhi_channel start;
};
/**
* struct ipa_hdr_offset_entry - IPA header offset entry
* @link: entry's link in global header offset entries list
* @offset: the offset
* @bin: bin
* @ipacm_installed: indicate if installed by ipacm
*/
struct ipa_hdr_offset_entry {
struct list_head link;
u32 offset;
u32 bin;
bool ipacm_installed;
};
extern const char *ipa_clients_strings[];
#define IPA_IPC_LOGGING(buf, fmt, args...) \
do { \
if (buf) \
ipc_log_string((buf), fmt, __func__, __LINE__, \
## args); \
} while (0)
void ipa_inc_client_enable_clks(struct ipa_active_client_logging_info *id);
void ipa_dec_client_disable_clks(struct ipa_active_client_logging_info *id);
int ipa_inc_client_enable_clks_no_block(
struct ipa_active_client_logging_info *id);
int ipa_suspend_resource_no_block(enum ipa_rm_resource_name resource);
int ipa_resume_resource(enum ipa_rm_resource_name name);
int ipa_suspend_resource_sync(enum ipa_rm_resource_name resource);
int ipa_set_required_perf_profile(enum ipa_voltage_level floor_voltage,
u32 bandwidth_mbps);
void *ipa_get_ipc_logbuf(void);
void *ipa_get_ipc_logbuf_low(void);
void ipa_assert(void);
/* MHI */
int ipa_mhi_init_engine(struct ipa_mhi_init_engine *params);
int ipa_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
u32 *clnt_hdl);
int ipa_disconnect_mhi_pipe(u32 clnt_hdl);
bool ipa_mhi_stop_gsi_channel(enum ipa_client_type client);
int ipa_qmi_enable_force_clear_datapath_send(
struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
int ipa_qmi_disable_force_clear_datapath_send(
struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
int ipa_generate_tag_process(void);
int ipa_disable_sps_pipe(enum ipa_client_type client);
int ipa_mhi_reset_channel_internal(enum ipa_client_type client);
int ipa_mhi_start_channel_internal(enum ipa_client_type client);
bool ipa_mhi_sps_channel_empty(enum ipa_client_type client);
int ipa_mhi_resume_channels_internal(enum ipa_client_type client,
bool LPTransitionRejected, bool brstmode_enabled,
union __packed gsi_channel_scratch ch_scratch, u8 index);
int ipa_mhi_handle_ipa_config_req(struct ipa_config_req_msg_v01 *config_req);
int ipa_mhi_query_ch_info(enum ipa_client_type client,
struct gsi_chan_info *ch_info);
int ipa_mhi_destroy_channel(enum ipa_client_type client);
int ipa_mhi_is_using_dma(bool *flag);
const char *ipa_mhi_get_state_str(int state);
/* MHI uC */
int ipa_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd);
int ipa_uc_mhi_init
(void (*ready_cb)(void), void (*wakeup_request_cb)(void));
void ipa_uc_mhi_cleanup(void);
int ipa_uc_mhi_reset_channel(int channelHandle);
int ipa_uc_mhi_suspend_channel(int channelHandle);
int ipa_uc_mhi_stop_event_update_channel(int channelHandle);
int ipa_uc_mhi_print_stats(char *dbg_buff, int size);
/* uC */
int ipa_uc_state_check(void);
/* general */
void ipa_get_holb(int ep_idx, struct ipa_ep_cfg_holb *holb);
void ipa_set_tag_process_before_gating(bool val);
bool ipa_has_open_aggr_frame(enum ipa_client_type client);
int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl,
struct ipa_ntn_conn_in_params *params);
u8 *ipa_write_64(u64 w, u8 *dest);
u8 *ipa_write_32(u32 w, u8 *dest);
u8 *ipa_write_16(u16 hw, u8 *dest);
u8 *ipa_write_8(u8 b, u8 *dest);
u8 *ipa_pad_to_64(u8 *dest);
u8 *ipa_pad_to_32(u8 *dest);
int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
void *user_data);
void ipa_ntn_uc_dereg_rdyCB(void);
int ipa_conn_wdi_pipes(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out,
ipa_wdi_meter_notifier_cb wdi_notify);
int ipa_disconn_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
int ipa_enable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
int ipa_disable_wdi_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx);
const char *ipa_get_version_string(enum ipa_hw_type ver);
int ipa_start_gsi_channel(u32 clnt_hdl);
bool ipa_pm_is_used(void);
int ipa_smmu_store_sgt(struct sg_table **out_ch_ptr,
struct sg_table *in_sgt_ptr);
int ipa_smmu_free_sgt(struct sg_table **out_sgt_ptr);
int ipa_ut_module_init(void);
void ipa_ut_module_exit(void);
int ipa_wigig_uc_init(
struct ipa_wdi_uc_ready_params *inout,
ipa_wigig_misc_int_cb int_notify,
phys_addr_t *uc_db_pa);
int ipa_conn_wigig_rx_pipe_i(void *in, struct ipa_wigig_conn_out_params *out);
int ipa_conn_wigig_client_i(void *in, struct ipa_wigig_conn_out_params *out);
int ipa_wigig_uc_msi_init(
bool init,
phys_addr_t periph_baddr_pa,
phys_addr_t pseudo_cause_pa,
phys_addr_t int_gen_tx_pa,
phys_addr_t int_gen_rx_pa,
phys_addr_t dma_ep_misc_pa);
int ipa_disconn_wigig_pipe_i(enum ipa_client_type client,
struct ipa_wigig_pipe_setup_info_smmu *pipe_smmu,
void *dbuff);
int ipa_enable_wigig_pipe_i(enum ipa_client_type client);
int ipa_disable_wigig_pipe_i(enum ipa_client_type client);
int ipa_wigig_send_msg(int msg_type,
const char *netdev_name, u8 *mac,
enum ipa_client_type client, bool to_wigig);
int ipa_wigig_save_regs(void);
void ipa_register_client_callback(int (*client_cb)(bool is_lock),
bool (*teth_port_state)(void), u32 ipa_ep_idx);
void ipa_deregister_client_callback(u32 ipa_ep_idx);
#endif /* _IPA_COMMON_I_H_ */

1184
ipa/ipa_rm.c Normal file

File diff suppressed because it is too large

240
ipa/ipa_rm_dependency_graph.c Arquivo normal
Ver arquivo

@@ -0,0 +1,240 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
#include "ipa_rm_dependency_graph.h"
#include "ipa_rm_i.h"
static int ipa_rm_dep_get_index(enum ipa_rm_resource_name resource_name)
{
int resource_index = IPA_RM_INDEX_INVALID;
if (IPA_RM_RESORCE_IS_PROD(resource_name))
resource_index = ipa_rm_prod_index(resource_name);
else if (IPA_RM_RESORCE_IS_CONS(resource_name))
resource_index = ipa_rm_cons_index(resource_name);
return resource_index;
}
/**
* ipa_rm_dep_graph_create() - creates graph
* @dep_graph: [out] created dependency graph
*
 * Returns: 0 on success, -ENOMEM on allocation failure
*/
int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph)
{
int result = 0;
*dep_graph = kzalloc(sizeof(**dep_graph), GFP_KERNEL);
if (!*dep_graph)
result = -ENOMEM;
return result;
}
/**
 * ipa_rm_dep_graph_delete() - destroys the graph
* @graph: [in] dependency graph
*
* Frees all resources.
*/
void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph)
{
int resource_index;
if (!graph) {
IPA_RM_ERR("invalid params\n");
return;
}
for (resource_index = 0;
resource_index < IPA_RM_RESOURCE_MAX;
resource_index++)
kfree(graph->resource_table[resource_index]);
memset(graph->resource_table, 0, sizeof(graph->resource_table));
}
/**
* ipa_rm_dep_graph_get_resource() - provides a resource by name
* @graph: [in] dependency graph
* @name: [in] name of the resource
* @resource: [out] resource in case of success
*
* Returns: 0 on success, negative on failure
*/
int ipa_rm_dep_graph_get_resource(
struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name resource_name,
struct ipa_rm_resource **resource)
{
int result;
int resource_index;
if (!graph) {
result = -EINVAL;
goto bail;
}
resource_index = ipa_rm_dep_get_index(resource_name);
if (resource_index == IPA_RM_INDEX_INVALID) {
result = -EINVAL;
goto bail;
}
*resource = graph->resource_table[resource_index];
if (!*resource) {
result = -EINVAL;
goto bail;
}
result = 0;
bail:
return result;
}
/**
* ipa_rm_dep_graph_add() - adds resource to graph
* @graph: [in] dependency graph
* @resource: [in] resource to add
*
* Returns: 0 on success, negative on failure
*/
int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
struct ipa_rm_resource *resource)
{
int result = 0;
int resource_index;
if (!graph || !resource) {
result = -EINVAL;
goto bail;
}
resource_index = ipa_rm_dep_get_index(resource->name);
if (resource_index == IPA_RM_INDEX_INVALID) {
result = -EINVAL;
goto bail;
}
graph->resource_table[resource_index] = resource;
bail:
return result;
}
/**
* ipa_rm_dep_graph_remove() - removes resource from graph
* @graph: [in] dependency graph
 * @resource_name: [in] name of the resource to remove
*
* Returns: 0 on success, negative on failure
*/
int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name resource_name)
{
if (!graph)
return -EINVAL;
graph->resource_table[resource_name] = NULL;
return 0;
}
/**
* ipa_rm_dep_graph_add_dependency() - adds dependency between
* two nodes in graph
* @graph: [in] dependency graph
 * @resource_name: [in] name of the dependent resource
 * @depends_on_name: [in] name of the resource it depends on
* @userspace_dep: [in] operation requested by userspace ?
*
* Returns: 0 on success, negative on failure
*/
int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name,
bool userspace_dep)
{
struct ipa_rm_resource *dependent = NULL;
struct ipa_rm_resource *dependency = NULL;
int result;
if (!graph ||
!IPA_RM_RESORCE_IS_PROD(resource_name) ||
!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
IPA_RM_ERR("invalid params\n");
result = -EINVAL;
goto bail;
}
if (ipa_rm_dep_graph_get_resource(graph,
resource_name,
&dependent)) {
IPA_RM_ERR("%s does not exist\n",
ipa_rm_resource_str(resource_name));
result = -EINVAL;
goto bail;
}
if (ipa_rm_dep_graph_get_resource(graph,
depends_on_name,
&dependency)) {
IPA_RM_ERR("%s does not exist\n",
ipa_rm_resource_str(depends_on_name));
result = -EINVAL;
goto bail;
}
result = ipa_rm_resource_add_dependency(dependent, dependency,
userspace_dep);
bail:
IPA_RM_DBG("EXIT with %d\n", result);
return result;
}
/**
 * ipa_rm_dep_graph_delete_dependency() - deletes dependency between
* two nodes in graph
* @graph: [in] dependency graph
 * @resource_name: [in] name of the dependent resource
 * @depends_on_name: [in] name of the dependency to delete
* @userspace_dep: [in] operation requested by userspace ?
*
* Returns: 0 on success, negative on failure
*
*/
int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name,
bool userspace_dep)
{
struct ipa_rm_resource *dependent = NULL;
struct ipa_rm_resource *dependency = NULL;
int result;
if (!graph ||
!IPA_RM_RESORCE_IS_PROD(resource_name) ||
!IPA_RM_RESORCE_IS_CONS(depends_on_name)) {
IPA_RM_ERR("invalid params\n");
result = -EINVAL;
goto bail;
}
if (ipa_rm_dep_graph_get_resource(graph,
resource_name,
&dependent)) {
IPA_RM_DBG("%s does not exist\n",
ipa_rm_resource_str(resource_name));
result = -EINVAL;
goto bail;
}
if (ipa_rm_dep_graph_get_resource(graph,
depends_on_name,
&dependency)) {
IPA_RM_DBG("%s does not exist\n",
ipa_rm_resource_str(depends_on_name));
result = -EINVAL;
goto bail;
}
result = ipa_rm_resource_delete_dependency(dependent, dependency,
userspace_dep);
bail:
IPA_RM_DBG("EXIT with %d\n", result);
return result;
}
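
Both add and delete follow the same shape: look up the dependent producer, look up the consumer it depends on, then hand the pair to the resource layer. A hedged sketch of a caller wiring a producer to a consumer (the resource names are illustrative, and the resources are assumed to have been added to the graph via ipa_rm_dep_graph_add() already):

static int example_wire_dependency(struct ipa_rm_dep_graph *graph)
{
	/* USB_PROD will not be granted before APPS_CONS is granted */
	return ipa_rm_dep_graph_add_dependency(graph,
			IPA_RM_RESOURCE_USB_PROD,
			IPA_RM_RESOURCE_APPS_CONS,
			false /* kernel, not userspace, dependency */);
}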

42  ipa/ipa_rm_dependency_graph.h  (new file)

@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA_RM_DEPENDENCY_GRAPH_H_
#define _IPA_RM_DEPENDENCY_GRAPH_H_
#include <linux/list.h>
#include <linux/ipa.h>
#include "ipa_rm_resource.h"
struct ipa_rm_dep_graph {
struct ipa_rm_resource *resource_table[IPA_RM_RESOURCE_MAX];
};
int ipa_rm_dep_graph_get_resource(
struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name name,
struct ipa_rm_resource **resource);
int ipa_rm_dep_graph_create(struct ipa_rm_dep_graph **dep_graph);
void ipa_rm_dep_graph_delete(struct ipa_rm_dep_graph *graph);
int ipa_rm_dep_graph_add(struct ipa_rm_dep_graph *graph,
struct ipa_rm_resource *resource);
int ipa_rm_dep_graph_remove(struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name resource_name);
int ipa_rm_dep_graph_add_dependency(struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name,
bool userspace_dep);
int ipa_rm_dep_graph_delete_dependency(struct ipa_rm_dep_graph *graph,
enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name,
bool userspace_dep);
#endif /* _IPA_RM_DEPENDENCY_GRAPH_H_ */

150  ipa/ipa_rm_i.h  (new file)

@@ -0,0 +1,150 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA_RM_I_H_
#define _IPA_RM_I_H_
#include <linux/workqueue.h>
#include <linux/ipa.h>
#include "ipa_rm_resource.h"
#include "ipa_common_i.h"
#define IPA_RM_DRV_NAME "ipa_rm"
#define IPA_RM_DBG_LOW(fmt, args...) \
do { \
pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
} while (0)
#define IPA_RM_DBG(fmt, args...) \
do { \
pr_debug(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
} while (0)
#define IPA_RM_ERR(fmt, args...) \
do { \
pr_err(IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPA_RM_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
} while (0)
#define IPA_RM_RESORCE_IS_PROD(x) \
(x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 0)
#define IPA_RM_RESORCE_IS_CONS(x) \
(x < IPA_RM_RESOURCE_MAX && (x & 0x1) == 1)
#define IPA_RM_INDEX_INVALID (-1)
#define IPA_RM_RELEASE_DELAY_IN_MSEC 1000
int ipa_rm_prod_index(enum ipa_rm_resource_name resource_name);
int ipa_rm_cons_index(enum ipa_rm_resource_name resource_name);
/**
* struct ipa_rm_delayed_release_work_type - IPA RM delayed resource release
* work type
 * @work: work struct
 * @resource_name: name of the resource on which this work should be done
* @needed_bw: bandwidth required for resource in Mbps
* @dec_usage_count: decrease usage count on release ?
*/
struct ipa_rm_delayed_release_work_type {
struct delayed_work work;
enum ipa_rm_resource_name resource_name;
u32 needed_bw;
bool dec_usage_count;
};
/**
* enum ipa_rm_wq_cmd - workqueue commands
*/
enum ipa_rm_wq_cmd {
IPA_RM_WQ_NOTIFY_PROD,
IPA_RM_WQ_NOTIFY_CONS,
IPA_RM_WQ_RESOURCE_CB
};
/**
 * struct ipa_rm_wq_work_type - IPA RM workqueue specific
* work type
* @work: work struct
* @wq_cmd: command that should be processed in workqueue context
* @resource_name: name of the resource on which this work
* should be done
* @event: event to notify
* @notify_registered_only: notify only clients registered by
* ipa_rm_register()
*/
struct ipa_rm_wq_work_type {
struct work_struct work;
enum ipa_rm_wq_cmd wq_cmd;
enum ipa_rm_resource_name resource_name;
enum ipa_rm_event event;
bool notify_registered_only;
};
/**
 * struct ipa_rm_wq_suspend_resume_work_type - IPA RM workqueue resume or
* suspend work type
* @work: work struct
* @resource_name: name of the resource on which this work
* should be done
 * @prev_state: resource state before the suspend/resume request
 * @needed_bw: bandwidth required for resource in Mbps
 * @inc_usage_count: increase usage count on request ?
*/
struct ipa_rm_wq_suspend_resume_work_type {
struct work_struct work;
enum ipa_rm_resource_name resource_name;
enum ipa_rm_resource_state prev_state;
u32 needed_bw;
bool inc_usage_count;
};
int ipa_rm_wq_send_cmd(enum ipa_rm_wq_cmd wq_cmd,
enum ipa_rm_resource_name resource_name,
enum ipa_rm_event event,
bool notify_registered_only);
int ipa_rm_wq_send_resume_cmd(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_state prev_state,
u32 needed_bw,
bool inc_usage_count);
int ipa_rm_wq_send_suspend_cmd(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_state prev_state,
u32 needed_bw);
int ipa_rm_initialize(void);
int ipa_rm_stat(char *buf, int size);
const char *ipa_rm_resource_str(enum ipa_rm_resource_name resource_name);
void ipa_rm_perf_profile_change(enum ipa_rm_resource_name resource_name);
int ipa_rm_request_resource_with_timer(enum ipa_rm_resource_name resource_name);
void delayed_release_work_func(struct work_struct *work);
int ipa_rm_add_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name);
int ipa_rm_delete_dependency_from_ioctl(enum ipa_rm_resource_name resource_name,
enum ipa_rm_resource_name depends_on_name);
void ipa_rm_exit(void);
#endif /* _IPA_RM_I_H_ */
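
The IPA_RM_RESORCE_IS_PROD/IS_CONS macros above encode a naming convention in enum ipa_rm_resource_name: producers sit on even values and consumers on odd ones, so a single parity test classifies a resource before the type-specific index lookup. The same pattern appears in ipa_rm_dep_get_index() above and in the peers-list code below; a compact restatement for reference:

static int example_resource_index(enum ipa_rm_resource_name name)
{
	if (IPA_RM_RESORCE_IS_PROD(name))	/* even value => producer */
		return ipa_rm_prod_index(name);
	if (IPA_RM_RESORCE_IS_CONS(name))	/* odd value => consumer */
		return ipa_rm_cons_index(name);
	return IPA_RM_INDEX_INVALID;
}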

277  ipa/ipa_rm_inactivity_timer.c  (new file)

@@ -0,0 +1,277 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/unistd.h>
#include <linux/workqueue.h>
#include <linux/ipa.h>
#include "ipa_rm_i.h"
#define MAX_WS_NAME 20
/**
* struct ipa_rm_it_private - IPA RM Inactivity Timer private
* data
* @initied: indicates if instance was initialized
* @lock - spinlock for mutual exclusion
* @resource_name - resource name
 * @work: delayed work object for running the delayed release
 * function
 * @resource_requested: boolean flag indicating if the resource was requested
 * @reschedule_work: boolean flag indicating not to release, but to
 * reschedule the release work instead
 * @work_in_progress: boolean flag indicating if release work was scheduled
 * @jiffies: number of jiffies for timeout
*/
struct ipa_rm_it_private {
bool initied;
enum ipa_rm_resource_name resource_name;
spinlock_t lock;
struct delayed_work work;
bool resource_requested;
bool reschedule_work;
bool work_in_progress;
unsigned long jiffies;
struct wakeup_source w_lock;
char w_lock_name[MAX_WS_NAME];
};
static struct ipa_rm_it_private ipa_rm_it_handles[IPA_RM_RESOURCE_MAX];
/**
 * ipa_rm_inactivity_timer_func() - called when the timer expires, in
 * the context of the shared workqueue. Checks internally if the
 * reschedule_work flag is set. If it is not set, this function calls
 * ipa_rm_release_resource(). If reschedule_work is set, this function
 * reschedules the work instead. The flag is cleared when
 * ipa_rm_inactivity_timer_release_resource() is called.
*
* @work: work object provided by the work queue
*
* Return codes:
* None
*/
static void ipa_rm_inactivity_timer_func(struct work_struct *work)
{
struct ipa_rm_it_private *me = container_of(to_delayed_work(work),
struct ipa_rm_it_private,
work);
unsigned long flags;
IPA_RM_DBG_LOW("timer expired for resource %d\n", me->resource_name);
spin_lock_irqsave(
&ipa_rm_it_handles[me->resource_name].lock, flags);
if (ipa_rm_it_handles[me->resource_name].reschedule_work) {
IPA_RM_DBG_LOW("setting delayed work\n");
ipa_rm_it_handles[me->resource_name].reschedule_work = false;
queue_delayed_work(system_unbound_wq,
&ipa_rm_it_handles[me->resource_name].work,
ipa_rm_it_handles[me->resource_name].jiffies);
} else if (ipa_rm_it_handles[me->resource_name].resource_requested) {
IPA_RM_DBG_LOW("not calling release\n");
ipa_rm_it_handles[me->resource_name].work_in_progress = false;
} else {
IPA_RM_DBG_LOW("calling release_resource on resource %d\n",
me->resource_name);
__pm_relax(&ipa_rm_it_handles[me->resource_name].w_lock);
ipa_rm_release_resource(me->resource_name);
ipa_rm_it_handles[me->resource_name].work_in_progress = false;
}
spin_unlock_irqrestore(
&ipa_rm_it_handles[me->resource_name].lock, flags);
}
/**
* ipa_rm_inactivity_timer_init() - Init function for IPA RM
 * inactivity timer. This function shall be called prior to calling
* any other API of IPA RM inactivity timer.
*
* @resource_name: Resource name. @see ipa_rm.h
 * @msecs: time in milliseconds that the IPA RM inactivity timer
 * shall wait before calling ipa_rm_release_resource().
*
* Return codes:
* 0: success
* -EINVAL: invalid parameters
*/
int ipa_rm_inactivity_timer_init(enum ipa_rm_resource_name resource_name,
unsigned long msecs)
{
struct wakeup_source *pwlock;
char *name;
IPA_RM_DBG_LOW("resource %d\n", resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
IPA_RM_ERR("Invalid parameter\n");
return -EINVAL;
}
if (ipa_rm_it_handles[resource_name].initied) {
IPA_RM_ERR("resource %d already inited\n", resource_name);
return -EINVAL;
}
spin_lock_init(&ipa_rm_it_handles[resource_name].lock);
ipa_rm_it_handles[resource_name].resource_name = resource_name;
ipa_rm_it_handles[resource_name].jiffies = msecs_to_jiffies(msecs);
ipa_rm_it_handles[resource_name].resource_requested = false;
ipa_rm_it_handles[resource_name].reschedule_work = false;
ipa_rm_it_handles[resource_name].work_in_progress = false;
pwlock = &(ipa_rm_it_handles[resource_name].w_lock);
name = ipa_rm_it_handles[resource_name].w_lock_name;
snprintf(name, MAX_WS_NAME, "IPA_RM%d\n", resource_name);
wakeup_source_init(pwlock, name);
INIT_DELAYED_WORK(&ipa_rm_it_handles[resource_name].work,
ipa_rm_inactivity_timer_func);
ipa_rm_it_handles[resource_name].initied = true;
return 0;
}
EXPORT_SYMBOL(ipa_rm_inactivity_timer_init);
/**
* ipa_rm_inactivity_timer_destroy() - De-Init function for IPA
* RM inactivity timer.
* @resource_name: Resource name. @see ipa_rm.h
* Return codes:
* 0: success
* -EINVAL: invalid parameters
*/
int ipa_rm_inactivity_timer_destroy(enum ipa_rm_resource_name resource_name)
{
struct wakeup_source *pwlock;
IPA_RM_DBG_LOW("resource %d\n", resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
IPA_RM_ERR("Invalid parameter\n");
return -EINVAL;
}
if (!ipa_rm_it_handles[resource_name].initied) {
IPA_RM_ERR("resource %d already inited\n",
resource_name);
return -EINVAL;
}
cancel_delayed_work_sync(&ipa_rm_it_handles[resource_name].work);
pwlock = &(ipa_rm_it_handles[resource_name].w_lock);
wakeup_source_trash(pwlock);
memset(&ipa_rm_it_handles[resource_name], 0,
sizeof(struct ipa_rm_it_private));
return 0;
}
EXPORT_SYMBOL(ipa_rm_inactivity_timer_destroy);
/**
* ipa_rm_inactivity_timer_request_resource() - Same as
* ipa_rm_request_resource(), with a difference that calling to
* this function will also cancel the inactivity timer, if
* ipa_rm_inactivity_timer_release_resource() was called earlier.
*
* @resource_name: Resource name. @see ipa_rm.h
*
* Return codes:
* 0: success
* -EINVAL: invalid parameters
*/
int ipa_rm_inactivity_timer_request_resource(
enum ipa_rm_resource_name resource_name)
{
int ret;
unsigned long flags;
IPA_RM_DBG_LOW("resource %d\n", resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
IPA_RM_ERR("Invalid parameter\n");
return -EINVAL;
}
if (!ipa_rm_it_handles[resource_name].initied) {
IPA_RM_ERR("Not initialized\n");
return -EINVAL;
}
spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
ipa_rm_it_handles[resource_name].resource_requested = true;
spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
ret = ipa_rm_request_resource(resource_name);
IPA_RM_DBG_LOW("resource %d: returning %d\n", resource_name, ret);
return ret;
}
EXPORT_SYMBOL(ipa_rm_inactivity_timer_request_resource);
/**
* ipa_rm_inactivity_timer_release_resource() - Sets the
* inactivity timer to the timeout set by
* ipa_rm_inactivity_timer_init(). When the timeout expires, IPA
* RM inactivity timer will call to ipa_rm_release_resource().
* If a call to ipa_rm_inactivity_timer_request_resource() was
 * made BEFORE the timeout has expired, the timer will be
* cancelled.
*
* @resource_name: Resource name. @see ipa_rm.h
*
* Return codes:
* 0: success
* -EINVAL: invalid parameters
*/
int ipa_rm_inactivity_timer_release_resource(
enum ipa_rm_resource_name resource_name)
{
unsigned long flags;
IPA_RM_DBG_LOW("resource %d\n", resource_name);
if (resource_name < 0 ||
resource_name >= IPA_RM_RESOURCE_MAX) {
IPA_RM_ERR("Invalid parameter\n");
return -EINVAL;
}
if (!ipa_rm_it_handles[resource_name].initied) {
IPA_RM_ERR("Not initialized\n");
return -EINVAL;
}
spin_lock_irqsave(&ipa_rm_it_handles[resource_name].lock, flags);
ipa_rm_it_handles[resource_name].resource_requested = false;
if (ipa_rm_it_handles[resource_name].work_in_progress) {
IPA_RM_DBG_LOW("Timer already set, no sched again %d\n",
resource_name);
ipa_rm_it_handles[resource_name].reschedule_work = true;
spin_unlock_irqrestore(
&ipa_rm_it_handles[resource_name].lock, flags);
return 0;
}
ipa_rm_it_handles[resource_name].work_in_progress = true;
ipa_rm_it_handles[resource_name].reschedule_work = false;
__pm_stay_awake(&ipa_rm_it_handles[resource_name].w_lock);
IPA_RM_DBG_LOW("setting delayed work\n");
queue_delayed_work(system_unbound_wq,
&ipa_rm_it_handles[resource_name].work,
ipa_rm_it_handles[resource_name].jiffies);
spin_unlock_irqrestore(&ipa_rm_it_handles[resource_name].lock, flags);
return 0;
}
EXPORT_SYMBOL(ipa_rm_inactivity_timer_release_resource);
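
The three exported calls above compose into a simple client pattern: initialize once with a timeout, bracket each burst of traffic with request/release, and let the timer coalesce back-to-back releases. A hedged sketch of such a client (the resource name and the 500 ms timeout are illustrative):

static int example_timer_client(void)
{
	int ret;

	/* once, at probe time: release 500 ms after the last release call */
	ret = ipa_rm_inactivity_timer_init(IPA_RM_RESOURCE_USB_PROD, 500);
	if (ret)
		return ret;

	/* per burst of traffic */
	ipa_rm_inactivity_timer_request_resource(IPA_RM_RESOURCE_USB_PROD);
	/* ... move data ... */
	ipa_rm_inactivity_timer_release_resource(IPA_RM_RESOURCE_USB_PROD);

	/* at remove time */
	return ipa_rm_inactivity_timer_destroy(IPA_RM_RESOURCE_USB_PROD);
}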

270  ipa/ipa_rm_peers_list.c  (new file)

@@ -0,0 +1,270 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/slab.h>
#include "ipa_rm_i.h"
/**
* ipa_rm_peers_list_get_resource_index() - resource name to index
* of this resource in corresponding peers list
* @resource_name: [in] resource name
*
* Returns: resource index mapping, IPA_RM_INDEX_INVALID
* in case provided resource name isn't contained in enum
* ipa_rm_resource_name.
*
*/
static int ipa_rm_peers_list_get_resource_index(
enum ipa_rm_resource_name resource_name)
{
int resource_index = IPA_RM_INDEX_INVALID;
if (IPA_RM_RESORCE_IS_PROD(resource_name))
resource_index = ipa_rm_prod_index(resource_name);
else if (IPA_RM_RESORCE_IS_CONS(resource_name))
resource_index = ipa_rm_cons_index(resource_name);
return resource_index;
}
static bool ipa_rm_peers_list_check_index(int index,
struct ipa_rm_peers_list *peers_list)
{
return !(index >= peers_list->max_peers || index < 0);
}
/**
* ipa_rm_peers_list_create() - creates the peers list
*
* @max_peers: maximum number of peers in new list
* @peers_list: [out] newly created peers list
*
* Returns: 0 in case of SUCCESS, negative otherwise
*/
int ipa_rm_peers_list_create(int max_peers,
struct ipa_rm_peers_list **peers_list)
{
int result;
*peers_list = kzalloc(sizeof(**peers_list), GFP_ATOMIC);
if (!*peers_list) {
IPA_RM_ERR("no mem\n");
result = -ENOMEM;
goto bail;
}
(*peers_list)->max_peers = max_peers;
(*peers_list)->peers = kzalloc((*peers_list)->max_peers *
sizeof(*((*peers_list)->peers)), GFP_ATOMIC);
if (!((*peers_list)->peers)) {
IPA_RM_ERR("no mem\n");
result = -ENOMEM;
goto list_alloc_fail;
}
return 0;
list_alloc_fail:
kfree(*peers_list);
bail:
return result;
}
/**
* ipa_rm_peers_list_delete() - deletes the peers list
*
* @peers_list: peers list
*
*/
void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list)
{
if (peers_list) {
kfree(peers_list->peers);
kfree(peers_list);
}
}
/**
* ipa_rm_peers_list_remove_peer() - removes peer from the list
*
* @peers_list: peers list
* @resource_name: name of the resource to remove
*
*/
void ipa_rm_peers_list_remove_peer(
struct ipa_rm_peers_list *peers_list,
enum ipa_rm_resource_name resource_name)
{
if (!peers_list)
return;
peers_list->peers[ipa_rm_peers_list_get_resource_index(
resource_name)].resource = NULL;
peers_list->peers[ipa_rm_peers_list_get_resource_index(
resource_name)].userspace_dep = false;
peers_list->peers_count--;
}
/**
* ipa_rm_peers_list_add_peer() - adds peer to the list
*
* @peers_list: peers list
* @resource: resource to add
*
*/
void ipa_rm_peers_list_add_peer(
struct ipa_rm_peers_list *peers_list,
struct ipa_rm_resource *resource,
bool userspace_dep)
{
if (!peers_list || !resource)
return;
peers_list->peers[ipa_rm_peers_list_get_resource_index(
resource->name)].resource = resource;
peers_list->peers[ipa_rm_peers_list_get_resource_index(
resource->name)].userspace_dep = userspace_dep;
peers_list->peers_count++;
}
/**
* ipa_rm_peers_list_is_empty() - checks
* if resource peers list is empty
*
* @peers_list: peers list
*
* Returns: true if the list is empty, false otherwise
*/
bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list)
{
bool result = true;
if (!peers_list)
goto bail;
if (peers_list->peers_count > 0)
result = false;
bail:
return result;
}
/**
* ipa_rm_peers_list_has_last_peer() - checks
* if resource peers list has exactly one peer
*
* @peers_list: peers list
*
* Returns: true if the list has exactly one peer, false otherwise
*/
bool ipa_rm_peers_list_has_last_peer(
struct ipa_rm_peers_list *peers_list)
{
bool result = false;
if (!peers_list)
goto bail;
if (peers_list->peers_count == 1)
result = true;
bail:
return result;
}
/**
* ipa_rm_peers_list_check_dependency() - check dependency
* between 2 peer lists
* @resource_peers: first peers list
* @resource_name: first peers list resource name
* @depends_on_peers: second peers list
* @depends_on_name: second peers list resource name
* @userspace_dep: [out] dependency was created by userspace
*
* Returns: true if there is dependency, false otherwise
*
*/
bool ipa_rm_peers_list_check_dependency(
struct ipa_rm_peers_list *resource_peers,
enum ipa_rm_resource_name resource_name,
struct ipa_rm_peers_list *depends_on_peers,
enum ipa_rm_resource_name depends_on_name,
bool *userspace_dep)
{
bool result = false;
int resource_index;
struct ipa_rm_resource_peer *peer_ptr;
if (!resource_peers || !depends_on_peers || !userspace_dep)
return result;
resource_index = ipa_rm_peers_list_get_resource_index(depends_on_name);
peer_ptr = &resource_peers->peers[resource_index];
if (peer_ptr->resource != NULL) {
result = true;
*userspace_dep = peer_ptr->userspace_dep;
}
resource_index = ipa_rm_peers_list_get_resource_index(resource_name);
peer_ptr = &depends_on_peers->peers[resource_index];
if (peer_ptr->resource != NULL) {
result = true;
*userspace_dep = peer_ptr->userspace_dep;
}
return result;
}
/**
* ipa_rm_peers_list_get_resource() - get resource by
* resource index
* @resource_index: resource index
* @resource_peers: peers list
*
* Returns: the resource if found, NULL otherwise
*/
struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
struct ipa_rm_peers_list *resource_peers)
{
struct ipa_rm_resource *result = NULL;
if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
goto bail;
result = resource_peers->peers[resource_index].resource;
bail:
return result;
}
/**
* ipa_rm_peers_list_get_userspace_dep() - returns whether resource dependency
* was added by userspace
* @resource_index: resource index
* @resource_peers: peers list
*
* Returns: true if dependency was added by userspace, false by kernel
*/
bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
struct ipa_rm_peers_list *resource_peers)
{
bool result = false;
if (!ipa_rm_peers_list_check_index(resource_index, resource_peers))
goto bail;
result = resource_peers->peers[resource_index].userspace_dep;
bail:
return result;
}
/**
 * ipa_rm_peers_list_get_size() - get peers list size
*
* @peers_list: peers list
*
* Returns: the size of the peers list
*/
int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list)
{
return peers_list->max_peers;
}
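
A dependency is recorded symmetrically: the producer lists the consumer as a peer and vice versa, which is why ipa_rm_peers_list_check_dependency() probes both lists. A hedged sketch of that round trip, using the helpers implemented above (the resources and lists are assumed to exist already):

static bool example_record_and_check(struct ipa_rm_resource *prod,
		struct ipa_rm_peers_list *prod_peers,
		struct ipa_rm_resource *cons,
		struct ipa_rm_peers_list *cons_peers)
{
	bool by_userspace = false;

	/* record the dependency on both sides */
	ipa_rm_peers_list_add_peer(prod_peers, cons, false);
	ipa_rm_peers_list_add_peer(cons_peers, prod, false);

	/* probes both lists; true once either side records the peer */
	return ipa_rm_peers_list_check_dependency(prod_peers, prod->name,
			cons_peers, cons->name, &by_userspace);
}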

55  ipa/ipa_rm_peers_list.h  (new file)

@@ -0,0 +1,55 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA_RM_PEERS_LIST_H_
#define _IPA_RM_PEERS_LIST_H_
#include "ipa_rm_resource.h"
struct ipa_rm_resource_peer {
struct ipa_rm_resource *resource;
bool userspace_dep;
};
/**
* struct ipa_rm_peers_list - IPA RM resource peers list
* @peers: the list of references to resources dependent on this resource
* in case of producer or list of dependencies in case of consumer
* @max_peers: maximum number of peers for this resource
* @peers_count: actual number of peers for this resource
*/
struct ipa_rm_peers_list {
struct ipa_rm_resource_peer *peers;
int max_peers;
int peers_count;
};
int ipa_rm_peers_list_create(int max_peers,
struct ipa_rm_peers_list **peers_list);
void ipa_rm_peers_list_delete(struct ipa_rm_peers_list *peers_list);
void ipa_rm_peers_list_remove_peer(
struct ipa_rm_peers_list *peers_list,
enum ipa_rm_resource_name resource_name);
void ipa_rm_peers_list_add_peer(
struct ipa_rm_peers_list *peers_list,
struct ipa_rm_resource *resource,
bool userspace_dep);
bool ipa_rm_peers_list_check_dependency(
struct ipa_rm_peers_list *resource_peers,
enum ipa_rm_resource_name resource_name,
struct ipa_rm_peers_list *depends_on_peers,
enum ipa_rm_resource_name depends_on_name,
bool *userspace_dep);
struct ipa_rm_resource *ipa_rm_peers_list_get_resource(int resource_index,
struct ipa_rm_peers_list *peers_list);
bool ipa_rm_peers_list_get_userspace_dep(int resource_index,
struct ipa_rm_peers_list *resource_peers);
int ipa_rm_peers_list_get_size(struct ipa_rm_peers_list *peers_list);
bool ipa_rm_peers_list_is_empty(struct ipa_rm_peers_list *peers_list);
bool ipa_rm_peers_list_has_last_peer(
struct ipa_rm_peers_list *peers_list);
#endif /* _IPA_RM_PEERS_LIST_H_ */

1204  ipa/ipa_rm_resource.c  (new file)

File diff suppressed because it is too large

159  ipa/ipa_rm_resource.h  (new file)

@@ -0,0 +1,159 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA_RM_RESOURCE_H_
#define _IPA_RM_RESOURCE_H_
#include <linux/list.h>
#include <linux/ipa.h>
#include "ipa_rm_peers_list.h"
/**
* enum ipa_rm_resource_state - resource state
*/
enum ipa_rm_resource_state {
IPA_RM_RELEASED,
IPA_RM_REQUEST_IN_PROGRESS,
IPA_RM_GRANTED,
IPA_RM_RELEASE_IN_PROGRESS
};
/**
* enum ipa_rm_resource_type - IPA resource manager resource type
*/
enum ipa_rm_resource_type {
IPA_RM_PRODUCER,
IPA_RM_CONSUMER
};
/**
* struct ipa_rm_notification_info - notification information
* of IPA RM client
* @reg_params: registration parameters
* @explicit: registered explicitly by ipa_rm_register()
* @link: link to the list of all registered clients information
*/
struct ipa_rm_notification_info {
struct ipa_rm_register_params reg_params;
bool explicit;
struct list_head link;
};
/**
* struct ipa_rm_resource - IPA RM resource
* @name: name identifying resource
* @type: type of resource (PRODUCER or CONSUMER)
* @floor_voltage: minimum voltage level for operation
* @max_bw: maximum bandwidth required for resource in Mbps
* @state: state of the resource
* @peers_list: list of the peers of the resource
*/
struct ipa_rm_resource {
enum ipa_rm_resource_name name;
enum ipa_rm_resource_type type;
enum ipa_voltage_level floor_voltage;
u32 max_bw;
u32 needed_bw;
enum ipa_rm_resource_state state;
struct ipa_rm_peers_list *peers_list;
};
/**
* struct ipa_rm_resource_cons - IPA RM consumer
* @resource: resource
* @usage_count: number of producers in GRANTED / REQUESTED state
* using this consumer
* @request_consumer_in_progress: when set, the consumer is during its request
* phase
* @request_resource: function which should be called to request resource
* from resource manager
* @release_resource: function which should be called to release resource
* from resource manager
* Add new fields after @resource only.
*/
struct ipa_rm_resource_cons {
struct ipa_rm_resource resource;
int usage_count;
struct completion request_consumer_in_progress;
int (*request_resource)(void);
int (*release_resource)(void);
};
/**
* struct ipa_rm_resource_prod - IPA RM producer
* @resource: resource
 * @event_listeners: clients registered with this producer
 * for notifications on resource state changes.
 * Add new fields after @resource only.
*/
struct ipa_rm_resource_prod {
struct ipa_rm_resource resource;
struct list_head event_listeners;
int pending_request;
int pending_release;
};
int ipa_rm_resource_create(
struct ipa_rm_create_params *create_params,
struct ipa_rm_resource **resource);
int ipa_rm_resource_delete(struct ipa_rm_resource *resource);
int ipa_rm_resource_producer_register(struct ipa_rm_resource_prod *producer,
struct ipa_rm_register_params *reg_params,
bool explicit);
int ipa_rm_resource_producer_deregister(struct ipa_rm_resource_prod *producer,
struct ipa_rm_register_params *reg_params);
int ipa_rm_resource_add_dependency(struct ipa_rm_resource *resource,
struct ipa_rm_resource *depends_on,
bool userspace_dep);
int ipa_rm_resource_delete_dependency(struct ipa_rm_resource *resource,
struct ipa_rm_resource *depends_on,
bool userspace_dep);
int ipa_rm_resource_producer_request(struct ipa_rm_resource_prod *producer);
int ipa_rm_resource_producer_release(struct ipa_rm_resource_prod *producer);
int ipa_rm_resource_consumer_request(struct ipa_rm_resource_cons *consumer,
u32 needed_bw,
bool inc_usage_count,
bool wake_client);
int ipa_rm_resource_consumer_release(struct ipa_rm_resource_cons *consumer,
u32 needed_bw,
bool dec_usage_count);
int ipa_rm_resource_set_perf_profile(struct ipa_rm_resource *resource,
struct ipa_rm_perf_profile *profile);
void ipa_rm_resource_consumer_handle_cb(struct ipa_rm_resource_cons *consumer,
enum ipa_rm_event event);
void ipa_rm_resource_producer_notify_clients(
struct ipa_rm_resource_prod *producer,
enum ipa_rm_event event,
bool notify_registered_only);
int ipa_rm_resource_producer_print_stat(
struct ipa_rm_resource *resource,
char *buf,
int size);
int ipa_rm_resource_consumer_request_work(struct ipa_rm_resource_cons *consumer,
enum ipa_rm_resource_state prev_state,
u32 needed_bw,
bool notify_completion,
bool dec_client_on_err);
int ipa_rm_resource_consumer_release_work(
struct ipa_rm_resource_cons *consumer,
enum ipa_rm_resource_state prev_state,
bool notify_completion);
#endif /* _IPA_RM_RESOURCE_H_ */
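
For tracing, the four-state machine above maps naturally onto a small name table; a sketch (this helper is not part of the header, and the strings are illustrative):

static inline const char *example_rm_state_str(enum ipa_rm_resource_state s)
{
	switch (s) {
	case IPA_RM_RELEASED:
		return "RELEASED";
	case IPA_RM_REQUEST_IN_PROGRESS:
		return "REQUEST_IN_PROGRESS";
	case IPA_RM_GRANTED:
		return "GRANTED";
	case IPA_RM_RELEASE_IN_PROGRESS:
		return "RELEASE_IN_PROGRESS";
	}
	return "UNKNOWN";
}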

22  ipa/ipa_uc_offload_common_i.h  (new file)

@@ -0,0 +1,22 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa_mhi.h>
#include <linux/ipa_qmi_service_v01.h>
#ifndef _IPA_UC_OFFLOAD_COMMON_I_H_
#define _IPA_UC_OFFLOAD_COMMON_I_H_
int ipa_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp);
int ipa_tear_down_uc_offload_pipes(int ipa_ep_idx_ul, int ipa_ep_idx_dl,
struct ipa_ntn_conn_in_params *params);
int ipa_ntn_uc_reg_rdyCB(void (*ipauc_ready_cb)(void *user_data),
void *user_data);
void ipa_ntn_uc_dereg_rdyCB(void);
#endif /* _IPA_UC_OFFLOAD_COMMON_I_H_ */
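
The ready-callback pair above follows the usual register/deregister shape; a hedged sketch of an offload client waiting for the uC before creating NTN pipes (the callback body and context pointer are illustrative):

static void example_uc_ready(void *user_data)
{
	/* uC is up; safe to call ipa_setup_uc_ntn_pipes() from here on */
}

static int example_wait_for_uc(void *ctx)
{
	return ipa_ntn_uc_reg_rdyCB(example_uc_ready, ctx);
}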

23  ipa/ipa_v3/Makefile  (new file)

@@ -0,0 +1,23 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IPA3) += ipahal/
obj-$(CONFIG_IPA3) += ipat.o
ipat-y := ipa.o ipa_debugfs.o ipa_hdr.o ipa_flt.o ipa_rt.o ipa_dp.o ipa_client.o \
ipa_utils.o ipa_nat.o ipa_intf.o teth_bridge.o ipa_interrupts.o \
ipa_uc.o ipa_uc_wdi.o ipa_dma.o ipa_uc_mhi.o ipa_mhi.o ipa_uc_ntn.o \
ipa_hw_stats.o ipa_pm.o ipa_wdi3_i.o ipa_odl.o ipa_wigig_i.o
ipat-$(CONFIG_IPA_EMULATION) += ipa_dt_replacement.o
obj-$(CONFIG_RMNET_IPA3) += rmnet_ipa.o ipa_qmi_service_v01.o ipa_qmi_service.o rmnet_ipa_fd_ioctl.o
obj-$(CONFIG_IPA3_MHI_PROXY) += ipa_mhi_proxy.o
obj-$(CONFIG_IPA3_MHI_PRIME_MANAGER) += ipa_mpm.o
ipat-$(CONFIG_IPA3_REGDUMP) += dump/ipa_reg_dump.o
ccflags-$(CONFIG_IPA3_REGDUMP) += -Itechpack/dataipa/ipa/ipa_v3/dump
ccflags-$(CONFIG_IPA3_REGDUMP_IPA_4_5) += -Itechpack/dataipa/ipa/ipa_v3/dump/ipa4.5

2392  ipa/ipa_v3/dump/ipa4.5/gsi_hwio.h  (new file)

File diff suppressed because it is too large

(new file)

@@ -0,0 +1,530 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_GSI_HWIO_DEF_H_)
#define _GSI_HWIO_DEF_H_
struct gsi_hwio_def_gsi_cfg_s {
u32 gsi_enable : 1;
u32 mcs_enable : 1;
u32 double_mcs_clk_freq : 1;
u32 uc_is_mcs : 1;
u32 gsi_pwr_clps : 1;
u32 bp_mtrix_disable : 1;
u32 reserved0 : 2;
u32 sleep_clk_div : 4;
u32 reserved1 : 20;
};
union gsi_hwio_def_gsi_cfg_u {
struct gsi_hwio_def_gsi_cfg_s def;
u32 value;
};
struct gsi_hwio_def_gsi_ree_cfg_s {
u32 move_to_esc_clr_mode_trsh : 1;
u32 channel_empty_int_enable : 1;
u32 reserved0 : 6;
u32 max_burst_size : 8;
u32 reserved1 : 16;
};
union gsi_hwio_def_gsi_ree_cfg_u {
struct gsi_hwio_def_gsi_ree_cfg_s def;
u32 value;
};
struct gsi_hwio_def_gsi_manager_ee_qos_n_s {
u32 ee_prio : 2;
u32 reserved0 : 6;
u32 max_ch_alloc : 5;
u32 reserved1 : 3;
u32 max_ev_alloc : 5;
u32 reserved2 : 11;
};
union gsi_hwio_def_gsi_manager_ee_qos_n_u {
struct gsi_hwio_def_gsi_manager_ee_qos_n_s def;
u32 value;
};
struct gsi_hwio_def_gsi_shram_n_s {
u32 shram : 32;
};
union gsi_hwio_def_gsi_shram_n_u {
struct gsi_hwio_def_gsi_shram_n_s def;
u32 value;
};
struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s {
u32 phy_ch : 5;
u32 valid : 1;
u32 reserved0 : 26;
};
union gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_u {
struct gsi_hwio_def_gsi_map_ee_n_ch_k_vp_table_s def;
u32 value;
};
struct gsi_hwio_def_gsi_test_bus_sel_s {
u32 gsi_testbus_sel : 8;
u32 reserved0 : 8;
u32 gsi_hw_events_sel : 4;
u32 reserved1 : 12;
};
union gsi_hwio_def_gsi_test_bus_sel_u {
struct gsi_hwio_def_gsi_test_bus_sel_s def;
u32 value;
};
struct gsi_hwio_def_gsi_test_bus_reg_s {
u32 gsi_testbus_reg : 32;
};
union gsi_hwio_def_gsi_test_bus_reg_u {
struct gsi_hwio_def_gsi_test_bus_reg_s def;
u32 value;
};
struct gsi_hwio_def_gsi_debug_countern_s {
u32 counter_value : 16;
u32 reserved0 : 16;
};
union gsi_hwio_def_gsi_debug_countern_u {
struct gsi_hwio_def_gsi_debug_countern_s def;
u32 value;
};
struct gsi_hwio_def_gsi_debug_qsb_log_last_misc_idn_s {
u32 addr_20_0 : 21;
u32 write : 1;
u32 tid : 5;
u32 mid : 5;
};
union gsi_hwio_def_gsi_debug_qsb_log_last_misc_idn_u {
struct gsi_hwio_def_gsi_debug_qsb_log_last_misc_idn_s def;
u32 value;
};
struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s {
u32 rf_reg : 32;
};
union gsi_hwio_def_gsi_debug_sw_rf_n_read_u {
struct gsi_hwio_def_gsi_debug_sw_rf_n_read_s def;
u32 value;
};
struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s {
u32 phy_ev_ch : 5;
u32 valid : 1;
u32 reserved0 : 26;
};
union gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_u {
struct gsi_hwio_def_gsi_debug_ee_n_ev_k_vp_table_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s {
u32 chtype_protocol : 3;
u32 chtype_dir : 1;
u32 ee : 4;
u32 chid : 5;
u32 chtype_protocol_msb : 1;
u32 erindex : 5;
u32 reserved0 : 1;
u32 chstate : 4;
u32 element_size : 8;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s {
u32 r_length : 16;
u32 reserved0 : 16;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_1_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s {
u32 r_base_addr_lsbs : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_2_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s {
u32 r_base_addr_msbs : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_3_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s {
u32 read_ptr_lsb : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_4_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s {
u32 read_ptr_msb : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_5_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s {
u32 write_ptr_lsb : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_6_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s {
u32 write_ptr_msb : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_cntxt_7_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s {
u32 read_ptr : 16;
u32 reserved0 : 16;
};
union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_read_ptr_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s {
u32 re_intr_db : 16;
u32 reserved0 : 16;
};
union gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_re_fetch_write_ptr_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s {
u32 wrr_weight : 4;
u32 reserved0 : 4;
u32 max_prefetch : 1;
u32 use_db_eng : 1;
u32 prefetch_mode : 4;
u32 reserved1 : 2;
u32 empty_lvl_thrshold : 8;
u32 reserved2 : 8;
};
union gsi_hwio_def_ee_n_gsi_ch_k_qos_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_qos_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_0_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_1_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_2_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_u {
struct gsi_hwio_def_ee_n_gsi_ch_k_scratch_3_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s {
u32 chtype : 4;
u32 ee : 4;
u32 evchid : 8;
u32 intype : 1;
u32 reserved0 : 3;
u32 chstate : 4;
u32 element_size : 8;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_0_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s {
u32 r_length : 16;
u32 reserved0 : 16;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_1_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s {
u32 r_base_addr_lsbs : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_2_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s {
u32 r_base_addr_msbs : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_3_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s {
u32 read_ptr_lsb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_4_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s {
u32 read_ptr_msb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_5_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s {
u32 write_ptr_lsb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_6_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s {
u32 write_ptr_msb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_7_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s {
u32 int_modt : 16;
u32 int_modc : 8;
u32 int_mod_cnt : 8;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_8_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s {
u32 intvec : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_9_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s {
u32 msi_addr_lsb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_10_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s {
u32 msi_addr_msb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_11_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s {
u32 rp_update_addr_lsb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_12_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s {
u32 rp_update_addr_msb : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_u {
struct gsi_hwio_def_ee_n_ev_ch_k_cntxt_13_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_scratch_0_u {
struct gsi_hwio_def_ee_n_ev_ch_k_scratch_0_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_ev_ch_k_scratch_1_u {
struct gsi_hwio_def_ee_n_ev_ch_k_scratch_1_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_gsi_status_s {
u32 enabled : 1;
u32 reserved0 : 31;
};
union gsi_hwio_def_ee_n_gsi_status_u {
struct gsi_hwio_def_ee_n_gsi_status_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_type_irq_s {
u32 ch_ctrl : 1;
u32 ev_ctrl : 1;
u32 glob_ee : 1;
u32 ieob : 1;
u32 inter_ee_ch_ctrl : 1;
u32 inter_ee_ev_ctrl : 1;
u32 general : 1;
u32 reserved0 : 25;
};
union gsi_hwio_def_ee_n_cntxt_type_irq_u {
struct gsi_hwio_def_ee_n_cntxt_type_irq_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s {
u32 ch_ctrl : 1;
u32 ev_ctrl : 1;
u32 glob_ee : 1;
u32 ieob : 1;
u32 inter_ee_ch_ctrl : 1;
u32 inter_ee_ev_ctrl : 1;
u32 general : 1;
u32 reserved0 : 25;
};
union gsi_hwio_def_ee_n_cntxt_type_irq_msk_u {
struct gsi_hwio_def_ee_n_cntxt_type_irq_msk_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s {
u32 gsi_ch_bit_map : 32;
};
union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_u {
struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s {
u32 ev_ch_bit_map : 32;
};
union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_u {
struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s {
u32 gsi_ch_bit_map_msk : 23;
u32 reserved0 : 9;
};
union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_u {
struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_msk_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s {
u32 ev_ch_bit_map_msk : 20;
u32 reserved0 : 12;
};
union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_u {
struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_msk_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s {
u32 gsi_ch_bit_map : 32;
};
union gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_u {
struct gsi_hwio_def_ee_n_cntxt_src_gsi_ch_irq_clr_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s {
u32 ev_ch_bit_map : 32;
};
union gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_u {
struct gsi_hwio_def_ee_n_cntxt_src_ev_ch_irq_clr_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s {
u32 ev_ch_bit_map : 32;
};
union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_u {
struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s {
u32 ev_ch_bit_map_msk : 20;
u32 reserved0 : 12;
};
union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_u {
struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_msk_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s {
u32 ev_ch_bit_map : 32;
};
union gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_u {
struct gsi_hwio_def_ee_n_cntxt_src_ieob_irq_clr_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s {
u32 error_int : 1;
u32 gp_int1 : 1;
u32 gp_int2 : 1;
u32 gp_int3 : 1;
u32 reserved0 : 28;
};
union gsi_hwio_def_ee_n_cntxt_glob_irq_stts_u {
struct gsi_hwio_def_ee_n_cntxt_glob_irq_stts_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s {
u32 gsi_break_point : 1;
u32 gsi_bus_error : 1;
u32 gsi_cmd_fifo_ovrflow : 1;
u32 gsi_mcs_stack_ovrflow : 1;
u32 reserved0 : 28;
};
union gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_u {
struct gsi_hwio_def_ee_n_cntxt_gsi_irq_stts_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_intset_s {
u32 intype : 1;
u32 reserved0 : 31;
};
union gsi_hwio_def_ee_n_cntxt_intset_u {
struct gsi_hwio_def_ee_n_cntxt_intset_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s {
u32 msi_addr_lsb : 32;
};
union gsi_hwio_def_ee_n_cntxt_msi_base_lsb_u {
struct gsi_hwio_def_ee_n_cntxt_msi_base_lsb_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s {
u32 msi_addr_msb : 32;
};
union gsi_hwio_def_ee_n_cntxt_msi_base_msb_u {
struct gsi_hwio_def_ee_n_cntxt_msi_base_msb_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_error_log_s {
u32 error_log : 32;
};
union gsi_hwio_def_ee_n_error_log_u {
struct gsi_hwio_def_ee_n_error_log_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_error_log_clr_s {
u32 error_log_clr : 32;
};
union gsi_hwio_def_ee_n_error_log_clr_u {
struct gsi_hwio_def_ee_n_error_log_clr_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_scratch_0_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_cntxt_scratch_0_u {
struct gsi_hwio_def_ee_n_cntxt_scratch_0_s def;
u32 value;
};
struct gsi_hwio_def_ee_n_cntxt_scratch_1_s {
u32 scratch : 32;
};
union gsi_hwio_def_ee_n_cntxt_scratch_1_u {
struct gsi_hwio_def_ee_n_cntxt_scratch_1_s def;
u32 value;
};
#endif
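
Each register above pairs a bit-field struct with a u32 overlay, so a raw MMIO read decodes field-by-field without shift/mask arithmetic. A sketch (the raw value would come from the driver's register accessor; pr_debug is used only for illustration):

static void example_decode_ch_cntxt0(u32 raw)
{
	union gsi_hwio_def_ee_n_gsi_ch_k_cntxt_0_u reg;

	reg.value = raw;	/* overlay the raw register word */
	pr_debug("chstate=%u dir=%u elem_size=%u erindex=%u\n",
		 reg.def.chstate, reg.def.chtype_dir,
		 reg.def.element_size, reg.def.erindex);
}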

(new file)

@@ -0,0 +1,42 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_IPA_ACCESS_CONTROL_H_)
#define _IPA_ACCESS_CONTROL_H_
#include "ipa_reg_dump.h"
/*
* The following is target specific.
*/
static struct reg_mem_access_map_t mem_access_map[] = {
/*------------------------------------------------------------*/
/* Range Use when Use when */
/* Begin End SD_ENABLED SD_DISABLED */
/*------------------------------------------------------------*/
{ 0x04000, 0x05000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0x1F000, 0x27000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0x05000, 0x0f000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
{ 0x0f000, 0x10000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
{ 0x13000, 0x17000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0x17000, 0x1b000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0x1b000, 0x1f000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0x10000, 0x11000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0x11000, 0x12000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0x12000, 0x13000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0x43000, 0x44000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0x44000, 0x45000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0x45000, 0x47000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0x40000, 0x42000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0x42000, 0x43000, { &io_matrix[AA_COMBO], &io_matrix[AN_COMBO] } },
{ 0x50000, 0x60000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0x60000, 0x80000, { &io_matrix[AN_COMBO], &io_matrix[NN_COMBO] } },
{ 0x80000, 0x81000, { &io_matrix[NN_COMBO], &io_matrix[NN_COMBO] } },
{ 0x81000, 0x83000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0xa0000, 0xc0000, { &io_matrix[AN_COMBO], &io_matrix[AN_COMBO] } },
{ 0xc0000, 0xc2000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
{ 0xc2000, 0xd0000, { &io_matrix[AA_COMBO], &io_matrix[AA_COMBO] } },
};
#endif /* #if !defined(_IPA_ACCESS_CONTROL_H_) */
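
The table above is naturally consumed by a linear range scan. A sketch under stated assumptions: the field names below (begin, end, access) are hypothetical stand-ins for whatever reg_mem_access_map_t actually declares in ipa_reg_dump.h, and index 0/1 selects the SD_ENABLED/SD_DISABLED column per the comment block above:

/* Field names begin/end/access are hypothetical; the real layout of
 * reg_mem_access_map_t lives in ipa_reg_dump.h.
 */
static void *example_find_access(u32 offset, bool sd_enabled)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mem_access_map); i++) {
		if (offset >= mem_access_map[i].begin &&
		    offset < mem_access_map[i].end)
			return mem_access_map[i].access[sd_enabled ? 0 : 1];
	}
	return NULL;	/* offset not covered by any range */
}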

(new file)

@@ -0,0 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_IPA_GCC_HWIO_H_)
#define _IPA_GCC_HWIO_H_
/*
*
* HWIO register definitions to follow:
*
*/
#endif

(new file)

@@ -0,0 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_IPA_GCC_HWIO_DEF_H_)
#define _IPA_GCC_HWIO_DEF_H_
#endif

(new file)

@@ -0,0 +1,593 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_IPA_HW_COMMON_EX_H_)
#define _IPA_HW_COMMON_EX_H_
/* VLVL defs are available for 854 */
#define FEATURE_VLVL_DEFS true
#define FEATURE_IPA_HW_VERSION_4_5 true
/* Important Platform Specific Values : IRQ_NUM, IRQ_CNT, BCR */
#define IPA_HW_BAM_IRQ_NUM 639
/* Q6 IRQ number for IPA. */
#define IPA_HW_IRQ_NUM 640
/* Total number of different interrupts that can be enabled */
#define IPA_HW_IRQ_CNT_TOTAL 23
/* IPAv4 spare reg value */
#define IPA_HW_SPARE_1_REG_VAL 0xC0000005
/* Whether to allow setting step mode on IPA when we crash or not */
#define IPA_CFG_HW_IS_STEP_MODE_ALLOWED (false)
/* GSI MHI related definitions */
#define IPA_HW_GSI_MHI_CONSUMER_CHANNEL_NUM 0x0
#define IPA_HW_GSI_MHI_PRODUCER_CHANNEL_NUM 0x1
#define IPA_HW_GSI_MHI_CONSUMER_EP_NUM 0x1
#define IPA_HW_GSI_MHI_PRODUCER_EP_NUM 0x11
/* IPA ZIP WA related Macros */
#define IPA_HW_DCMP_SRC_PIPE 0x8
#define IPA_HW_DCMP_DEST_PIPE 0x4
#define IPA_HW_ACK_MNGR_MASK 0x1D
#define IPA_HW_DCMP_SRC_GRP 0x5
/* IPA Clock resource name */
#define IPA_CLK_RESOURCE_NAME "/clk/pcnoc"
/* IPA Clock Bus Client name */
#define IPA_CLK_BUS_CLIENT_NAME "IPA_PCNOC_BUS_CLIENT"
/* HPS Sequences */
#define IPA_HW_PKT_PROCESS_HPS_DMA 0x0
#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_CIPHE 0x1
#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_UCP 0x2
#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_UCP 0x3
#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_NO_DECIPH 0x4
#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_DECIPH 0x5
#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_NO_DECIPH_NO_UCP 0x6
#define IPA_HW_PKT_PROCESS_HPS_PKT_PRS_DECIPH_NO_UCP 0x7
#define IPA_HW_PKT_PROCESS_HPS_DMA_PARSER 0x8
#define IPA_HW_PKT_PROCESS_HPS_DMA_DECIPH_PARSER 0x9
#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_NO_DECIPH 0xA
#define IPA_HW_PKT_PROCESS_HPS_2_PKT_PRS_UCP_TWICE_DECIPH 0xB
#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_NO_DECIPH 0xC
#define IPA_HW_PKT_PROCESS_HPS_3_PKT_PRS_UCP_TWICE_DECIPH 0xD
/* DPS Sequences */
#define IPA_HW_PKT_PROCESS_DPS_DMA 0x0
#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECIPH 0x1
#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_DECOMP 0x2
#define IPA_HW_PKT_PROCESS_DPS_DMA_WITH_CIPH 0x3
/* Src RSRC GRP config */
#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_0 0x0B040803
#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_1 0x0C0C0909
#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_2 0x0E0E0909
#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_3 0x3F003F00
#define IPA_HW_SRC_RSRC_GRP_01_RSRC_TYPE_4 0x10101616
#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_0 0x01010101
#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_1 0x02020202
#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_2 0x04040404
#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_3 0x3F003F00
#define IPA_HW_SRC_RSRC_GRP_23_RSRC_TYPE_4 0x02020606
#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_0 0x00000000
#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_1 0x00000000
#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_2 0x00000000
#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_3 0x00003F00
#define IPA_HW_SRC_RSRC_GRP_45_RSRC_TYPE_4 0x00000000
/* Dest RSRC GRP config */
#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_0 0x05051010
#define IPA_HW_DST_RSRC_GRP_01_RSRC_TYPE_1 0x3F013F02
#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_0 0x02020202
#define IPA_HW_DST_RSRC_GRP_23_RSRC_TYPE_1 0x02010201
#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_0 0x00000000
#define IPA_HW_DST_RSRC_GRP_45_RSRC_TYPE_1 0x00000200
#define IPA_HW_RX_HPS_CLIENTS_MIN_DEPTH_0 0x03030303
#define IPA_HW_RX_HPS_CLIENTS_MAX_DEPTH_0 0x03030303
#define IPA_HW_RSRP_GRP_0 0x0
#define IPA_HW_RSRP_GRP_1 0x1
#define IPA_HW_RSRP_GRP_2 0x2
#define IPA_HW_RSRP_GRP_3 0x3
#define IPA_HW_PCIE_SRC_RSRP_GRP IPA_HW_RSRP_GRP_0
#define IPA_HW_PCIE_DEST_RSRP_GRP IPA_HW_RSRP_GRP_0
#define IPA_HW_DDR_SRC_RSRP_GRP IPA_HW_RSRP_GRP_1
#define IPA_HW_DDR_DEST_RSRP_GRP IPA_HW_RSRP_GRP_1
#define IPA_HW_DMA_SRC_RSRP_GRP IPA_HW_RSRP_GRP_2
#define IPA_HW_DMA_DEST_RSRP_GRP IPA_HW_RSRP_GRP_2
#define IPA_HW_SRC_RSRP_TYPE_MAX 0x05
#define IPA_HW_DST_RSRP_TYPE_MAX 0x02
#define GSI_HW_QSB_LOG_MISC_MAX 0x4
/* IPA Clock Bus Client name */
#define IPA_CLK_BUS_CLIENT_NAME "IPA_PCNOC_BUS_CLIENT"
/* Is IPA decompression feature enabled */
#define IPA_HW_IS_DECOMPRESSION_ENABLED (1)
/* Whether to allow setting step mode on IPA when we crash or not */
#define IPA_HW_IS_STEP_MODE_ALLOWED (true)
/* Max number of virtual pipes for UL QBAP provided by HW */
#define IPA_HW_MAX_VP_NUM (32)
/*
* HW specific clock vote freq values in KHz
* (BIMC/SNOC/PCNOC/IPA/Q6 CPU)
*/
enum ipa_hw_clk_freq_e {
/* BIMC */
IPA_HW_CLK_FREQ_BIMC_PEAK = 518400,
IPA_HW_CLK_FREQ_BIMC_NOM_PLUS = 404200,
IPA_HW_CLK_FREQ_BIMC_NOM = 404200,
IPA_HW_CLK_FREQ_BIMC_SVS = 100000,
/* PCNOC */
IPA_HW_CLK_FREQ_PCNOC_PEAK = 133330,
IPA_HW_CLK_FREQ_PCNOC_NOM_PLUS = 100000,
IPA_HW_CLK_FREQ_PCNOC_NOM = 100000,
IPA_HW_CLK_FREQ_PCNOC_SVS = 50000,
/*IPA_HW_CLK_SNOC*/
IPA_HW_CLK_FREQ_SNOC_PEAK = 200000,
IPA_HW_CLK_FREQ_SNOC_NOM_PLUS = 150000,
IPA_HW_CLK_FREQ_SNOC_NOM = 150000,
IPA_HW_CLK_FREQ_SNOC_SVS = 85000,
IPA_HW_CLK_FREQ_SNOC_SVS_2 = 50000,
/* IPA */
IPA_HW_CLK_FREQ_IPA_PEAK = 600000,
IPA_HW_CLK_FREQ_IPA_NOM_PLUS = 500000,
IPA_HW_CLK_FREQ_IPA_NOM = 500000,
IPA_HW_CLK_FREQ_IPA_SVS = 250000,
IPA_HW_CLK_FREQ_IPA_SVS_2 = 150000,
/* Q6 CPU */
IPA_HW_CLK_FREQ_Q6_PEAK = 729600,
IPA_HW_CLK_FREQ_Q6_NOM_PLUS = 729600,
IPA_HW_CLK_FREQ_Q6_NOM = 729600,
IPA_HW_CLK_FREQ_Q6_SVS = 729600,
};
enum ipa_hw_qtimer_gran_e {
IPA_HW_QTIMER_GRAN_0 = 0, /* granularity 0 is 10us */
IPA_HW_QTIMER_GRAN_1 = 1, /* granularity 1 is 100us */
IPA_HW_QTIMER_GRAN_MAX,
};
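
Per the inline comments, the two granularity codes correspond to 10 us and 100 us ticks; a small conversion helper makes that explicit (this helper is illustrative, not part of the header):

static inline u32 example_qtimer_gran_to_us(enum ipa_hw_qtimer_gran_e gran)
{
	switch (gran) {
	case IPA_HW_QTIMER_GRAN_0:
		return 10;	/* granularity 0 is 10 us */
	case IPA_HW_QTIMER_GRAN_1:
		return 100;	/* granularity 1 is 100 us */
	default:
		return 0;	/* invalid granularity */
	}
}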
/* Pipe ID of all the IPA pipes */
enum ipa_hw_pipe_id_e {
IPA_HW_PIPE_ID_0,
IPA_HW_PIPE_ID_1,
IPA_HW_PIPE_ID_2,
IPA_HW_PIPE_ID_3,
IPA_HW_PIPE_ID_4,
IPA_HW_PIPE_ID_5,
IPA_HW_PIPE_ID_6,
IPA_HW_PIPE_ID_7,
IPA_HW_PIPE_ID_8,
IPA_HW_PIPE_ID_9,
IPA_HW_PIPE_ID_10,
IPA_HW_PIPE_ID_11,
IPA_HW_PIPE_ID_12,
IPA_HW_PIPE_ID_13,
IPA_HW_PIPE_ID_14,
IPA_HW_PIPE_ID_15,
IPA_HW_PIPE_ID_16,
IPA_HW_PIPE_ID_17,
IPA_HW_PIPE_ID_18,
IPA_HW_PIPE_ID_19,
IPA_HW_PIPE_ID_20,
IPA_HW_PIPE_ID_21,
IPA_HW_PIPE_ID_22,
IPA_HW_PIPE_ID_23,
IPA_HW_PIPE_ID_24,
IPA_HW_PIPE_ID_25,
IPA_HW_PIPE_ID_26,
IPA_HW_PIPE_ID_27,
IPA_HW_PIPE_ID_28,
IPA_HW_PIPE_ID_29,
IPA_HW_PIPE_ID_30,
IPA_HW_PIPE_ID_MAX
};
/* Pipe IDs of system BAM endpoints between Q6 & IPA */
enum ipa_hw_q6_pipe_id_e {
/* Pipes used by IPA Q6 driver */
IPA_HW_Q6_DL_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_5,
IPA_HW_Q6_CTL_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_6,
IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_8,
IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_20,
IPA_HW_Q6_UL_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_21,
IPA_HW_Q6_DL_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_17,
IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_18,
IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_19,
IPA_HW_Q6_UL_ACK_PRODUCER_PIPE_ID =
IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE_ID,
IPA_HW_Q6_UL_DATA_PRODUCER_PIPE_ID =
IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE_ID,
IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_4,
IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_ID = IPA_HW_PIPE_ID_29,
/* Test Simulator Pipes */
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_ID = IPA_HW_PIPE_ID_0,
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_ID = IPA_HW_PIPE_ID_1,
/* GSI UT channel SW->IPA */
IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_ID = IPA_HW_PIPE_ID_3,
/* GSI UT channel SW->IPA */
IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_ID = IPA_HW_PIPE_ID_10,
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_ID = IPA_HW_PIPE_ID_7,
/* GSI UT channel IPA->SW */
IPA_HW_Q6_DIAG_CONSUMER_PIPE_ID = IPA_HW_PIPE_ID_9,
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_ID = IPA_HW_PIPE_ID_23,
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_ID = IPA_HW_PIPE_ID_24,
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_ID = IPA_HW_PIPE_ID_25,
/* GSI UT channel IPA->SW */
IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_ID = IPA_HW_PIPE_ID_26,
/* GSI UT channel IPA->SW */
IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_ID = IPA_HW_PIPE_ID_27,
IPA_HW_Q6_PIPE_ID_MAX = IPA_HW_PIPE_ID_MAX,
};
enum ipa_hw_q6_pipe_ch_id_e {
/* Channels used by IPA Q6 driver */
IPA_HW_Q6_DL_CONSUMER_PIPE_CH_ID = 0,
IPA_HW_Q6_CTL_CONSUMER_PIPE_CH_ID = 1,
IPA_HW_Q6_DL_NLO_CONSUMER_PIPE_CH_ID = 2,
IPA_HW_Q6_UL_ACC_PATH_ACK_PRODUCER_PIPE_CH_ID = 6,
IPA_HW_Q6_UL_PRODUCER_PIPE_CH_ID = 7,
IPA_HW_Q6_DL_PRODUCER_PIPE_CH_ID = 3,
IPA_HW_Q6_UL_ACC_PATH_DATA_PRODUCER_PIPE_CH_ID = 5,
IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE_CH_ID = 4,
IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE_CH_ID = 8,
IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE_CH_ID = 9,
/* CH_ID 8 and 9 are Q6 SPARE CONSUMERs */
/* Test Simulator Channels */
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0_CH_ID = 10,
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_CH_ID = 11,
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1_CH_ID = 12,
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_CH_ID = 13,
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2_CH_ID = 14,
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_CH_ID = 15,
/* GSI UT channel SW->IPA */
IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1_CH_ID = 16,
/* GSI UT channel IPA->SW */
IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1_CH_ID = 17,
/* GSI UT channel SW->IPA */
IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2_CH_ID = 18,
/* GSI UT channel IPA->SW */
IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2_CH_ID = 19,
};
/* System BAM endpoints between Q6 & IPA */
enum ipa_hw_q6_pipe_e {
/* DL Pipe IPA->Q6 */
IPA_HW_Q6_DL_PRODUCER_PIPE = 0,
/* UL Pipe IPA->Q6 */
IPA_HW_Q6_UL_PRODUCER_PIPE = 1,
/* DL Pipe Q6->IPA */
IPA_HW_Q6_DL_CONSUMER_PIPE = 2,
/* CTL Pipe Q6->IPA */
IPA_HW_Q6_CTL_CONSUMER_PIPE = 3,
/* Q6 -> IPA, DL NLO */
IPA_HW_Q6_DL_NLO_CONSUMER_PIPE = 4,
/* DMA ASYNC CONSUMER */
IPA_HW_Q6_DMA_ASYNC_CONSUMER_PIPE = 5,
/* DMA ASYNC PRODUCER */
IPA_HW_Q6_DMA_ASYNC_PRODUCER_PIPE = 6,
/* UL Acc Path Data Pipe IPA->Q6 */
IPA_HW_Q6_UL_ACC_DATA_PRODUCER_PIPE = 7,
/* UL Acc Path ACK Pipe IPA->Q6 */
IPA_HW_Q6_UL_ACC_ACK_PRODUCER_PIPE = 8,
/* UL Acc Path QBAP status Pipe IPA->Q6 */
IPA_HW_Q6_QBAP_STATUS_PRODUCER_PIPE = 9,
/* Diag status pipe IPA->Q6 */
/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
/* SIM Pipe IPA->Sim */
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0 = 10,
/* SIM Pipe Sim->IPA */
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1 = 11,
/* SIM Pipe Sim->IPA */
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2 = 12,
/* SIM Pipe Sim->IPA */
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_0 = 13,
/* SIM B2B PROD Pipe */
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_1 = 14,
/* SIM Pipe IPA->Sim */
IPA_HW_Q6_SIM_UL_CONSUMER_PIPE_2 = 15,
/* End FEATURE_IPA_TEST_PER_SIM */
/* GSI UT channel SW->IPA */
IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_1 = 16,
/* GSI UT channel IPA->SW */
IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_1 = 17,
/* GSI UT channel SW->IPA */
IPA_HW_Q6_GSI_UT_CONSUMER_PIPE_2 = 18,
/* GSI UT channel IPA->SW */
IPA_HW_Q6_GSI_UT_PRODUCER_PIPE_2 = 19,
IPA_HW_Q6_PIPE_TOTAL
};
/* System BAM endpoints between Q6 & IPA */
enum ipa_hw_q6_gsi_ev_e { /* In Sdx24 0..11 */
/* DL Pipe IPA->Q6 */
IPA_HW_Q6_DL_PRODUCER_PIPE_GSI_EV = 0,
/* UL Pipe IPA->Q6 */
IPA_HW_Q6_UL_PRODUCER_PIPE_GSI_EV = 1,
/* DL Pipe Q6->IPA */
//IPA_HW_Q6_DL_CONSUMER_PIPE_GSI_EV = 2,
/* CTL Pipe Q6->IPA */
//IPA_HW_Q6_CTL_CONSUMER_PIPE_GSI_EV = 3,
/* Q6 -> IPA, LTE DL Optimized path */
//IPA_HW_Q6_LTE_DL_CONSUMER_PIPE_GSI_EV = 4,
/* LWA DL(Wifi to Q6) */
//IPA_HW_Q6_LWA_DL_PRODUCER_PIPE_GSI_EV = 5,
/* Diag status pipe IPA->Q6 */
//IPA_HW_Q6_DIAG_STATUS_PRODUCER_PIPE_GSI_EV = 6,
/* Used only when FEATURE_IPA_TEST_PER_SIM is ON */
/* SIM Pipe IPA->Sim */
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_0_GSI_EV = 2,
/* SIM Pipe Sim->IPA */
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_1_GSI_EV = 3,
/* SIM Pipe Sim->IPA */
IPA_HW_Q6_SIM_DL_PRODUCER_PIPE_2_GSI_EV = 4,
/* SIM Pipe Sim->IPA */
IPA_HW_Q6_SIM_1_GSI_EV = 5,
IPA_HW_Q6_SIM_2_GSI_EV = 6,
IPA_HW_Q6_SIM_3_GSI_EV = 7,
IPA_HW_Q6_SIM_4_GSI_EV = 8,
IPA_HW_Q6_PIPE_GSI_EV_TOTAL
};
/*
* All the IRQ's supported by the IPA HW. Use this enum to set IRQ_EN
* register and read IRQ_STTS register
*/
enum ipa_hw_irq_e {
IPA_HW_IRQ_GSI_HWP = (1 << 25),
IPA_HW_IRQ_GSI_IPA_IF_TLV_RCVD = (1 << 24),
IPA_HW_IRQ_GSI_EE_IRQ = (1 << 23),
IPA_HW_IRQ_DCMP_ERR = (1 << 22),
IPA_HW_IRQ_HWP_ERR = (1 << 21),
IPA_HW_IRQ_RED_MARKER_ABOVE = (1 << 20),
IPA_HW_IRQ_YELLOW_MARKER_ABOVE = (1 << 19),
IPA_HW_IRQ_RED_MARKER_BELOW = (1 << 18),
IPA_HW_IRQ_YELLOW_MARKER_BELOW = (1 << 17),
IPA_HW_IRQ_BAM_IDLE_IRQ = (1 << 16),
IPA_HW_IRQ_TX_HOLB_DROP = (1 << 15),
IPA_HW_IRQ_TX_SUSPEND = (1 << 14),
IPA_HW_IRQ_PROC_ERR = (1 << 13),
IPA_HW_IRQ_STEP_MODE = (1 << 12),
IPA_HW_IRQ_TX_ERR = (1 << 11),
IPA_HW_IRQ_DEAGGR_ERR = (1 << 10),
IPA_HW_IRQ_RX_ERR = (1 << 9),
IPA_HW_IRQ_PROC_TO_HW_ACK_Q_NOT_EMPTY = (1 << 8),
IPA_HW_IRQ_HWP_RX_CMD_Q_NOT_FULL = (1 << 7),
IPA_HW_IRQ_HWP_IN_Q_NOT_EMPTY = (1 << 6),
IPA_HW_IRQ_HWP_IRQ_3 = (1 << 5),
IPA_HW_IRQ_HWP_IRQ_2 = (1 << 4),
IPA_HW_IRQ_HWP_IRQ_1 = (1 << 3),
IPA_HW_IRQ_HWP_IRQ_0 = (1 << 2),
IPA_HW_IRQ_EOT_COAL = (1 << 1),
IPA_HW_IRQ_BAD_SNOC_ACCESS = (1 << 0),
IPA_HW_IRQ_NONE = 0,
IPA_HW_IRQ_ALL = 0xFFFFFFFF
};
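/*
 * Editor's note: a minimal usage sketch (not part of the original
 * commit) of driving the IRQ_EN/IRQ_STTS registers with the enum
 * above. It borrows the ipahal_read_reg_n()/ipahal_write_reg_n()
 * accessors and the IPA_IRQ_*_EE_n register names used elsewhere in
 * this commit (see ipa_interrupts.c); the function itself is
 * illustrative only. Note the bit positions line up with
 * ipa3_irq_mapping[] (e.g. TX_SUSPEND is bit 14 in both).
 */
static inline void ipa_hw_irq_usage_sketch(u32 ee)
{
	u32 en = IPA_HW_IRQ_TX_SUSPEND | IPA_HW_IRQ_TX_HOLB_DROP;
	u32 stts;

	/* unmask only the chosen interrupts */
	ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ee, en);
	/* decode the asserted bits */
	stts = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ee);
	if (stts & IPA_HW_IRQ_TX_SUSPEND)
		/* a suspended endpoint raised the interrupt */;
}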
/*
* All the IRQ sources supported by the IPA HW. Use this enum to set
* IRQ_SRCS register
*/
enum ipa_hw_irq_srcs_e {
IPA_HW_IRQ_SRCS_PIPE_0 = (1 << IPA_HW_PIPE_ID_0),
IPA_HW_IRQ_SRCS_PIPE_1 = (1 << IPA_HW_PIPE_ID_1),
IPA_HW_IRQ_SRCS_PIPE_2 = (1 << IPA_HW_PIPE_ID_2),
IPA_HW_IRQ_SRCS_PIPE_3 = (1 << IPA_HW_PIPE_ID_3),
IPA_HW_IRQ_SRCS_PIPE_4 = (1 << IPA_HW_PIPE_ID_4),
IPA_HW_IRQ_SRCS_PIPE_5 = (1 << IPA_HW_PIPE_ID_5),
IPA_HW_IRQ_SRCS_PIPE_6 = (1 << IPA_HW_PIPE_ID_6),
IPA_HW_IRQ_SRCS_PIPE_7 = (1 << IPA_HW_PIPE_ID_7),
IPA_HW_IRQ_SRCS_PIPE_8 = (1 << IPA_HW_PIPE_ID_8),
IPA_HW_IRQ_SRCS_PIPE_9 = (1 << IPA_HW_PIPE_ID_9),
IPA_HW_IRQ_SRCS_PIPE_10 = (1 << IPA_HW_PIPE_ID_10),
IPA_HW_IRQ_SRCS_PIPE_11 = (1 << IPA_HW_PIPE_ID_11),
IPA_HW_IRQ_SRCS_PIPE_12 = (1 << IPA_HW_PIPE_ID_12),
IPA_HW_IRQ_SRCS_PIPE_13 = (1 << IPA_HW_PIPE_ID_13),
IPA_HW_IRQ_SRCS_PIPE_14 = (1 << IPA_HW_PIPE_ID_14),
IPA_HW_IRQ_SRCS_PIPE_15 = (1 << IPA_HW_PIPE_ID_15),
IPA_HW_IRQ_SRCS_PIPE_16 = (1 << IPA_HW_PIPE_ID_16),
IPA_HW_IRQ_SRCS_PIPE_17 = (1 << IPA_HW_PIPE_ID_17),
IPA_HW_IRQ_SRCS_PIPE_18 = (1 << IPA_HW_PIPE_ID_18),
IPA_HW_IRQ_SRCS_PIPE_19 = (1 << IPA_HW_PIPE_ID_19),
IPA_HW_IRQ_SRCS_PIPE_20 = (1 << IPA_HW_PIPE_ID_20),
IPA_HW_IRQ_SRCS_PIPE_21 = (1 << IPA_HW_PIPE_ID_21),
IPA_HW_IRQ_SRCS_PIPE_22 = (1 << IPA_HW_PIPE_ID_22),
IPA_HW_IRQ_SRCS_NONE = 0,
IPA_HW_IRQ_SRCS_ALL = 0xFFFFFFFF,
};
/*
* Total number of channel contexts that need to be saved for APPS
*/
#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_A7 20
/*
* Total number of channel contexts that need to be saved for UC
*/
#define IPA_HW_REG_SAVE_GSI_NUM_CH_CNTXT_UC 2
/*
* Total number of event ring contexts that need to be saved for APPS
*/
#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_A7 19
/*
* Total number of event ring contexts that need to be saved for UC
*/
#define IPA_HW_REG_SAVE_GSI_NUM_EVT_CNTXT_UC 1
/*
* Total number of endpoints for which ipa_reg_save.pipes[endp_number]
* is not saved by default (saved only if ipa_cfg.gen.full_reg_trace ==
* true). There are no extra endpoints in Stingray.
*/
#define IPA_HW_REG_SAVE_NUM_ENDP_EXTRA 0
/*
* Total number of endpoints for which ipa_reg_save.pipes[endp_number]
* are always saved
*/
#define IPA_HW_REG_SAVE_NUM_ACTIVE_PIPES IPA_HW_PIPE_ID_MAX
/*
* SHRAM Bytes per ch
*/
#define IPA_REG_SAVE_BYTES_PER_CHNL_SHRAM 12
/*
* Total number of rx splt cmdq's see:
* ipa_rx_splt_cmdq_n_cmd[IPA_RX_SPLT_CMDQ_MAX]
*/
#define IPA_RX_SPLT_CMDQ_MAX 4
/*
* Although not necessary for the numbers below, the use of round_up
* is so that future developers know that these particular constants
* have to be a multiple of four bytes, because the IPA memory reads
* that they drive are always 32 bits...
*/
#define IPA_IU_ADDR 0x000A0000
#define IPA_IU_SIZE round_up(40704, sizeof(u32))
#define IPA_SRAM_ADDR 0x00050000
#define IPA_SRAM_SIZE round_up(19232, sizeof(u32))
#define IPA_MBOX_ADDR 0x000C2000
#define IPA_MBOX_SIZE round_up(256, sizeof(u32))
#define IPA_HRAM_ADDR 0x00060000
#define IPA_HRAM_SIZE round_up(47536, sizeof(u32))
#define IPA_SEQ_ADDR 0x00081000
#define IPA_SEQ_SIZE round_up(768, sizeof(u32))
#define IPA_GSI_ADDR 0x00006000
#define IPA_GSI_SIZE round_up(5376, sizeof(u32))
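/*
 * Editor's note: an illustrative sketch (not part of the original
 * commit) of the 32-bit access pattern the round_up() sizes above
 * guarantee: each region is walked one u32 at a time, so a size that
 * were not a multiple of four would leave a partial trailing word.
 * "base" is assumed to be an ioremapped pointer to the region start.
 */
static void ipa_reg_save_read_region_sketch(void __iomem *base, u32 *out)
{
	u32 i;

	for (i = 0; i < IPA_SRAM_SIZE / sizeof(u32); i++)
		out[i] = ioread32(base + i * sizeof(u32));
}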
/*
* Macro to define a particular register cfg entry for all pipe
* indexed register
*/
#define IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(reg_name, var_name) \
{ GEN_1xVECTOR_REG_OFST(reg_name, 0), \
(u32 *)&ipa_reg_save.ipa.pipes[0].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 1), \
(u32 *)&ipa_reg_save.ipa.pipes[1].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 2), \
(u32 *)&ipa_reg_save.ipa.pipes[2].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 3), \
(u32 *)&ipa_reg_save.ipa.pipes[3].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 4), \
(u32 *)&ipa_reg_save.ipa.pipes[4].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 5), \
(u32 *)&ipa_reg_save.ipa.pipes[5].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 6), \
(u32 *)&ipa_reg_save.ipa.pipes[6].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 7), \
(u32 *)&ipa_reg_save.ipa.pipes[7].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 8), \
(u32 *)&ipa_reg_save.ipa.pipes[8].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 9), \
(u32 *)&ipa_reg_save.ipa.pipes[9].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 10), \
(u32 *)&ipa_reg_save.ipa.pipes[10].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 11), \
(u32 *)&ipa_reg_save.ipa.pipes[11].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 12), \
(u32 *)&ipa_reg_save.ipa.pipes[12].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 13), \
(u32 *)&ipa_reg_save.ipa.pipes[13].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 14), \
(u32 *)&ipa_reg_save.ipa.pipes[14].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 15), \
(u32 *)&ipa_reg_save.ipa.pipes[15].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 16), \
(u32 *)&ipa_reg_save.ipa.pipes[16].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 17), \
(u32 *)&ipa_reg_save.ipa.pipes[17].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 18), \
(u32 *)&ipa_reg_save.ipa.pipes[18].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 19), \
(u32 *)&ipa_reg_save.ipa.pipes[19].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 20), \
(u32 *)&ipa_reg_save.ipa.pipes[20].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 21), \
(u32 *)&ipa_reg_save.ipa.pipes[21].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 22), \
(u32 *)&ipa_reg_save.ipa.pipes[22].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 23), \
(u32 *)&ipa_reg_save.ipa.pipes[23].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 24), \
(u32 *)&ipa_reg_save.ipa.pipes[24].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 25), \
(u32 *)&ipa_reg_save.ipa.pipes[25].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 26), \
(u32 *)&ipa_reg_save.ipa.pipes[26].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 27), \
(u32 *)&ipa_reg_save.ipa.pipes[27].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 28), \
(u32 *)&ipa_reg_save.ipa.pipes[28].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 29), \
(u32 *)&ipa_reg_save.ipa.pipes[29].endp.var_name }, \
{ GEN_1xVECTOR_REG_OFST(reg_name, 30), \
(u32 *)&ipa_reg_save.ipa.pipes[30].endp.var_name }
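/*
 * Editor's note: a hedged sketch of how the macro above is meant to be
 * used. The entry type "struct reg_save_map_entry_s" (an {offset,
 * destination pointer} pair), the IPA_ENDP_INIT_CTRL_n register name
 * and the endp_init_ctrl_n field are illustrative assumptions; the
 * real tables live in ipa_reg_dump.c/.h.
 */
static struct reg_save_map_entry_s ipa_reg_save_map_sketch[] = {
	IPA_REG_SAVE_CFG_ENTRY_PIPE_ENDP(IPA_ENDP_INIT_CTRL_n,
					 endp_init_ctrl_n),
};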
/*
* Macro to define a particular register cfg entry for the remaining
* pipe indexed registers. In the Stingray case there are no extra
* endpoints, so it is intentionally empty
*/
#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA(REG_NAME, VAR_NAME) \
{ 0, 0 }
/*
* Macro to set the active flag for all active pipe indexed registers.
* In the Stingray case there are no extra endpoints, so it is
* intentionally empty
*/
#define IPA_HW_REG_SAVE_CFG_ENTRY_PIPE_ENDP_EXTRA_ACTIVE() \
do { \
} while (0)
#endif /* #if !defined(_IPA_HW_COMMON_EX_H_) */

10895
ipa/ipa_v3/dump/ipa4.5/ipa_hwio.h Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@@ -0,0 +1,183 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_IPA_PKT_CNTXT_H_)
#define _IPA_PKT_CNTXT_H_
#define IPA_HW_PKT_CTNTX_MAX 0x10
#define IPA_HW_NUM_SAVE_PKT_CTNTX 0x8
#define IPA_HW_PKT_CTNTX_START_ADDR 0xE434CA00
#define IPA_HW_PKT_CTNTX_SIZE (sizeof(ipa_pkt_ctntx_opcode_state_s) + \
sizeof(ipa_pkt_ctntx_u))
/*
* Packet Context States
*/
enum ipa_hw_pkt_cntxt_state_e {
IPA_HW_PKT_CNTXT_STATE_HFETCHER_INIT = 1,
IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR,
IPA_HW_PKT_CNTXT_STATE_HFETCHER_DMAR_REP,
IPA_HW_PKT_CNTXT_STATE_H_DCPH,
IPA_HW_PKT_CNTXT_STATE_PKT_PARSER,
IPA_HW_PKT_CNTXT_STATE_FILTER_NAT,
IPA_HW_PKT_CNTXT_STATE_ROUTER,
IPA_HW_PKT_CNTXT_STATE_HDRI,
IPA_HW_PKT_CNTXT_STATE_UCP,
IPA_HW_PKT_CNTXT_STATE_ENQUEUER,
IPA_HW_PKT_CNTXT_STATE_DFETCHER,
IPA_HW_PKT_CNTXT_STATE_D_DCPH,
IPA_HW_PKT_CNTXT_STATE_DISPATCHER,
IPA_HW_PKT_CNTXT_STATE_TX,
IPA_HW_PKT_CNTXT_STATE_TX_ZLT,
IPA_HW_PKT_CNTXT_STATE_DFETCHER_DMAR,
IPA_HW_PKT_CNTXT_STATE_DCMP,
};
/*
* Packet Context fields as received from VI/Design
*/
struct ipa_pkt_ctntx_s {
u64 opcode : 8;
u64 state : 5;
u64 not_used_1 : 2;
u64 tx_pkt_dma_done : 1;
u64 exc_deagg : 1;
u64 exc_pkt_version : 1;
u64 exc_pkt_len : 1;
u64 exc_threshold : 1;
u64 exc_sw : 1;
u64 exc_nat : 1;
u64 exc_frag_miss : 1;
u64 filter_bypass : 1;
u64 router_bypass : 1;
u64 nat_bypass : 1;
u64 hdri_bypass : 1;
u64 dcph_bypass : 1;
u64 security_credentials_select : 1;
u64 pkt_2nd_pass : 1;
u64 xlat_bypass : 1;
u64 dcph_valid : 1;
u64 ucp_on : 1;
u64 replication : 1;
u64 src_status_en : 1;
u64 dest_status_en : 1;
u64 frag_status_en : 1;
u64 eot_dest : 1;
u64 eot_notif : 1;
u64 prev_eot_dest : 1;
u64 src_hdr_len : 8;
u64 tx_valid_sectors : 8;
u64 rx_flags : 8;
u64 rx_packet_length : 16;
u64 revised_packet_length : 16;
u64 frag_en : 1;
u64 frag_bypass : 1;
u64 frag_process : 1;
u64 notif_pipe : 5;
u64 src_id : 8;
u64 tx_pkt_transferred : 1;
u64 src_pipe : 5;
u64 dest_pipe : 5;
u64 frag_pipe : 5;
u64 ihl_offset : 8;
u64 protocol : 8;
u64 tos : 8;
u64 id : 16;
u64 v6_reserved : 4;
u64 ff : 1;
u64 mf : 1;
u64 pkt_israg : 1;
u64 tx_holb_timer_overflow : 1;
u64 tx_holb_timer_running : 1;
u64 trnseq_0 : 3;
u64 trnseq_1 : 3;
u64 trnseq_2 : 3;
u64 trnseq_3 : 3;
u64 trnseq_4 : 3;
u64 trnseq_ex_length : 8;
u64 trnseq_4_length : 8;
u64 trnseq_4_offset : 8;
u64 dps_tx_pop_cnt : 2;
u64 dps_tx_push_cnt : 2;
u64 vol_ic_dcph_cfg : 1;
u64 vol_ic_tag_stts : 1;
u64 vol_ic_pxkt_init_e : 1;
u64 vol_ic_pkt_init : 1;
u64 tx_holb_counter : 32;
u64 trnseq_0_length : 8;
u64 trnseq_0_offset : 8;
u64 trnseq_1_length : 8;
u64 trnseq_1_offset : 8;
u64 trnseq_2_length : 8;
u64 trnseq_2_offset : 8;
u64 trnseq_3_length : 8;
u64 trnseq_3_offset : 8;
u64 dmar_valid_length : 16;
u64 dcph_valid_length : 16;
u64 frag_hdr_offset : 9;
u64 ip_payload_offset : 9;
u64 frag_rule : 4;
u64 frag_table : 1;
u64 frag_hit : 1;
u64 data_cmdq_ptr : 8;
u64 filter_result : 6;
u64 router_result : 6;
u64 nat_result : 6;
u64 hdri_result : 6;
u64 dcph_result : 6;
u64 dcph_result_valid : 1;
u64 not_used_2 : 4;
u64 tx_pkt_suspended : 1;
u64 tx_pkt_dropped : 1;
u64 not_used_3 : 3;
u64 metadata_valid : 1;
u64 metadata_type : 4;
u64 ul_cs_start_diff : 9;
u64 cs_disable_trlr_vld_bit : 1;
u64 cs_required : 1;
u64 dest_hdr_len : 8;
u64 fr_l : 1;
u64 fl_h : 1;
u64 fr_g : 1;
u64 fr_ret : 1;
u64 fr_rule_id : 10;
u64 rt_l : 1;
u64 rt_h : 1;
u64 rtng_tbl_index : 5;
u64 rt_match : 1;
u64 rt_rule_id : 10;
u64 nat_tbl_index : 13;
u64 nat_type : 2;
u64 hdr_l : 1;
u64 header_offset : 10;
u64 not_used_4 : 1;
u64 filter_result_valid : 1;
u64 router_result_valid : 1;
u64 nat_result_valid : 1;
u64 hdri_result_valid : 1;
u64 not_used_5 : 1;
u64 stream_id : 8;
u64 not_used_6 : 6;
u64 dcph_context_index : 2;
u64 dcph_cfg_size : 16;
u64 dcph_cfg_count : 32;
u64 tag_info : 48;
u64 ucp_cmd_id : 16;
u64 metadata : 32;
u64 ucp_cmd_params : 32;
u64 nat_ip_address : 32;
u64 nat_ip_cs_diff : 16;
u64 frag_dest_pipe : 5;
u64 frag_nat_type : 2;
u64 fragr_ret : 1;
u64 frag_protocol : 8;
u64 src_ip_address : 32;
u64 dest_ip_address : 32;
u64 not_used_7 : 37;
u64 frag_hdr_l : 1;
u64 frag_header_offset : 10;
u64 frag_id : 16;
} __packed;
#endif /* #if !defined(_IPA_PKT_CNTXT_H_) */

1632
ipa/ipa_v3/dump/ipa_reg_dump.c Normal file

File diff suppressed because it is too large

1397
ipa/ipa_v3/dump/ipa_reg_dump.h Normal file

File diff suppressed because it is too large

8379
ipa/ipa_v3/ipa.c Normal file

File diff suppressed because it is too large

1744
ipa/ipa_v3/ipa_client.c Normal file

File diff suppressed because it is too large

2569
ipa/ipa_v3/ipa_debugfs.c Normal file

File diff suppressed because it is too large

94
ipa/ipa_v3/ipa_defs.h Normal file
View file

@@ -0,0 +1,94 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA_DEFS_H_
#define _IPA_DEFS_H_
#include <linux/ipa.h>
/**
* struct ipa_rt_rule_i - attributes of a routing rule
* @dst: dst "client"
* @hdr_hdl: handle to the dynamic header,
* it is not an index or an offset
* @hdr_proc_ctx_hdl: handle to header processing context. if it is provided,
* hdr_hdl shall be 0
* @attrib: attributes of the rule
* @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
* IPA will use the rule and will not look for other rules that may have
* higher priority
* @hashable: bool switch. is this rule hashable or not?
* ipa uses hashable rules to cache their hit results to be used in
* consecutive packets
* @retain_hdr: bool switch to instruct IPA core to add back to the packet
* the header removed as part of header removal
* @coalesce: bool to decide whether packets should be coalesced or not
* @enable_stats: is true when we want to enable stats for this
* rt rule.
* @cnt_idx: if enable_stats is 1 and cnt_idx is 0, then cnt_idx
* will be assigned by ipa driver.
*/
struct ipa_rt_rule_i {
enum ipa_client_type dst;
u32 hdr_hdl;
u32 hdr_proc_ctx_hdl;
struct ipa_rule_attrib attrib;
u8 max_prio;
u8 hashable;
u8 retain_hdr;
u8 coalesce;
u8 enable_stats;
u8 cnt_idx;
};
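/*
 * Editor's note: a minimal sketch (not part of the original commit) of
 * filling a routing rule per the field documentation above. The
 * destination client and the handle value are placeholders; note that
 * leaving cnt_idx at 0 with enable_stats set lets the driver pick the
 * counter index.
 */
static inline void ipa_rt_rule_fill_sketch(struct ipa_rt_rule_i *rule,
					   u32 hdr_hdl)
{
	memset(rule, 0, sizeof(*rule));
	rule->dst = IPA_CLIENT_USB_CONS;	/* placeholder destination */
	rule->hdr_hdl = hdr_hdl;	/* a handle, not an index or offset */
	rule->hashable = 1;		/* let IPA cache hit results */
	rule->enable_stats = 1;
	rule->cnt_idx = 0;		/* 0 => driver assigns the index */
}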
/**
* struct ipa_flt_rule_i - attributes of a filtering rule
* @retain_hdr: bool switch to instruct IPA core to add back to the packet
* the header removed as part of header removal
* @to_uc: bool switch to pass packet to micro-controller
* @action: action field
* @rt_tbl_hdl: handle of table from "get"
* @attrib: attributes of the rule
* @eq_attrib: attributes of the rule in equation form (valid when
* eq_attrib_type is true)
* @rt_tbl_idx: index of RT table referred to by filter rule (valid when
* eq_attrib_type is true and non-exception action)
* @eq_attrib_type: true if equation level form used to specify attributes
* @max_prio: bool switch. is this rule with Max priority? meaning on rule hit,
* IPA will use the rule and will not look for other rules that may have
* higher priority
* @hashable: bool switch. is this rule hashable or not?
* ipa uses hashable rules to cache their hit results to be used in
* consecutive packets
* @rule_id: rule_id to be assigned to the filter rule. In case client specifies
* rule_id as 0 the driver will assign a new rule_id
* @set_metadata: bool switch. should metadata replacement at the NAT block
* take place?
* @pdn_idx: if action is "pass to source/destination NAT" then a comparison
* against the PDN index in the matching PDN entry will take place as an
* additional condition for NAT hit.
* @enable_stats: is true when we want to enable stats for this
* flt rule.
* @cnt_idx: counter index for the rule; if enable_stats is 1 and cnt_idx
* is 0, cnt_idx will be assigned by the ipa driver.
*/
struct ipa_flt_rule_i {
u8 retain_hdr;
u8 to_uc;
enum ipa_flt_action action;
u32 rt_tbl_hdl;
struct ipa_rule_attrib attrib;
struct ipa_ipfltri_rule_eq eq_attrib;
u32 rt_tbl_idx;
u8 eq_attrib_type;
u8 max_prio;
u8 hashable;
u16 rule_id;
u8 set_metadata;
u8 pdn_idx;
u8 enable_stats;
u8 cnt_idx;
};
#endif /* _IPA_DEFS_H_ */

1243
ipa/ipa_v3/ipa_dma.c Normal file

File diff suppressed because it is too large

4619
ipa/ipa_v3/ipa_dp.c Normal file

File diff suppressed because it is too large

874
ipa/ipa_v3/ipa_dt_replacement.c Normal file
View file

@@ -0,0 +1,874 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/msm_ipa.h>
#include "ipa_i.h"
#include "ipa_emulation_stubs.h"
# undef strsame
# define strsame(x, y) \
(!strcmp((x), (y)))
/*
* The following enum values are used to index the tables below.
*/
enum dtsi_index_e {
DTSI_INDEX_3_5_1 = 0,
DTSI_INDEX_4_0 = 1,
DTSI_INDEX_4_5 = 2,
};
struct dtsi_replacement_u32 {
char *key;
u32 value;
};
struct dtsi_replacement_u32_table {
struct dtsi_replacement_u32 *p_table;
u32 num_entries;
};
struct dtsi_replacement_bool {
char *key;
bool value;
};
struct dtsi_replacement_bool_table {
struct dtsi_replacement_bool *p_table;
u32 num_entries;
};
struct dtsi_replacement_u32_array {
char *key;
u32 *p_value;
u32 num_elements;
};
struct dtsi_replacement_u32_array_table {
struct dtsi_replacement_u32_array *p_table;
u32 num_entries;
};
struct dtsi_replacement_resource_table {
struct resource *p_table;
u32 num_entries;
};
/*
* Any of the data below with _4_5 in the name represent data taken
* from the 4.5 dtsi file.
*
* Any of the data below with _4_0 in the name represent data taken
* from the 4.0 dtsi file.
*
* Any of the data below with _3_5_1 in the name represent data taken
* from the 3.5.1 dtsi file.
*/
static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_5[] = {
{"qcom,use-ipa-tethering-bridge", true},
{"qcom,modem-cfg-emb-pipe-flt", true},
{"qcom,ipa-wdi2", false},
{"qcom,use-64-bit-dma-mask", false},
{"qcom,bandwidth-vote-for-ipa", true},
{"qcom,skip-uc-pipe-reset", false},
{"qcom,tethered-flow-control", false},
{"qcom,use-rg10-limitation-mitigation", false},
{"qcom,do-not-use-ch-gsi-20", false},
{"qcom,use-ipa-pm", true},
{"qcom,register-collection-on-crash", true},
{"qcom,testbus-collection-on-crash", true},
{"qcom,non-tn-collection-on-crash", true},
};
static struct dtsi_replacement_bool ipa3_plat_drv_bool_4_0[] = {
{"qcom,use-ipa-tethering-bridge", true},
{"qcom,modem-cfg-emb-pipe-flt", true},
{"qcom,ipa-wdi2", true},
{"qcom,use-64-bit-dma-mask", false},
{"qcom,bandwidth-vote-for-ipa", false},
{"qcom,skip-uc-pipe-reset", false},
{"qcom,tethered-flow-control", true},
{"qcom,use-rg10-limitation-mitigation", false},
{"qcom,do-not-use-ch-gsi-20", false},
{"qcom,use-ipa-pm", false},
{"qcom,register-collection-on-crash", true},
{"qcom,testbus-collection-on-crash", true},
{"qcom,non-tn-collection-on-crash", true},
};
static struct dtsi_replacement_bool ipa3_plat_drv_bool_3_5_1[] = {
{"qcom,use-ipa-tethering-bridge", true},
{"qcom,modem-cfg-emb-pipe-flt", true},
{"qcom,ipa-wdi2", true},
{"qcom,use-64-bit-dma-mask", false},
{"qcom,bandwidth-vote-for-ipa", true},
{"qcom,skip-uc-pipe-reset", false},
{"qcom,tethered-flow-control", false},
{"qcom,use-rg10-limitation-mitigation", false},
{"qcom,do-not-use-ch-gsi-20", false},
{"qcom,use-ipa-pm", false},
{"qcom,register-collection-on-crash", true},
{"qcom,testbus-collection-on-crash", true},
{"qcom,non-tn-collection-on-crash", true},
};
static struct dtsi_replacement_bool_table
ipa3_plat_drv_bool_table[] = {
{ ipa3_plat_drv_bool_3_5_1,
ARRAY_SIZE(ipa3_plat_drv_bool_3_5_1) },
{ ipa3_plat_drv_bool_4_0,
ARRAY_SIZE(ipa3_plat_drv_bool_4_0) },
{ ipa3_plat_drv_bool_4_5,
ARRAY_SIZE(ipa3_plat_drv_bool_4_5) },
};
static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_5[] = {
{"qcom,ipa-hw-ver", IPA_HW_v4_5},
{"qcom,ipa-hw-mode", 3},
{"qcom,wan-rx-ring-size", 192},
{"qcom,lan-rx-ring-size", 192},
{"qcom,ee", 0},
{"qcom,msm-bus,num-cases", 5},
{"emulator-bar0-offset", 0x01C00000},
{"qcom,entire-ipa-block-size", 0x00100000},
};
static struct dtsi_replacement_u32 ipa3_plat_drv_u32_4_0[] = {
{"qcom,ipa-hw-ver", IPA_HW_v4_0},
{"qcom,ipa-hw-mode", 3},
{"qcom,wan-rx-ring-size", 192},
{"qcom,lan-rx-ring-size", 192},
{"qcom,ee", 0},
{"emulator-bar0-offset", 0x01C00000},
{"qcom,entire-ipa-block-size", 0x00100000},
};
static struct dtsi_replacement_u32 ipa3_plat_drv_u32_3_5_1[] = {
{"qcom,ipa-hw-ver", IPA_HW_v3_5_1},
{"qcom,ipa-hw-mode", 3},
{"qcom,wan-rx-ring-size", 192},
{"qcom,lan-rx-ring-size", 192},
{"qcom,ee", 0},
{"emulator-bar0-offset", 0x01C00000},
{"qcom,entire-ipa-block-size", 0x00100000},
};
static struct dtsi_replacement_u32_table ipa3_plat_drv_u32_table[] = {
{ ipa3_plat_drv_u32_3_5_1,
ARRAY_SIZE(ipa3_plat_drv_u32_3_5_1) },
{ ipa3_plat_drv_u32_4_0,
ARRAY_SIZE(ipa3_plat_drv_u32_4_0) },
{ ipa3_plat_drv_u32_4_5,
ARRAY_SIZE(ipa3_plat_drv_u32_4_5) },
};
static u32 mhi_event_ring_id_limits_array_4_5[] = {
9, 10
};
static u32 mhi_event_ring_id_limits_array_4_0[] = {
9, 10
};
static u32 mhi_event_ring_id_limits_array_3_5_1[] = {
IPA_MHI_GSI_EVENT_RING_ID_START, IPA_MHI_GSI_EVENT_RING_ID_END
};
static u32 ipa_tz_unlock_reg_array_4_5[] = {
0x04043583c, 0x00001000
};
static u32 ipa_throughput_thresh_array_4_5[] = {
310, 600, 1000
};
static u32 ipa_tz_unlock_reg_array_4_0[] = {
0x04043583c, 0x00001000
};
static u32 ipa_tz_unlock_reg_array_3_5_1[] = {
0x04043583c, 0x00001000
};
struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_5[] = {
{"qcom,mhi-event-ring-id-limits",
mhi_event_ring_id_limits_array_4_5,
ARRAY_SIZE(mhi_event_ring_id_limits_array_4_5) },
{"qcom,ipa-tz-unlock-reg",
ipa_tz_unlock_reg_array_4_5,
ARRAY_SIZE(ipa_tz_unlock_reg_array_4_5) },
{"qcom,throughput-threshold",
ipa_throughput_thresh_array_4_5,
ARRAY_SIZE(ipa_throughput_thresh_array_4_5) },
};
struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_4_0[] = {
{"qcom,mhi-event-ring-id-limits",
mhi_event_ring_id_limits_array_4_0,
ARRAY_SIZE(mhi_event_ring_id_limits_array_4_0) },
{"qcom,ipa-tz-unlock-reg",
ipa_tz_unlock_reg_array_4_0,
ARRAY_SIZE(ipa_tz_unlock_reg_array_4_0) },
};
struct dtsi_replacement_u32_array ipa3_plat_drv_u32_array_3_5_1[] = {
{"qcom,mhi-event-ring-id-limits",
mhi_event_ring_id_limits_array_3_5_1,
ARRAY_SIZE(mhi_event_ring_id_limits_array_3_5_1) },
{"qcom,ipa-tz-unlock-reg",
ipa_tz_unlock_reg_array_3_5_1,
ARRAY_SIZE(ipa_tz_unlock_reg_array_3_5_1) },
};
struct dtsi_replacement_u32_array_table
ipa3_plat_drv_u32_array_table[] = {
{ ipa3_plat_drv_u32_array_3_5_1,
ARRAY_SIZE(ipa3_plat_drv_u32_array_3_5_1) },
{ ipa3_plat_drv_u32_array_4_0,
ARRAY_SIZE(ipa3_plat_drv_u32_array_4_0) },
{ ipa3_plat_drv_u32_array_4_5,
ARRAY_SIZE(ipa3_plat_drv_u32_array_4_5) },
};
#define INTCTRL_OFFSET 0x083C0000
#define INTCTRL_SIZE 0x00000110
#define IPA_BASE_OFFSET_4_5 0x01e00000
#define IPA_BASE_SIZE_4_5 0x000c0000
#define GSI_BASE_OFFSET_4_5 0x01e04000
#define GSI_BASE_SIZE_4_5 0x00023000
struct resource ipa3_plat_drv_resource_4_5[] = {
/*
* PLEASE NOTE: The following offset values below ("ipa-base",
* "gsi-base", and "intctrl-base") are used to calculate
* offsets relative to the PCI BAR0 address provided by the
* PCI probe. After their use to calculate the offsets, they
* are not used again, since PCI ultimately dictates where
* things live.
*/
{
IPA_BASE_OFFSET_4_5,
(IPA_BASE_OFFSET_4_5 + IPA_BASE_SIZE_4_5),
"ipa-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
GSI_BASE_OFFSET_4_5,
(GSI_BASE_OFFSET_4_5 + GSI_BASE_SIZE_4_5),
"gsi-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
/*
* The following entry is germane only to the emulator
* environment. It is needed to locate the emulator's PCI
* interrupt controller...
*/
{
INTCTRL_OFFSET,
(INTCTRL_OFFSET + INTCTRL_SIZE),
"intctrl-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
IPA_PIPE_MEM_START_OFST,
(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
"ipa-pipe-mem",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
0,
0,
"gsi-irq",
IORESOURCE_IRQ,
0,
NULL,
NULL,
NULL
},
{
0,
0,
"ipa-irq",
IORESOURCE_IRQ,
0,
NULL,
NULL,
NULL
},
};
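/*
 * Editor's note: a sketch (not part of the original commit) of the
 * PLEASE NOTE comment above: the resource starts here are only
 * offsets, and the PCI probe turns them into absolute addresses by
 * adding the BAR0 base. "pdev" is assumed to be the probed emulator
 * PCI device; pci_resource_start() is the standard PCI helper.
 */
static phys_addr_t ipa_emu_abs_addr_sketch(struct pci_dev *pdev,
					   const struct resource *res)
{
	return pci_resource_start(pdev, 0) + res->start;
}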
#define IPA_BASE_OFFSET_4_0 0x01e00000
#define IPA_BASE_SIZE_4_0 0x00034000
#define GSI_BASE_OFFSET_4_0 0x01e04000
#define GSI_BASE_SIZE_4_0 0x00028000
struct resource ipa3_plat_drv_resource_4_0[] = {
/*
* PLEASE NOTE: The following offset values below ("ipa-base",
* "gsi-base", and "intctrl-base") are used to calculate
* offsets relative to the PCI BAR0 address provided by the
* PCI probe. After their use to calculate the offsets, they
* are not used again, since PCI ultimately dictates where
* things live.
*/
{
IPA_BASE_OFFSET_4_0,
(IPA_BASE_OFFSET_4_0 + IPA_BASE_SIZE_4_0),
"ipa-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
GSI_BASE_OFFSET_4_0,
(GSI_BASE_OFFSET_4_0 + GSI_BASE_SIZE_4_0),
"gsi-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
/*
* The following entry is germane only to the emulator
* environment. It is needed to locate the emulator's PCI
* interrupt controller...
*/
{
INTCTRL_OFFSET,
(INTCTRL_OFFSET + INTCTRL_SIZE),
"intctrl-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
IPA_PIPE_MEM_START_OFST,
(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
"ipa-pipe-mem",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
0,
0,
"gsi-irq",
IORESOURCE_IRQ,
0,
NULL,
NULL,
NULL
},
{
0,
0,
"ipa-irq",
IORESOURCE_IRQ,
0,
NULL,
NULL,
NULL
},
};
#define IPA_BASE_OFFSET_3_5_1 0x01e00000
#define IPA_BASE_SIZE_3_5_1 0x00034000
#define GSI_BASE_OFFSET_3_5_1 0x01e04000
#define GSI_BASE_SIZE_3_5_1 0x0002c000
struct resource ipa3_plat_drv_resource_3_5_1[] = {
/*
* PLEASE NOTE: The following offset values below ("ipa-base",
* "gsi-base", and "intctrl-base") are used to calculate
* offsets relative to the PCI BAR0 address provided by the
* PCI probe. After their use to calculate the offsets, they
* are not used again, since PCI ultimately dictates where
* things live.
*/
{
IPA_BASE_OFFSET_3_5_1,
(IPA_BASE_OFFSET_3_5_1 + IPA_BASE_SIZE_3_5_1),
"ipa-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
GSI_BASE_OFFSET_3_5_1,
(GSI_BASE_OFFSET_3_5_1 + GSI_BASE_SIZE_3_5_1),
"gsi-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
/*
* The following entry is germane only to the emulator
* environment. It is needed to locate the emulator's PCI
* interrupt controller...
*/
{
INTCTRL_OFFSET,
(INTCTRL_OFFSET + INTCTRL_SIZE),
"intctrl-base",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
IPA_PIPE_MEM_START_OFST,
(IPA_PIPE_MEM_START_OFST + IPA_PIPE_MEM_SIZE),
"ipa-pipe-mem",
IORESOURCE_MEM,
0,
NULL,
NULL,
NULL
},
{
0,
0,
"gsi-irq",
IORESOURCE_IRQ,
0,
NULL,
NULL,
NULL
},
{
0,
0,
"ipa-irq",
IORESOURCE_IRQ,
0,
NULL,
NULL,
NULL
},
};
struct dtsi_replacement_resource_table
ipa3_plat_drv_resource_table[] = {
{ ipa3_plat_drv_resource_3_5_1,
ARRAY_SIZE(ipa3_plat_drv_resource_3_5_1) },
{ ipa3_plat_drv_resource_4_0,
ARRAY_SIZE(ipa3_plat_drv_resource_4_0) },
{ ipa3_plat_drv_resource_4_5,
ARRAY_SIZE(ipa3_plat_drv_resource_4_5) },
};
/*
* The following code uses the data above...
*/
static u32 emulator_type_to_index(void)
{
/*
* Use the input parameter to the IPA driver loadable module,
* which specifies the type of hardware the driver is running
* on.
*/
u32 index = DTSI_INDEX_4_0;
uint emulation_type = ipa3_get_emulation_type();
switch (emulation_type) {
case IPA_HW_v3_5_1:
index = DTSI_INDEX_3_5_1;
break;
case IPA_HW_v4_0:
index = DTSI_INDEX_4_0;
break;
case IPA_HW_v4_5:
index = DTSI_INDEX_4_5;
break;
default:
break;
}
IPADBG("emulation_type(%u) emulation_index(%u)\n",
emulation_type, index);
return index;
}
/* From include/linux/of.h */
/**
* emulator_of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
*
* Search for a property in a device node.
* Returns true if the property exists, false otherwise.
*/
bool emulator_of_property_read_bool(
const struct device_node *np,
const char *propname)
{
u16 i;
u32 index;
struct dtsi_replacement_bool *ipa3_plat_drv_boolP;
/*
* Get the index for the type of hardware we're running on.
* This is used as a table index.
*/
index = emulator_type_to_index();
if (index >= ARRAY_SIZE(ipa3_plat_drv_bool_table)) {
IPADBG(
"Did not find ipa3_plat_drv_bool_table for index %u\n",
index);
return false;
}
ipa3_plat_drv_boolP =
ipa3_plat_drv_bool_table[index].p_table;
for (i = 0;
i < ipa3_plat_drv_bool_table[index].num_entries;
i++) {
if (strsame(ipa3_plat_drv_boolP[i].key, propname)) {
IPADBG(
"Found value %u for propname %s index %u\n",
ipa3_plat_drv_boolP[i].value,
propname,
index);
return ipa3_plat_drv_boolP[i].value;
}
}
IPADBG("Did not find match for propname %s index %u\n",
propname,
index);
return false;
}
/* From include/linux/of.h */
int emulator_of_property_read_u32(
const struct device_node *np,
const char *propname,
u32 *out_value)
{
u16 i;
u32 index;
struct dtsi_replacement_u32 *ipa3_plat_drv_u32P;
/*
* Get the index for the type of hardware we're running on.
* This is used as a table index.
*/
index = emulator_type_to_index();
if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_table)) {
IPADBG(
"Did not find ipa3_plat_drv_u32_table for index %u\n",
index);
return -EINVAL;
}
ipa3_plat_drv_u32P =
ipa3_plat_drv_u32_table[index].p_table;
for (i = 0;
i < ipa3_plat_drv_u32_table[index].num_entries;
i++) {
if (strsame(ipa3_plat_drv_u32P[i].key, propname)) {
*out_value = ipa3_plat_drv_u32P[i].value;
IPADBG(
"Found value %u for propname %s index %u\n",
ipa3_plat_drv_u32P[i].value,
propname,
index);
return 0;
}
}
IPADBG("Did not find match for propname %s index %u\n",
propname,
index);
return -EINVAL;
}
/* From include/linux/of.h */
/**
* emulator_of_property_read_u32_array - Find and read an array of 32
* bit integers from a property.
*
* @np: device node from which the property value is to be read.
* @propname: name of the property to be searched.
* @out_values: pointer to return value, modified only if return value is 0.
* @sz: number of array elements to read
*
* Search for a property in a device node and read 32-bit value(s) from
* it. Returns 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
* The out_values is modified only if a valid u32 value can be decoded.
*/
int emulator_of_property_read_u32_array(
const struct device_node *np,
const char *propname,
u32 *out_values,
size_t sz)
{
u16 i;
u32 index;
struct dtsi_replacement_u32_array *u32_arrayP;
/*
* Get the index for the type of hardware we're running on.
* This is used as a table index.
*/
index = emulator_type_to_index();
if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
IPADBG(
"Did not find ipa3_plat_drv_u32_array_table for index %u\n",
index);
return -EINVAL;
}
u32_arrayP =
ipa3_plat_drv_u32_array_table[index].p_table;
for (i = 0;
i < ipa3_plat_drv_u32_array_table[index].num_entries;
i++) {
if (strsame(
u32_arrayP[i].key, propname)) {
u32 num_elements =
u32_arrayP[i].num_elements;
u32 *p_element =
&u32_arrayP[i].p_value[0];
size_t j = 0;
if (num_elements > sz) {
IPAERR(
"Found array of %u values for propname %s; only room for %u elements in copy buffer\n",
num_elements,
propname,
(unsigned int) sz);
return -EOVERFLOW;
}
while (j++ < num_elements)
*out_values++ = *p_element++;
IPADBG(
"Found array of values starting with %u for propname %s index %u\n",
u32_arrayP[i].p_value[0],
propname,
index);
return 0;
}
}
IPADBG("Did not find match for propname %s index %u\n",
propname,
index);
return -EINVAL;
}
/* From drivers/base/platform.c */
/**
* emulator_platform_get_resource_byname - get a resource for a device by name
* @dev: platform device
* @type: resource type
* @name: resource name
*/
struct resource *emulator_platform_get_resource_byname(
struct platform_device *dev,
unsigned int type,
const char *name)
{
u16 i;
u32 index;
struct resource *ipa3_plat_drv_resourceP;
/*
* Get the index for the type of hardware we're running on.
* This is used as a table index.
*/
index = emulator_type_to_index();
if (index >= ARRAY_SIZE(ipa3_plat_drv_resource_table)) {
IPADBG(
"Did not find ipa3_plat_drv_resource_table for index %u\n",
index);
return NULL;
}
ipa3_plat_drv_resourceP =
ipa3_plat_drv_resource_table[index].p_table;
for (i = 0;
i < ipa3_plat_drv_resource_table[index].num_entries;
i++) {
struct resource *r = &ipa3_plat_drv_resourceP[i];
if (type == resource_type(r) && strsame(r->name, name)) {
IPADBG(
"Found start 0x%x size %u for name %s index %u\n",
(unsigned int) (r->start),
(unsigned int) (resource_size(r)),
name,
index);
return r;
}
}
IPADBG("Did not find match for name %s index %u\n",
name,
index);
return NULL;
}
/* From drivers/of/base.c */
/**
* emulator_of_property_count_elems_of_size - Count the number of
* elements in a property
*
* @np: device node from which the property value is to
* be read. Not used.
* @propname: name of the property to be searched.
* @elem_size: size of the individual element
*
* Search for a property and count the number of elements of size
* elem_size in it. Returns number of elements on success, -EINVAL if
* the property does not exist or its length does not match a multiple
* of elem_size and -ENODATA if the property does not have a value.
*/
int emulator_of_property_count_elems_of_size(
const struct device_node *np,
const char *propname,
int elem_size)
{
u32 index;
/*
* Get the index for the type of hardware we're running on.
* This is used as a table index.
*/
index = emulator_type_to_index();
/*
* Use elem_size to determine which table to search for the
* specified property name
*/
if (elem_size == sizeof(u32)) {
u16 i;
struct dtsi_replacement_u32_array *u32_arrayP;
if (index >= ARRAY_SIZE(ipa3_plat_drv_u32_array_table)) {
IPADBG(
"Did not find ipa3_plat_drv_u32_array_table for index %u\n",
index);
return -EINVAL;
}
u32_arrayP =
ipa3_plat_drv_u32_array_table[index].p_table;
for (i = 0;
i < ipa3_plat_drv_u32_array_table[index].num_entries;
i++) {
if (strsame(u32_arrayP[i].key, propname)) {
if (u32_arrayP[i].p_value == NULL) {
IPADBG(
"Found no elements for propname %s index %u\n",
propname,
index);
return -ENODATA;
}
IPADBG(
"Found %u elements for propname %s index %u\n",
u32_arrayP[i].num_elements,
propname,
index);
return u32_arrayP[i].num_elements;
}
}
IPADBG(
"Found no match in table with elem_size %d for propname %s index %u\n",
elem_size,
propname,
index);
return -EINVAL;
}
IPAERR(
"Found no tables with element size %u to search for propname %s index %u\n",
elem_size,
propname,
index);
return -EINVAL;
}
int emulator_of_property_read_variable_u32_array(
const struct device_node *np,
const char *propname,
u32 *out_values,
size_t sz_min,
size_t sz_max)
{
return emulator_of_property_read_u32_array(
np, propname, out_values, sz_max);
}
resource_size_t emulator_resource_size(const struct resource *res)
{
return res->end - res->start + 1;
}
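/*
 * Editor's note: a brief usage sketch of the replacement lookups above
 * (not part of the original commit). On the emulator a DT-style query
 * is answered from the static tables in this file, keyed by the
 * hardware version passed to the module; "qcom,ee" is one of the keys
 * present in every ipa3_plat_drv_u32_* table.
 */
static void ipa_dt_replacement_usage_sketch(void)
{
	u32 ee = 0;

	if (!emulator_of_property_read_u32(NULL, "qcom,ee", &ee))
		IPADBG("emulated qcom,ee = %u\n", ee);
}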

View file

@@ -0,0 +1,121 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#if !defined(_IPA_EMULATION_STUBS_H_)
# define _IPA_EMULATION_STUBS_H_
# define outer_flush_range(x, y)
# define __flush_dcache_area(x, y)
# define __cpuc_flush_dcache_area(x, y) __flush_dcache_area(x, y)
/* Point several API calls to these new EMULATION functions */
# define of_property_read_bool(np, propname) \
emulator_of_property_read_bool(NULL, propname)
# define of_property_read_u32(np, propname, out_value) \
emulator_of_property_read_u32(NULL, propname, out_value)
# define of_property_read_u32_array(np, propname, out_values, sz) \
emulator_of_property_read_u32_array(NULL, propname, out_values, sz)
# define platform_get_resource_byname(dev, type, name) \
emulator_platform_get_resource_byname(NULL, type, name)
# define of_property_count_elems_of_size(np, propname, elem_size) \
emulator_of_property_count_elems_of_size(NULL, propname, elem_size)
# define of_property_read_variable_u32_array( \
np, propname, out_values, sz_min, sz_max) \
emulator_of_property_read_variable_u32_array( \
NULL, propname, out_values, sz_min, sz_max)
# define resource_size(res) \
emulator_resource_size(res)
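/*
 * Editor's note (illustration, not part of the original commit): with
 * the redefinitions above in effect, an ordinary driver call such as
 *
 *	of_property_read_u32(node, "qcom,ipa-hw-mode", &mode);
 *
 * compiles into
 *
 *	emulator_of_property_read_u32(NULL, "qcom,ipa-hw-mode", &mode);
 *
 * so the lookup is served from the dtsi replacement tables rather than
 * a real device tree (the node argument is intentionally discarded).
 */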
/**
* emulator_of_property_read_bool - Find a property
* @np: device node used to find the property value. (not used)
* @propname: name of the property to be searched.
*
* Search for a property in a device node.
* Returns true if the property exists, false otherwise.
*/
bool emulator_of_property_read_bool(
const struct device_node *np,
const char *propname);
int emulator_of_property_read_u32(
const struct device_node *np,
const char *propname,
u32 *out_value);
/**
* emulator_of_property_read_u32_array - Find and read an array of 32
* bit integers from a property.
*
* @np: device node used to find the property value. (not used)
* @propname: name of the property to be searched.
* @out_values: pointer to return value, modified only if return value is 0.
* @sz: number of array elements to read
*
* Search for a property in a device node and read 32-bit value(s) from
* it. Returns 0 on success, -EINVAL if the property does not exist,
* -ENODATA if property does not have a value, and -EOVERFLOW if the
* property data isn't large enough.
*
* The out_values is modified only if a valid u32 value can be decoded.
*/
int emulator_of_property_read_u32_array(
const struct device_node *np,
const char *propname,
u32 *out_values,
size_t sz);
/**
* emulator_platform_get_resource_byname - get a resource for a device
* by name
*
* @dev: platform device
* @type: resource type
* @name: resource name
*/
struct resource *emulator_platform_get_resource_byname(
struct platform_device *dev,
unsigned int type,
const char *name);
/**
* emulator_of_property_count_elems_of_size - Count the number of
* elements in a property
*
* @np: device node used to find the property value. (not used)
* @propname: name of the property to be searched.
* @elem_size: size of the individual element
*
* Search for a property and count the number of elements of size
* elem_size in it. Returns number of elements on success, -EINVAL if
* the property does not exist or its length does not match a multiple
* of elem_size and -ENODATA if the property does not have a value.
*/
int emulator_of_property_count_elems_of_size(
const struct device_node *np,
const char *propname,
int elem_size);
int emulator_of_property_read_variable_u32_array(
const struct device_node *np,
const char *propname,
u32 *out_values,
size_t sz_min,
size_t sz_max);
resource_size_t emulator_resource_size(
const struct resource *res);
static inline bool is_device_dma_coherent(struct device *dev)
{
return false;
}
static inline phys_addr_t qcom_smem_virt_to_phys(void *addr)
{
return 0;
}
#endif /* #if !defined(_IPA_EMULATION_STUBS_H_) */

2110
ipa/ipa_v3/ipa_flt.c Normal file

File diff suppressed because it is too large

1335
ipa/ipa_v3/ipa_hdr.c Normal file

File diff suppressed because it is too large

2027
ipa/ipa_v3/ipa_hw_stats.c Normal file

File diff suppressed because it is too large

3040
ipa/ipa_v3/ipa_i.h Normal file

File diff suppressed because it is too large

609
ipa/ipa_v3/ipa_interrupts.c Normal file
View file

@@ -0,0 +1,609 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/interrupt.h>
#include "ipa_i.h"
#define INTERRUPT_WORKQUEUE_NAME "ipa_interrupt_wq"
#define DIS_SUSPEND_INTERRUPT_TIMEOUT 5
#define IPA_IRQ_NUM_MAX 32
struct ipa3_interrupt_info {
ipa_irq_handler_t handler;
enum ipa_irq_type interrupt;
void *private_data;
bool deferred_flag;
};
struct ipa3_interrupt_work_wrap {
struct work_struct interrupt_work;
ipa_irq_handler_t handler;
enum ipa_irq_type interrupt;
void *private_data;
void *interrupt_data;
};
static struct ipa3_interrupt_info ipa_interrupt_to_cb[IPA_IRQ_NUM_MAX];
static struct workqueue_struct *ipa_interrupt_wq;
static u32 ipa_ee;
static void ipa3_tx_suspend_interrupt_wa(void);
static void ipa3_enable_tx_suspend_wa(struct work_struct *work);
static DECLARE_DELAYED_WORK(dwork_en_suspend_int,
ipa3_enable_tx_suspend_wa);
static spinlock_t suspend_wa_lock;
static void ipa3_process_interrupts(bool isr_context);
static int ipa3_irq_mapping[IPA_IRQ_MAX] = {
[IPA_BAD_SNOC_ACCESS_IRQ] = 0,
[IPA_UC_IRQ_0] = 2,
[IPA_UC_IRQ_1] = 3,
[IPA_UC_IRQ_2] = 4,
[IPA_UC_IRQ_3] = 5,
[IPA_UC_IN_Q_NOT_EMPTY_IRQ] = 6,
[IPA_UC_RX_CMD_Q_NOT_FULL_IRQ] = 7,
[IPA_PROC_TO_UC_ACK_Q_NOT_EMPTY_IRQ] = 8,
[IPA_RX_ERR_IRQ] = 9,
[IPA_DEAGGR_ERR_IRQ] = 10,
[IPA_TX_ERR_IRQ] = 11,
[IPA_STEP_MODE_IRQ] = 12,
[IPA_PROC_ERR_IRQ] = 13,
[IPA_TX_SUSPEND_IRQ] = 14,
[IPA_TX_HOLB_DROP_IRQ] = 15,
[IPA_BAM_GSI_IDLE_IRQ] = 16,
[IPA_PIPE_YELLOW_MARKER_BELOW_IRQ] = 17,
[IPA_PIPE_RED_MARKER_BELOW_IRQ] = 18,
[IPA_PIPE_YELLOW_MARKER_ABOVE_IRQ] = 19,
[IPA_PIPE_RED_MARKER_ABOVE_IRQ] = 20,
[IPA_UCP_IRQ] = 21,
[IPA_DCMP_IRQ] = 22,
[IPA_GSI_EE_IRQ] = 23,
[IPA_GSI_IPA_IF_TLV_RCVD_IRQ] = 24,
[IPA_GSI_UC_IRQ] = 25,
[IPA_TLV_LEN_MIN_DSM_IRQ] = 26,
};
static void ipa3_interrupt_defer(struct work_struct *work);
static DECLARE_WORK(ipa3_interrupt_defer_work, ipa3_interrupt_defer);
static void ipa3_deferred_interrupt_work(struct work_struct *work)
{
struct ipa3_interrupt_work_wrap *work_data =
container_of(work,
struct ipa3_interrupt_work_wrap,
interrupt_work);
IPADBG("call handler from workq for interrupt %d...\n",
work_data->interrupt);
work_data->handler(work_data->interrupt, work_data->private_data,
work_data->interrupt_data);
kfree(work_data->interrupt_data);
kfree(work_data);
}
static bool ipa3_is_valid_ep(u32 ep_suspend_data)
{
u32 bmsk = 1;
u32 i = 0;
for (i = 0; i < ipa3_ctx->ipa_num_pipes; i++) {
if ((ep_suspend_data & bmsk) && (ipa3_ctx->ep[i].valid))
return true;
bmsk = bmsk << 1;
}
return false;
}
static int ipa3_handle_interrupt(int irq_num, bool isr_context)
{
struct ipa3_interrupt_info interrupt_info;
struct ipa3_interrupt_work_wrap *work_data;
u32 suspend_data;
void *interrupt_data = NULL;
struct ipa_tx_suspend_irq_data *suspend_interrupt_data = NULL;
int res;
interrupt_info = ipa_interrupt_to_cb[irq_num];
if (interrupt_info.handler == NULL) {
IPAERR("A callback function wasn't set for interrupt num %d\n",
irq_num);
return -EINVAL;
}
switch (interrupt_info.interrupt) {
case IPA_TX_SUSPEND_IRQ:
IPADBG_LOW("processing TX_SUSPEND interrupt\n");
ipa3_tx_suspend_interrupt_wa();
suspend_data = ipahal_read_reg_n(IPA_SUSPEND_IRQ_INFO_EE_n,
ipa_ee);
IPADBG_LOW("get interrupt %d\n", suspend_data);
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1) {
/* Clearing L2 interrupts status */
ipahal_write_reg_n(IPA_SUSPEND_IRQ_CLR_EE_n,
ipa_ee, suspend_data);
}
if (!ipa3_is_valid_ep(suspend_data))
return 0;
suspend_interrupt_data =
kzalloc(sizeof(*suspend_interrupt_data), GFP_ATOMIC);
if (!suspend_interrupt_data) {
IPAERR("failed allocating suspend_interrupt_data\n");
return -ENOMEM;
}
suspend_interrupt_data->endpoints = suspend_data;
interrupt_data = suspend_interrupt_data;
break;
default:
break;
}
/* Force defer processing if in ISR context. */
if (interrupt_info.deferred_flag || isr_context) {
IPADBG_LOW("Defer handling interrupt %d\n",
interrupt_info.interrupt);
work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
GFP_ATOMIC);
if (!work_data) {
IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
res = -ENOMEM;
goto fail_alloc_work;
}
INIT_WORK(&work_data->interrupt_work,
ipa3_deferred_interrupt_work);
work_data->handler = interrupt_info.handler;
work_data->interrupt = interrupt_info.interrupt;
work_data->private_data = interrupt_info.private_data;
work_data->interrupt_data = interrupt_data;
queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
} else {
IPADBG_LOW("Handle interrupt %d\n", interrupt_info.interrupt);
interrupt_info.handler(interrupt_info.interrupt,
interrupt_info.private_data,
interrupt_data);
kfree(interrupt_data);
}
return 0;
fail_alloc_work:
kfree(interrupt_data);
return res;
}
static void ipa3_enable_tx_suspend_wa(struct work_struct *work)
{
u32 en;
u32 suspend_bmask;
int irq_num;
IPADBG_LOW("Enter\n");
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
if (irq_num == -1) {
WARN_ON(1);
return;
}
/* make sure ipa hw is clocked on*/
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
suspend_bmask = 1 << irq_num;
/*enable TX_SUSPEND_IRQ*/
en |= suspend_bmask;
IPADBG("enable TX_SUSPEND_IRQ, IPA_IRQ_EN_EE reg, write val = %u\n"
, en);
ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, en);
ipa3_process_interrupts(false);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG_LOW("Exit\n");
}
static void ipa3_tx_suspend_interrupt_wa(void)
{
u32 val;
u32 suspend_bmask;
int irq_num;
int wa_delay;
IPADBG_LOW("Enter\n");
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
if (irq_num == -1) {
WARN_ON(1);
return;
}
/*disable TX_SUSPEND_IRQ*/
val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
suspend_bmask = 1 << irq_num;
val &= ~suspend_bmask;
IPADBG("Disabling TX_SUSPEND_IRQ, write val: %u to IPA_IRQ_EN_EE reg\n",
val);
ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
IPADBG_LOW(" processing suspend interrupt work-around, delayed work\n");
wa_delay = DIS_SUSPEND_INTERRUPT_TIMEOUT;
if (ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_VIRTUAL ||
ipa3_ctx->ipa3_hw_mode == IPA_HW_MODE_EMULATION) {
wa_delay *= 400;
}
IPADBG_LOW("Delay period %d msec\n", wa_delay);
queue_delayed_work(ipa_interrupt_wq, &dwork_en_suspend_int,
msecs_to_jiffies(wa_delay));
IPADBG_LOW("Exit\n");
}
static inline bool is_uc_irq(int irq_num)
{
if (ipa_interrupt_to_cb[irq_num].interrupt >= IPA_UC_IRQ_0 &&
ipa_interrupt_to_cb[irq_num].interrupt <= IPA_UC_IRQ_3)
return true;
else
return false;
}
static void ipa3_process_interrupts(bool isr_context)
{
u32 reg;
u32 bmsk;
u32 i = 0;
u32 en;
unsigned long flags;
bool uc_irq;
IPADBG_LOW("Enter isr_context=%d\n", isr_context);
spin_lock_irqsave(&suspend_wa_lock, flags);
en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
while (en & reg) {
IPADBG_LOW("en=0x%x reg=0x%x\n", en, reg);
bmsk = 1;
for (i = 0; i < IPA_IRQ_NUM_MAX; i++) {
IPADBG_LOW("Check irq number %d\n", i);
if (en & reg & bmsk) {
IPADBG_LOW("Irq number %d asserted\n", i);
uc_irq = is_uc_irq(i);
/*
* Clear uC interrupt before processing to avoid
* clearing unhandled interrupts
*/
if (uc_irq)
ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
ipa_ee, bmsk);
/*
* handle the interrupt with spin_lock
* unlocked to avoid calling client in atomic
* context. mutual exclusion still preserved
* as the read/clr is done with spin_lock
* locked.
*/
spin_unlock_irqrestore(&suspend_wa_lock, flags);
ipa3_handle_interrupt(i, isr_context);
spin_lock_irqsave(&suspend_wa_lock, flags);
/*
* Clear non uC interrupt after processing
* to avoid clearing interrupt data
*/
if (!uc_irq)
ipahal_write_reg_n(IPA_IRQ_CLR_EE_n,
ipa_ee, bmsk);
}
bmsk = bmsk << 1;
}
reg = ipahal_read_reg_n(IPA_IRQ_STTS_EE_n, ipa_ee);
/*
* Because of the suspend interrupt HW bug we must read the EN
* register again, otherwise the while loop would be endless
*/
en = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
}
spin_unlock_irqrestore(&suspend_wa_lock, flags);
IPADBG_LOW("Exit\n");
}
static void ipa3_interrupt_defer(struct work_struct *work)
{
IPADBG("processing interrupts in wq\n");
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ipa3_process_interrupts(false);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("Done\n");
}
static irqreturn_t ipa3_isr(int irq, void *ctxt)
{
struct ipa_active_client_logging_info log_info;
IPA_ACTIVE_CLIENTS_PREP_SIMPLE(log_info);
IPADBG_LOW("Enter\n");
/* defer interrupt handling in case IPA is not clocked on */
if (ipa3_inc_client_enable_clks_no_block(&log_info)) {
IPADBG("defer interrupt processing\n");
queue_work(ipa3_ctx->power_mgmt_wq, &ipa3_interrupt_defer_work);
return IRQ_HANDLED;
}
ipa3_process_interrupts(true);
IPADBG_LOW("Exit\n");
ipa3_dec_client_disable_clks_no_block(&log_info);
return IRQ_HANDLED;
}
irq_handler_t ipa3_get_isr(void)
{
return ipa3_isr;
}
/**
* ipa3_add_interrupt_handler() - Adds handler to an interrupt type
* @interrupt: Interrupt type
* @handler: The handler to be added
* @deferred_flag: whether the handler processing should be deferred in
* a workqueue
* @private_data: the client's private data
*
* Adds a handler for an interrupt type and enables the corresponding bit in
* the IRQ_EN register; the associated interrupt in the IRQ_STTS register
* will be enabled
*/
int ipa3_add_interrupt_handler(enum ipa_irq_type interrupt,
ipa_irq_handler_t handler,
bool deferred_flag,
void *private_data)
{
u32 val;
u32 bmsk;
int irq_num;
int client_idx, ep_idx;
IPADBG("interrupt_enum(%d)\n", interrupt);
if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
interrupt >= IPA_IRQ_MAX) {
IPAERR("invalid interrupt number %d\n", interrupt);
return -EINVAL;
}
irq_num = ipa3_irq_mapping[interrupt];
if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
IPAERR("interrupt %d not supported\n", interrupt);
WARN_ON(1);
return -EFAULT;
}
IPADBG("ipa_interrupt_to_cb irq_num(%d)\n", irq_num);
ipa_interrupt_to_cb[irq_num].deferred_flag = deferred_flag;
ipa_interrupt_to_cb[irq_num].handler = handler;
ipa_interrupt_to_cb[irq_num].private_data = private_data;
ipa_interrupt_to_cb[irq_num].interrupt = interrupt;
val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
IPADBG("read IPA_IRQ_EN_EE_n register. reg = %d\n", val);
bmsk = 1 << irq_num;
val |= bmsk;
ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
IPADBG("wrote IPA_IRQ_EN_EE_n register. reg = %d\n", val);
/* register SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt*/
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
val = ~0;
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
if (IPA_CLIENT_IS_Q6_CONS(client_idx) ||
IPA_CLIENT_IS_Q6_PROD(client_idx)) {
ep_idx = ipa3_get_ep_mapping(client_idx);
IPADBG("modem ep_idx(%d) client_idx = %d\n",
ep_idx, client_idx);
if (ep_idx == -1)
IPADBG("Invalid IPA client\n");
else
val &= ~(1 << ep_idx);
}
ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, val);
IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", val);
}
return 0;
}
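/*
 * Editor's note: a minimal registration sketch (not part of the
 * original commit). my_suspend_cb is a hypothetical client callback;
 * for IPA_TX_SUSPEND_IRQ the interrupt_data argument carries a struct
 * ipa_tx_suspend_irq_data, as built in ipa3_handle_interrupt() above.
 */
static void my_suspend_cb(enum ipa_irq_type interrupt,
			  void *private_data, void *interrupt_data)
{
	struct ipa_tx_suspend_irq_data *d = interrupt_data;

	IPADBG("suspended endpoints bitmask 0x%x\n", d->endpoints);
}

static int my_suspend_cb_register_sketch(void)
{
	/* deferred_flag=true: run the handler from the workqueue */
	return ipa3_add_interrupt_handler(IPA_TX_SUSPEND_IRQ,
					  my_suspend_cb, true, NULL);
}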
/**
* ipa3_remove_interrupt_handler() - Removes handler to an interrupt type
* @interrupt: Interrupt type
*
* Removes the handler and disables the specific bit in the IRQ_EN register
*/
int ipa3_remove_interrupt_handler(enum ipa_irq_type interrupt)
{
u32 val;
u32 bmsk;
int irq_num;
if (interrupt < IPA_BAD_SNOC_ACCESS_IRQ ||
interrupt >= IPA_IRQ_MAX) {
IPAERR("invalid interrupt number %d\n", interrupt);
return -EINVAL;
}
irq_num = ipa3_irq_mapping[interrupt];
if (irq_num < 0 || irq_num >= IPA_IRQ_NUM_MAX) {
IPAERR("interrupt %d not supported\n", interrupt);
WARN_ON(1);
return -EFAULT;
}
kfree(ipa_interrupt_to_cb[irq_num].private_data);
ipa_interrupt_to_cb[irq_num].deferred_flag = false;
ipa_interrupt_to_cb[irq_num].handler = NULL;
ipa_interrupt_to_cb[irq_num].private_data = NULL;
ipa_interrupt_to_cb[irq_num].interrupt = -1;
/* clean SUSPEND_IRQ_EN_EE_n_ADDR for L2 interrupt */
if ((interrupt == IPA_TX_SUSPEND_IRQ) &&
(ipa3_ctx->ipa_hw_type >= IPA_HW_v3_1)) {
ipahal_write_reg_n(IPA_SUSPEND_IRQ_EN_EE_n, ipa_ee, 0);
IPADBG("wrote IPA_SUSPEND_IRQ_EN_EE_n reg = %d\n", 0);
}
val = ipahal_read_reg_n(IPA_IRQ_EN_EE_n, ipa_ee);
bmsk = 1 << irq_num;
val &= ~bmsk;
ipahal_write_reg_n(IPA_IRQ_EN_EE_n, ipa_ee, val);
return 0;
}
/**
* ipa3_interrupts_init() - Initialize the IPA interrupts framework
* @ipa_irq: The interrupt number to allocate
* @ee: Execution environment
* @ipa_dev: The basic device structure representing the IPA driver
*
* - Initialize the ipa_interrupt_to_cb array
* - Clear interrupts status
* - Register the ipa interrupt handler - ipa3_isr
* - Enable apps processor wakeup by IPA interrupts
*/
int ipa3_interrupts_init(u32 ipa_irq, u32 ee, struct device *ipa_dev)
{
int idx;
int res = 0;
ipa_ee = ee;
for (idx = 0; idx < IPA_IRQ_NUM_MAX; idx++) {
ipa_interrupt_to_cb[idx].deferred_flag = false;
ipa_interrupt_to_cb[idx].handler = NULL;
ipa_interrupt_to_cb[idx].private_data = NULL;
ipa_interrupt_to_cb[idx].interrupt = -1;
}
ipa_interrupt_wq = create_singlethread_workqueue(
INTERRUPT_WORKQUEUE_NAME);
if (!ipa_interrupt_wq) {
IPAERR("workqueue creation failed\n");
return -ENOMEM;
}
/*
* NOTE:
*
* We'll only register an isr on non-emulator (i.e. real UE)
* systems.
*
* On the emulator, emulator_soft_irq_isr() will be calling
* ipa3_isr, so hence, no isr registration here, and instead,
* we'll pass the address of ipa3_isr to the gsi layer where
* emulator interrupts are handled...
*/
if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
res = request_irq(ipa_irq, (irq_handler_t) ipa3_isr,
IRQF_TRIGGER_RISING, "ipa", ipa_dev);
if (res) {
IPAERR(
"fail to register IPA IRQ handler irq=%d\n",
ipa_irq);
destroy_workqueue(ipa_interrupt_wq);
ipa_interrupt_wq = NULL;
return -ENODEV;
}
IPADBG("IPA IRQ handler irq=%d registered\n", ipa_irq);
res = enable_irq_wake(ipa_irq);
if (res)
IPAERR("fail to enable IPA IRQ wakeup irq=%d res=%d\n",
ipa_irq, res);
else
IPADBG("IPA IRQ wakeup enabled irq=%d\n", ipa_irq);
}
spin_lock_init(&suspend_wa_lock);
return 0;
}
/**
* ipa3_interrupts_destroy() - Destroy the IPA interrupts framework
* @ipa_irq: The interrupt number to release
* @ee: Execution environment
* @ipa_dev: The basic device structure representing the IPA driver
*
* - Disable apps processor wakeup by IPA interrupts
* - Unregister the ipa interrupt handler - ipa3_isr
* - Destroy the interrupt workqueue
*/
void ipa3_interrupts_destroy(u32 ipa_irq, struct device *ipa_dev)
{
if (ipa3_ctx->ipa3_hw_mode != IPA_HW_MODE_EMULATION) {
disable_irq_wake(ipa_irq);
free_irq(ipa_irq, ipa_dev);
}
destroy_workqueue(ipa_interrupt_wq);
ipa_interrupt_wq = NULL;
}
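/*
 * Illustrative pairing (editorial sketch, not part of the original
 * file): a probe/remove flow calling the init/destroy pair above
 * symmetrically. The EE value 0 (apps) and the irq argument are
 * placeholders.
 */
#ifdef IPA_INTERRUPTS_USAGE_SKETCH
static int example_interrupts_probe(struct device *dev, u32 irq)
{
return ipa3_interrupts_init(irq, 0, dev);
}

static void example_interrupts_remove(struct device *dev, u32 irq)
{
ipa3_interrupts_destroy(irq, dev);
}
#endif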
/**
* ipa3_suspend_active_aggr_wa() - Emulate suspend IRQ
* @clnt_hdl: suspended client handle, IRQ is emulated for this pipe
*
* Emulate a suspend IRQ to unsuspend a client that was suspended with an
* open aggregation frame. This works around a HW bug where no IRQ is
* generated when an endpoint is suspended during an open aggregation.
*/
void ipa3_suspend_active_aggr_wa(u32 clnt_hdl)
{
struct ipa3_interrupt_info interrupt_info;
struct ipa3_interrupt_work_wrap *work_data;
struct ipa_tx_suspend_irq_data *suspend_interrupt_data;
int irq_num;
int aggr_active_bitmap = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
if (aggr_active_bitmap & (1 << clnt_hdl)) {
/* force close aggregation */
ipahal_write_reg(IPA_AGGR_FORCE_CLOSE, (1 << clnt_hdl));
/* simulate suspend IRQ */
irq_num = ipa3_irq_mapping[IPA_TX_SUSPEND_IRQ];
interrupt_info = ipa_interrupt_to_cb[irq_num];
if (interrupt_info.handler == NULL) {
IPAERR("no CB function for IPA_TX_SUSPEND_IRQ\n");
return;
}
suspend_interrupt_data = kzalloc(
sizeof(*suspend_interrupt_data),
GFP_ATOMIC);
if (!suspend_interrupt_data) {
IPAERR("failed allocating suspend_interrupt_data\n");
return;
}
suspend_interrupt_data->endpoints = 1 << clnt_hdl;
work_data = kzalloc(sizeof(struct ipa3_interrupt_work_wrap),
GFP_ATOMIC);
if (!work_data) {
IPAERR("failed allocating ipa3_interrupt_work_wrap\n");
goto fail_alloc_work;
}
INIT_WORK(&work_data->interrupt_work,
ipa3_deferred_interrupt_work);
work_data->handler = interrupt_info.handler;
work_data->interrupt = IPA_TX_SUSPEND_IRQ;
work_data->private_data = interrupt_info.private_data;
work_data->interrupt_data = (void *)suspend_interrupt_data;
queue_work(ipa_interrupt_wq, &work_data->interrupt_work);
return;
fail_alloc_work:
kfree(suspend_interrupt_data);
}
}

810
ipa/ipa_v3/ipa_intf.c Normal file

@@ -0,0 +1,810 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include "ipa_i.h"
#include <linux/msm_ipa.h>
struct ipa3_intf {
char name[IPA_RESOURCE_NAME_MAX];
struct list_head link;
u32 num_tx_props;
u32 num_rx_props;
u32 num_ext_props;
struct ipa_ioc_tx_intf_prop *tx;
struct ipa_ioc_rx_intf_prop *rx;
struct ipa_ioc_ext_intf_prop *ext;
enum ipa_client_type excp_pipe;
};
struct ipa3_push_msg {
struct ipa_msg_meta meta;
ipa_msg_free_fn callback;
void *buff;
struct list_head link;
};
struct ipa3_pull_msg {
struct ipa_msg_meta meta;
ipa_msg_pull_fn callback;
struct list_head link;
};
/**
* ipa3_register_intf() - register "logical" interface
* @name: [in] interface name
* @tx: [in] TX properties of the interface
* @rx: [in] RX properties of the interface
*
* Register an interface and its tx and rx properties, this allows
* configuration of rules from user-space
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_register_intf(const char *name, const struct ipa_tx_intf *tx,
const struct ipa_rx_intf *rx)
{
return ipa3_register_intf_ext(name, tx, rx, NULL);
}
/**
* ipa3_register_intf_ext() - register "logical" interface which has only
* extended properties
* @name: [in] interface name
* @tx: [in] TX properties of the interface
* @rx: [in] RX properties of the interface
* @ext: [in] EXT properties of the interface
*
* Register an interface and its tx, rx and ext properties, this allows
* configuration of rules from user-space
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_register_intf_ext(const char *name, const struct ipa_tx_intf *tx,
const struct ipa_rx_intf *rx,
const struct ipa_ext_intf *ext)
{
struct ipa3_intf *intf;
u32 len;
if (name == NULL || (tx == NULL && rx == NULL && ext == NULL)) {
IPAERR_RL("invalid params name=%pK tx=%pK rx=%pK ext=%pK\n",
name, tx, rx, ext);
return -EINVAL;
}
if (tx && tx->num_props > IPA_NUM_PROPS_MAX) {
IPAERR_RL("invalid tx num_props=%d max=%d\n", tx->num_props,
IPA_NUM_PROPS_MAX);
return -EINVAL;
}
if (rx && rx->num_props > IPA_NUM_PROPS_MAX) {
IPAERR_RL("invalid rx num_props=%d max=%d\n", rx->num_props,
IPA_NUM_PROPS_MAX);
return -EINVAL;
}
if (ext && ext->num_props > IPA_NUM_PROPS_MAX) {
IPAERR_RL("invalid ext num_props=%d max=%d\n", ext->num_props,
IPA_NUM_PROPS_MAX);
return -EINVAL;
}
len = sizeof(struct ipa3_intf);
intf = kzalloc(len, GFP_KERNEL);
if (intf == NULL)
return -ENOMEM;
strlcpy(intf->name, name, IPA_RESOURCE_NAME_MAX);
if (tx) {
intf->num_tx_props = tx->num_props;
len = tx->num_props * sizeof(struct ipa_ioc_tx_intf_prop);
intf->tx = kmemdup(tx->prop, len, GFP_KERNEL);
if (intf->tx == NULL) {
kfree(intf);
return -ENOMEM;
}
}
if (rx) {
intf->num_rx_props = rx->num_props;
len = rx->num_props * sizeof(struct ipa_ioc_rx_intf_prop);
intf->rx = kmemdup(rx->prop, len, GFP_KERNEL);
if (intf->rx == NULL) {
kfree(intf->tx);
kfree(intf);
return -ENOMEM;
}
}
if (ext) {
intf->num_ext_props = ext->num_props;
len = ext->num_props * sizeof(struct ipa_ioc_ext_intf_prop);
intf->ext = kmemdup(ext->prop, len, GFP_KERNEL);
if (intf->ext == NULL) {
kfree(intf->rx);
kfree(intf->tx);
kfree(intf);
return -ENOMEM;
}
}
if (ext && ext->excp_pipe_valid)
intf->excp_pipe = ext->excp_pipe;
else
intf->excp_pipe = IPA_CLIENT_APPS_LAN_CONS;
mutex_lock(&ipa3_ctx->lock);
list_add_tail(&intf->link, &ipa3_ctx->intf_list);
mutex_unlock(&ipa3_ctx->lock);
return 0;
}
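/*
 * Usage sketch (illustrative, not part of the original file): a client
 * driver registering one IPv4 TX and one IPv4 RX property for a
 * logical interface. The interface name and pipe choices are
 * placeholders.
 */
#ifdef IPA_INTF_USAGE_SKETCH
static int example_register_intf(void)
{
struct ipa_ioc_tx_intf_prop tx_prop = { 0 };
struct ipa_ioc_rx_intf_prop rx_prop = { 0 };
struct ipa_tx_intf tx = { .num_props = 1, .prop = &tx_prop };
struct ipa_rx_intf rx = { .num_props = 1, .prop = &rx_prop };

tx_prop.ip = IPA_IP_v4;
tx_prop.dst_pipe = IPA_CLIENT_USB_CONS;
rx_prop.ip = IPA_IP_v4;
rx_prop.src_pipe = IPA_CLIENT_USB_PROD;
return ipa3_register_intf("example0", &tx, &rx);
}
#endif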
/**
* ipa3_deregister_intf() - de-register previously registered logical interface
* @name: [in] interface name
*
* De-register a previously registered interface
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_deregister_intf(const char *name)
{
struct ipa3_intf *entry;
struct ipa3_intf *next;
int result = -EINVAL;
if ((name == NULL) ||
(strnlen(name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX)) {
IPAERR_RL("invalid param name=%s\n", name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry_safe(entry, next, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, name)) {
list_del(&entry->link);
kfree(entry->ext);
kfree(entry->rx);
kfree(entry->tx);
kfree(entry);
result = 0;
break;
}
}
mutex_unlock(&ipa3_ctx->lock);
return result;
}
/**
* ipa3_query_intf() - query logical interface properties
* @lookup: [inout] interface name and number of properties
*
* Obtain the handle and number of tx and rx properties for the named
* interface, used as part of querying the tx and rx properties for
* configuration of various rules from user-space
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_query_intf(struct ipa_ioc_query_intf *lookup)
{
struct ipa3_intf *entry;
int result = -EINVAL;
if (lookup == NULL) {
IPAERR_RL("invalid param lookup=%pK\n", lookup);
return result;
}
lookup->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
if (strnlen(lookup->name, IPA_RESOURCE_NAME_MAX) ==
IPA_RESOURCE_NAME_MAX) {
IPAERR_RL("Interface name too long. (%s)\n", lookup->name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, lookup->name)) {
lookup->num_tx_props = entry->num_tx_props;
lookup->num_rx_props = entry->num_rx_props;
lookup->num_ext_props = entry->num_ext_props;
lookup->excp_pipe = entry->excp_pipe;
result = 0;
break;
}
}
mutex_unlock(&ipa3_ctx->lock);
return result;
}
/**
* ipa3_query_intf_tx_props() - query TX props of an interface
* @tx: [inout] interface tx attributes
*
* Obtain the tx properties for the specified interface
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_query_intf_tx_props(struct ipa_ioc_query_intf_tx_props *tx)
{
struct ipa3_intf *entry;
int result = -EINVAL;
if (tx == NULL) {
IPAERR_RL("null args: tx\n");
return result;
}
tx->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
if (strnlen(tx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
IPAERR_RL("Interface name too long. (%s)\n", tx->name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, tx->name)) {
/* validate the caller-supplied property count against the entry */
if (entry->num_tx_props != tx->num_tx_props) {
IPAERR("invalid entry number(%u %u)\n",
entry->num_tx_props,
tx->num_tx_props);
mutex_unlock(&ipa3_ctx->lock);
return result;
}
memcpy(tx->tx, entry->tx, entry->num_tx_props *
sizeof(struct ipa_ioc_tx_intf_prop));
result = 0;
break;
}
}
mutex_unlock(&ipa3_ctx->lock);
return result;
}
/**
* ipa3_query_intf_rx_props() - query RX props of an interface
* @rx: [inout] interface rx attributes
*
* Obtain the rx properties for the specified interface
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_query_intf_rx_props(struct ipa_ioc_query_intf_rx_props *rx)
{
struct ipa3_intf *entry;
int result = -EINVAL;
if (rx == NULL) {
IPAERR_RL("null args: rx\n");
return result;
}
rx->name[IPA_RESOURCE_NAME_MAX-1] = '\0';
if (strnlen(rx->name, IPA_RESOURCE_NAME_MAX) == IPA_RESOURCE_NAME_MAX) {
IPAERR_RL("Interface name too long. (%s)\n", rx->name);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, rx->name)) {
/* validate the caller-supplied property count against the entry */
if (entry->num_rx_props != rx->num_rx_props) {
IPAERR("invalid entry number(%u %u)\n",
entry->num_rx_props,
rx->num_rx_props);
mutex_unlock(&ipa3_ctx->lock);
return result;
}
memcpy(rx->rx, entry->rx, entry->num_rx_props *
sizeof(struct ipa_ioc_rx_intf_prop));
result = 0;
break;
}
}
mutex_unlock(&ipa3_ctx->lock);
return result;
}
/**
* ipa3_query_intf_ext_props() - query EXT props of an interface
* @ext: [inout] interface ext attributes
*
* Obtain the ext properties for the specified interface
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_query_intf_ext_props(struct ipa_ioc_query_intf_ext_props *ext)
{
struct ipa3_intf *entry;
int result = -EINVAL;
if (ext == NULL) {
IPAERR_RL("invalid param ext=%pK\n", ext);
return result;
}
mutex_lock(&ipa3_ctx->lock);
list_for_each_entry(entry, &ipa3_ctx->intf_list, link) {
if (!strcmp(entry->name, ext->name)) {
/* validate the caller-supplied property count against the entry */
if (entry->num_ext_props != ext->num_ext_props) {
IPAERR("invalid entry number(%u %u)\n",
entry->num_ext_props,
ext->num_ext_props);
mutex_unlock(&ipa3_ctx->lock);
return result;
}
memcpy(ext->ext, entry->ext, entry->num_ext_props *
sizeof(struct ipa_ioc_ext_intf_prop));
result = 0;
break;
}
}
mutex_unlock(&ipa3_ctx->lock);
return result;
}
static void ipa3_send_msg_free(void *buff, u32 len, u32 type)
{
kfree(buff);
}
static int wlan_msg_process(struct ipa_msg_meta *meta, void *buff)
{
struct ipa3_push_msg *msg_dup;
struct ipa_wlan_msg_ex *event_ex_cur_con = NULL;
struct ipa_wlan_msg_ex *event_ex_list = NULL;
struct ipa_wlan_msg *event_ex_cur_discon = NULL;
void *data_dup = NULL;
struct ipa3_push_msg *entry;
struct ipa3_push_msg *next;
int cnt = 0, total = 0, max = 0;
uint8_t mac[IPA_MAC_ADDR_SIZE];
uint8_t mac2[IPA_MAC_ADDR_SIZE];
if (!buff)
return -EINVAL;
if (meta->msg_type == WLAN_CLIENT_CONNECT_EX) {
/* debug print */
event_ex_cur_con = buff;
for (cnt = 0; cnt < event_ex_cur_con->num_of_attribs; cnt++) {
if (event_ex_cur_con->attribs[cnt].attrib_type ==
WLAN_HDR_ATTRIB_MAC_ADDR) {
IPADBG("%02x:%02x:%02x:%02x:%02x:%02x,(%d)\n",
event_ex_cur_con->attribs[cnt].u.mac_addr[0],
event_ex_cur_con->attribs[cnt].u.mac_addr[1],
event_ex_cur_con->attribs[cnt].u.mac_addr[2],
event_ex_cur_con->attribs[cnt].u.mac_addr[3],
event_ex_cur_con->attribs[cnt].u.mac_addr[4],
event_ex_cur_con->attribs[cnt].u.mac_addr[5],
meta->msg_type);
}
}
mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
msg_dup = kzalloc(sizeof(*msg_dup), GFP_KERNEL);
if (msg_dup == NULL) {
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
return -ENOMEM;
}
msg_dup->meta = *meta;
if (meta->msg_len > 0 && buff) {
data_dup = kmemdup(buff, meta->msg_len, GFP_KERNEL);
if (data_dup == NULL) {
kfree(msg_dup);
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
return -ENOMEM;
}
memcpy(data_dup, buff, meta->msg_len);
msg_dup->buff = data_dup;
msg_dup->callback = ipa3_send_msg_free;
} else {
IPAERR("msg_len %d\n", meta->msg_len);
kfree(msg_dup);
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
return -ENOMEM;
}
list_add_tail(&msg_dup->link, &ipa3_ctx->msg_wlan_client_list);
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
}
/* remove the cache */
if (meta->msg_type == WLAN_CLIENT_DISCONNECT) {
/* debug print */
event_ex_cur_discon = buff;
IPADBG("Mac %pM, msg %d\n",
event_ex_cur_discon->mac_addr,
meta->msg_type);
memcpy(mac2,
event_ex_cur_discon->mac_addr,
sizeof(mac2));
mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
list_for_each_entry_safe(entry, next,
&ipa3_ctx->msg_wlan_client_list,
link) {
event_ex_list = entry->buff;
max = event_ex_list->num_of_attribs;
for (cnt = 0; cnt < max; cnt++) {
memcpy(mac,
event_ex_list->attribs[cnt].u.mac_addr,
sizeof(mac));
if (event_ex_list->attribs[cnt].attrib_type ==
WLAN_HDR_ATTRIB_MAC_ADDR) {
pr_debug("%pM\n", mac);
/* compare to delete one*/
if (memcmp(mac2, mac,
sizeof(mac)) == 0) {
IPADBG("clean %d\n", total);
list_del(&entry->link);
kfree(entry);
break;
}
}
}
total++;
}
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
}
return 0;
}
/**
* ipa3_send_msg() - Send "message" from kernel client to IPA driver
* @meta: [in] message meta-data
* @buff: [in] the payload for message
* @callback: [in] free callback
*
* Client supplies the message meta-data and payload which IPA driver buffers
* till read by user-space. After read from user space IPA driver invokes the
* callback supplied to free the message payload. Client must not touch/free
* the message payload after calling this API.
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_send_msg(struct ipa_msg_meta *meta, void *buff,
ipa_msg_free_fn callback)
{
struct ipa3_push_msg *msg;
void *data = NULL;
if (meta == NULL || (buff == NULL && callback != NULL) ||
(buff != NULL && callback == NULL)) {
IPAERR_RL("invalid param meta=%pK buff=%pK, callback=%pK\n",
meta, buff, callback);
return -EINVAL;
}
if (meta->msg_type >= IPA_EVENT_MAX_NUM) {
IPAERR_RL("unsupported message type %d\n", meta->msg_type);
return -EINVAL;
}
msg = kzalloc(sizeof(struct ipa3_push_msg), GFP_KERNEL);
if (msg == NULL)
return -ENOMEM;
msg->meta = *meta;
if (meta->msg_len > 0 && buff) {
data = kmemdup(buff, meta->msg_len, GFP_KERNEL);
if (data == NULL) {
kfree(msg);
return -ENOMEM;
}
msg->buff = data;
msg->callback = ipa3_send_msg_free;
}
mutex_lock(&ipa3_ctx->msg_lock);
list_add_tail(&msg->link, &ipa3_ctx->msg_list);
/* support for softap client event cache */
if (wlan_msg_process(meta, buff))
IPAERR_RL("wlan_msg_process failed\n");
/* unlock only after process */
mutex_unlock(&ipa3_ctx->msg_lock);
IPA_STATS_INC_CNT(ipa3_ctx->stats.msg_w[meta->msg_type]);
wake_up(&ipa3_ctx->msg_waitq);
if (buff)
callback(buff, meta->msg_len, meta->msg_type);
return 0;
}
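/*
 * Usage sketch (illustrative, not part of the original file): a kernel
 * client sending a WLAN client-connect message. Note the ownership
 * contract from the kernel-doc above: once ipa3_send_msg() returns,
 * the caller must not touch or free the payload; the supplied
 * callback frees it.
 */
#ifdef IPA_MSG_USAGE_SKETCH
static void example_msg_free(void *buff, u32 len, u32 type)
{
kfree(buff);
}

static int example_send_connect(const u8 *mac)
{
struct ipa_msg_meta meta = { 0 };
struct ipa_wlan_msg *msg;

msg = kzalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
memcpy(msg->mac_addr, mac, IPA_MAC_ADDR_SIZE);
meta.msg_type = WLAN_CLIENT_CONNECT;
meta.msg_len = sizeof(*msg);
return ipa3_send_msg(&meta, msg, example_msg_free);
}
#endif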
/**
* ipa3_resend_wlan_msg() - Resend cached "message" to IPACM
*
* resend wlan client connect events to user-space
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_resend_wlan_msg(void)
{
struct ipa_wlan_msg_ex *event_ex_list = NULL;
struct ipa3_push_msg *entry;
struct ipa3_push_msg *next;
int cnt = 0, total = 0;
struct ipa3_push_msg *msg;
void *data = NULL;
IPADBG("\n");
mutex_lock(&ipa3_ctx->msg_wlan_client_lock);
list_for_each_entry_safe(entry, next, &ipa3_ctx->msg_wlan_client_list,
link) {
event_ex_list = entry->buff;
for (cnt = 0; cnt < event_ex_list->num_of_attribs; cnt++) {
if (event_ex_list->attribs[cnt].attrib_type ==
WLAN_HDR_ATTRIB_MAC_ADDR) {
IPADBG("%d-Mac %pM\n", total,
event_ex_list->attribs[cnt].u.mac_addr);
}
}
msg = kzalloc(sizeof(*msg), GFP_KERNEL);
if (msg == NULL) {
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
return -ENOMEM;
}
msg->meta = entry->meta;
data = kmemdup(entry->buff, entry->meta.msg_len, GFP_KERNEL);
if (data == NULL) {
kfree(msg);
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
return -ENOMEM;
}
msg->buff = data;
msg->callback = ipa3_send_msg_free;
mutex_lock(&ipa3_ctx->msg_lock);
list_add_tail(&msg->link, &ipa3_ctx->msg_list);
mutex_unlock(&ipa3_ctx->msg_lock);
wake_up(&ipa3_ctx->msg_waitq);
total++;
}
mutex_unlock(&ipa3_ctx->msg_wlan_client_lock);
return 0;
}
/**
* ipa3_register_pull_msg() - register pull message type
* @meta: [in] message meta-data
* @callback: [in] pull callback
*
* Register message callback by kernel client with IPA driver for IPA driver to
* pull message on-demand.
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_register_pull_msg(struct ipa_msg_meta *meta, ipa_msg_pull_fn callback)
{
struct ipa3_pull_msg *msg;
if (meta == NULL || callback == NULL) {
IPAERR_RL("invalid param meta=%pK callback=%pK\n",
meta, callback);
return -EINVAL;
}
msg = kzalloc(sizeof(struct ipa3_pull_msg), GFP_KERNEL);
if (msg == NULL)
return -ENOMEM;
msg->meta = *meta;
msg->callback = callback;
mutex_lock(&ipa3_ctx->msg_lock);
list_add_tail(&msg->link, &ipa3_ctx->pull_msg_list);
mutex_unlock(&ipa3_ctx->msg_lock);
return 0;
}
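/*
 * Usage sketch (illustrative, not part of the original file): a kernel
 * client registering a pull callback. The message type, length and
 * callback body are hypothetical; the driver later fetches the payload
 * on demand via ipa3_pull_msg() below, matching on msg_type and
 * msg_len.
 */
#ifdef IPA_MSG_USAGE_SKETCH
static int example_pull_cb(void *buff, u32 len, u32 type)
{
/* fill buff with up to len bytes for message type 'type' */
memset(buff, 0, len);
return (int)len;
}

static int example_register_pull(void)
{
struct ipa_msg_meta meta = { 0 };

meta.msg_type = IPA_TETHERING_STATS_UPDATE_STATS;
meta.msg_len = 64; /* hypothetical payload size */
return ipa3_register_pull_msg(&meta, example_pull_cb);
}
#endif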
/**
* ipa3_deregister_pull_msg() - De-register pull message type
* @meta: [in] message meta-data
*
* De-register "message" by kernel client from IPA driver
*
* Returns: 0 on success, negative on failure
*
* Note: Should not be called from atomic context
*/
int ipa3_deregister_pull_msg(struct ipa_msg_meta *meta)
{
struct ipa3_pull_msg *entry;
struct ipa3_pull_msg *next;
int result = -EINVAL;
if (meta == NULL) {
IPAERR_RL("null arg: meta\n");
return result;
}
mutex_lock(&ipa3_ctx->msg_lock);
list_for_each_entry_safe(entry, next, &ipa3_ctx->pull_msg_list, link) {
if (entry->meta.msg_len == meta->msg_len &&
entry->meta.msg_type == meta->msg_type) {
list_del(&entry->link);
kfree(entry);
result = 0;
break;
}
}
mutex_unlock(&ipa3_ctx->msg_lock);
return result;
}
/**
* ipa3_read() - read message from IPA device
* @filp: [in] file pointer
* @buf: [out] buffer to read into
* @count: [in] size of above buffer
* @f_pos: [inout] file position
*
* User-space should continually read from /dev/ipa; reads will block when
* there are no messages to read. Upon return, user-space should read the
* ipa_msg_meta from the start of the buffer to know what type of message was
* read and its length in the remainder of the buffer. The buffer supplied
* must be big enough to hold the message meta-data and the largest defined
* message type.
*
* Returns: how many bytes copied to buffer
*
* Note: Should not be called from atomic context
*/
ssize_t ipa3_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
char __user *start;
struct ipa3_push_msg *msg = NULL;
int ret;
DEFINE_WAIT_FUNC(wait, woken_wake_function);
int locked;
start = buf;
add_wait_queue(&ipa3_ctx->msg_waitq, &wait);
while (1) {
mutex_lock(&ipa3_ctx->msg_lock);
locked = 1;
if (!list_empty(&ipa3_ctx->msg_list)) {
msg = list_first_entry(&ipa3_ctx->msg_list,
struct ipa3_push_msg, link);
list_del(&msg->link);
}
IPADBG_LOW("msg=%pK\n", msg);
if (msg) {
locked = 0;
mutex_unlock(&ipa3_ctx->msg_lock);
if (copy_to_user(buf, &msg->meta,
sizeof(struct ipa_msg_meta))) {
ret = -EFAULT;
kfree(msg);
msg = NULL;
break;
}
buf += sizeof(struct ipa_msg_meta);
count -= sizeof(struct ipa_msg_meta);
if (msg->buff) {
if (copy_to_user(buf, msg->buff,
msg->meta.msg_len)) {
ret = -EFAULT;
kfree(msg);
msg = NULL;
break;
}
buf += msg->meta.msg_len;
count -= msg->meta.msg_len;
msg->callback(msg->buff, msg->meta.msg_len,
msg->meta.msg_type);
}
IPA_STATS_INC_CNT(
ipa3_ctx->stats.msg_r[msg->meta.msg_type]);
kfree(msg);
msg = NULL;
}
ret = -EAGAIN;
if (filp->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
if (signal_pending(current))
break;
if (start != buf)
break;
locked = 0;
mutex_unlock(&ipa3_ctx->msg_lock);
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}
remove_wait_queue(&ipa3_ctx->msg_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
if (locked)
mutex_unlock(&ipa3_ctx->msg_lock);
return ret;
}
/**
* ipa3_pull_msg() - pull the specified message from client
* @meta: [in] message meta-data
* @buf: [out] buffer to read into
* @count: [in] size of above buffer
*
* Populate the supplied buffer with the pull message which is fetched
* from client, the message must have previously been registered with
* the IPA driver
*
* Returns: how many bytes copied to buffer
*
* Note: Should not be called from atomic context
*/
int ipa3_pull_msg(struct ipa_msg_meta *meta, char *buff, size_t count)
{
struct ipa3_pull_msg *entry;
int result = -EINVAL;
if (meta == NULL || buff == NULL || !count) {
IPAERR_RL("invalid param name=%pK buff=%pK count=%zu\n",
meta, buff, count);
return result;
}
mutex_lock(&ipa3_ctx->msg_lock);
list_for_each_entry(entry, &ipa3_ctx->pull_msg_list, link) {
if (entry->meta.msg_len == meta->msg_len &&
entry->meta.msg_type == meta->msg_type) {
result = entry->callback(buff, count, meta->msg_type);
break;
}
}
mutex_unlock(&ipa3_ctx->msg_lock);
return result;
}
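A minimal user-space consumer of the /dev/ipa read protocol documented above makes the framing concrete. This is an illustrative sketch, not part of the commit: it assumes the UAPI header linux/msm_ipa.h for struct ipa_msg_meta and the standard /dev/ipa node, with error handling reduced to the essentials:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <linux/msm_ipa.h>

int main(void)
{
char buf[sizeof(struct ipa_msg_meta) + 8192];
struct ipa_msg_meta meta;
int fd = open("/dev/ipa", O_RDONLY);

if (fd < 0)
return 1;
for (;;) {
ssize_t n = read(fd, buf, sizeof(buf)); /* blocks while empty */

if (n < (ssize_t)sizeof(meta))
break;
memcpy(&meta, buf, sizeof(meta));
/* payload, if any, follows the meta at buf + sizeof(meta) */
printf("msg type=%u len=%u\n", (unsigned)meta.msg_type,
(unsigned)meta.msg_len);
}
close(fd);
return 0;
}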

750
ipa/ipa_v3/ipa_mhi.c Normal file

@@ -0,0 +1,750 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/ipa.h>
#include <linux/msm_gsi.h>
#include <linux/ipa_mhi.h>
#include "../ipa_common_i.h"
#include "ipa_i.h"
#include "ipa_qmi_service.h"
#define IPA_MHI_DRV_NAME "ipa_mhi"
#define IPA_MHI_DBG(fmt, args...) \
do { \
pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_MHI_DBG_LOW(fmt, args...) \
do { \
pr_debug(IPA_MHI_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_MHI_ERR(fmt, args...) \
do { \
pr_err(IPA_MHI_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPA_MHI_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPA_MHI_FUNC_ENTRY() \
IPA_MHI_DBG("ENTRY\n")
#define IPA_MHI_FUNC_EXIT() \
IPA_MHI_DBG("EXIT\n")
#define IPA_MHI_MAX_UL_CHANNELS 1
#define IPA_MHI_MAX_DL_CHANNELS 2
/* bit #40 in the address should be asserted for MHI transfers over PCIe */
#define IPA_MHI_HOST_ADDR_COND(addr) \
((params->assert_bit40)?(IPA_MHI_HOST_ADDR(addr)):(addr))
enum ipa3_mhi_polling_mode {
IPA_MHI_POLLING_MODE_DB_MODE,
IPA_MHI_POLLING_MODE_POLL_MODE,
};
bool ipa3_mhi_stop_gsi_channel(enum ipa_client_type client)
{
int res;
int ipa_ep_idx;
struct ipa3_ep_context *ep;
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx == -1) {
IPA_MHI_ERR("Invalid client.\n");
return false;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
IPA_MHI_DBG_LOW("Stopping GSI channel %ld\n", ep->gsi_chan_hdl);
res = gsi_stop_channel(ep->gsi_chan_hdl);
if (res != 0 &&
res != -GSI_STATUS_AGAIN &&
res != -GSI_STATUS_TIMED_OUT) {
IPA_MHI_ERR("GSI stop channel failed %d\n",
res);
WARN_ON(1);
return false;
}
if (res == 0) {
IPA_MHI_DBG_LOW("GSI channel %ld STOP\n",
ep->gsi_chan_hdl);
return true;
}
return false;
}
static int ipa3_mhi_reset_gsi_channel(enum ipa_client_type client)
{
int res;
int clnt_hdl;
IPA_MHI_FUNC_ENTRY();
clnt_hdl = ipa3_get_ep_mapping(client);
if (clnt_hdl < 0)
return -EFAULT;
res = ipa3_reset_gsi_channel(clnt_hdl);
if (res) {
IPA_MHI_ERR("ipa3_reset_gsi_channel failed %d\n", res);
return -EFAULT;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
{
int res;
IPA_MHI_FUNC_ENTRY();
res = ipa3_mhi_reset_gsi_channel(client);
if (res) {
IPAERR("ipa3_mhi_reset_gsi_channel failed\n");
ipa_assert();
return res;
}
res = ipa3_disable_data_path(ipa3_get_ep_mapping(client));
if (res) {
IPA_MHI_ERR("ipa3_disable_data_path failed %d\n", res);
return res;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
{
int res;
int ipa_ep_idx;
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx < 0) {
IPA_MHI_ERR("Invalid client %d\n", client);
return -EINVAL;
}
res = ipa3_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
return res;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
static int ipa3_mhi_get_ch_poll_cfg(enum ipa_client_type client,
struct ipa_mhi_ch_ctx *ch_ctx_host, int ring_size)
{
switch (ch_ctx_host->pollcfg) {
case 0:
/* set default polling configuration according to MHI spec */
if (IPA_CLIENT_IS_PROD(client))
return 7;
else
return (ring_size / 2) / 8;
default:
return ch_ctx_host->pollcfg;
}
}
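/*
 * Worked example (editorial note, not in the original source): for a
 * consumer channel with a 256-element transfer ring and pollcfg == 0,
 * the default above evaluates to (256 / 2) / 8 = 16 ring elements
 * between polls; producer channels default to 7 per the MHI spec.
 */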
static int ipa_mhi_start_gsi_channel(enum ipa_client_type client,
int ipa_ep_idx, struct start_gsi_channel *params)
{
int res = 0;
struct gsi_evt_ring_props ev_props;
struct ipa_mhi_msi_info *msi;
struct gsi_chan_props ch_props;
union __packed gsi_channel_scratch ch_scratch;
struct ipa3_ep_context *ep;
const struct ipa_gsi_ep_config *ep_cfg;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
bool burst_mode_enabled = false;
IPA_MHI_FUNC_ENTRY();
ep = &ipa3_ctx->ep[ipa_ep_idx];
msi = params->msi;
ep_cfg = ipa3_get_gsi_ep_info(client);
if (!ep_cfg) {
IPA_MHI_ERR("Wrong parameter, ep_cfg is NULL\n");
return -EPERM;
}
/* allocate event ring only for the first time pipe is connected */
if (params->state == IPA_HW_MHI_CHANNEL_STATE_INVALID) {
memset(&ev_props, 0, sizeof(ev_props));
ev_props.intf = GSI_EVT_CHTYPE_MHI_EV;
ev_props.intr = GSI_INTR_MSI;
ev_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
ev_props.ring_len = params->ev_ctx_host->rlen;
ev_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
params->ev_ctx_host->rbase);
ev_props.int_modt = params->ev_ctx_host->intmodt *
IPA_SLEEP_CLK_RATE_KHZ;
ev_props.int_modc = params->ev_ctx_host->intmodc;
ev_props.intvec = ((msi->data & ~msi->mask) |
(params->ev_ctx_host->msivec & msi->mask));
ev_props.msi_addr = IPA_MHI_HOST_ADDR_COND(
(((u64)msi->addr_hi << 32) | msi->addr_low));
ev_props.rp_update_addr = IPA_MHI_HOST_ADDR_COND(
params->event_context_addr +
offsetof(struct ipa_mhi_ev_ctx, rp));
ev_props.exclusive = true;
ev_props.err_cb = params->ev_err_cb;
ev_props.user_data = params->channel;
ev_props.evchid_valid = true;
ev_props.evchid = params->evchid;
IPA_MHI_DBG("allocating event ring ep:%u evchid:%u\n",
ipa_ep_idx, ev_props.evchid);
res = gsi_alloc_evt_ring(&ev_props, ipa3_ctx->gsi_dev_hdl,
&ep->gsi_evt_ring_hdl);
if (res) {
IPA_MHI_ERR("gsi_alloc_evt_ring failed %d\n", res);
goto fail_alloc_evt;
}
IPA_MHI_DBG("client %d, caching event ring hdl %lu\n",
client,
ep->gsi_evt_ring_hdl);
*params->cached_gsi_evt_ring_hdl =
ep->gsi_evt_ring_hdl;
} else {
IPA_MHI_DBG("event ring already exists: evt_ring_hdl=%lu\n",
*params->cached_gsi_evt_ring_hdl);
ep->gsi_evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
}
if (params->ev_ctx_host->wp == params->ev_ctx_host->rbase) {
IPA_MHI_ERR("event ring wp is not updated. base=wp=0x%llx\n",
params->ev_ctx_host->wp);
res = -EFAULT;
goto fail_alloc_ch;
}
IPA_MHI_DBG("Ring event db: evt_ring_hdl=%lu host_wp=0x%llx\n",
ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
res = gsi_ring_evt_ring_db(ep->gsi_evt_ring_hdl,
params->ev_ctx_host->wp);
if (res) {
IPA_MHI_ERR("fail to ring evt ring db %d. hdl=%lu wp=0x%llx\n",
res, ep->gsi_evt_ring_hdl, params->ev_ctx_host->wp);
goto fail_alloc_ch;
}
memset(&ch_props, 0, sizeof(ch_props));
ch_props.prot = GSI_CHAN_PROT_MHI;
ch_props.dir = IPA_CLIENT_IS_PROD(client) ?
GSI_CHAN_DIR_TO_GSI : GSI_CHAN_DIR_FROM_GSI;
ch_props.ch_id = ep_cfg->ipa_gsi_chan_num;
ch_props.evt_ring_hdl = *params->cached_gsi_evt_ring_hdl;
ch_props.re_size = GSI_CHAN_RE_SIZE_16B;
ch_props.ring_len = params->ch_ctx_host->rlen;
ch_props.ring_base_addr = IPA_MHI_HOST_ADDR_COND(
params->ch_ctx_host->rbase);
/* Burst mode is not supported on DPL pipes */
if ((client != IPA_CLIENT_MHI_DPL_CONS) &&
(params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_DEFAULT ||
params->ch_ctx_host->brstmode == IPA_MHI_BURST_MODE_ENABLE)) {
burst_mode_enabled = true;
}
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0 &&
!burst_mode_enabled)
ch_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
else
ch_props.use_db_eng = GSI_CHAN_DB_MODE;
ch_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
ch_props.low_weight = 1;
ch_props.prefetch_mode = ep_cfg->prefetch_mode;
ch_props.empty_lvl_threshold = ep_cfg->prefetch_threshold;
ch_props.err_cb = params->ch_err_cb;
ch_props.chan_user_data = params->channel;
res = gsi_alloc_channel(&ch_props, ipa3_ctx->gsi_dev_hdl,
&ep->gsi_chan_hdl);
if (res) {
IPA_MHI_ERR("gsi_alloc_channel failed %d\n",
res);
goto fail_alloc_ch;
}
memset(&ch_scratch, 0, sizeof(ch_scratch));
ch_scratch.mhi.mhi_host_wp_addr = IPA_MHI_HOST_ADDR_COND(
params->channel_context_addr +
offsetof(struct ipa_mhi_ch_ctx, wp));
ch_scratch.mhi.assert_bit40 = params->assert_bit40;
/*
* Update scratch for MCS smart prefetch:
* Starting IPA4.5, smart prefetch implemented by H/W.
* At IPA 4.0/4.1/4.2, we do not use MCS smart prefetch
* so keep the fields zero.
*/
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_0) {
ch_scratch.mhi.max_outstanding_tre =
ep_cfg->ipa_if_tlv * ch_props.re_size;
ch_scratch.mhi.outstanding_threshold =
min(ep_cfg->ipa_if_tlv / 2, 8) * ch_props.re_size;
}
ch_scratch.mhi.oob_mod_threshold = 4;
if (burst_mode_enabled) {
ch_scratch.mhi.burst_mode_enabled = burst_mode_enabled;
ch_scratch.mhi.polling_configuration =
ipa3_mhi_get_ch_poll_cfg(client, params->ch_ctx_host,
(ch_props.ring_len / ch_props.re_size));
ch_scratch.mhi.polling_mode = IPA_MHI_POLLING_MODE_DB_MODE;
} else {
ch_scratch.mhi.burst_mode_enabled = false;
}
res = gsi_write_channel_scratch(ep->gsi_chan_hdl,
ch_scratch);
if (res) {
IPA_MHI_ERR("gsi_write_channel_scratch failed %d\n",
res);
goto fail_ch_scratch;
}
*params->mhi = ch_scratch.mhi;
if (IPA_CLIENT_IS_PROD(ep->client) && ep->skip_ep_cfg) {
memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_delay = true;
ep->ep_delay_set = true;
res = ipa3_cfg_ep_ctrl(ipa_ep_idx, &ep_cfg_ctrl);
if (res)
IPA_MHI_ERR("client (ep: %d) failed result=%d\n",
ipa_ep_idx, res);
else
IPA_MHI_DBG("client (ep: %d) success\n", ipa_ep_idx);
} else {
ep->ep_delay_set = false;
}
IPA_MHI_DBG("Starting channel\n");
res = gsi_start_channel(ep->gsi_chan_hdl);
if (res) {
IPA_MHI_ERR("gsi_start_channel failed %d\n", res);
goto fail_ch_start;
}
IPA_MHI_FUNC_EXIT();
return 0;
fail_ch_start:
fail_ch_scratch:
gsi_dealloc_channel(ep->gsi_chan_hdl);
fail_alloc_ch:
gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ep->gsi_evt_ring_hdl = ~0;
fail_alloc_evt:
return res;
}
int ipa3_mhi_init_engine(struct ipa_mhi_init_engine *params)
{
int res;
struct gsi_device_scratch gsi_scratch;
const struct ipa_gsi_ep_config *gsi_ep_info;
IPA_MHI_FUNC_ENTRY();
if (!params) {
IPA_MHI_ERR("null args\n");
return -EINVAL;
}
if ((IPA_MHI_MAX_UL_CHANNELS + IPA_MHI_MAX_DL_CHANNELS) >
((ipa3_ctx->mhi_evid_limits[1] -
ipa3_ctx->mhi_evid_limits[0]) + 1)) {
IPAERR("Not enough event rings for MHI\n");
ipa_assert();
return -EINVAL;
}
/* Initialize IPA MHI engine */
gsi_ep_info = ipa3_get_gsi_ep_info(IPA_CLIENT_MHI_PROD);
if (!gsi_ep_info) {
IPAERR("MHI PROD has no ep allocated\n");
ipa_assert();
return -EFAULT;
}
memset(&gsi_scratch, 0, sizeof(gsi_scratch));
gsi_scratch.mhi_base_chan_idx_valid = true;
gsi_scratch.mhi_base_chan_idx = gsi_ep_info->ipa_gsi_chan_num +
params->gsi.first_ch_idx;
res = gsi_write_device_scratch(ipa3_ctx->gsi_dev_hdl,
&gsi_scratch);
if (res) {
IPA_MHI_ERR("failed to write device scratch %d\n", res);
goto fail_init_engine;
}
IPA_MHI_FUNC_EXIT();
return 0;
fail_init_engine:
return res;
}
/**
* ipa3_connect_mhi_pipe() - Connect pipe to IPA and start corresponding
* MHI channel
* @in: connect parameters
* @clnt_hdl: [out] client handle for this pipe
*
* This function is called by IPA MHI client driver on MHI channel start.
* This function is called after MHI engine was started.
*
* Return codes: 0 : success
* negative : error
*/
int ipa3_connect_mhi_pipe(struct ipa_mhi_connect_params_internal *in,
u32 *clnt_hdl)
{
struct ipa3_ep_context *ep;
int ipa_ep_idx;
int res;
enum ipa_client_type client;
IPA_MHI_FUNC_ENTRY();
if (!in || !clnt_hdl) {
IPA_MHI_ERR("NULL args\n");
return -EINVAL;
}
in->start.gsi.evchid += ipa3_ctx->mhi_evid_limits[0];
client = in->sys->client;
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx == -1) {
IPA_MHI_ERR("Invalid client.\n");
return -EINVAL;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (ep->valid == 1) {
IPA_MHI_ERR("EP already allocated.\n");
return -EPERM;
}
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
ep->valid = 1;
ep->skip_ep_cfg = in->sys->skip_ep_cfg;
ep->client = client;
ep->client_notify = in->sys->notify;
ep->priv = in->sys->priv;
ep->keep_ipa_awake = in->sys->keep_ipa_awake;
res = ipa_mhi_start_gsi_channel(client,
ipa_ep_idx, &in->start.gsi);
if (res) {
IPA_MHI_ERR("ipa_mhi_start_gsi_channel failed %d\n",
res);
goto fail_start_channel;
}
res = ipa3_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("enable data path failed res=%d clnt=%d.\n", res,
ipa_ep_idx);
goto fail_ep_cfg;
}
if (!ep->skip_ep_cfg) {
if (ipa3_cfg_ep(ipa_ep_idx, &in->sys->ipa_ep_cfg)) {
IPAERR("fail to configure EP.\n");
goto fail_ep_cfg;
}
if (ipa3_cfg_ep_status(ipa_ep_idx, &ep->status)) {
IPAERR("fail to configure status of EP.\n");
goto fail_ep_cfg;
}
IPA_MHI_DBG("ep configuration successful\n");
} else {
IPA_MHI_DBG("skipping ep configuration\n");
}
*clnt_hdl = ipa_ep_idx;
if (!ep->skip_ep_cfg && IPA_CLIENT_IS_PROD(client))
ipa3_install_dflt_flt_rules(ipa_ep_idx);
ipa3_ctx->skip_ep_cfg_shadow[ipa_ep_idx] = ep->skip_ep_cfg;
IPA_MHI_DBG("client %d (ep: %d) connected\n", client,
ipa_ep_idx);
IPA_MHI_FUNC_EXIT();
return 0;
fail_ep_cfg:
ipa3_disable_data_path(ipa_ep_idx);
fail_start_channel:
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
return -EPERM;
}
/**
* ipa3_disconnect_mhi_pipe() - Disconnect pipe from IPA and reset corresponding
* MHI channel
* @clnt_hdl: client handle for this pipe
*
* This function is called by IPA MHI client driver on MHI channel reset.
* This function is called after MHI channel was started.
* This function is doing the following:
* - Send command to uC/GSI to reset corresponding MHI channel
* - Configure IPA EP control
*
* Return codes: 0 : success
* negative : error
*/
int ipa3_disconnect_mhi_pipe(u32 clnt_hdl)
{
struct ipa3_ep_context *ep;
int res;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
IPA_MHI_FUNC_ENTRY();
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes) {
IPAERR("invalid handle %d\n", clnt_hdl);
return -EINVAL;
}
if (ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR("pipe was not connected %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
if (ep->ep_delay_set) {
memset(&ep_cfg_ctrl, 0, sizeof(struct ipa_ep_cfg_ctrl));
ep_cfg_ctrl.ipa_ep_delay = false;
res = ipa3_cfg_ep_ctrl(clnt_hdl,
&ep_cfg_ctrl);
if (res) {
IPAERR("client (ep: %d) failed to remove delay res=%d\n",
clnt_hdl, res);
} else {
IPADBG("client (ep: %d) delay removed\n",
clnt_hdl);
ep->ep_delay_set = false;
}
}
res = gsi_dealloc_channel(ep->gsi_chan_hdl);
if (res) {
IPAERR("gsi_dealloc_channel failed %d\n", res);
goto fail_reset_channel;
}
ep->valid = 0;
ipa3_delete_dflt_flt_rules(clnt_hdl);
IPA_MHI_DBG("client (ep: %d) disconnected\n", clnt_hdl);
IPA_MHI_FUNC_EXIT();
return 0;
fail_reset_channel:
return res;
}
int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
bool LPTransitionRejected, bool brstmode_enabled,
union __packed gsi_channel_scratch ch_scratch, u8 index)
{
int res;
int ipa_ep_idx;
struct ipa3_ep_context *ep;
union __packed gsi_channel_scratch gsi_ch_scratch;
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx < 0) {
IPA_MHI_ERR("Invalid client %d\n", client);
return -EINVAL;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (brstmode_enabled && !LPTransitionRejected) {
res = gsi_read_channel_scratch(ep->gsi_chan_hdl,
&gsi_ch_scratch);
if (res) {
IPA_MHI_ERR("read ch scratch fail %d\n", res);
return res;
}
/*
* set polling mode bit to DB mode before
* resuming the channel
*
* For MHI-->IPA pipes:
* when resuming due to transition to M0,
* set the polling mode bit to 0.
* In other cases, restore its value from
* when the channel was stopped.
* Here, after a successful resume, the client moves to M0,
* so by default the polling mode bit is set to 0.
*
* For IPA-->MHI pipe:
* always restore the polling mode bit.
*/
if (IPA_CLIENT_IS_PROD(client))
ch_scratch.mhi.polling_mode =
IPA_MHI_POLLING_MODE_DB_MODE;
else
ch_scratch.mhi.polling_mode =
gsi_ch_scratch.mhi.polling_mode;
/* Use GSI update API to not affect non-SWI fields
* inside the scratch while in suspend-resume operation
*/
res = gsi_update_mhi_channel_scratch(
ep->gsi_chan_hdl, ch_scratch.mhi);
if (res) {
IPA_MHI_ERR("write ch scratch fail %d\n"
, res);
return res;
}
}
res = gsi_start_channel(ep->gsi_chan_hdl);
if (res) {
IPA_MHI_ERR("failed to resume channel error %d\n", res);
return res;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
int ipa3_mhi_query_ch_info(enum ipa_client_type client,
struct gsi_chan_info *ch_info)
{
int ipa_ep_idx;
int res;
struct ipa3_ep_context *ep;
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx < 0) {
IPA_MHI_ERR("Invalid client %d\n", client);
return -EINVAL;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
if (res) {
IPA_MHI_ERR("gsi_query_channel_info failed\n");
return res;
}
IPA_MHI_FUNC_EXIT();
return 0;
}
bool ipa3_has_open_aggr_frame(enum ipa_client_type client)
{
u32 aggr_state_active;
int ipa_ep_idx;
aggr_state_active = ipahal_read_reg(IPA_STATE_AGGR_ACTIVE);
IPA_MHI_DBG_LOW("IPA_STATE_AGGR_ACTIVE_OFST 0x%x\n", aggr_state_active);
ipa_ep_idx = ipa_get_ep_mapping(client);
if (ipa_ep_idx == -1) {
ipa_assert();
return false;
}
if ((1 << ipa_ep_idx) & aggr_state_active)
return true;
return false;
}
int ipa3_mhi_destroy_channel(enum ipa_client_type client)
{
int res;
int ipa_ep_idx;
struct ipa3_ep_context *ep;
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx < 0) {
IPA_MHI_ERR("Invalid client %d\n", client);
return -EINVAL;
}
ep = &ipa3_ctx->ep[ipa_ep_idx];
IPA_ACTIVE_CLIENTS_INC_EP(client);
IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
ep->gsi_evt_ring_hdl, ipa_ep_idx);
res = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
if (res) {
IPAERR(" failed to reset evt ring %lu, err %d\n"
, ep->gsi_evt_ring_hdl, res);
goto fail;
}
IPA_MHI_DBG("dealloc event ring (hdl: %lu, ep: %d)\n",
ep->gsi_evt_ring_hdl, ipa_ep_idx);
res = gsi_dealloc_evt_ring(
ep->gsi_evt_ring_hdl);
if (res) {
IPAERR("dealloc evt ring %lu failed, err %d\n"
, ep->gsi_evt_ring_hdl, res);
goto fail;
}
IPA_ACTIVE_CLIENTS_DEC_EP(client);
return 0;
fail:
IPA_ACTIVE_CLIENTS_DEC_EP(client);
return res;
}
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPA MHI driver");

1075
ipa/ipa_v3/ipa_mhi_proxy.c Normal file

File diff suppressed because it is too large

49
ipa/ipa_v3/ipa_mhi_proxy.h Normal file

@@ -0,0 +1,49 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef __IMP_H_
#define __IMP_H_
#ifdef CONFIG_IPA3_MHI_PROXY
#include "ipa_qmi_service.h"
void imp_handle_modem_ready(void);
struct ipa_mhi_alloc_channel_resp_msg_v01 *imp_handle_allocate_channel_req(
struct ipa_mhi_alloc_channel_req_msg_v01 *req);
struct ipa_mhi_clk_vote_resp_msg_v01 *imp_handle_vote_req(bool vote);
void imp_handle_modem_shutdown(void);
#else /* CONFIG_IPA3_MHI_PROXY */
static inline void imp_handle_modem_ready(void)
{
}
static inline struct ipa_mhi_alloc_channel_resp_msg_v01
*imp_handle_allocate_channel_req(
struct ipa_mhi_alloc_channel_req_msg_v01 *req)
{
return NULL;
}
static inline struct ipa_mhi_clk_vote_resp_msg_v01
*imp_handle_vote_req(bool vote)
{
return NULL;
}
static inline void imp_handle_modem_shutdown(void)
{
}
#endif /* CONFIG_IPA3_MHI_PROXY */
#endif /* __IMP_H_ */

2854
ipa/ipa_v3/ipa_mpm.c Normal file

File diff suppressed because it is too large

1630
ipa/ipa_v3/ipa_nat.c Normal file

File diff suppressed because it is too large

689
ipa/ipa_v3/ipa_odl.c Normal file

@@ -0,0 +1,689 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include "ipa_i.h"
#include "ipa_odl.h"
#include <linux/msm_ipa.h>
#include <linux/sched/signal.h>
#include <linux/poll.h>
struct ipa_odl_context *ipa3_odl_ctx;
static DECLARE_WAIT_QUEUE_HEAD(odl_ctl_msg_wq);
static void print_ipa_odl_state_bit_mask(void)
{
IPADBG("ipa3_odl_ctx->odl_state.odl_init --> %d\n",
ipa3_odl_ctx->odl_state.odl_init);
IPADBG("ipa3_odl_ctx->odl_state.odl_open --> %d\n",
ipa3_odl_ctx->odl_state.odl_open);
IPADBG("ipa3_odl_ctx->odl_state.adpl_open --> %d\n",
ipa3_odl_ctx->odl_state.adpl_open);
IPADBG("ipa3_odl_ctx->odl_state.aggr_byte_limit_sent --> %d\n",
ipa3_odl_ctx->odl_state.aggr_byte_limit_sent);
IPADBG("ipa3_odl_ctx->odl_state.odl_ep_setup --> %d\n",
ipa3_odl_ctx->odl_state.odl_ep_setup);
IPADBG("ipa3_odl_ctx->odl_state.odl_setup_done_sent --> %d\n",
ipa3_odl_ctx->odl_state.odl_setup_done_sent);
IPADBG("ipa3_odl_ctx->odl_state.odl_ep_info_sent --> %d\n",
ipa3_odl_ctx->odl_state.odl_ep_info_sent);
IPADBG("ipa3_odl_ctx->odl_state.odl_connected --> %d\n",
ipa3_odl_ctx->odl_state.odl_connected);
IPADBG("ipa3_odl_ctx->odl_state.odl_disconnected --> %d\n\n",
ipa3_odl_ctx->odl_state.odl_disconnected);
}
static int ipa_odl_ctl_fops_open(struct inode *inode, struct file *filp)
{
int ret = 0;
if (ipa3_odl_ctx->odl_state.odl_init) {
ipa3_odl_ctx->odl_state.odl_open = true;
} else {
IPAERR("Before odl init trying to open odl ctl pipe\n");
print_ipa_odl_state_bit_mask();
ret = -ENODEV;
}
return ret;
}
static int ipa_odl_ctl_fops_release(struct inode *inode, struct file *filp)
{
IPADBG("QTI closed ipa_odl_ctl node\n");
ipa3_odl_ctx->odl_state.odl_open = false;
return 0;
}
/**
* ipa_odl_ctl_fops_read() - read message from IPA ODL device
* @filp: [in] file pointer
* @buf: [out] buffer to read into
* @count: [in] size of above buffer
* @f_pos: [inout] file position
*
* User-space should continuously read from /dev/ipa_odl_ctl;
* reads will block when there are no messages to read.
* Upon return, user-space should read the one-byte status from the
* start of the buffer:
*
* 0 --> ODL disconnected.
* 1 --> ODL connected.
*
* The buffer supplied must be big enough to
* hold the one-byte status message.
*
* Returns: how many bytes copied to buffer
*
* Note: Should not be called from atomic context
*/
static ssize_t ipa_odl_ctl_fops_read(struct file *filp, char __user *buf,
size_t count, loff_t *f_pos)
{
char __user *start;
u8 data;
int ret = 0;
static bool old_state;
bool new_state = false;
start = buf;
ipa3_odl_ctx->odl_ctl_msg_wq_flag = false;
if (!ipa3_odl_ctx->odl_state.adpl_open &&
!ipa3_odl_ctx->odl_state.odl_disconnected) {
IPADBG("Failed to send data odl pipe already disconnected\n");
ret = -EFAULT;
goto send_failed;
}
if (ipa3_odl_ctx->odl_state.odl_ep_setup)
new_state = true;
else if (ipa3_odl_ctx->odl_state.odl_disconnected)
new_state = false;
else {
IPADBG("Failed to send data odl already running\n");
ret = -EFAULT;
goto send_failed;
}
if (old_state != new_state) {
old_state = new_state;
data = new_state ? 1 : 0;
if (copy_to_user(buf, &data,
sizeof(data))) {
IPADBG("Cpoying data to user failed\n");
ret = -EFAULT;
goto send_failed;
}
buf += sizeof(data);
if (data == 1)
ipa3_odl_ctx->odl_state.odl_setup_done_sent =
true;
}
if (start != buf && ret != -EFAULT)
ret = buf - start;
send_failed:
return ret;
}
static unsigned int ipa_odl_ctl_fops_poll(struct file *file, poll_table *wait)
{
unsigned int mask = 0;
poll_wait(file, &odl_ctl_msg_wq, wait);
if (ipa3_odl_ctx->odl_ctl_msg_wq_flag) {
IPADBG("Sending read mask to odl control pipe\n");
mask |= POLLIN | POLLRDNORM;
}
return mask;
}
static long ipa_odl_ctl_fops_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
struct ipa_odl_ep_info ep_info = {0};
struct ipa_odl_modem_config status;
int retval = 0;
IPADBG("Calling odl ioctl cmd = %d\n", cmd);
if (!ipa3_odl_ctx->odl_state.odl_setup_done_sent) {
IPAERR("Before complete the odl setup trying calling ioctl\n");
print_ipa_odl_state_bit_mask();
retval = -ENODEV;
goto fail;
}
switch (cmd) {
case IPA_IOC_ODL_QUERY_ADAPL_EP_INFO:
/* Send ep_info to user APP */
ep_info.ep_type = ODL_EP_TYPE_HSUSB;
ep_info.peripheral_iface_id = ODL_EP_PERIPHERAL_IFACE_ID;
ep_info.cons_pipe_num = -1;
ep_info.prod_pipe_num =
ipa3_odl_ctx->odl_client_hdl;
if (copy_to_user((void __user *)arg, &ep_info,
sizeof(ep_info))) {
retval = -EFAULT;
goto fail;
}
ipa3_odl_ctx->odl_state.odl_ep_info_sent = true;
break;
case IPA_IOC_ODL_QUERY_MODEM_CONFIG:
IPADBG("Received the IPA_IOC_ODL_QUERY_MODEM_CONFIG :\n");
if (copy_from_user(&status, (const void __user *)arg,
sizeof(status))) {
retval = -EFAULT;
break;
}
if (status.config_status == CONFIG_SUCCESS)
ipa3_odl_ctx->odl_state.odl_connected = true;
IPADBG("status.config_status = %d odl_connected = %d\n",
status.config_status, ipa3_odl_ctx->odl_state.odl_connected);
break;
default:
retval = -ENOIOCTLCMD;
break;
}
fail:
return retval;
}
static void delete_first_node(void)
{
struct ipa3_push_msg_odl *msg;
if (!list_empty(&ipa3_odl_ctx->adpl_msg_list)) {
msg = list_first_entry(&ipa3_odl_ctx->adpl_msg_list,
struct ipa3_push_msg_odl, link);
if (msg) {
list_del(&msg->link);
kfree(msg->buff);
kfree(msg);
ipa3_odl_ctx->stats.odl_drop_pkt++;
if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue))
atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue);
}
} else {
IPADBG("List Empty\n");
}
}
int ipa3_send_adpl_msg(unsigned long skb_data)
{
struct ipa3_push_msg_odl *msg;
struct sk_buff *skb = (struct sk_buff *)skb_data;
void *data;
IPADBG_LOW("Processing DPL data\n");
msg = kzalloc(sizeof(struct ipa3_push_msg_odl), GFP_KERNEL);
if (msg == NULL) {
IPADBG("Memory allocation failed\n");
return -ENOMEM;
}
data = kmemdup(skb->data, skb->len, GFP_KERNEL);
if (data == NULL) {
kfree(msg);
return -ENOMEM;
}
msg->buff = data;
msg->len = skb->len;
mutex_lock(&ipa3_odl_ctx->adpl_msg_lock);
if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue) >=
MAX_QUEUE_TO_ODL)
delete_first_node();
list_add_tail(&msg->link, &ipa3_odl_ctx->adpl_msg_list);
atomic_inc(&ipa3_odl_ctx->stats.numer_in_queue);
mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock);
IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_rx_pkt);
return 0;
}
/**
* odl_ipa_packet_receive_notify() - Rx notify
*
* @priv: driver context
* @evt: event type
* @data: data provided with event
*
* IPA will pass a packet to the Linux network stack with skb->data
*/
static void odl_ipa_packet_receive_notify(void *priv,
enum ipa_dp_evt_type evt,
unsigned long data)
{
IPADBG_LOW("Rx packet was received\n");
if (evt == IPA_RECEIVE)
ipa3_send_adpl_msg(data);
else
IPAERR("Invalid evt %d received in wan_ipa_receive\n", evt);
}
int ipa_setup_odl_pipe(void)
{
struct ipa_sys_connect_params *ipa_odl_ep_cfg;
int ret;
ipa_odl_ep_cfg = &ipa3_odl_ctx->odl_sys_param;
IPADBG("Setting up the odl endpoint\n");
ipa_odl_ep_cfg->ipa_ep_cfg.cfg.cs_offload_en = IPA_ENABLE_CS_OFFLOAD_DL;
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_hard_byte_limit_en = 1;
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr = IPA_GENERIC;
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_byte_limit =
IPA_ODL_AGGR_BYTE_LIMIT;
ipa_odl_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit = 0;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_len = 4;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata_valid = 1;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_metadata = 1;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size_valid = 1;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr.hdr_ofst_pkt_size = 2;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_valid = true;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad = 0;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_payload_len_inc_padding = true;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_total_len_or_pad_offset = 0;
ipa_odl_ep_cfg->ipa_ep_cfg.hdr_ext.hdr_little_endian = 0;
ipa_odl_ep_cfg->ipa_ep_cfg.metadata_mask.metadata_mask = 0xFF000000;
ipa_odl_ep_cfg->client = IPA_CLIENT_ODL_DPL_CONS;
ipa_odl_ep_cfg->notify = odl_ipa_packet_receive_notify;
ipa_odl_ep_cfg->napi_obj = NULL;
ipa_odl_ep_cfg->desc_fifo_sz = IPA_ODL_RX_RING_SIZE *
IPA_FIFO_ELEMENT_SIZE;
ipa3_odl_ctx->odl_client_hdl = -1;
ret = ipa3_setup_sys_pipe(ipa_odl_ep_cfg,
&ipa3_odl_ctx->odl_client_hdl);
return ret;
}
int ipa3_odl_pipe_open(void)
{
int ret = 0;
struct ipa_ep_cfg_holb holb_cfg;
if (!ipa3_odl_ctx->odl_state.adpl_open) {
IPAERR("adpl pipe not configured\n");
return 0;
}
memset(&holb_cfg, 0, sizeof(holb_cfg));
holb_cfg.tmr_val = 0;
holb_cfg.en = 1;
ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_DPL_CONS, &holb_cfg);
ret = ipa_setup_odl_pipe();
if (ret) {
IPAERR(" Setup endpoint config failed\n");
ipa3_odl_ctx->odl_state.adpl_open = false;
goto fail;
}
ipa3_cfg_ep_holb_by_client(IPA_CLIENT_ODL_DPL_CONS, &holb_cfg);
ipa3_odl_ctx->odl_state.odl_ep_setup = true;
IPADBG("Setup endpoint config success\n");
ipa3_odl_ctx->stats.odl_drop_pkt = 0;
atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0);
ipa3_odl_ctx->stats.odl_rx_pkt = 0;
ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0;
/*
* Send signal to ipa_odl_ctl_fops_read,
* to send ODL ep open notification
*/
ipa3_odl_ctx->odl_ctl_msg_wq_flag = true;
IPADBG("Wake up odl ctl\n");
wake_up_interruptible(&odl_ctl_msg_wq);
if (ipa3_odl_ctx->odl_state.odl_disconnected)
ipa3_odl_ctx->odl_state.odl_disconnected = false;
fail:
return ret;
}
static int ipa_adpl_open(struct inode *inode, struct file *filp)
{
int ret = 0;
IPADBG("Called the function :\n");
if (ipa3_odl_ctx->odl_state.odl_init &&
!ipa3_odl_ctx->odl_state.adpl_open) {
ipa3_odl_ctx->odl_state.adpl_open = true;
ret = ipa3_odl_pipe_open();
} else {
IPAERR("Before odl init trying to open adpl pipe\n");
print_ipa_odl_state_bit_mask();
ret = -ENODEV;
}
return ret;
}
static int ipa_adpl_release(struct inode *inode, struct file *filp)
{
ipa3_odl_pipe_cleanup(false);
return 0;
}
void ipa3_odl_pipe_cleanup(bool is_ssr)
{
bool ipa_odl_opened = false;
struct ipa_ep_cfg_holb holb_cfg;
if (!ipa3_odl_ctx->odl_state.adpl_open) {
IPAERR("adpl pipe not configured\n");
return;
}
if (ipa3_odl_ctx->odl_state.odl_open)
ipa_odl_opened = true;
memset(&ipa3_odl_ctx->odl_state, 0, sizeof(ipa3_odl_ctx->odl_state));
/* Since init will not be done again */
ipa3_odl_ctx->odl_state.odl_init = true;
memset(&holb_cfg, 0, sizeof(holb_cfg));
holb_cfg.tmr_val = 0;
holb_cfg.en = 0;
ipa3_cfg_ep_holb_by_client(IPA_CLIENT_USB_DPL_CONS, &holb_cfg);
ipa3_teardown_sys_pipe(ipa3_odl_ctx->odl_client_hdl);
ipa3_odl_ctx->odl_client_hdl = -1;
/* Assume QTI will never close this node once opened */
if (ipa_odl_opened)
ipa3_odl_ctx->odl_state.odl_open = true;
/* Assume DIAG will not close this node in SSR case */
if (is_ssr)
ipa3_odl_ctx->odl_state.adpl_open = true;
else
ipa3_odl_ctx->odl_state.adpl_open = false;
ipa3_odl_ctx->odl_state.odl_disconnected = true;
ipa3_odl_ctx->odl_state.odl_ep_setup = false;
ipa3_odl_ctx->odl_state.aggr_byte_limit_sent = false;
ipa3_odl_ctx->odl_state.odl_connected = false;
/*
* Send signal to ipa_odl_ctl_fops_read,
* to send ODL ep close notification
*/
ipa3_odl_ctx->odl_ctl_msg_wq_flag = true;
ipa3_odl_ctx->stats.odl_drop_pkt = 0;
atomic_set(&ipa3_odl_ctx->stats.numer_in_queue, 0);
ipa3_odl_ctx->stats.odl_rx_pkt = 0;
ipa3_odl_ctx->stats.odl_tx_diag_pkt = 0;
IPADBG("Wake up odl ctl\n");
wake_up_interruptible(&odl_ctl_msg_wq);
}
/**
* ipa_adpl_read() - read message from IPA device
* @filp: [in] file pointer
* @buf: [out] buffer to read into
* @count: [in] size of above buffer
* @f_pos: [inout] file position
*
* User-space should continually read from /dev/ipa_adpl,
* read will block when there are no messages to read.
* Upon return, user-space should read the data from the start of the
* buffer. The buffer supplied must be big enough to hold the data.
*
* Returns: how many bytes copied to buffer
*
* Note: Should not be called from atomic context
*/
static ssize_t ipa_adpl_read(struct file *filp, char __user *buf, size_t count,
loff_t *f_pos)
{
int ret = 0;
char __user *start = buf;
struct ipa3_push_msg_odl *msg;
while (1) {
IPADBG_LOW("Writing message to adpl pipe\n");
if (!ipa3_odl_ctx->odl_state.odl_open)
break;
mutex_lock(&ipa3_odl_ctx->adpl_msg_lock);
msg = NULL;
if (!list_empty(&ipa3_odl_ctx->adpl_msg_list)) {
msg = list_first_entry(&ipa3_odl_ctx->adpl_msg_list,
struct ipa3_push_msg_odl, link);
list_del(&msg->link);
if (atomic_read(&ipa3_odl_ctx->stats.numer_in_queue))
atomic_dec(&ipa3_odl_ctx->stats.numer_in_queue);
}
mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock);
if (msg != NULL) {
if (msg->len > count) {
IPAERR("Message length greater than count\n");
kfree(msg->buff);
kfree(msg);
msg = NULL;
ret = -EAGAIN;
break;
}
if (msg->buff) {
if (copy_to_user(buf, msg->buff,
msg->len)) {
ret = -EFAULT;
kfree(msg->buff);
kfree(msg);
msg = NULL;
break;
}
buf += msg->len;
count -= msg->len;
kfree(msg->buff);
}
IPA_STATS_INC_CNT(ipa3_odl_ctx->stats.odl_tx_diag_pkt);
kfree(msg);
msg = NULL;
} else {
ret = -EAGAIN;
break;
}
ret = -EAGAIN;
if (filp->f_flags & O_NONBLOCK)
break;
ret = -EINTR;
if (signal_pending(current))
break;
if (start != buf)
break;
}
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
}
static long ipa_adpl_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg)
{
struct odl_agg_pipe_info odl_pipe_info;
int retval = 0;
if (!ipa3_odl_ctx->odl_state.odl_connected) {
IPAERR("ODL config in progress not allowed ioctl\n");
print_ipa_odl_state_bit_mask();
retval = -ENODEV;
goto fail;
}
IPADBG("Calling adpl ioctl\n");
switch (cmd) {
case IPA_IOC_ODL_GET_AGG_BYTE_LIMIT:
odl_pipe_info.agg_byte_limit =
ipa3_odl_ctx->odl_sys_param.ipa_ep_cfg.aggr.aggr_byte_limit;
if (copy_to_user((void __user *)arg, &odl_pipe_info,
sizeof(odl_pipe_info))) {
retval = -EFAULT;
goto fail;
}
ipa3_odl_ctx->odl_state.aggr_byte_limit_sent = true;
break;
default:
retval = -ENOIOCTLCMD;
print_ipa_odl_state_bit_mask();
break;
}
fail:
return retval;
}
static const struct file_operations ipa_odl_ctl_fops = {
.owner = THIS_MODULE,
.open = ipa_odl_ctl_fops_open,
.release = ipa_odl_ctl_fops_release,
.read = ipa_odl_ctl_fops_read,
.unlocked_ioctl = ipa_odl_ctl_fops_ioctl,
.poll = ipa_odl_ctl_fops_poll,
};
static const struct file_operations ipa_adpl_fops = {
.owner = THIS_MODULE,
.open = ipa_adpl_open,
.release = ipa_adpl_release,
.read = ipa_adpl_read,
.unlocked_ioctl = ipa_adpl_ioctl,
};
int ipa_odl_init(void)
{
int result = 0;
struct cdev *cdev;
int loop = 0;
struct ipa3_odl_char_device_context *odl_cdev;
ipa3_odl_ctx = kzalloc(sizeof(*ipa3_odl_ctx), GFP_KERNEL);
if (!ipa3_odl_ctx) {
result = -ENOMEM;
goto fail_mem_ctx;
}
odl_cdev = ipa3_odl_ctx->odl_cdev;
INIT_LIST_HEAD(&ipa3_odl_ctx->adpl_msg_list);
mutex_init(&ipa3_odl_ctx->adpl_msg_lock);
odl_cdev[loop].class = class_create(THIS_MODULE, "ipa_adpl");
if (IS_ERR(odl_cdev[loop].class)) {
IPAERR("Error: odl_cdev->class NULL\n");
result = -ENODEV;
goto create_char_dev0_fail;
}
result = alloc_chrdev_region(&odl_cdev[loop].dev_num, 0, 1, "ipa_adpl");
if (result) {
IPAERR("alloc_chrdev_region error for ipa adpl pipe\n");
result = -ENODEV;
goto alloc_chrdev0_region_fail;
}
odl_cdev[loop].dev = device_create(odl_cdev[loop].class, NULL,
odl_cdev[loop].dev_num, ipa3_ctx, "ipa_adpl");
if (IS_ERR(odl_cdev[loop].dev)) {
IPAERR("device_create err:%ld\n", PTR_ERR(odl_cdev[loop].dev));
result = PTR_ERR(odl_cdev[loop].dev);
goto device0_create_fail;
}
cdev = &odl_cdev[loop].cdev;
cdev_init(cdev, &ipa_adpl_fops);
cdev->owner = THIS_MODULE;
cdev->ops = &ipa_adpl_fops;
result = cdev_add(cdev, odl_cdev[loop].dev_num, 1);
if (result) {
IPAERR("cdev_add err=%d\n", -result);
goto cdev0_add_fail;
}
loop++;
odl_cdev[loop].class = class_create(THIS_MODULE, "ipa_odl_ctl");
if (IS_ERR(odl_cdev[loop].class)) {
IPAERR("Error: odl_cdev->class NULL\n");
result = -ENODEV;
goto create_char_dev1_fail;
}
result = alloc_chrdev_region(&odl_cdev[loop].dev_num, 0, 1,
"ipa_odl_ctl");
if (result) {
IPAERR("alloc_chrdev_region error for ipa odl ctl pipe\n");
goto alloc_chrdev1_region_fail;
}
odl_cdev[loop].dev = device_create(odl_cdev[loop].class, NULL,
odl_cdev[loop].dev_num, ipa3_ctx, "ipa_odl_ctl");
if (IS_ERR(odl_cdev[loop].dev)) {
IPAERR("device_create err:%ld\n", PTR_ERR(odl_cdev[loop].dev));
result = PTR_ERR(odl_cdev[loop].dev);
goto device1_create_fail;
}
cdev = &odl_cdev[loop].cdev;
cdev_init(cdev, &ipa_odl_ctl_fops);
cdev->owner = THIS_MODULE;
cdev->ops = &ipa_odl_ctl_fops;
result = cdev_add(cdev, odl_cdev[loop].dev_num, 1);
if (result) {
IPAERR(":cdev_add err=%d\n", -result);
goto cdev1_add_fail;
}
ipa3_odl_ctx->odl_state.odl_init = true;
return 0;
cdev1_add_fail:
device_destroy(odl_cdev[1].class, odl_cdev[1].dev_num);
device1_create_fail:
unregister_chrdev_region(odl_cdev[1].dev_num, 1);
alloc_chrdev1_region_fail:
class_destroy(odl_cdev[1].class);
create_char_dev1_fail:
cdev0_add_fail:
device_destroy(odl_cdev[0].class, odl_cdev[0].dev_num);
device0_create_fail:
unregister_chrdev_region(odl_cdev[0].dev_num, 1);
alloc_chrdev0_region_fail:
class_destroy(odl_cdev[0].class);
create_char_dev0_fail:
kfree(ipa3_odl_ctx);
fail_mem_ctx:
return result;
}
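
As a companion to the ipa_adpl_read() semantics above, here is a minimal user-space sketch of a reader for /dev/ipa_adpl. It is illustrative only (not part of this commit); the buffer size is an assumption and would properly be sized from the limit reported by IPA_IOC_ODL_GET_AGG_BYTE_LIMIT.

/* Hypothetical user-space consumer of /dev/ipa_adpl (illustrative). */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[16 * 1024];	/* assumed >= aggregated message size */
	int fd = open("/dev/ipa_adpl", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));

		if (n < 0) {
			if (errno == EAGAIN) {	/* queue was empty */
				usleep(1000);
				continue;
			}
			perror("read");
			break;
		}
		/* consume n bytes of aggregated diag data here */
	}
	close(fd);
	return 0;
}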

73
ipa/ipa_v3/ipa_odl.h Normal file
View file

@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA3_ODL_H_
#define _IPA3_ODL_H_
#define IPA_ODL_AGGR_BYTE_LIMIT (15 * 1024)
#define IPA_ODL_RX_RING_SIZE 192
#define MAX_QUEUE_TO_ODL 1024
#define CONFIG_SUCCESS 1
#define ODL_EP_TYPE_HSUSB 2
#define ODL_EP_PERIPHERAL_IFACE_ID 3
struct ipa3_odlstats {
u32 odl_rx_pkt;
u32 odl_tx_diag_pkt;
u32 odl_drop_pkt;
atomic_t numer_in_queue;
};
struct odl_state_bit_mask {
u32 odl_init:1;
u32 odl_open:1;
u32 adpl_open:1;
u32 aggr_byte_limit_sent:1;
u32 odl_ep_setup:1;
u32 odl_setup_done_sent:1;
u32 odl_ep_info_sent:1;
u32 odl_connected:1;
u32 odl_disconnected:1;
u32:0;
};
/**
* struct ipa3_odl_char_device_context - IPA ODL character device
* @class: pointer to the struct class
* @dev_num: device number (dev_t)
* @dev: pointer to the struct device
* @cdev: cdev of the device
*/
struct ipa3_odl_char_device_context {
struct class *class;
dev_t dev_num;
struct device *dev;
struct cdev cdev;
};
struct ipa_odl_context {
struct ipa3_odl_char_device_context odl_cdev[2];
struct list_head adpl_msg_list;
struct mutex adpl_msg_lock;
struct ipa_sys_connect_params odl_sys_param;
u32 odl_client_hdl;
struct odl_state_bit_mask odl_state;
bool odl_ctl_msg_wq_flag;
struct ipa3_odlstats stats;
};
struct ipa3_push_msg_odl {
void *buff;
int len;
struct list_head link;
};
extern struct ipa_odl_context *ipa3_odl_ctx;
int ipa_odl_init(void);
void ipa3_odl_pipe_cleanup(bool is_ssr);
int ipa3_odl_pipe_open(void);
#endif /* _IPA3_ODL_H_ */
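
For orientation, a sketch of the producer side implied by these structures: queueing one message onto adpl_msg_list for ipa_adpl_read() to drain. The helper name is hypothetical and the sketch assumes process context; it is not code from this commit.

/* Hypothetical producer for the ADPL message queue (illustrative). */
static int ipa3_odl_queue_msg_example(const void *data, int len)
{
	struct ipa3_push_msg_odl *msg;

	msg = kzalloc(sizeof(*msg), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	msg->buff = kmemdup(data, len, GFP_KERNEL);
	if (!msg->buff) {
		kfree(msg);
		return -ENOMEM;
	}
	msg->len = len;
	mutex_lock(&ipa3_odl_ctx->adpl_msg_lock);
	list_add_tail(&msg->link, &ipa3_odl_ctx->adpl_msg_list);
	atomic_inc(&ipa3_odl_ctx->stats.numer_in_queue);
	mutex_unlock(&ipa3_odl_ctx->adpl_msg_lock);
	return 0;
}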

1412
ipa/ipa_v3/ipa_pm.c Normal file

File diff suppressed because it is too large Load Diff

181
ipa/ipa_v3/ipa_pm.h Normal file
View file

@@ -0,0 +1,181 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA_PM_H_
#define _IPA_PM_H_
#include <linux/msm_ipa.h>
/* internal to ipa */
#define IPA_PM_MAX_CLIENTS 32 /* usable max is value - 1 since handles start from 1 */
#define IPA_PM_MAX_EX_CL 64
#define IPA_PM_THRESHOLD_MAX 5
#define IPA_PM_EXCEPTION_MAX 2
#define IPA_PM_DEFERRED_TIMEOUT 10
/*
* ipa_pm group names
*
* Default stands for individual clients, while the other groups share one
* throughput. Some groups also have special behavior; e.g., the modem group
* does not vote for the clock but is accounted for in clock scaling while
* activated.
*/
enum ipa_pm_group {
IPA_PM_GROUP_DEFAULT,
IPA_PM_GROUP_APPS,
IPA_PM_GROUP_MODEM,
IPA_PM_GROUP_MAX,
};
/*
* ipa_pm_cb_event
*
* specifies what kind of callback is being called.
* IPA_PM_CLIENT_ACTIVATED: the client has completed asynchronous activation
* IPA_PM_REQUEST_WAKEUP: wake up the client after it has been suspended
*/
enum ipa_pm_cb_event {
IPA_PM_CLIENT_ACTIVATED,
IPA_PM_REQUEST_WAKEUP,
IPA_PM_CB_EVENT_MAX,
};
/*
* struct ipa_pm_exception - clients included in exception and its threshold
* @usecase: comma separated client names
* @threshold: the threshold values for the exception
*/
struct ipa_pm_exception {
const char *usecase;
int threshold[IPA_PM_THRESHOLD_MAX];
};
/*
* struct ipa_pm_init_params - parameters needed for initializing the pm
* @default_threshold: the thresholds used if no exception matches
* @threshold_size: number of entries in default_threshold
* @exceptions: list of exceptions for the pm
* @exception_size: number of entries in exceptions
*/
struct ipa_pm_init_params {
int default_threshold[IPA_PM_THRESHOLD_MAX];
int threshold_size;
struct ipa_pm_exception exceptions[IPA_PM_EXCEPTION_MAX];
int exception_size;
};
/*
* struct ipa_pm_register_params - parameters needed to register a client
* @name: name of the client
* @callback: pointer to the client's callback function
* @user_data: pointer to the client's callback parameters
* @group: group number of the client
* @skip_clk_vote: 0 if client votes for clock when activated, 1 if no vote
*/
struct ipa_pm_register_params {
const char *name;
void (*callback)(void *user_data, enum ipa_pm_cb_event);
void *user_data;
enum ipa_pm_group group;
bool skip_clk_vote;
};
#ifdef CONFIG_IPA3
int ipa_pm_register(struct ipa_pm_register_params *params, u32 *hdl);
int ipa_pm_associate_ipa_cons_to_client(u32 hdl, enum ipa_client_type consumer);
int ipa_pm_activate(u32 hdl);
int ipa_pm_activate_sync(u32 hdl);
int ipa_pm_deferred_deactivate(u32 hdl);
int ipa_pm_deactivate_sync(u32 hdl);
int ipa_pm_set_throughput(u32 hdl, int throughput);
int ipa_pm_deregister(u32 hdl);
/* IPA Internal Functions */
int ipa_pm_init(struct ipa_pm_init_params *params);
int ipa_pm_destroy(void);
int ipa_pm_handle_suspend(u32 pipe_bitmask);
int ipa_pm_deactivate_all_deferred(void);
int ipa_pm_stat(char *buf, int size);
int ipa_pm_exceptions_stat(char *buf, int size);
void ipa_pm_set_clock_index(int index);
#else
static inline int ipa_pm_register(
struct ipa_pm_register_params *params, u32 *hdl)
{
return -EPERM;
}
static inline int ipa_pm_associate_ipa_cons_to_client(
u32 hdl, enum ipa_client_type consumer)
{
return -EPERM;
}
static inline int ipa_pm_activate(u32 hdl)
{
return -EPERM;
}
static inline int ipa_pm_activate_sync(u32 hdl)
{
return -EPERM;
}
static inline int ipa_pm_deferred_deactivate(u32 hdl)
{
return -EPERM;
}
static inline int ipa_pm_deactivate_sync(u32 hdl)
{
return -EPERM;
}
static inline int ipa_pm_set_throughput(u32 hdl, int throughput)
{
return -EPERM;
}
static inline int ipa_pm_deregister(u32 hdl)
{
return -EPERM;
}
/* IPA Internal Functions */
static inline int ipa_pm_init(struct ipa_pm_init_params *params)
{
return -EPERM;
}
static inline int ipa_pm_destroy(void)
{
return -EPERM;
}
static inline int ipa_pm_handle_suspend(u32 pipe_bitmask)
{
return -EPERM;
}
static inline int ipa_pm_deactivate_all_deferred(void)
{
return -EPERM;
}
static inline int ipa_pm_stat(char *buf, int size)
{
return -EPERM;
}
static inline int ipa_pm_exceptions_stat(char *buf, int size)
{
return -EPERM;
}
#endif
#endif /* _IPA_PM_H_ */
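
As a usage sketch for the API declared above (illustrative, not part of this commit), a hypothetical client registers with the PM framework, votes synchronously around a transfer, and defers its deactivation when idle. The client name and throughput figure are assumptions.

/* Hypothetical ipa_pm client lifecycle (illustrative). */
static void my_pm_cb(void *user_data, enum ipa_pm_cb_event event)
{
	if (event == IPA_PM_CLIENT_ACTIVATED)
		; /* async activation done; kick queued work here */
}

static int my_client_start(u32 *pm_hdl)
{
	struct ipa_pm_register_params params = {
		.name = "my_client",		/* assumed name */
		.callback = my_pm_cb,
		.user_data = NULL,
		.group = IPA_PM_GROUP_DEFAULT,
		.skip_clk_vote = false,
	};
	int ret = ipa_pm_register(&params, pm_hdl);

	if (ret)
		return ret;
	ret = ipa_pm_activate_sync(*pm_hdl);	/* vote for IPA clocks */
	if (ret) {
		ipa_pm_deregister(*pm_hdl);
		return ret;
	}
	ipa_pm_set_throughput(*pm_hdl, 1000);	/* assumed Mbps */
	return 0;
}

static void my_client_idle(u32 pm_hdl)
{
	/* clock vote is dropped after IPA_PM_DEFERRED_TIMEOUT if idle */
	ipa_pm_deferred_deactivate(pm_hdl);
}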

2174
ipa/ipa_v3/ipa_qmi_service.c Normal file

File diff suppressed because it is too large Load Diff

521
ipa/ipa_v3/ipa_qmi_service.h Normal file
View file

@@ -0,0 +1,521 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*/
#ifndef IPA_QMI_SERVICE_H
#define IPA_QMI_SERVICE_H
#include <linux/ipa.h>
#include <linux/ipa_qmi_service_v01.h>
#include <uapi/linux/msm_rmnet.h>
#include <linux/soc/qcom/qmi.h>
#include "ipa_i.h"
#include <linux/rmnet_ipa_fd_ioctl.h>
/**
* Names of the DL wwan default routing tables for v4 and v6
*/
#define IPA_A7_QMAP_HDR_NAME "ipa_qmap_hdr"
#define IPA_DFLT_WAN_RT_TBL_NAME "ipa_dflt_wan_rt"
#define MAX_NUM_Q6_RULE 35
#define MAX_NUM_QMI_RULE_CACHE 10
#define MAX_NUM_QMI_MPM_AGGR_CACHE 3
#define DEV_NAME "ipa-wan"
#define SUBSYS_LOCAL_MODEM "modem"
#define SUBSYS_REMOTE_MODEM "esoc0"
#define IPAWANDBG(fmt, args...) \
do { \
pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
__LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANDBG_LOW(fmt, args...) \
do { \
pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
__LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANERR(fmt, args...) \
do { \
pr_err(DEV_NAME " %s:%d " fmt, __func__,\
__LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANERR_RL(fmt, args...) \
do { \
pr_err_ratelimited_ipa(DEV_NAME " %s:%d " fmt, __func__,\
__LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANINFO(fmt, args...) \
do { \
pr_info(DEV_NAME " %s:%d " fmt, __func__,\
__LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
extern struct ipa3_qmi_context *ipa3_qmi_ctx;
struct ipa_offload_connection_val {
enum ipa_ip_type_enum_v01 ip_type;
bool valid;
uint32_t rule_id;
uint32_t rule_hdl;
};
struct ipa3_qmi_context {
struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
int num_ipa_install_fltr_rule_req_msg;
struct ipa_install_fltr_rule_req_msg_v01
ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
int num_ipa_install_fltr_rule_req_ex_msg;
struct ipa_install_fltr_rule_req_ex_msg_v01
ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE];
int num_ipa_fltr_installed_notif_req_msg;
struct ipa_fltr_installed_notif_req_msg_v01
ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
int num_ipa_configure_ul_firewall_rules_req_msg;
struct ipa_configure_ul_firewall_rules_req_msg_v01
ipa_configure_ul_firewall_rules_req_msg_cache
[MAX_NUM_QMI_RULE_CACHE];
struct ipa_mhi_prime_aggr_info_req_msg_v01
ipa_mhi_prime_aggr_info_req_msg_cache
[MAX_NUM_QMI_MPM_AGGR_CACHE];
bool modem_cfg_emb_pipe_flt;
struct sockaddr_qrtr client_sq;
struct sockaddr_qrtr server_sq;
int num_ipa_offload_connection;
struct ipa_offload_connection_val
ipa_offload_cache[QMI_IPA_MAX_FILTERS_V01];
};
struct ipa3_rmnet_mux_val {
uint32_t mux_id;
int8_t vchannel_name[IFNAMSIZ];
bool mux_channel_set;
bool ul_flt_reg;
bool mux_hdr_set;
uint32_t hdr_hdl;
};
extern struct qmi_elem_info
ipa3_init_modem_driver_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_init_modem_driver_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_indication_reg_req_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_config_req_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_config_resp_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
extern struct qmi_elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_ul_firewall_rule_type_data_v01_ei[];
extern struct qmi_elem_info
ipa3_ul_firewall_config_result_type_data_v01_ei[];
extern struct
qmi_elem_info ipa3_per_client_stats_info_type_data_v01_ei[];
extern struct qmi_elem_info
ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_get_stats_per_client_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
extern struct qmi_elem_info
ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
extern struct qmi_elem_info ipa_mhi_ready_indication_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_mem_addr_info_type_v01_ei[];
extern struct qmi_elem_info ipa_mhi_tr_info_type_v01_ei[];
extern struct qmi_elem_info ipa_mhi_er_info_type_v01_ei[];
extern struct qmi_elem_info ipa_mhi_alloc_channel_req_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_ch_alloc_resp_type_v01_ei[];
extern struct qmi_elem_info ipa_mhi_alloc_channel_resp_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_clk_vote_req_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_clk_vote_resp_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_cleanup_req_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_cleanup_resp_msg_v01_ei[];
extern struct qmi_elem_info ipa_endp_desc_indication_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_prime_aggr_info_req_msg_v01_ei[];
extern struct qmi_elem_info ipa_mhi_prime_aggr_info_resp_msg_v01_ei[];
extern struct qmi_elem_info ipa_add_offload_connection_req_msg_v01_ei[];
extern struct qmi_elem_info ipa_add_offload_connection_resp_msg_v01_ei[];
extern struct qmi_elem_info ipa_remove_offload_connection_req_msg_v01_ei[];
extern struct qmi_elem_info ipa_remove_offload_connection_resp_msg_v01_ei[];
/**
* struct ipa3_rmnet_context - IPA rmnet context
* @ipa_rmnet_ssr: support modem SSR
* @polling_interval: Requested interval for polling tethered statistics
* @metered_mux_id: The mux ID on which quota has been set
*/
struct ipa3_rmnet_context {
bool ipa_rmnet_ssr;
u64 polling_interval;
u32 metered_mux_id;
};
extern struct ipa3_rmnet_context ipa3_rmnet_ctx;
#ifdef CONFIG_RMNET_IPA3
int ipa3_qmi_service_init(uint32_t wan_platform_type);
void ipa3_qmi_service_exit(void);
/* sending filter-install-request to modem */
int ipa3_qmi_filter_request_send(
struct ipa_install_fltr_rule_req_msg_v01 *req);
int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req);
int ipa3_qmi_add_offload_request_send(
struct ipa_add_offload_connection_req_msg_v01 *req);
int ipa3_qmi_rmv_offload_request_send(
struct ipa_remove_offload_connection_req_msg_v01 *req);
int ipa3_qmi_ul_filter_request_send(
struct ipa_configure_ul_firewall_rules_req_msg_v01 *req);
/* sending filter-installed-notify-request to modem */
int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
*req);
/* voting for bus BW to ipa_rm */
int ipa3_vote_for_bus_bw(uint32_t *bw_mbps);
int ipa3_qmi_enable_force_clear_datapath_send(
struct ipa_enable_force_clear_datapath_req_msg_v01 *req);
int ipa3_qmi_disable_force_clear_datapath_send(
struct ipa_disable_force_clear_datapath_req_msg_v01 *req);
int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
*rule_req);
int ipa3_wwan_update_mux_channel_prop(void);
int ipa3_wan_ioctl_init(void);
void ipa3_wan_ioctl_stop_qmi_messages(void);
void ipa3_wan_ioctl_enable_qmi_messages(void);
void ipa3_wan_ioctl_deinit(void);
void ipa3_qmi_stop_workqueues(void);
int rmnet_ipa3_poll_tethering_stats(struct wan_ioctl_poll_tethering_stats
*data);
int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data);
void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
enum ipa_upstream_type upstream_type);
int rmnet_ipa3_set_tether_client_pipe(struct wan_ioctl_set_tether_client_pipe
*data);
int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
bool reset);
int rmnet_ipa3_query_tethering_stats_all(
struct wan_ioctl_query_tether_stats_all *data);
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data);
int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data);
int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data);
int rmnet_ipa3_enable_per_client_stats(bool *data);
int rmnet_ipa3_query_per_client_stats(
struct wan_ioctl_query_per_client_stats *data);
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp);
int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
struct ipa_get_apn_data_stats_resp_msg_v01 *resp);
int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req);
int ipa3_qmi_set_aggr_info(
enum ipa_aggr_enum_type_v01 aggr_enum_type);
int ipa3_qmi_stop_data_qouta(void);
void ipa3_q6_handshake_complete(bool ssr_bootup);
int ipa3_wwan_set_modem_perf_profile(int throughput);
int ipa3_wwan_set_modem_state(struct wan_ioctl_notify_wan_state *state);
int ipa3_qmi_enable_per_client_stats(
struct ipa_enable_per_client_stats_req_msg_v01 *req,
struct ipa_enable_per_client_stats_resp_msg_v01 *resp);
int ipa3_qmi_get_per_client_packet_stats(
struct ipa_get_stats_per_client_req_msg_v01 *req,
struct ipa_get_stats_per_client_resp_msg_v01 *resp);
int ipa3_qmi_send_mhi_ready_indication(
struct ipa_mhi_ready_indication_msg_v01 *req);
int ipa3_qmi_send_mhi_cleanup_request(struct ipa_mhi_cleanup_req_msg_v01 *req);
void ipa3_qmi_init(void);
void ipa3_qmi_cleanup(void);
#else /* CONFIG_RMNET_IPA3 */
static inline int ipa3_qmi_service_init(uint32_t wan_platform_type)
{
return -EPERM;
}
static inline void ipa3_qmi_service_exit(void) { }
/* sending filter-install-request to modem */
static inline int ipa3_qmi_filter_request_send(
struct ipa_install_fltr_rule_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_add_offload_request_send(
struct ipa_add_offload_connection_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_rmv_offload_request_send(
struct ipa_remove_offload_connection_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_ul_filter_request_send(
struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
{
return -EPERM;
}
/* sending filter-installed-notify-request to modem */
static inline int ipa3_qmi_filter_notify_send(
struct ipa_fltr_installed_notif_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_enable_force_clear_datapath_send(
struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_disable_force_clear_datapath_send(
struct ipa_disable_force_clear_datapath_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_copy_ul_filter_rule_to_ipa(
struct ipa_install_fltr_rule_req_msg_v01 *rule_req)
{
return -EPERM;
}
static inline int ipa3_wwan_update_mux_channel_prop(void)
{
return -EPERM;
}
static inline int ipa3_wan_ioctl_init(void)
{
return -EPERM;
}
static inline void ipa3_wan_ioctl_stop_qmi_messages(void) { }
static inline void ipa3_wan_ioctl_enable_qmi_messages(void) { }
static inline void ipa3_wan_ioctl_deinit(void) { }
static inline void ipa3_qmi_stop_workqueues(void) { }
static inline int ipa3_vote_for_bus_bw(uint32_t *bw_mbps)
{
return -EPERM;
}
static inline int rmnet_ipa3_poll_tethering_stats(
struct wan_ioctl_poll_tethering_stats *data)
{
return -EPERM;
}
static inline int rmnet_ipa3_set_data_quota(
struct wan_ioctl_set_data_quota *data)
{
return -EPERM;
}
static inline void ipa3_broadcast_quota_reach_ind(uint32_t mux_id,
enum ipa_upstream_type upstream_type) { }
static inline int ipa3_qmi_get_data_stats(
struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp)
{
return -EPERM;
}
static inline int ipa3_qmi_get_network_stats(
struct ipa_get_apn_data_stats_req_msg_v01 *req,
struct ipa_get_apn_data_stats_resp_msg_v01 *resp)
{
return -EPERM;
}
static inline int ipa3_qmi_set_data_quota(
struct ipa_set_data_usage_quota_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_stop_data_qouta(void)
{
return -EPERM;
}
static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
static inline int ipa3_qmi_send_mhi_ready_indication(
struct ipa_mhi_ready_indication_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_qmi_send_mhi_cleanup_request(
struct ipa_mhi_cleanup_req_msg_v01 *req)
{
return -EPERM;
}
static inline int ipa3_wwan_set_modem_perf_profile(
int throughput)
{
return -EPERM;
}
static inline int ipa3_qmi_enable_per_client_stats(
struct ipa_enable_per_client_stats_req_msg_v01 *req,
struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
{
return -EPERM;
}
static inline int ipa3_qmi_get_per_client_packet_stats(
struct ipa_get_stats_per_client_req_msg_v01 *req,
struct ipa_get_stats_per_client_resp_msg_v01 *resp)
{
return -EPERM;
}
static inline int ipa3_qmi_set_aggr_info(
enum ipa_aggr_enum_type_v01 aggr_enum_type)
{
return -EPERM;
}
static inline void ipa3_qmi_init(void)
{
}
static inline void ipa3_qmi_cleanup(void)
{
}
#endif /* CONFIG_RMNET_IPA3 */
#endif /* IPA_QMI_SERVICE_H */

5070
ipa/ipa_v3/ipa_qmi_service_v01.c Normal file

File diff suppressed because it is too large Load Diff

2491
ipa/ipa_v3/ipa_rt.c Normal file

File diff suppressed because it is too large Load Diff

183
ipa/ipa_v3/ipa_trace.h Normal file
View file

@@ -0,0 +1,183 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2019, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ipa
#define TRACE_INCLUDE_FILE ipa_trace
#if !defined(_IPA_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _IPA_TRACE_H
#include <linux/tracepoint.h>
TRACE_EVENT(
intr_to_poll3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
poll_to_intr3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
idle_sleep_enter3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
idle_sleep_exit3,
TP_PROTO(unsigned long client),
TP_ARGS(client),
TP_STRUCT__entry(
__field(unsigned long, client)
),
TP_fast_assign(
__entry->client = client;
),
TP_printk("client=%lu", __entry->client)
);
TRACE_EVENT(
rmnet_ipa_netifni3,
TP_PROTO(unsigned long rx_pkt_cnt),
TP_ARGS(rx_pkt_cnt),
TP_STRUCT__entry(
__field(unsigned long, rx_pkt_cnt)
),
TP_fast_assign(
__entry->rx_pkt_cnt = rx_pkt_cnt;
),
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
TRACE_EVENT(
rmnet_ipa_netifrx3,
TP_PROTO(unsigned long rx_pkt_cnt),
TP_ARGS(rx_pkt_cnt),
TP_STRUCT__entry(
__field(unsigned long, rx_pkt_cnt)
),
TP_fast_assign(
__entry->rx_pkt_cnt = rx_pkt_cnt;
),
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
TRACE_EVENT(
rmnet_ipa_netif_rcv_skb3,
TP_PROTO(unsigned long rx_pkt_cnt),
TP_ARGS(rx_pkt_cnt),
TP_STRUCT__entry(
__field(unsigned long, rx_pkt_cnt)
),
TP_fast_assign(
__entry->rx_pkt_cnt = rx_pkt_cnt;
),
TP_printk("rx_pkt_cnt=%lu", __entry->rx_pkt_cnt)
);
TRACE_EVENT(
ipa3_rx_poll_num,
TP_PROTO(int poll_num),
TP_ARGS(poll_num),
TP_STRUCT__entry(
__field(int, poll_num)
),
TP_fast_assign(
__entry->poll_num = poll_num;
),
TP_printk("each_poll_aggr_pkt_num=%d", __entry->poll_num)
);
TRACE_EVENT(
ipa3_rx_poll_cnt,
TP_PROTO(int poll_num),
TP_ARGS(poll_num),
TP_STRUCT__entry(
__field(int, poll_num)
),
TP_fast_assign(
__entry->poll_num = poll_num;
),
TP_printk("napi_overall_poll_pkt_cnt=%d", __entry->poll_num)
);
#endif /* _IPA_TRACE_H */
/* This part must be outside protection */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ../../techpack/dataipa/ipa/ipa_v3
#include <trace/define_trace.h>
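
For orientation, standard kernel tracepoint plumbing for this header (sketched; the call site below is hypothetical): exactly one compilation unit defines CREATE_TRACE_POINTS before including it, and driver code then invokes the generated trace_*() stubs.

/* In exactly one .c file, so define_trace.h emits the event bodies: */
#define CREATE_TRACE_POINTS
#include "ipa_trace.h"

/* Hypothetical call site, e.g. after an RX poll iteration: */
static void example_poll_done(int aggr_pkts)
{
	trace_ipa3_rx_poll_num(aggr_pkts);
}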

1101
ipa/ipa_v3/ipa_uc.c Normal file

File diff suppressed because it is too large Load Diff

957
ipa/ipa_v3/ipa_uc_mhi.c Normal file
View file

@@ -0,0 +1,957 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
#include <linux/ipa.h>
#include "ipa_i.h"
/* MHI uC interface definitions */
#define IPA_HW_INTERFACE_MHI_VERSION 0x0004
#define IPA_HW_MAX_NUMBER_OF_CHANNELS 2
#define IPA_HW_MAX_NUMBER_OF_EVENTRINGS 2
#define IPA_HW_MAX_CHANNEL_HANDLE (IPA_HW_MAX_NUMBER_OF_CHANNELS-1)
/**
* Values that represent the MHI commands from CPU to IPA HW.
* @IPA_CPU_2_HW_CMD_MHI_INIT: Initialize HW to be ready for MHI processing.
* Once operation was completed HW shall respond with
* IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
* @IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL: Initialize specific channel to be ready
* to serve MHI transfers. Once initialization is completed HW shall
* respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE and the
* channel state IPA_HW_MHI_CHANNEL_STATE_ENABLE.
* @IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI: Update MHI MSI interrupts data.
* Once operation was completed HW shall respond with
* IPA_HW_2_CPU_RESPONSE_CMD_COMPLETED.
* @IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE: Change specific channel
* processing state following host request. Once operation was completed
* HW shall respond with IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE.
* @IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO: Info related to DL UL synchronization.
* @IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE: Cmd to stop event ring processing.
*/
enum ipa_cpu_2_hw_mhi_commands {
IPA_CPU_2_HW_CMD_MHI_INIT
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 3),
IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5)
};
/**
* Values that represent MHI related HW responses to CPU commands.
* @IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE: Response to
* IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL or
* IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE commands.
*/
enum ipa_hw_2_cpu_mhi_responses {
IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
};
/**
* Values that represent MHI related HW event to be sent to CPU.
* @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR: Event specifying that the device
* detected an error in an element from the transfer ring associated with
* the channel
* @IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST: Event specifying that a
* transport interrupt was asserted while the MHI engine is suspended
*/
enum ipa_hw_2_cpu_mhi_events {
IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
};
/**
* Channel error types.
* @IPA_HW_CHANNEL_ERROR_NONE: No error persists.
* @IPA_HW_CHANNEL_INVALID_RE_ERROR: Invalid Ring Element was detected
*/
enum ipa_hw_channel_errors {
IPA_HW_CHANNEL_ERROR_NONE,
IPA_HW_CHANNEL_INVALID_RE_ERROR
};
/**
* MHI error types.
* @IPA_HW_INVALID_MMIO_ERROR: Invalid data read from MMIO space
* @IPA_HW_INVALID_CHANNEL_ERROR: Invalid data read from channel context array
* @IPA_HW_INVALID_EVENT_ERROR: Invalid data read from event ring context array
* @IPA_HW_NO_ED_IN_RING_ERROR: No event descriptors are available to report on
* secondary event ring
* @IPA_HW_LINK_ERROR: Link error
*/
enum ipa_hw_mhi_errors {
IPA_HW_INVALID_MMIO_ERROR
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 0),
IPA_HW_INVALID_CHANNEL_ERROR
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 1),
IPA_HW_INVALID_EVENT_ERROR
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 2),
IPA_HW_NO_ED_IN_RING_ERROR
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 4),
IPA_HW_LINK_ERROR
= FEATURE_ENUM_VAL(IPA_HW_FEATURE_MHI, 5),
};
/**
* Structure referring to the common and MHI section of 128B shared memory
* located in offset zero of SW Partition in IPA SRAM.
* The shared memory is used for communication between IPA HW and CPU.
* @common: common section in IPA SRAM
* @interfaceVersionMhi: The MHI interface version as reported by HW
* @mhiState: Overall MHI state
* @reserved_2B: reserved
* @mhiCnl0State: State of MHI channel 0.
* The state carries information regarding the error type.
* See IPA_HW_MHI_CHANNEL_STATES.
* @mhiCnl1State: State of MHI channel 1.
* @mhiCnl2State: State of MHI channel 2.
* @mhiCnl3State: State of MHI channel 3.
* @mhiCnl4State: State of MHI channel 4.
* @mhiCnl5State: State of MHI channel 5.
* @mhiCnl6State: State of MHI channel 6.
* @mhiCnl7State: State of MHI channel 7.
* @reserved_37_34: reserved
* @reserved_3B_38: reserved
* @reserved_3F_3C: reserved
*/
struct IpaHwSharedMemMhiMapping_t {
struct IpaHwSharedMemCommonMapping_t common;
u16 interfaceVersionMhi;
u8 mhiState;
u8 reserved_2B;
u8 mhiCnl0State;
u8 mhiCnl1State;
u8 mhiCnl2State;
u8 mhiCnl3State;
u8 mhiCnl4State;
u8 mhiCnl5State;
u8 mhiCnl6State;
u8 mhiCnl7State;
u32 reserved_37_34;
u32 reserved_3B_38;
u32 reserved_3F_3C;
};
/**
* Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT command.
* Parameters are sent as a pointer and thus should reside in an address
* accessible to HW.
* @msiAddress: The MSI base (in device space) used for asserting the interrupt
* (MSI) associated with the event ring
* @mmioBaseAddress: The address (in device space) of the MMIO structure in
* host space
* @deviceMhiCtrlBaseAddress: Base address of the memory region in the device
* address space where the MHI control data structures are allocated by
* the host, including channel context array, event context array,
* and rings. This value is used for host/device address translation.
* @deviceMhiDataBaseAddress: Base address of the memory region in the device
* address space where the MHI data buffers are allocated by the host.
* This value is used for host/device address translation.
* @firstChannelIndex: First channel ID. Doorbell 0 is mapped to this channel.
* @firstEventRingIndex: First event ring ID. Doorbell 16 is mapped to this
* event ring.
*/
struct IpaHwMhiInitCmdData_t {
u32 msiAddress;
u32 mmioBaseAddress;
u32 deviceMhiCtrlBaseAddress;
u32 deviceMhiDataBaseAddress;
u32 firstChannelIndex;
u32 firstEventRingIndex;
};
/**
* Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL
* command. Parameters are sent as 32b immediate parameters.
* @channelHandle: The channel identifier as allocated by driver.
* value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
* @contexArrayIndex: Unique index for channels, between 0 and 255. The index is
* used as an index in channel context array structures.
* @bamPipeId: The IPA pipe number for pipe dedicated for this channel
* @channelDirection: The direction of the channel as defined in the channel
* type field (CHTYPE) in the channel context data structure.
* @reserved: reserved.
*/
union IpaHwMhiInitChannelCmdData_t {
struct IpaHwMhiInitChannelCmdParams_t {
u32 channelHandle:8;
u32 contexArrayIndex:8;
u32 bamPipeId:6;
u32 channelDirection:2;
u32 reserved:8;
} params;
u32 raw32b;
};
/**
* Structure holding the parameters for IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI command.
* @msiAddress_low: The MSI lower base addr (in device space) used for asserting
* the interrupt (MSI) associated with the event ring.
* @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
* the interrupt (MSI) associated with the event ring.
* @msiMask: Mask indicating number of messages assigned by the host to device
* @msiData: Data Pattern to use when generating the MSI
*/
struct IpaHwMhiMsiCmdData_t {
u32 msiAddress_low;
u32 msiAddress_hi;
u32 msiMask;
u32 msiData;
};
/**
* Structure holding the parameters for
* IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE command.
* Parameters are sent as 32b immediate parameters.
* @requestedState: The requested channel state as was indicated from Host.
* Use IPA_HW_MHI_CHANNEL_STATES to specify the requested state
* @channelHandle: The channel identifier as allocated by driver.
* value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
* @LPTransitionRejected: Indication that low power state transition was
* rejected
* @reserved: reserved
*/
union IpaHwMhiChangeChannelStateCmdData_t {
struct IpaHwMhiChangeChannelStateCmdParams_t {
u32 requestedState:8;
u32 channelHandle:8;
u32 LPTransitionRejected:8;
u32 reserved:8;
} params;
u32 raw32b;
};
/**
* Structure holding the parameters for
* IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE command.
* Parameters are sent as 32b immediate parameters.
* @channelHandle: The channel identifier as allocated by driver.
* value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
* @reserved: reserved
*/
union IpaHwMhiStopEventUpdateData_t {
struct IpaHwMhiStopEventUpdateDataParams_t {
u32 channelHandle:8;
u32 reserved:24;
} params;
u32 raw32b;
};
/**
* Structure holding the parameters for
* IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE response.
* Parameters are sent as 32b immediate parameters.
* @state: The new channel state. In case state is not as requested this is
* error indication for the last command
* @channelHandle: The channel identifier
* @additonalParams: For stop: the number of pending transport descriptors
* currently queued
*/
union IpaHwMhiChangeChannelStateResponseData_t {
struct IpaHwMhiChangeChannelStateResponseParams_t {
u32 state:8;
u32 channelHandle:8;
u32 additonalParams:16;
} params;
u32 raw32b;
};
/**
* Structure holding the parameters for
* IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR event.
* Parameters are sent as 32b immediate parameters.
* @errorType: Type of error - IPA_HW_CHANNEL_ERRORS
* @channelHandle: The channel identifier as allocated by driver.
* value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
* @reserved: reserved
*/
union IpaHwMhiChannelErrorEventData_t {
struct IpaHwMhiChannelErrorEventParams_t {
u32 errorType:8;
u32 channelHandle:8;
u32 reserved:16;
} params;
u32 raw32b;
};
/**
* Structure holding the parameters for
* IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST event.
* Parameters are sent as 32b immediate parameters.
* @channelHandle: The channel identifier as allocated by driver.
* value is within the range 0 to IPA_HW_MAX_CHANNEL_HANDLE
* @reserved: reserved
*/
union IpaHwMhiChannelWakeupEventData_t {
struct IpaHwMhiChannelWakeupEventParams_t {
u32 channelHandle:8;
u32 reserved:24;
} params;
u32 raw32b;
};
/**
* Structure holding the MHI Common statistics
* @numULDLSync: Number of times UL activity triggered due to DL activity
* @numULTimerExpired: Number of times UL Accm Timer expired
* @numChEvCtxWpRead: Number of times the channel event context WP was read
* @reserved: reserved
*/
struct IpaHwStatsMhiCmnInfoData_t {
u32 numULDLSync;
u32 numULTimerExpired;
u32 numChEvCtxWpRead;
u32 reserved;
};
/**
* Structure holding the MHI Channel statistics
* @doorbellInt: The number of doorbell int
* @reProccesed: The number of ring elements processed
* @bamFifoFull: Number of times Bam Fifo got full
* @bamFifoEmpty: Number of times Bam Fifo got empty
* @bamFifoUsageHigh: Number of times Bam fifo usage went above 75%
* @bamFifoUsageLow: Number of times Bam fifo usage went below 25%
* @bamInt: Number of BAM Interrupts
* @ringFull: Number of times Transfer Ring got full
* @ringEmpty: Number of times Transfer Ring got empty
* @ringUsageHigh: Number of times Transfer Ring usage went above 75%
* @ringUsageLow: Number of times Transfer Ring usage went below 25%
* @delayedMsi: Number of times device triggered MSI to host after
* Interrupt Moderation Timer expiry
* @immediateMsi: Number of times device triggered MSI to host immediately
* @thresholdMsi: Number of times device triggered MSI due to max pending
* events threshold reached
* @numSuspend: Number of times channel was suspended
* @numResume: Number of times channel was resumed
* @num_OOB: Number of times we indicated that we are OOB
* @num_OOB_timer_expiry: Number of times we indicated that we are OOB
* after timer expiry
* @num_OOB_moderation_timer_start: Number of times we started timer after
* sending OOB and hitting OOB again before we processed threshold
* number of packets
* @num_db_mode_evt: Number of times we indicated that we are in Doorbell mode
*/
struct IpaHwStatsMhiCnlInfoData_t {
u32 doorbellInt;
u32 reProccesed;
u32 bamFifoFull;
u32 bamFifoEmpty;
u32 bamFifoUsageHigh;
u32 bamFifoUsageLow;
u32 bamInt;
u32 ringFull;
u32 ringEmpty;
u32 ringUsageHigh;
u32 ringUsageLow;
u32 delayedMsi;
u32 immediateMsi;
u32 thresholdMsi;
u32 numSuspend;
u32 numResume;
u32 num_OOB;
u32 num_OOB_timer_expiry;
u32 num_OOB_moderation_timer_start;
u32 num_db_mode_evt;
};
/**
* Structure holding the MHI statistics
* @mhiCmnStats: Stats pertaining to MHI
* @mhiCnlStats: Stats pertaining to each channel
*/
struct IpaHwStatsMhiInfoData_t {
struct IpaHwStatsMhiCmnInfoData_t mhiCmnStats;
struct IpaHwStatsMhiCnlInfoData_t mhiCnlStats[
IPA_HW_MAX_NUMBER_OF_CHANNELS];
};
/**
* Structure holding the MHI Common Config info
* @isDlUlSyncEnabled: Flag to indicate if DL-UL synchronization is enabled
* @UlAccmVal: Out Channel(UL) accumulation time in ms when DL UL Sync is
* enabled
* @ulMsiEventThreshold: Threshold at which HW fires MSI to host for UL events
* @dlMsiEventThreshold: Threshold at which HW fires MSI to host for DL events
*/
struct IpaHwConfigMhiCmnInfoData_t {
u8 isDlUlSyncEnabled;
u8 UlAccmVal;
u8 ulMsiEventThreshold;
u8 dlMsiEventThreshold;
};
/**
* Structure holding the parameters for MSI info data
* @msiAddress_low: The MSI lower base addr (in device space) used for asserting
* the interrupt (MSI) associated with the event ring.
* @msiAddress_hi: The MSI higher base addr (in device space) used for asserting
* the interrupt (MSI) associated with the event ring.
* @msiMask: Mask indicating number of messages assigned by the host to device
* @msiData: Data Pattern to use when generating the MSI
*/
struct IpaHwConfigMhiMsiInfoData_t {
u32 msiAddress_low;
u32 msiAddress_hi;
u32 msiMask;
u32 msiData;
};
/**
* Structure holding the MHI Channel Config info
* @transferRingSize: The Transfer Ring size in terms of Ring Elements
* @transferRingIndex: The Transfer Ring channel number as defined by host
* @eventRingIndex: The Event Ring Index associated with this Transfer Ring
* @bamPipeIndex: The BAM Pipe associated with this channel
* @isOutChannel: Indication for the direction of channel
* @reserved_0: Reserved byte for maintaining 4byte alignment
* @reserved_1: Reserved byte for maintaining 4byte alignment
*/
struct IpaHwConfigMhiCnlInfoData_t {
u16 transferRingSize;
u8 transferRingIndex;
u8 eventRingIndex;
u8 bamPipeIndex;
u8 isOutChannel;
u8 reserved_0;
u8 reserved_1;
};
/**
* Structure holding the MHI Event Config info
* @msiVec: msi vector to invoke MSI interrupt
* @intmodtValue: Interrupt moderation timer (in milliseconds)
* @eventRingSize: The Event Ring size in terms of Ring Elements
* @eventRingIndex: The Event Ring number as defined by host
* @reserved_0: Reserved byte for maintaining 4byte alignment
* @reserved_1: Reserved byte for maintaining 4byte alignment
* @reserved_2: Reserved byte for maintaining 4byte alignment
*/
struct IpaHwConfigMhiEventInfoData_t {
u32 msiVec;
u16 intmodtValue;
u16 eventRingSize;
u8 eventRingIndex;
u8 reserved_0;
u8 reserved_1;
u8 reserved_2;
};
/**
* Structure holding the MHI Config info
* @mhiCmnCfg: Common Config pertaining to MHI
* @mhiMsiCfg: Config pertaining to MSI config
* @mhiCnlCfg: Config pertaining to each channel
* @mhiEvtCfg: Config pertaining to each event Ring
*/
struct IpaHwConfigMhiInfoData_t {
struct IpaHwConfigMhiCmnInfoData_t mhiCmnCfg;
struct IpaHwConfigMhiMsiInfoData_t mhiMsiCfg;
struct IpaHwConfigMhiCnlInfoData_t mhiCnlCfg[
IPA_HW_MAX_NUMBER_OF_CHANNELS];
struct IpaHwConfigMhiEventInfoData_t mhiEvtCfg[
IPA_HW_MAX_NUMBER_OF_EVENTRINGS];
};
struct ipa3_uc_mhi_ctx {
u8 expected_responseOp;
u32 expected_responseParams;
void (*ready_cb)(void);
void (*wakeup_request_cb)(void);
u32 mhi_uc_stats_ofst;
struct IpaHwStatsMhiInfoData_t *mhi_uc_stats_mmio;
};
#define PRINT_COMMON_STATS(x) \
(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCmnStats.x))
#define PRINT_CHANNEL_STATS(ch, x) \
(nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes, \
#x "=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_mmio->mhiCnlStats[ch].x))
struct ipa3_uc_mhi_ctx *ipa3_uc_mhi_ctx;
static int ipa3_uc_mhi_response_hdlr(struct IpaHwSharedMemCommonMapping_t
*uc_sram_mmio, u32 *uc_status)
{
IPADBG("responseOp=%d\n", uc_sram_mmio->responseOp);
if (uc_sram_mmio->responseOp == ipa3_uc_mhi_ctx->expected_responseOp &&
uc_sram_mmio->responseParams ==
ipa3_uc_mhi_ctx->expected_responseParams) {
*uc_status = 0;
return 0;
}
return -EINVAL;
}
static void ipa3_uc_mhi_event_hdlr(struct IpaHwSharedMemCommonMapping_t
*uc_sram_mmio)
{
if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
IPA_HW_2_CPU_EVENT_MHI_CHANNEL_ERROR) {
union IpaHwMhiChannelErrorEventData_t evt;
IPAERR("Channel error\n");
evt.raw32b = uc_sram_mmio->eventParams;
IPAERR("errorType=%d channelHandle=%d reserved=%d\n",
evt.params.errorType, evt.params.channelHandle,
evt.params.reserved);
} else if (ipa3_ctx->uc_ctx.uc_sram_mmio->eventOp ==
IPA_HW_2_CPU_EVENT_MHI_CHANNEL_WAKE_UP_REQUEST) {
union IpaHwMhiChannelWakeupEventData_t evt;
IPADBG("WakeUp channel request\n");
evt.raw32b = uc_sram_mmio->eventParams;
IPADBG("channelHandle=%d reserved=%d\n",
evt.params.channelHandle, evt.params.reserved);
ipa3_uc_mhi_ctx->wakeup_request_cb();
}
}
static void ipa3_uc_mhi_event_log_info_hdlr(
struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
{
struct Ipa3HwEventInfoData_t *evt_info_ptr;
u32 size;
if ((uc_event_top_mmio->protocolMask & (1 << IPA_HW_FEATURE_MHI))
== 0) {
IPAERR("MHI feature missing 0x%x\n",
uc_event_top_mmio->protocolMask);
return;
}
evt_info_ptr = &uc_event_top_mmio->statsInfo;
size = evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.size;
if (size != sizeof(struct IpaHwStatsMhiInfoData_t)) {
IPAERR("mhi stats sz invalid exp=%zu is=%u\n",
sizeof(struct IpaHwStatsMhiInfoData_t),
size);
return;
}
ipa3_uc_mhi_ctx->mhi_uc_stats_ofst =
evt_info_ptr->baseAddrOffset +
evt_info_ptr->featureInfo[IPA_HW_FEATURE_MHI].params.offset;
IPAERR("MHI stats ofst=0x%x\n", ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
if (ipa3_uc_mhi_ctx->mhi_uc_stats_ofst +
sizeof(struct IpaHwStatsMhiInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_mhi_stats 0x%x outside SRAM\n",
ipa3_uc_mhi_ctx->mhi_uc_stats_ofst);
return;
}
ipa3_uc_mhi_ctx->mhi_uc_stats_mmio =
ioremap(ipa3_ctx->ipa_wrapper_base +
ipa3_uc_mhi_ctx->mhi_uc_stats_ofst,
sizeof(struct IpaHwStatsMhiInfoData_t));
if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
IPAERR("fail to ioremap uc mhi stats\n");
return;
}
}
int ipa3_uc_mhi_init(void (*ready_cb)(void), void (*wakeup_request_cb)(void))
{
struct ipa3_uc_hdlrs hdlrs;
if (ipa3_uc_mhi_ctx) {
IPAERR("Already initialized\n");
return -EFAULT;
}
ipa3_uc_mhi_ctx = kzalloc(sizeof(*ipa3_uc_mhi_ctx), GFP_KERNEL);
if (!ipa3_uc_mhi_ctx) {
IPAERR("no mem\n");
return -ENOMEM;
}
ipa3_uc_mhi_ctx->ready_cb = ready_cb;
ipa3_uc_mhi_ctx->wakeup_request_cb = wakeup_request_cb;
memset(&hdlrs, 0, sizeof(hdlrs));
hdlrs.ipa_uc_loaded_hdlr = ipa3_uc_mhi_ctx->ready_cb;
hdlrs.ipa3_uc_response_hdlr = ipa3_uc_mhi_response_hdlr;
hdlrs.ipa_uc_event_hdlr = ipa3_uc_mhi_event_hdlr;
hdlrs.ipa_uc_event_log_info_hdlr = ipa3_uc_mhi_event_log_info_hdlr;
ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &hdlrs);
IPADBG("Done\n");
return 0;
}
void ipa3_uc_mhi_cleanup(void)
{
struct ipa3_uc_hdlrs null_hdlrs = { 0 };
IPADBG("Enter\n");
if (!ipa3_uc_mhi_ctx) {
IPAERR("ipa3_uc_mhi_ctx is not initialized\n");
return;
}
ipa3_uc_register_handlers(IPA_HW_FEATURE_MHI, &null_hdlrs);
kfree(ipa3_uc_mhi_ctx);
ipa3_uc_mhi_ctx = NULL;
IPADBG("Done\n");
}
int ipa3_uc_mhi_init_engine(struct ipa_mhi_msi_info *msi, u32 mmio_addr,
u32 host_ctrl_addr, u32 host_data_addr, u32 first_ch_idx,
u32 first_evt_idx)
{
int res;
struct ipa_mem_buffer mem;
struct IpaHwMhiInitCmdData_t *init_cmd_data;
struct IpaHwMhiMsiCmdData_t *msi_cmd;
if (!ipa3_uc_mhi_ctx) {
IPAERR("Not initialized\n");
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa3_uc_update_hw_flags(0);
if (res) {
IPAERR("ipa3_uc_update_hw_flags failed %d\n", res);
goto disable_clks;
}
mem.size = sizeof(*init_cmd_data);
mem.base = dma_zalloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
GFP_KERNEL);
if (!mem.base) {
IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
res = -ENOMEM;
goto disable_clks;
}
init_cmd_data = (struct IpaHwMhiInitCmdData_t *)mem.base;
init_cmd_data->msiAddress = msi->addr_low;
init_cmd_data->mmioBaseAddress = mmio_addr;
init_cmd_data->deviceMhiCtrlBaseAddress = host_ctrl_addr;
init_cmd_data->deviceMhiDataBaseAddress = host_data_addr;
init_cmd_data->firstChannelIndex = first_ch_idx;
init_cmd_data->firstEventRingIndex = first_evt_idx;
res = ipa3_uc_send_cmd((u32)mem.phys_base, IPA_CPU_2_HW_CMD_MHI_INIT, 0,
false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
mem.phys_base);
goto disable_clks;
}
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
mem.size = sizeof(*msi_cmd);
mem.base = dma_alloc_coherent(ipa3_ctx->pdev, mem.size, &mem.phys_base,
GFP_KERNEL);
if (!mem.base) {
IPAERR("fail to alloc DMA buff of size %d\n", mem.size);
res = -ENOMEM;
goto disable_clks;
}
msi_cmd = (struct IpaHwMhiMsiCmdData_t *)mem.base;
msi_cmd->msiAddress_hi = msi->addr_hi;
msi_cmd->msiAddress_low = msi->addr_low;
msi_cmd->msiData = msi->data;
msi_cmd->msiMask = msi->mask;
res = ipa3_uc_send_cmd((u32)mem.phys_base,
IPA_CPU_2_HW_CMD_MHI_UPDATE_MSI, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base,
mem.phys_base);
goto disable_clks;
}
dma_free_coherent(ipa3_ctx->pdev, mem.size, mem.base, mem.phys_base);
res = 0;
disable_clks:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
int ipa3_uc_mhi_init_channel(int ipa_ep_idx, int channelHandle,
int contexArrayIndex, int channelDirection)
{
int res;
union IpaHwMhiInitChannelCmdData_t init_cmd;
union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
if (!ipa3_uc_mhi_ctx) {
IPAERR("Not initialized\n");
return -EFAULT;
}
if (ipa_ep_idx < 0 || ipa_ep_idx >= ipa3_ctx->ipa_num_pipes) {
IPAERR("Invalid ipa_ep_idx.\n");
return -EINVAL;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
uc_rsp.params.channelHandle = channelHandle;
ipa3_uc_mhi_ctx->expected_responseOp =
IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
memset(&init_cmd, 0, sizeof(init_cmd));
init_cmd.params.channelHandle = channelHandle;
init_cmd.params.contexArrayIndex = contexArrayIndex;
init_cmd.params.bamPipeId = ipa_ep_idx;
init_cmd.params.channelDirection = channelDirection;
res = ipa3_uc_send_cmd(init_cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_INIT_CHANNEL, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
goto disable_clks;
}
res = 0;
disable_clks:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
int ipa3_uc_mhi_reset_channel(int channelHandle)
{
union IpaHwMhiChangeChannelStateCmdData_t cmd;
union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
int res;
if (!ipa3_uc_mhi_ctx) {
IPAERR("Not initialized\n");
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
uc_rsp.params.channelHandle = channelHandle;
ipa3_uc_mhi_ctx->expected_responseOp =
IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
memset(&cmd, 0, sizeof(cmd));
cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_DISABLE;
cmd.params.channelHandle = channelHandle;
res = ipa3_uc_send_cmd(cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
goto disable_clks;
}
res = 0;
disable_clks:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
int ipa3_uc_mhi_suspend_channel(int channelHandle)
{
union IpaHwMhiChangeChannelStateCmdData_t cmd;
union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
int res;
if (!ipa3_uc_mhi_ctx) {
IPAERR("Not initialized\n");
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
uc_rsp.params.channelHandle = channelHandle;
ipa3_uc_mhi_ctx->expected_responseOp =
IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
memset(&cmd, 0, sizeof(cmd));
cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_SUSPEND;
cmd.params.channelHandle = channelHandle;
res = ipa3_uc_send_cmd(cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
goto disable_clks;
}
res = 0;
disable_clks:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
int ipa3_uc_mhi_resume_channel(int channelHandle, bool LPTransitionRejected)
{
union IpaHwMhiChangeChannelStateCmdData_t cmd;
union IpaHwMhiChangeChannelStateResponseData_t uc_rsp;
int res;
if (!ipa3_uc_mhi_ctx) {
IPAERR("Not initialized\n");
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&uc_rsp, 0, sizeof(uc_rsp));
uc_rsp.params.state = IPA_HW_MHI_CHANNEL_STATE_RUN;
uc_rsp.params.channelHandle = channelHandle;
ipa3_uc_mhi_ctx->expected_responseOp =
IPA_HW_2_CPU_RESPONSE_MHI_CHANGE_CHANNEL_STATE;
ipa3_uc_mhi_ctx->expected_responseParams = uc_rsp.raw32b;
memset(&cmd, 0, sizeof(cmd));
cmd.params.requestedState = IPA_HW_MHI_CHANNEL_STATE_RUN;
cmd.params.channelHandle = channelHandle;
cmd.params.LPTransitionRejected = LPTransitionRejected;
res = ipa3_uc_send_cmd(cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_CHANGE_CHANNEL_STATE, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
goto disable_clks;
}
res = 0;
disable_clks:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
int ipa3_uc_mhi_stop_event_update_channel(int channelHandle)
{
union IpaHwMhiStopEventUpdateData_t cmd;
int res;
if (!ipa3_uc_mhi_ctx) {
IPAERR("Not initialized\n");
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
memset(&cmd, 0, sizeof(cmd));
cmd.params.channelHandle = channelHandle;
ipa3_uc_mhi_ctx->expected_responseOp =
IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE;
ipa3_uc_mhi_ctx->expected_responseParams = cmd.raw32b;
res = ipa3_uc_send_cmd(cmd.raw32b,
IPA_CPU_2_HW_CMD_MHI_STOP_EVENT_UPDATE, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
goto disable_clks;
}
res = 0;
disable_clks:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
int ipa3_uc_mhi_send_dl_ul_sync_info(union IpaHwMhiDlUlSyncCmdData_t *cmd)
{
int res;
if (!ipa3_uc_mhi_ctx) {
IPAERR("Not initialized\n");
return -EFAULT;
}
IPADBG("isDlUlSyncEnabled=0x%x UlAccmVal=0x%x\n",
cmd->params.isDlUlSyncEnabled, cmd->params.UlAccmVal);
IPADBG("ulMsiEventThreshold=0x%x dlMsiEventThreshold=0x%x\n",
cmd->params.ulMsiEventThreshold,
cmd->params.dlMsiEventThreshold);
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
res = ipa3_uc_send_cmd(cmd->raw32b,
IPA_CPU_2_HW_CMD_MHI_DL_UL_SYNC_INFO, 0, false, HZ);
if (res) {
IPAERR("ipa3_uc_send_cmd failed %d\n", res);
goto disable_clks;
}
res = 0;
disable_clks:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return res;
}
int ipa3_uc_mhi_print_stats(char *dbg_buff, int size)
{
int nBytes = 0;
int i;
if (!ipa3_uc_mhi_ctx->mhi_uc_stats_mmio) {
IPAERR("MHI uc stats is not valid\n");
return 0;
}
nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
"Common Stats:\n");
PRINT_COMMON_STATS(numULDLSync);
PRINT_COMMON_STATS(numULTimerExpired);
PRINT_COMMON_STATS(numChEvCtxWpRead);
for (i = 0; i < IPA_HW_MAX_NUMBER_OF_CHANNELS; i++) {
nBytes += scnprintf(&dbg_buff[nBytes], size - nBytes,
"Channel %d Stats:\n", i);
PRINT_CHANNEL_STATS(i, doorbellInt);
PRINT_CHANNEL_STATS(i, reProccesed);
PRINT_CHANNEL_STATS(i, bamFifoFull);
PRINT_CHANNEL_STATS(i, bamFifoEmpty);
PRINT_CHANNEL_STATS(i, bamFifoUsageHigh);
PRINT_CHANNEL_STATS(i, bamFifoUsageLow);
PRINT_CHANNEL_STATS(i, bamInt);
PRINT_CHANNEL_STATS(i, ringFull);
PRINT_CHANNEL_STATS(i, ringEmpty);
PRINT_CHANNEL_STATS(i, ringUsageHigh);
PRINT_CHANNEL_STATS(i, ringUsageLow);
PRINT_CHANNEL_STATS(i, delayedMsi);
PRINT_CHANNEL_STATS(i, immediateMsi);
PRINT_CHANNEL_STATS(i, thresholdMsi);
PRINT_CHANNEL_STATS(i, numSuspend);
PRINT_CHANNEL_STATS(i, numResume);
PRINT_CHANNEL_STATS(i, num_OOB);
PRINT_CHANNEL_STATS(i, num_OOB_timer_expiry);
PRINT_CHANNEL_STATS(i, num_OOB_moderation_timer_start);
PRINT_CHANNEL_STATS(i, num_db_mode_evt);
}
return nBytes;
}
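
To summarize the intended call order of this uC interface, a bring-up sketch inferred from the functions above (illustrative, not code from this commit; the callbacks, handles, and direction value are assumptions).

/* Hypothetical MHI uC bring-up sequence (illustrative). */
static void my_ready_cb(void) { }
static void my_wakeup_cb(void) { }

static int example_mhi_bringup(struct ipa_mhi_msi_info *msi,
	u32 mmio_addr, u32 ctrl_addr, u32 data_addr, int ep_idx)
{
	int ret;

	ret = ipa3_uc_mhi_init(my_ready_cb, my_wakeup_cb);
	if (ret)
		return ret;
	ret = ipa3_uc_mhi_init_engine(msi, mmio_addr, ctrl_addr,
		data_addr, 0, 0);
	if (ret)
		goto cleanup;
	/* channel handle 0, context index 0, direction per CHTYPE */
	ret = ipa3_uc_mhi_init_channel(ep_idx, 0, 0, 1);
	if (ret)
		goto cleanup;
	return 0;
cleanup:
	ipa3_uc_mhi_cleanup();
	return ret;
}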

632
ipa/ipa_v3/ipa_uc_ntn.c Normal file
View file

@@ -0,0 +1,632 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#include "ipa_i.h"
#define IPA_UC_NTN_DB_PA_TX 0x79620DC
#define IPA_UC_NTN_DB_PA_RX 0x79620D8
static void ipa3_uc_ntn_event_log_info_handler(
struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
{
struct Ipa3HwEventInfoData_t *statsPtr = &uc_event_top_mmio->statsInfo;
if ((uc_event_top_mmio->protocolMask &
(1 << IPA_HW_PROTOCOL_ETH)) == 0) {
IPAERR("NTN protocol missing 0x%x\n",
uc_event_top_mmio->protocolMask);
return;
}
if (statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size !=
sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
sizeof(struct Ipa3HwStatsNTNInfoData_t),
statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size);
return;
}
ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst =
uc_event_top_mmio->statsInfo.baseAddrOffset +
statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.offset;
IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
ipa3_ctx->ctrl->ipa_reg_base_ofst +
ipahal_get_reg_n_ofst(IPA_SW_AREA_RAM_DIRECT_ACCESS_n, 0) +
ipa3_ctx->smem_sz) {
IPAERR("uc_ntn_stats 0x%x outside SRAM\n",
ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
return;
}
ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio =
ioremap(ipa3_ctx->ipa_wrapper_base +
ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst,
sizeof(struct Ipa3HwStatsNTNInfoData_t));
if (!ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
IPAERR("fail to ioremap uc ntn stats\n");
return;
}
}
/**
* ipa3_get_ntn_stats() - Query NTN statistics from uC
* @stats: [inout] stats blob from client populated by driver
*
* Returns: 0 on success, negative on failure
*
* @note Cannot be called from atomic context
*
*/
int ipa3_get_ntn_stats(struct Ipa3HwStatsNTNInfoData_t *stats)
{
#define TX_STATS(y) stats->tx_ch_stats[0].y = \
ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->tx_ch_stats[0].y
#define RX_STATS(y) stats->rx_ch_stats[0].y = \
ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio->rx_ch_stats[0].y
if (unlikely(!ipa3_ctx)) {
IPAERR("IPA driver was not initialized\n");
return -EINVAL;
}
if (!stats || !ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio) {
IPAERR("bad parms stats=%pK ntn_stats=%pK\n",
stats,
ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_mmio);
return -EINVAL;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
TX_STATS(num_pkts_processed);
TX_STATS(ring_stats.ringFull);
TX_STATS(ring_stats.ringEmpty);
TX_STATS(ring_stats.ringUsageHigh);
TX_STATS(ring_stats.ringUsageLow);
TX_STATS(ring_stats.RingUtilCount);
TX_STATS(gsi_stats.bamFifoFull);
TX_STATS(gsi_stats.bamFifoEmpty);
TX_STATS(gsi_stats.bamFifoUsageHigh);
TX_STATS(gsi_stats.bamFifoUsageLow);
TX_STATS(gsi_stats.bamUtilCount);
TX_STATS(num_db);
TX_STATS(num_qmb_int_handled);
TX_STATS(ipa_pipe_number);
RX_STATS(num_pkts_processed);
RX_STATS(ring_stats.ringFull);
RX_STATS(ring_stats.ringEmpty);
RX_STATS(ring_stats.ringUsageHigh);
RX_STATS(ring_stats.ringUsageLow);
RX_STATS(ring_stats.RingUtilCount);
RX_STATS(gsi_stats.bamFifoFull);
RX_STATS(gsi_stats.bamFifoEmpty);
RX_STATS(gsi_stats.bamFifoUsageHigh);
RX_STATS(gsi_stats.bamFifoUsageLow);
RX_STATS(gsi_stats.bamUtilCount);
RX_STATS(num_db);
RX_STATS(num_qmb_int_handled);
RX_STATS(ipa_pipe_number);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
int ipa3_ntn_uc_reg_rdyCB(void (*ipa_ready_cb)(void *), void *user_data)
{
int ret;
if (!ipa3_ctx) {
IPAERR("IPA ctx is null\n");
return -ENXIO;
}
ret = ipa3_uc_state_check();
if (ret) {
ipa3_ctx->uc_ntn_ctx.uc_ready_cb = ipa_ready_cb;
ipa3_ctx->uc_ntn_ctx.priv = user_data;
return 0;
}
return -EEXIST;
}
void ipa3_ntn_uc_dereg_rdyCB(void)
{
ipa3_ctx->uc_ntn_ctx.uc_ready_cb = NULL;
ipa3_ctx->uc_ntn_ctx.priv = NULL;
}
static void ipa3_uc_ntn_loaded_handler(void)
{
if (!ipa3_ctx) {
IPAERR("IPA ctx is null\n");
return;
}
if (ipa3_ctx->uc_ntn_ctx.uc_ready_cb) {
ipa3_ctx->uc_ntn_ctx.uc_ready_cb(
ipa3_ctx->uc_ntn_ctx.priv);
ipa3_ctx->uc_ntn_ctx.uc_ready_cb =
NULL;
ipa3_ctx->uc_ntn_ctx.priv = NULL;
}
}
int ipa3_ntn_init(void)
{
struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
ipa3_uc_ntn_event_log_info_handler;
uc_ntn_cbs.ipa_uc_loaded_hdlr =
ipa3_uc_ntn_loaded_handler;
ipa3_uc_register_handlers(IPA_HW_FEATURE_NTN, &uc_ntn_cbs);
return 0;
}
static int ipa3_uc_send_ntn_setup_pipe_cmd(
struct ipa_ntn_setup_info *ntn_info, u8 dir)
{
int ipa_ep_idx;
int result = 0;
struct ipa_mem_buffer cmd;
struct Ipa3HwNtnSetUpCmdData_t *Ntn_params;
struct IpaHwOffloadSetUpCmdData_t *cmd_data;
struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data_v4_0;
if (ntn_info == NULL) {
IPAERR("invalid input\n");
return -EINVAL;
}
ipa_ep_idx = ipa_get_ep_mapping(ntn_info->client);
if (ipa_ep_idx == -1) {
IPAERR("fail to get ep idx.\n");
return -EFAULT;
}
IPADBG("client=%d ep=%d\n", ntn_info->client, ipa_ep_idx);
IPADBG("ring_base_pa = 0x%pa\n",
&ntn_info->ring_base_pa);
IPADBG("ring_base_iova = 0x%pa\n",
&ntn_info->ring_base_iova);
IPADBG("ntn_ring_size = %d\n", ntn_info->ntn_ring_size);
IPADBG("buff_pool_base_pa = 0x%pa\n", &ntn_info->buff_pool_base_pa);
IPADBG("buff_pool_base_iova = 0x%pa\n", &ntn_info->buff_pool_base_iova);
IPADBG("num_buffers = %d\n", ntn_info->num_buffers);
IPADBG("data_buff_size = %d\n", ntn_info->data_buff_size);
IPADBG("tail_ptr_base_pa = 0x%pa\n", &ntn_info->ntn_reg_base_ptr_pa);
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
cmd.size = sizeof(*cmd_data_v4_0);
else
cmd.size = sizeof(*cmd_data);
cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
&cmd.phys_base, GFP_KERNEL);
if (cmd.base == NULL) {
IPAERR("fail to get DMA memory.\n");
return -ENOMEM;
}
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
cmd_data_v4_0 = (struct IpaHwOffloadSetUpCmdData_t_v4_0 *)
cmd.base;
cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
Ntn_params = &cmd_data_v4_0->SetupCh_params.NtnSetupCh_params;
} else {
cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
}
if (ntn_info->smmu_enabled) {
Ntn_params->ring_base_pa = (u32)ntn_info->ring_base_iova;
Ntn_params->buff_pool_base_pa =
(u32)ntn_info->buff_pool_base_iova;
} else {
Ntn_params->ring_base_pa = ntn_info->ring_base_pa;
Ntn_params->buff_pool_base_pa = ntn_info->buff_pool_base_pa;
}
Ntn_params->ntn_ring_size = ntn_info->ntn_ring_size;
Ntn_params->num_buffers = ntn_info->num_buffers;
Ntn_params->ntn_reg_base_ptr_pa = ntn_info->ntn_reg_base_ptr_pa;
Ntn_params->data_buff_size = ntn_info->data_buff_size;
Ntn_params->ipa_pipe_number = ipa_ep_idx;
Ntn_params->dir = dir;
result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
false, 10*HZ);
if (result)
result = -EFAULT;
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
return result;
}
static int ipa3_smmu_map_uc_ntn_pipes(struct ipa_ntn_setup_info *params,
bool map)
{
struct iommu_domain *smmu_domain;
int result;
int i;
u64 iova;
phys_addr_t pa;
u64 iova_p;
phys_addr_t pa_p;
u32 size_p;
if (params->data_buff_size > PAGE_SIZE) {
IPAERR("invalid data buff size\n");
return -EINVAL;
}
result = ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa,
PAGE_SIZE), map, IPA_SMMU_CB_UC);
if (result) {
IPAERR("failed to %s uC regs %d\n",
map ? "map" : "unmap", result);
goto fail;
}
if (params->smmu_enabled) {
IPADBG("smmu is enabled on EMAC\n");
result = ipa3_smmu_map_peer_buff((u64)params->ring_base_iova,
params->ntn_ring_size, map, params->ring_base_sgt,
IPA_SMMU_CB_UC);
if (result) {
IPAERR("failed to %s ntn ring %d\n",
map ? "map" : "unmap", result);
goto fail_map_ring;
}
result = ipa3_smmu_map_peer_buff(
(u64)params->buff_pool_base_iova,
params->num_buffers * 4, map,
params->buff_pool_base_sgt, IPA_SMMU_CB_UC);
if (result) {
IPAERR("failed to %s pool buffs %d\n",
map ? "map" : "unmap", result);
goto fail_map_buffer_smmu_enabled;
}
} else {
IPADBG("smmu is disabled on EMAC\n");
result = ipa3_smmu_map_peer_buff((u64)params->ring_base_pa,
params->ntn_ring_size, map, NULL, IPA_SMMU_CB_UC);
if (result) {
IPAERR("failed to %s ntn ring %d\n",
map ? "map" : "unmap", result);
goto fail_map_ring;
}
result = ipa3_smmu_map_peer_buff(params->buff_pool_base_pa,
params->num_buffers * 4, map, NULL, IPA_SMMU_CB_UC);
if (result) {
IPAERR("failed to %s pool buffs %d\n",
map ? "map" : "unmap", result);
goto fail_map_buffer_smmu_disabled;
}
}
if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
IPADBG("AP SMMU is set to s1 bypass\n");
return 0;
}
smmu_domain = ipa3_get_smmu_domain();
if (!smmu_domain) {
IPAERR("invalid smmu domain\n");
return -EINVAL;
}
for (i = 0; i < params->num_buffers; i++) {
iova = (u64)params->data_buff_list[i].iova;
pa = (phys_addr_t)params->data_buff_list[i].pa;
IPA_SMMU_ROUND_TO_PAGE(iova, pa, params->data_buff_size, iova_p,
pa_p, size_p);
IPADBG("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
"unmapping", iova_p, &pa_p, size_p);
if (map) {
result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
size_p, IOMMU_READ | IOMMU_WRITE);
if (result)
IPAERR("Fail to map 0x%llx\n", iova);
} else {
result = iommu_unmap(smmu_domain, iova_p, size_p);
if (result != params->data_buff_size)
IPAERR("Fail to unmap 0x%llx\n", iova);
}
if (result) {
if (params->smmu_enabled)
goto fail_map_data_buff_smmu_enabled;
else
goto fail_map_data_buff_smmu_disabled;
}
}
return 0;
fail_map_data_buff_smmu_enabled:
ipa3_smmu_map_peer_buff((u64)params->buff_pool_base_iova,
params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC);
goto fail_map_buffer_smmu_enabled;
fail_map_data_buff_smmu_disabled:
ipa3_smmu_map_peer_buff(params->buff_pool_base_pa,
params->num_buffers * 4, !map, NULL, IPA_SMMU_CB_UC);
goto fail_map_buffer_smmu_disabled;
fail_map_buffer_smmu_enabled:
ipa3_smmu_map_peer_buff((u64)params->ring_base_iova,
params->ntn_ring_size, !map, params->ring_base_sgt,
IPA_SMMU_CB_UC);
goto fail_map_ring;
fail_map_buffer_smmu_disabled:
ipa3_smmu_map_peer_buff((u64)params->ring_base_pa,
params->ntn_ring_size, !map, NULL, IPA_SMMU_CB_UC);
fail_map_ring:
ipa3_smmu_map_peer_reg(rounddown(params->ntn_reg_base_ptr_pa,
PAGE_SIZE), !map, IPA_SMMU_CB_UC);
fail:
return result;
}
/**
* ipa3_setup_uc_ntn_pipes() - setup uc offload pipes
*/
int ipa3_setup_uc_ntn_pipes(struct ipa_ntn_conn_in_params *in,
ipa_notify_cb notify, void *priv, u8 hdr_len,
struct ipa_ntn_conn_out_params *outp)
{
struct ipa3_ep_context *ep_ul;
struct ipa3_ep_context *ep_dl;
int ipa_ep_idx_ul;
int ipa_ep_idx_dl;
int result = 0;
if (in == NULL) {
IPAERR("invalid input\n");
return -EINVAL;
}
ipa_ep_idx_ul = ipa_get_ep_mapping(in->ul.client);
if (ipa_ep_idx_ul == IPA_EP_NOT_ALLOCATED ||
ipa_ep_idx_ul >= IPA3_MAX_NUM_PIPES) {
IPAERR("fail to alloc UL EP ipa_ep_idx_ul=%d\n",
ipa_ep_idx_ul);
return -EFAULT;
}
ipa_ep_idx_dl = ipa_get_ep_mapping(in->dl.client);
if (ipa_ep_idx_dl == IPA_EP_NOT_ALLOCATED ||
ipa_ep_idx_dl >= IPA3_MAX_NUM_PIPES) {
IPAERR("fail to alloc DL EP ipa_ep_idx_dl=%d\n",
ipa_ep_idx_dl);
return -EFAULT;
}
ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
if (ep_ul->valid || ep_dl->valid) {
IPAERR("EP already allocated ul:%d dl:%d\n",
ep_ul->valid, ep_dl->valid);
return -EFAULT;
}
memset(ep_ul, 0, offsetof(struct ipa3_ep_context, sys));
memset(ep_dl, 0, offsetof(struct ipa3_ep_context, sys));
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
/* setup ul ep cfg */
ep_ul->valid = 1;
ep_ul->client = in->ul.client;
ep_ul->client_notify = notify;
ep_ul->priv = priv;
memset(&ep_ul->cfg, 0, sizeof(ep_ul->cfg));
ep_ul->cfg.nat.nat_en = IPA_SRC_NAT;
ep_ul->cfg.hdr.hdr_len = hdr_len;
ep_ul->cfg.mode.mode = IPA_BASIC;
if (ipa3_cfg_ep(ipa_ep_idx_ul, &ep_ul->cfg)) {
IPAERR("fail to setup ul pipe cfg\n");
result = -EFAULT;
goto fail;
}
result = ipa3_smmu_map_uc_ntn_pipes(&in->ul, true);
if (result) {
IPAERR("failed to map SMMU for UL %d\n", result);
goto fail;
}
result = ipa3_enable_data_path(ipa_ep_idx_ul);
if (result) {
IPAERR("Enable data path failed res=%d pipe=%d.\n", result,
ipa_ep_idx_ul);
result = -EFAULT;
goto fail_smmu_unmap_ul;
}
if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->ul, IPA_NTN_RX_DIR)) {
IPAERR("fail to send cmd to uc for ul pipe\n");
result = -EFAULT;
goto fail_disable_dp_ul;
}
ipa3_install_dflt_flt_rules(ipa_ep_idx_ul);
outp->ul_uc_db_pa = IPA_UC_NTN_DB_PA_RX;
ep_ul->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
IPADBG("client %d (ep: %d) connected\n", in->ul.client,
ipa_ep_idx_ul);
/* setup dl ep cfg */
ep_dl->valid = 1;
ep_dl->client = in->dl.client;
memset(&ep_dl->cfg, 0, sizeof(ep_dl->cfg));
ep_dl->cfg.nat.nat_en = IPA_BYPASS_NAT;
ep_dl->cfg.hdr.hdr_len = hdr_len;
ep_dl->cfg.mode.mode = IPA_BASIC;
if (ipa3_cfg_ep(ipa_ep_idx_dl, &ep_dl->cfg)) {
IPAERR("fail to setup dl pipe cfg\n");
result = -EFAULT;
goto fail_disable_dp_ul;
}
result = ipa3_smmu_map_uc_ntn_pipes(&in->dl, true);
if (result) {
IPAERR("failed to map SMMU for DL %d\n", result);
goto fail_disable_dp_ul;
}
result = ipa3_enable_data_path(ipa_ep_idx_dl);
if (result) {
IPAERR("Enable data path failed res=%d pipe=%d.\n", result,
ipa_ep_idx_dl);
result = -EFAULT;
goto fail_smmu_unmap_dl;
}
if (ipa3_uc_send_ntn_setup_pipe_cmd(&in->dl, IPA_NTN_TX_DIR)) {
IPAERR("fail to send cmd to uc for dl pipe\n");
result = -EFAULT;
goto fail_disable_dp_dl;
}
outp->dl_uc_db_pa = IPA_UC_NTN_DB_PA_TX;
ep_dl->uc_offload_state |= IPA_UC_OFFLOAD_CONNECTED;
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("client %d (ep: %d) connected\n", in->dl.client,
ipa_ep_idx_dl);
return 0;
fail_disable_dp_dl:
ipa3_disable_data_path(ipa_ep_idx_dl);
fail_smmu_unmap_dl:
ipa3_smmu_map_uc_ntn_pipes(&in->dl, false);
fail_disable_dp_ul:
ipa3_disable_data_path(ipa_ep_idx_ul);
fail_smmu_unmap_ul:
ipa3_smmu_map_uc_ntn_pipes(&in->ul, false);
fail:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}
/**
* ipa3_tear_down_uc_offload_pipes() - tear down uc offload pipes
*/
int ipa3_tear_down_uc_offload_pipes(int ipa_ep_idx_ul,
int ipa_ep_idx_dl, struct ipa_ntn_conn_in_params *params)
{
struct ipa_mem_buffer cmd;
struct ipa3_ep_context *ep_ul, *ep_dl;
struct IpaHwOffloadCommonChCmdData_t *cmd_data;
struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data_v4_0;
union Ipa3HwNtnCommonChCmdData_t *tear;
int result = 0;
IPADBG("ep_ul = %d\n", ipa_ep_idx_ul);
IPADBG("ep_dl = %d\n", ipa_ep_idx_dl);
ep_ul = &ipa3_ctx->ep[ipa_ep_idx_ul];
ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
if (ep_ul->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED ||
ep_dl->uc_offload_state != IPA_UC_OFFLOAD_CONNECTED) {
IPAERR("channel bad state: ul %d dl %d\n",
ep_ul->uc_offload_state, ep_dl->uc_offload_state);
return -EFAULT;
}
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0)
cmd.size = sizeof(*cmd_data_v4_0);
else
cmd.size = sizeof(*cmd_data);
cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
&cmd.phys_base, GFP_KERNEL);
if (cmd.base == NULL) {
IPAERR("fail to get DMA memory.\n");
return -ENOMEM;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
cmd_data_v4_0 = (struct IpaHwOffloadCommonChCmdData_t_v4_0 *)
cmd.base;
cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
tear = &cmd_data_v4_0->CommonCh_params.NtnCommonCh_params;
} else {
cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
}
/* teardown the DL pipe */
ipa3_disable_data_path(ipa_ep_idx_dl);
/*
* Reset ep before sending cmd otherwise disconnect
* during data transfer will result into
* enormous suspend interrupts
*/
memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
IPADBG("dl client (ep: %d) disconnected\n", ipa_ep_idx_dl);
tear->params.ipa_pipe_number = ipa_ep_idx_dl;
result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
false, 10*HZ);
if (result) {
IPAERR("fail to tear down dl pipe\n");
result = -EFAULT;
goto fail;
}
/* unmap the DL pipe */
result = ipa3_smmu_map_uc_ntn_pipes(&params->dl, false);
if (result) {
IPAERR("failed to unmap SMMU for DL %d\n", result);
goto fail;
}
/* teardown the UL pipe */
ipa3_disable_data_path(ipa_ep_idx_ul);
tear->params.ipa_pipe_number = ipa_ep_idx_ul;
result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN,
IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
false, 10*HZ);
if (result) {
IPAERR("fail to tear down ul pipe\n");
result = -EFAULT;
goto fail;
}
/* unmap the UL pipe */
result = ipa3_smmu_map_uc_ntn_pipes(&params->ul, false);
if (result) {
IPAERR("failed to unmap SMMU for UL %d\n", result);
goto fail;
}
ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
fail:
dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}

682
ipa/ipa_v3/ipa_uc_offload_i.h (new file)

@@ -0,0 +1,682 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPA_UC_OFFLOAD_I_H_
#define _IPA_UC_OFFLOAD_I_H_
#include <linux/ipa.h>
#include "ipa_i.h"
/*
* Neutrino protocol related data structures
*/
#define IPA_UC_MAX_NTN_TX_CHANNELS 1
#define IPA_UC_MAX_NTN_RX_CHANNELS 1
#define IPA_NTN_TX_DIR 1
#define IPA_NTN_RX_DIR 2
#define MAX_CH_STATS_SUPPORTED 5
#define DIR_CONSUMER 0
#define DIR_PRODUCER 1
#define MAX_AQC_CHANNELS 2
#define MAX_11AD_CHANNELS 5
#define MAX_WDI2_CHANNELS 2
#define MAX_WDI3_CHANNELS 2
#define MAX_MHIP_CHANNELS 4
#define MAX_USB_CHANNELS 2
/**
* @brief Enum value determined based on the feature it
* corresponds to
* +----------------+----------------+
* | 3 bits | 5 bits |
* +----------------+----------------+
* | HW_FEATURE | OPCODE |
* +----------------+----------------+
*
*/
#define FEATURE_ENUM_VAL(feature, opcode) ((feature << 5) | opcode)
#define EXTRACT_UC_FEATURE(value) (value >> 5)
#define IPA_HW_NUM_FEATURES 0x8
/**
* enum ipa3_hw_features - Values that represent the features supported
* in IPA HW
* @IPA_HW_FEATURE_COMMON : Feature related to common operation of IPA HW
* @IPA_HW_FEATURE_MHI : Feature related to MHI operation in IPA HW
* @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
* @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
* @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
* @IPA_HW_FEATURE_OFFLOAD : Feature related to several protocols' operation in
* IPA HW. Use the protocol field to
* determine which (e.g. IPA_HW_PROTOCOL_11ad).
*/
enum ipa3_hw_features {
IPA_HW_FEATURE_COMMON = 0x0,
IPA_HW_FEATURE_MHI = 0x1,
IPA_HW_FEATURE_POWER_COLLAPSE = 0x2,
IPA_HW_FEATURE_WDI = 0x3,
IPA_HW_FEATURE_ZIP = 0x4,
IPA_HW_FEATURE_NTN = 0x5,
IPA_HW_FEATURE_OFFLOAD = 0x6,
IPA_HW_FEATURE_MAX = IPA_HW_NUM_FEATURES
};
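Expanding the packing macros by hand for one value defined further down in this header: FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2), the value of IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN, evaluates to (0x6 << 5) | 2 = 0xC2, and EXTRACT_UC_FEATURE(0xC2) recovers 0x6. As an illustrative comment:

/*
 * Worked example of the 3-bit feature / 5-bit opcode packing:
 *   FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2) == (0x6 << 5) | 2 == 0xC2
 *   EXTRACT_UC_FEATURE(0xC2) == 0x6 == IPA_HW_FEATURE_OFFLOAD
 */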
/**
* enum ipa4_hw_protocol - Values that represent the protocols supported
* in IPA HW when using the IPA_HW_FEATURE_OFFLOAD feature.
* @IPA_HW_PROTOCOL_COMMON : protocol related to common operation of IPA HW
* @IPA_HW_PROTOCOL_AQC : protocol related to AQC operation in IPA HW
* @IPA_HW_PROTOCOL_11ad: protocol related to 11ad operation in IPA HW
* @IPA_HW_PROTOCOL_WDI : protocol related to WDI operation in IPA HW
* @IPA_HW_PROTOCOL_WDI3: protocol related to WDI3 operation in IPA HW
* @IPA_HW_PROTOCOL_ETH : protocol related to ETH operation in IPA HW
* @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW
* @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW
*/
enum ipa4_hw_protocol {
IPA_HW_PROTOCOL_COMMON = 0x0,
IPA_HW_PROTOCOL_AQC = 0x1,
IPA_HW_PROTOCOL_11ad = 0x2,
IPA_HW_PROTOCOL_WDI = 0x3,
IPA_HW_PROTOCOL_WDI3 = 0x4,
IPA_HW_PROTOCOL_ETH = 0x5,
IPA_HW_PROTOCOL_MHIP = 0x6,
IPA_HW_PROTOCOL_USB = 0x7,
IPA_HW_PROTOCOL_MAX
};
/**
* enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
* @IPA_HW_2_CPU_EVENT_NO_OP : No event present
* @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
* device
* @IPA_HW_2_CPU_EVENT_LOG_INFO : Event providing logging specific information
*/
enum ipa3_hw_2_cpu_events {
IPA_HW_2_CPU_EVENT_NO_OP =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
IPA_HW_2_CPU_EVENT_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
IPA_HW_2_CPU_EVENT_LOG_INFO =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
};
/**
* enum ipa3_hw_errors - Common error types.
* @IPA_HW_ERROR_NONE : No error persists
* @IPA_HW_INVALID_DOORBELL_ERROR : Invalid data read from doorbell
* @IPA_HW_DMA_ERROR : Unexpected DMA error
* @IPA_HW_FATAL_SYSTEM_ERROR : HW has crashed and requires reset.
* @IPA_HW_INVALID_OPCODE : Invalid opcode sent
* @IPA_HW_INVALID_PARAMS : Invalid params for the requested command
* @IPA_HW_GSI_CH_NOT_EMPTY_FAILURE : GSI channel emptiness validation failed
* @IPA_HW_CONS_STOP_FAILURE : NTN/ETH CONS stop failed
* @IPA_HW_PROD_STOP_FAILURE : NTN/ETH PROD stop failed
*/
enum ipa3_hw_errors {
IPA_HW_ERROR_NONE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 0),
IPA_HW_INVALID_DOORBELL_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 1),
IPA_HW_DMA_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 2),
IPA_HW_FATAL_SYSTEM_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 3),
IPA_HW_INVALID_OPCODE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 4),
IPA_HW_INVALID_PARAMS =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 5),
IPA_HW_CONS_DISABLE_CMD_GSI_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 6),
IPA_HW_PROD_DISABLE_CMD_GSI_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 7),
IPA_HW_GSI_CH_NOT_EMPTY_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 8),
IPA_HW_CONS_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 9),
IPA_HW_PROD_STOP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_COMMON, 10)
};
/**
* struct IpaHwSharedMemCommonMapping_t - Structure referring to the common
* section in 128B shared memory located in offset zero of SW Partition in IPA
* SRAM.
* @cmdOp : CPU->HW command opcode. See IPA_CPU_2_HW_COMMANDS
* @cmdParams : CPU->HW command parameter, lower 32 bits. The parameter field
* can hold 32 bits of immediate parameters or point to a structure in system
* memory (in such case the address must be accessible to HW)
* @cmdParams_hi : CPU->HW command parameter, higher 32 bits.
* @responseOp : HW->CPU response opcode. See IPA_HW_2_CPU_RESPONSES
* @responseParams : HW->CPU response parameter. The parameter field can hold
* 32 bits of immediate parameters or point to a structure in system memory
* @eventOp : HW->CPU event opcode. See IPA_HW_2_CPU_EVENTS
* @eventParams : HW->CPU event parameter. The parameter field can hold 32
* bits of immediate parameters or point to a structure in
* system memory
* @firstErrorAddress : Contains the address of first error-source on SNOC
* @hwState : State of HW. The state carries information regarding the
* error type.
* @warningCounter : The warnings counter. The counter carries information
* regarding non fatal errors in HW
* @interfaceVersionCommon : The Common interface version as reported by HW
* @responseParams_1: offset addr for uC stats
*
* The shared memory is used for communication between IPA HW and CPU.
*/
struct IpaHwSharedMemCommonMapping_t {
u8 cmdOp;
u8 reserved_01;
u16 reserved_03_02;
u32 cmdParams;
u32 cmdParams_hi;
u8 responseOp;
u8 reserved_0D;
u16 reserved_0F_0E;
u32 responseParams;
u8 eventOp;
u8 reserved_15;
u16 reserved_17_16;
u32 eventParams;
u32 firstErrorAddress;
u8 hwState;
u8 warningCounter;
u16 reserved_23_22;
u16 interfaceVersionCommon;
u16 reserved_27_26;
u32 responseParams_1;
} __packed;
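The reserved_* field names above encode byte offsets (reserved_0D sits at offset 0x0D, reserved_17_16 at 0x16-0x17, and so on), which pins down the packed layout. A derived offset map, for illustration:

/*
 * Derived layout of IpaHwSharedMemCommonMapping_t (byte offsets):
 *   0x00 cmdOp         0x04 cmdParams       0x08 cmdParams_hi
 *   0x0C responseOp    0x10 responseParams  0x14 eventOp
 *   0x18 eventParams   0x1C firstErrorAddress
 *   0x20 hwState, warningCounter   0x24 interfaceVersionCommon
 *   0x28 responseParams_1          total size: 0x2C (44) bytes
 */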
/**
* union Ipa3HwFeatureInfoData_t - parameters for stats/config blob
*
* @offset : Location of a feature within the EventInfoData
* @size : Size of the feature
*/
union Ipa3HwFeatureInfoData_t {
struct IpaHwFeatureInfoParams_t {
u32 offset:16;
u32 size:16;
} __packed params;
u32 raw32b;
} __packed;
/**
* union IpaHwErrorEventData_t - HW->CPU Common Events
* @errorType : Entered when a system error is detected by the HW. Type of
* error is specified by IPA_HW_ERRORS
* @reserved : Reserved
*/
union IpaHwErrorEventData_t {
struct IpaHwErrorEventParams_t {
u32 errorType:8;
u32 reserved:24;
} __packed params;
u32 raw32b;
} __packed;
/**
* struct Ipa3HwEventInfoData_t - Structure holding the parameters for
* statistics and config info
*
* @baseAddrOffset : Base Address Offset of the statistics or config
* structure from IPA_WRAPPER_BASE
* @Ipa3HwFeatureInfoData_t : Location and size of each feature within
* the statistics or config structure
*
* @note Information about each feature in the featureInfo[]
* array is populated at predefined indices per the IPA_HW_FEATURES
* enum definition
*/
struct Ipa3HwEventInfoData_t {
u32 baseAddrOffset;
union Ipa3HwFeatureInfoData_t featureInfo[IPA_HW_NUM_FEATURES];
} __packed;
/**
* struct IpaHwEventLogInfoData_t - Structure holding the parameters for
* IPA_HW_2_CPU_EVENT_LOG_INFO Event
*
* @protocolMask : Mask indicating the protocols enabled in HW.
* Refer IPA_HW_FEATURE_MASK
* @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
* Log Buffer structure
* @statsInfo : Statistics related information
* @configInfo : Configuration related information
*
* @note The offset location of this structure from IPA_WRAPPER_BASE
* will be provided as Event Params for the IPA_HW_2_CPU_EVENT_LOG_INFO
* Event
*/
struct IpaHwEventLogInfoData_t {
u32 protocolMask;
u32 circBuffBaseAddrOffset;
struct Ipa3HwEventInfoData_t statsInfo;
struct Ipa3HwEventInfoData_t configInfo;
} __packed;
/**
* struct ipa3_uc_ntn_ctx
* @ntn_uc_stats_ofst: Neutrino stats offset
* @ntn_uc_stats_mmio: Neutrino stats
* @priv: private data of client
* @uc_ready_cb: uc Ready cb
*/
struct ipa3_uc_ntn_ctx {
u32 ntn_uc_stats_ofst;
struct Ipa3HwStatsNTNInfoData_t *ntn_uc_stats_mmio;
void *priv;
ipa_uc_ready_cb uc_ready_cb;
};
/**
* enum ipa3_hw_ntn_channel_states - Values that represent NTN
* channel state machine.
* @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
* initialized but disabled
* @IPA_HW_NTN_CHANNEL_STATE_RUNNING : Channel is running.
* Entered after SET_UP_COMMAND is processed successfully
* @IPA_HW_NTN_CHANNEL_STATE_ERROR : Channel is in error state
* @IPA_HW_NTN_CHANNEL_STATE_INVALID : Invalid state. Shall not
* be in use in operational scenario
*
* These states apply to both Tx and Rx paths. These do not reflect the
* sub-state the state machine may be in.
*/
enum ipa3_hw_ntn_channel_states {
IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED = 1,
IPA_HW_NTN_CHANNEL_STATE_RUNNING = 2,
IPA_HW_NTN_CHANNEL_STATE_ERROR = 3,
IPA_HW_NTN_CHANNEL_STATE_INVALID = 0xFF
};
/**
* enum ipa3_hw_ntn_channel_errors - List of NTN Channel error
* types. This is present in the event param
* @IPA_HW_NTN_CH_ERR_NONE: No error persists
* @IPA_HW_NTN_TX_FSM_ERROR: Error in the state machine
* transition
* @IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL: Error while calculating
* num RE to bring
* @IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL: Write pointer update
* failed in Rx ring
* @IPA_HW_NTN_RX_FSM_ERROR: Error in the state machine
* transition
* @IPA_HW_NTN_RX_CACHE_NON_EMPTY:
* @IPA_HW_NTN_CH_ERR_RESERVED:
*
* These states apply to both Tx and Rx paths. These do not
* reflect the sub-state the state machine may be in.
*/
enum ipa3_hw_ntn_channel_errors {
IPA_HW_NTN_CH_ERR_NONE = 0,
IPA_HW_NTN_TX_RING_WP_UPDATE_FAIL = 1,
IPA_HW_NTN_TX_FSM_ERROR = 2,
IPA_HW_NTN_TX_COMP_RE_FETCH_FAIL = 3,
IPA_HW_NTN_RX_RING_WP_UPDATE_FAIL = 4,
IPA_HW_NTN_RX_FSM_ERROR = 5,
IPA_HW_NTN_RX_CACHE_NON_EMPTY = 6,
IPA_HW_NTN_CH_ERR_RESERVED = 0xFF
};
/**
* struct Ipa3HwNtnSetUpCmdData_t - Ntn setup command data
* @ring_base_pa: physical address of the base of the Tx/Rx NTN
* ring
* @buff_pool_base_pa: physical address of the base of the Tx/Rx
* buffer pool
* @ntn_ring_size: size of the Tx/Rx NTN ring
* @num_buffers: Rx/tx buffer pool size
* @ntn_reg_base_ptr_pa: physical address of the Tx/Rx NTN
* Ring's tail pointer
* @ipa_pipe_number: IPA pipe number that has to be used for the
* Tx/Rx path
* @dir: Tx/Rx Direction
* @data_buff_size: size of each data buffer allocated in
* DDR
*/
struct Ipa3HwNtnSetUpCmdData_t {
u32 ring_base_pa;
u32 buff_pool_base_pa;
u16 ntn_ring_size;
u16 num_buffers;
u32 ntn_reg_base_ptr_pa;
u8 ipa_pipe_number;
u8 dir;
u16 data_buff_size;
} __packed;
/**
* union Ipa3HwNtnCommonChCmdData_t - Structure holding the
* parameters for the Ntn Tear down command
*
*@ipa_pipe_number: IPA pipe number. This could be a Tx or an Rx pipe
*/
union Ipa3HwNtnCommonChCmdData_t {
struct IpaHwNtnCommonChCmdParams_t {
u32 ipa_pipe_number :8;
u32 reserved :24;
} __packed params;
uint32_t raw32b;
} __packed;
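A usage sketch for the union above: writing the 8-bit field through params makes the raw dword available as the command payload, which is how ipa3_tear_down_uc_offload_pipes() in ipa_uc_ntn.c fills it (the pipe number 5 below is arbitrary):

/*
 * Illustrative usage:
 *   union Ipa3HwNtnCommonChCmdData_t tear = { .raw32b = 0 };
 *   tear.params.ipa_pipe_number = 5;   => raw32b now reads 0x00000005
 */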
/**
* struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
* information
*
*@num_pkts_processed: Number of packets processed - cumulative
*
*@ring_stats:
*@gsi_stats:
*@num_db: Number of times the doorbell was rung
*@num_qmb_int_handled: Number of QMB interrupts handled
*@ipa_pipe_number: The IPA Rx/Tx pipe number.
*/
struct NTN3RxInfoData_t {
u32 num_pkts_processed;
struct IpaHwRingStats_t ring_stats;
struct IpaHwBamStats_t gsi_stats;
u32 num_db;
u32 num_qmb_int_handled;
u32 ipa_pipe_number;
} __packed;
/**
* struct NTN3TxInfoData_t - Structure holding the NTN Tx channel
* information. Ensure that this is always word aligned
*
*@num_pkts_processed: Number of packets processed - cumulative
*
*@ring_stats:
*@gsi_stats:
*@num_db: Number of times the doorbell was rung
*@num_qmb_int_handled: Number of QMB interrupts handled
*@ipa_pipe_number: The IPA Rx/Tx pipe number
*/
struct NTN3TxInfoData_t {
u32 num_pkts_processed;
struct IpaHwRingStats_t ring_stats;
struct IpaHwBamStats_t gsi_stats;
u32 num_db;
u32 num_qmb_int_handled;
u32 ipa_pipe_number;
} __packed;
/**
* struct Ipa3HwStatsNTNInfoData_t - Structure holding the NTN Rx and Tx
* channel statistics. Ensure that this is always word aligned
*
*/
struct Ipa3HwStatsNTNInfoData_t {
struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
struct NTN3TxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
} __packed;
/*
* uC offload related data structures
*/
#define IPA_UC_OFFLOAD_CONNECTED BIT(0)
#define IPA_UC_OFFLOAD_ENABLED BIT(1)
#define IPA_UC_OFFLOAD_RESUMED BIT(2)
/**
* enum ipa_cpu_2_hw_offload_commands - Values that represent
* the offload commands from CPU
* @IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP : Command to set up
* Offload protocol's Tx/Rx Path
* @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
* Offload protocol's Tx/ Rx Path
* @IPA_CPU_2_HW_CMD_PERIPHERAL_INIT :Command to initialize peripheral
* @IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT : Command to deinitialize peripheral
* @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC: Command to start the
* uC stats calculation for a particular protocol
* @IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC: Command to stop the
* uC stats calculation for a particular protocol
*/
enum ipa_cpu_2_hw_offload_commands {
IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
IPA_CPU_2_HW_CMD_PERIPHERAL_INIT =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
IPA_CPU_2_HW_CMD_OFFLOAD_STATS_ALLOC =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
IPA_CPU_2_HW_CMD_OFFLOAD_STATS_DEALLOC =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
};
/**
* struct IpaOffloadStatschannel_info - channel info for uC
* stats
* @dir: Direction of the channel: DIR_CONSUMER = 0,
* DIR_PRODUCER = 1
* @ch_id: Channel id of the IPA endpoint for which stats need
* to be calculated; 0xFF means an invalid channel, or disables
* stats on a channel where they were previously enabled
*/
struct IpaOffloadStatschannel_info {
uint8_t dir;
uint8_t ch_id;
} __packed;
/**
* struct IpaHwOffloadStatsAllocCmdData_t - protocol info for uC
* stats start
* @protocol: Enum that indicates the protocol type
* @ch_id_info: Channel id of the IPA endpoint for which stats
* need to be calculated
*/
struct IpaHwOffloadStatsAllocCmdData_t {
uint32_t protocol;
struct IpaOffloadStatschannel_info
ch_id_info[MAX_CH_STATS_SUPPORTED];
} __packed;
/**
* struct IpaHwOffloadStatsDeAllocCmdData_t - protocol info for
* uC stats stop
* @protocol: Enum that indicates the protocol type
*/
struct IpaHwOffloadStatsDeAllocCmdData_t {
uint32_t protocol;
} __packed;
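Usage note for the stats alloc/dealloc commands above, as exercised by ipa3_enable_wdi3_pipes() and ipa3_disable_wdi3_pipes() in ipa_wdi3_i.c further down: the caller fills one ch_id_info slot per monitored channel before issuing the command, and writes ch_id = 0xff into a slot to disable stats on it. A reduced sketch of that fill pattern (field paths shortened from ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]):

/*
 * info.ch_id_info[0].ch_id = rx_gsi_chan_hdl;  info.ch_id_info[0].dir = DIR_PRODUCER;
 * info.ch_id_info[1].ch_id = tx_gsi_chan_hdl;  info.ch_id_info[1].dir = DIR_CONSUMER;
 * ipa3_uc_debug_stats_alloc(info);
 */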
/**
* enum ipa3_hw_offload_channel_states - Values that represent
* offload channel state machine.
* @IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED : Channel is
* initialized but disabled
* @IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING : Channel is running.
* Entered after SET_UP_COMMAND is processed successfully
* @IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR : Channel is in error state
* @IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID : Invalid state. Shall not
* be in use in operational scenario
*
* These states apply to both Tx and Rx paths. These do not
* reflect the sub-state the state machine may be in
*/
enum ipa3_hw_offload_channel_states {
IPA_HW_OFFLOAD_CHANNEL_STATE_INITED_DISABLED = 1,
IPA_HW_OFFLOAD_CHANNEL_STATE_RUNNING = 2,
IPA_HW_OFFLOAD_CHANNEL_STATE_ERROR = 3,
IPA_HW_OFFLOAD_CHANNEL_STATE_INVALID = 0xFF
};
/**
* enum ipa3_hw_2_cpu_cmd_resp_status - Values that represent
* offload related command response status to be sent to CPU.
*/
enum ipa3_hw_2_cpu_offload_cmd_resp_status {
IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 0),
IPA_HW_2_CPU_OFFLOAD_MAX_TX_CHANNELS =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
IPA_HW_2_CPU_OFFLOAD_TX_RING_OVERRUN_POSSIBILITY =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
IPA_HW_2_CPU_OFFLOAD_TX_RING_SET_UP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
IPA_HW_2_CPU_OFFLOAD_TX_RING_PARAMS_UNALIGNED =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
IPA_HW_2_CPU_OFFLOAD_UNKNOWN_TX_CHANNEL =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
IPA_HW_2_CPU_OFFLOAD_TX_INVALID_FSM_TRANSITION =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
IPA_HW_2_CPU_OFFLOAD_TX_FSM_TRANSITION_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 7),
IPA_HW_2_CPU_OFFLOAD_MAX_RX_CHANNELS =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 8),
IPA_HW_2_CPU_OFFLOAD_RX_RING_PARAMS_UNALIGNED =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 9),
IPA_HW_2_CPU_OFFLOAD_RX_RING_SET_UP_FAILURE =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 10),
IPA_HW_2_CPU_OFFLOAD_UNKNOWN_RX_CHANNEL =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 11),
IPA_HW_2_CPU_OFFLOAD_RX_INVALID_FSM_TRANSITION =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 12),
IPA_HW_2_CPU_OFFLOAD_RX_FSM_TRANSITION_ERROR =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 13),
IPA_HW_2_CPU_OFFLOAD_RX_RING_OVERRUN_POSSIBILITY =
FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 14),
};
/**
* struct IpaHw11adSetupCmdData_t - 11ad setup channel command data
* @dir: Direction RX/TX
* @wifi_ch: 11ad peripheral pipe number
* @gsi_ch: GSI Channel number
* @reserved: 8 bytes padding
* @wifi_hp_addr_lsb: Head/Tail pointer absolute address
* @wifi_hp_addr_msb: Head/Tail pointer absolute address
*/
struct IpaHw11adSetupCmdData_t {
u8 dir;
u8 wifi_ch;
u8 gsi_ch;
u8 reserved;
u32 wifi_hp_addr_lsb;
u32 wifi_hp_addr_msb;
} __packed;
/**
* struct IpaHw11adCommonChCmdData_t - 11ad tear down channel command data
* @gsi_ch: GSI Channel number
* @reserved_0: padding
* @reserved_1: padding
*/
struct IpaHw11adCommonChCmdData_t {
u8 gsi_ch;
u8 reserved_0;
u16 reserved_1;
} __packed;
/**
* struct IpaHw11adInitCmdData_t - 11ad peripheral init command data
* @periph_baddr_lsb: Peripheral Base Address LSB (pa/IOVA)
* @periph_baddr_msb: Peripheral Base Address MSB (pa/IOVA)
*/
struct IpaHw11adInitCmdData_t {
u32 periph_baddr_lsb;
u32 periph_baddr_msb;
} __packed;
/**
* struct IpaHw11adDeinitCmdData_t - 11ad peripheral deinit command data
* @reserved: Reserved for future
*/
struct IpaHw11adDeinitCmdData_t {
u32 reserved;
};
/**
* struct IpaHwSetUpCmd - Structure holding the parameters
* for IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP
*
*
*/
union IpaHwSetUpCmd {
struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
struct IpaHw11adSetupCmdData_t W11AdSetupCh_params;
} __packed;
struct IpaHwOffloadSetUpCmdData_t {
u8 protocol;
union IpaHwSetUpCmd SetupCh_params;
} __packed;
struct IpaHwOffloadSetUpCmdData_t_v4_0 {
u32 protocol;
union IpaHwSetUpCmd SetupCh_params;
} __packed;
/**
* union IpaHwCommonChCmd - Union holding the parameters
* for IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN
*
*
*/
union IpaHwCommonChCmd {
union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
struct IpaHw11adCommonChCmdData_t W11AdCommonCh_params;
} __packed;
struct IpaHwOffloadCommonChCmdData_t {
u8 protocol;
union IpaHwCommonChCmd CommonCh_params;
} __packed;
struct IpaHwOffloadCommonChCmdData_t_v4_0 {
u32 protocol;
union IpaHwCommonChCmd CommonCh_params;
} __packed;
/**
* union IpaHwPeripheralInitCmd - Structure holding the parameters
* for IPA_CPU_2_HW_CMD_PERIPHERAL_INIT
*
*/
union IpaHwPeripheralInitCmd {
struct IpaHw11adInitCmdData_t W11AdInit_params;
} __packed;
struct IpaHwPeripheralInitCmdData_t {
u32 protocol;
union IpaHwPeripheralInitCmd Init_params;
} __packed;
/**
* union IpaHwPeripheralDeinitCmd - Structure holding the parameters
* for IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT
*
*/
union IpaHwPeripheralDeinitCmd {
struct IpaHw11adDeinitCmdData_t W11AdDeinit_params;
} __packed;
struct IpaHwPeripheralDeinitCmdData_t {
u32 protocol;
union IpaHwPeripheralDeinitCmd PeripheralDeinit_params;
} __packed;
#endif /* _IPA_UC_OFFLOAD_I_H_ */
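The only difference between each *_v4_0 command struct above and its legacy counterpart is the width of the protocol field (u32 versus u8), which shifts the offset of the payload union; the .c files in this commit select a layout at runtime from ipa_hw_type. A reduced sketch of that dispatch, mirroring ipa3_tear_down_uc_offload_pipes() (the helper name here is hypothetical; DMA allocation and error handling elided):

/* Sketch: select the command layout by HW version, as the callers do. */
static inline void example_fill_ntn_teardown(void *base, bool is_v4_0, u8 pipe)
{
	union Ipa3HwNtnCommonChCmdData_t *tear;

	if (is_v4_0) {
		struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd = base;

		cmd->protocol = IPA_HW_PROTOCOL_ETH;	/* u32 protocol */
		tear = &cmd->CommonCh_params.NtnCommonCh_params;
	} else {
		struct IpaHwOffloadCommonChCmdData_t *cmd = base;

		cmd->protocol = IPA_HW_PROTOCOL_ETH;	/* u8 protocol */
		tear = &cmd->CommonCh_params.NtnCommonCh_params;
	}
	tear->params.ipa_pipe_number = pipe;
}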

3092
ipa/ipa_v3/ipa_uc_wdi.c (new file)

File diff suppressed because it is too large.

8477
ipa/ipa_v3/ipa_utils.c (new file)

File diff suppressed because it is too large.

962
ipa/ipa_v3/ipa_wdi3_i.c (new file)

@@ -0,0 +1,962 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018 - 2019, The Linux Foundation. All rights reserved.
*/
#include "ipa_i.h"
#include <linux/ipa_wdi3.h>
#define UPDATE_RP_MODERATION_CONFIG 1
#define UPDATE_RP_MODERATION_THRESHOLD 8
#define IPA_WLAN_AGGR_PKT_LIMIT 1
#define IPA_WLAN_AGGR_BYTE_LIMIT 2 /* 2 KB aggregation hard byte limit */
#define IPA_WDI3_GSI_EVT_RING_INT_MODT 32
static void ipa3_wdi3_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
switch (notify->evt_id) {
case GSI_EVT_OUT_OF_BUFFERS_ERR:
IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
break;
case GSI_EVT_OUT_OF_RESOURCES_ERR:
IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
break;
case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
break;
case GSI_EVT_EVT_RING_EMPTY_ERR:
IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
break;
default:
IPAERR("Unexpected err evt: %d\n", notify->evt_id);
}
ipa_assert();
}
static void ipa3_wdi3_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
switch (notify->evt_id) {
case GSI_CHAN_INVALID_TRE_ERR:
IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
break;
case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
break;
case GSI_CHAN_OUT_OF_BUFFERS_ERR:
IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
break;
case GSI_CHAN_OUT_OF_RESOURCES_ERR:
IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
break;
case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
break;
case GSI_CHAN_HWO_1_ERR:
IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
break;
default:
IPAERR("Unexpected err evt: %d\n", notify->evt_id);
}
ipa_assert();
}
static int ipa3_setup_wdi3_gsi_channel(u8 is_smmu_enabled,
struct ipa_wdi_pipe_setup_info *info,
struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir,
struct ipa3_ep_context *ep)
{
struct gsi_evt_ring_props gsi_evt_ring_props;
struct gsi_chan_props gsi_channel_props;
union __packed gsi_channel_scratch ch_scratch;
union __packed gsi_evt_scratch evt_scratch;
const struct ipa_gsi_ep_config *gsi_ep_info;
int result, len;
unsigned long va;
uint32_t addr_low, addr_high;
if (!info || !info_smmu || !ep) {
IPAERR("invalid input\n");
return -EINVAL;
}
/* setup event ring */
memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_WDI3_EV;
gsi_evt_ring_props.intr = GSI_INTR_IRQ;
/* 16 (for Tx) and 8 (for Rx) */
if (dir == IPA_WDI3_TX_DIR)
gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
else
gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_8B;
if (!is_smmu_enabled) {
gsi_evt_ring_props.ring_len = info->event_ring_size;
gsi_evt_ring_props.ring_base_addr =
(u64)info->event_ring_base_pa;
} else {
len = info_smmu->event_ring_size;
if (dir == IPA_WDI3_TX_DIR) {
if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_RING_RES,
true, info->event_ring_base_pa,
&info_smmu->event_ring_base, len,
false, &va)) {
IPAERR("failed to get smmu mapping\n");
return -EFAULT;
}
} else {
if (ipa_create_gsi_smmu_mapping(
IPA_WDI_RX_COMP_RING_RES, true,
info->event_ring_base_pa,
&info_smmu->event_ring_base, len,
false, &va)) {
IPAERR("failed to get smmu mapping\n");
return -EFAULT;
}
}
gsi_evt_ring_props.ring_len = len;
gsi_evt_ring_props.ring_base_addr = (u64)va;
}
gsi_evt_ring_props.int_modt = IPA_WDI3_GSI_EVT_RING_INT_MODT;
gsi_evt_ring_props.int_modc = 1;
gsi_evt_ring_props.exclusive = true;
gsi_evt_ring_props.err_cb = ipa3_wdi3_gsi_evt_ring_err_cb;
gsi_evt_ring_props.user_data = NULL;
result = gsi_alloc_evt_ring(&gsi_evt_ring_props, ipa3_ctx->gsi_dev_hdl,
&ep->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("fail to alloc RX event ring\n");
result = -EFAULT;
goto fail_smmu_mapping;
}
ep->gsi_mem_info.evt_ring_len = gsi_evt_ring_props.ring_len;
ep->gsi_mem_info.evt_ring_base_addr =
gsi_evt_ring_props.ring_base_addr;
/* setup channel ring */
memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
gsi_channel_props.prot = GSI_CHAN_PROT_WDI3;
if (dir == IPA_WDI3_TX_DIR)
gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
else
gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
if (!gsi_ep_info) {
IPAERR("Failed getting GSI EP info for client=%d\n",
ep->client);
result = -EINVAL;
goto fail_get_gsi_ep_info;
} else {
gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
}
gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
gsi_channel_props.prefetch_mode =
gsi_ep_info->prefetch_mode;
gsi_channel_props.empty_lvl_threshold =
gsi_ep_info->prefetch_threshold;
gsi_channel_props.low_weight = 1;
gsi_channel_props.err_cb = ipa3_wdi3_gsi_chan_err_cb;
if (!is_smmu_enabled) {
gsi_channel_props.ring_len = (u16)info->transfer_ring_size;
gsi_channel_props.ring_base_addr =
(u64)info->transfer_ring_base_pa;
} else {
len = info_smmu->transfer_ring_size;
if (dir == IPA_WDI3_TX_DIR) {
if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_RING_RES,
true, info->transfer_ring_base_pa,
&info_smmu->transfer_ring_base, len,
false, &va)) {
IPAERR("failed to get smmu mapping\n");
result = -EFAULT;
goto fail_get_gsi_ep_info;
}
} else {
if (ipa_create_gsi_smmu_mapping(
IPA_WDI_RX_RING_RES, true,
info->transfer_ring_base_pa,
&info_smmu->transfer_ring_base, len,
false, &va)) {
IPAERR("failed to get smmu mapping\n");
result = -EFAULT;
goto fail_get_gsi_ep_info;
}
}
gsi_channel_props.ring_len = len;
gsi_channel_props.ring_base_addr = (u64)va;
}
result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
&ep->gsi_chan_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("fail to alloc GSI chan\n");
goto fail_get_gsi_ep_info;
}
ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
ep->gsi_mem_info.chan_ring_base_addr =
gsi_channel_props.ring_base_addr;
/* write event scratch */
memset(&evt_scratch, 0, sizeof(evt_scratch));
evt_scratch.wdi3.update_rp_moderation_config =
UPDATE_RP_MODERATION_CONFIG;
result = gsi_write_evt_ring_scratch(ep->gsi_evt_ring_hdl, evt_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write evt ring scratch\n");
goto fail_write_scratch;
}
if (!is_smmu_enabled) {
IPADBG("smmu disabled\n");
if (info->is_evt_rn_db_pcie_addr == true)
IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
else
IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
IPADBG_LOW("LSB 0x%x\n",
(u32)info->event_ring_doorbell_pa);
IPADBG_LOW("MSB 0x%x\n",
(u32)((u64)info->event_ring_doorbell_pa >> 32));
} else {
IPADBG("smmu enabled\n");
if (info_smmu->is_evt_rn_db_pcie_addr == true)
IPADBG_LOW("is_evt_rn_db_pcie_addr is PCIE addr\n");
else
IPADBG_LOW("is_evt_rn_db_pcie_addr is DDR addr\n");
IPADBG_LOW("LSB 0x%x\n",
(u32)info_smmu->event_ring_doorbell_pa);
IPADBG_LOW("MSB 0x%x\n",
(u32)((u64)info_smmu->event_ring_doorbell_pa >> 32));
}
if (!is_smmu_enabled) {
addr_low = (u32)info->event_ring_doorbell_pa;
addr_high = (u32)((u64)info->event_ring_doorbell_pa >> 32);
} else {
if (dir == IPA_WDI3_TX_DIR) {
if (ipa_create_gsi_smmu_mapping(IPA_WDI_CE_DB_RES,
true, info_smmu->event_ring_doorbell_pa,
NULL, 4, true, &va)) {
IPAERR("failed to get smmu mapping\n");
result = -EFAULT;
goto fail_write_scratch;
}
} else {
if (ipa_create_gsi_smmu_mapping(
IPA_WDI_RX_COMP_RING_WP_RES,
true, info_smmu->event_ring_doorbell_pa,
NULL, 4, true, &va)) {
IPAERR("failed to get smmu mapping\n");
result = -EFAULT;
goto fail_write_scratch;
}
}
addr_low = (u32)va;
addr_high = (u32)((u64)va >> 32);
}
/*
* Arch specific:
* PCIe addresses that are not routed via the SMMU use the PA directly.
* PCIe and DDR are reached via two different ports, so bit 40 of the
* address is asserted to mark it as a PCIe address.
* WDI-3.0, MSM --> PCIe via SMMU
* WDI-3.0, MDM --> PCIe not via SMMU + dual port; assert bit 40
*/
if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
is_smmu_enabled) {
/*
* Irrespective of whether the SMMU is enabled, do not use the IOVA
* address, since PCIe does not go through the SMMU on MDM targets
*/
if (info_smmu->is_evt_rn_db_pcie_addr == true) {
addr_low = (u32)info_smmu->event_ring_doorbell_pa;
addr_high =
(u32)((u64)info_smmu->event_ring_doorbell_pa
>> 32);
}
}
/*
* GSI recommendation from the WDI-3.0 interface document: set bit 40
* of the 64-bit address (bit 8 of the upper word) for
* (MDM targets && PCIe addr)
*/
if (!is_smmu_enabled) {
if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
info->is_evt_rn_db_pcie_addr)
addr_high |= (1 << 8);
} else {
if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
info_smmu->is_evt_rn_db_pcie_addr)
addr_high |= (1 << 8);
}
gsi_wdi3_write_evt_ring_db(ep->gsi_evt_ring_hdl,
addr_low,
addr_high);
/* write channel scratch */
memset(&ch_scratch, 0, sizeof(ch_scratch));
ch_scratch.wdi3.update_rp_moderation_threshold =
UPDATE_RP_MODERATION_THRESHOLD;
if (dir == IPA_WDI3_RX_DIR) {
if (!is_smmu_enabled)
ch_scratch.wdi3.rx_pkt_offset = info->pkt_offset;
else
ch_scratch.wdi3.rx_pkt_offset = info_smmu->pkt_offset;
/* this metadata reg offset need to be in words */
ch_scratch.wdi3.endp_metadata_reg_offset =
ipahal_get_reg_mn_ofst(IPA_ENDP_INIT_HDR_METADATA_n, 0,
gsi_ep_info->ipa_ep_num) / 4;
}
if (!is_smmu_enabled) {
IPADBG_LOW("smmu disabled\n");
if (info->is_txr_rn_db_pcie_addr == true)
IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
else
IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
IPADBG_LOW("LSB 0x%x\n",
(u32)info->transfer_ring_doorbell_pa);
IPADBG_LOW("MSB 0x%x\n",
(u32)((u64)info->transfer_ring_doorbell_pa >> 32));
} else {
IPADBG_LOW("smmu enabled\n");
if (info_smmu->is_txr_rn_db_pcie_addr == true)
IPADBG_LOW("is_txr_rn_db_pcie_addr is PCIE addr\n");
else
IPADBG_LOW("is_txr_rn_db_pcie_addr is DDR addr\n");
IPADBG_LOW("LSB 0x%x\n",
(u32)info_smmu->transfer_ring_doorbell_pa);
IPADBG_LOW("MSB 0x%x\n",
(u32)((u64)info_smmu->transfer_ring_doorbell_pa >> 32));
}
if (!is_smmu_enabled) {
ch_scratch.wdi3.wifi_rp_address_low =
(u32)info->transfer_ring_doorbell_pa;
ch_scratch.wdi3.wifi_rp_address_high =
(u32)((u64)info->transfer_ring_doorbell_pa >> 32);
} else {
if (dir == IPA_WDI3_TX_DIR) {
if (ipa_create_gsi_smmu_mapping(IPA_WDI_TX_DB_RES,
true, info_smmu->transfer_ring_doorbell_pa,
NULL, 4, true, &va)) {
IPAERR("failed to get smmu mapping\n");
result = -EFAULT;
goto fail_write_scratch;
}
ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
ch_scratch.wdi3.wifi_rp_address_high =
(u32)((u64)va >> 32);
} else {
if (ipa_create_gsi_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
true, info_smmu->transfer_ring_doorbell_pa,
NULL, 4, true, &va)) {
IPAERR("failed to get smmu mapping\n");
result = -EFAULT;
goto fail_write_scratch;
}
ch_scratch.wdi3.wifi_rp_address_low = (u32)va;
ch_scratch.wdi3.wifi_rp_address_high =
(u32)((u64)va >> 32);
}
}
/*
* Arch specific:
* PCIe addresses that are not routed via the SMMU use the PA directly.
* PCIe and DDR are reached via two different ports, so bit 40 of the
* address is asserted to mark it as a PCIe address.
* WDI-3.0, MSM --> PCIe via SMMU
* WDI-3.0, MDM --> PCIe not via SMMU + dual port; assert bit 40
*/
if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
is_smmu_enabled) {
/*
* Irrespective of whether the SMMU is enabled, do not use the IOVA
* address, since PCIe does not go through the SMMU on MDM targets
*/
if (info_smmu->is_txr_rn_db_pcie_addr == true) {
ch_scratch.wdi3.wifi_rp_address_low =
(u32)info_smmu->transfer_ring_doorbell_pa;
ch_scratch.wdi3.wifi_rp_address_high =
(u32)((u64)info_smmu->transfer_ring_doorbell_pa
>> 32);
}
}
/*
* GSI recommendation from the WDI-3.0 interface document: set bit 40
* of the 64-bit address (bit 8 of the upper word) for
* (MDM targets && PCIe addr)
*/
if (!is_smmu_enabled) {
if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
info->is_txr_rn_db_pcie_addr)
ch_scratch.wdi3.wifi_rp_address_high =
(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
(1 << 8));
} else {
if ((ipa3_ctx->platform_type == IPA_PLAT_TYPE_MDM) &&
info_smmu->is_txr_rn_db_pcie_addr)
ch_scratch.wdi3.wifi_rp_address_high =
(u32)((u32)ch_scratch.wdi3.wifi_rp_address_high |
(1 << 8));
}
result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write evt ring scratch\n");
goto fail_write_scratch;
}
return 0;
fail_write_scratch:
gsi_dealloc_channel(ep->gsi_chan_hdl);
ep->gsi_chan_hdl = ~0;
fail_get_gsi_ep_info:
gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ep->gsi_evt_ring_hdl = ~0;
fail_smmu_mapping:
ipa3_release_wdi3_gsi_smmu_mappings(dir);
return result;
}
int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in,
struct ipa_wdi_conn_out_params *out,
ipa_wdi_meter_notifier_cb wdi_notify)
{
enum ipa_client_type rx_client;
enum ipa_client_type tx_client;
struct ipa3_ep_context *ep_rx;
struct ipa3_ep_context *ep_tx;
int ipa_ep_idx_rx;
int ipa_ep_idx_tx;
int result = 0;
u32 gsi_db_addr_low, gsi_db_addr_high;
void __iomem *db_addr;
u32 evt_ring_db_addr_low, evt_ring_db_addr_high;
/* wdi3 is only supported over gsi */
if (!ipa3_ctx->ipa_wdi3_over_gsi) {
IPAERR("wdi3 over uc offload not supported");
WARN_ON(1);
return -EFAULT;
}
if (in == NULL || out == NULL) {
IPAERR("invalid input\n");
return -EINVAL;
}
if (in->is_smmu_enabled == false) {
rx_client = in->u_rx.rx.client;
tx_client = in->u_tx.tx.client;
} else {
rx_client = in->u_rx.rx_smmu.client;
tx_client = in->u_tx.tx_smmu.client;
}
ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client);
ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client);
if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) {
IPAERR("fail to alloc EP.\n");
return -EFAULT;
}
if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES ||
ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES) {
IPAERR("ep out of range.\n");
return -EFAULT;
}
ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
if (ep_rx->valid || ep_tx->valid) {
IPAERR("EP already allocated.\n");
return -EFAULT;
}
memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys));
memset(ep_tx, 0, offsetof(struct ipa3_ep_context, sys));
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
if (wdi_notify)
ipa3_ctx->uc_wdi_ctx.stats_notify = wdi_notify;
else
IPADBG("wdi_notify is null\n");
#endif
/* setup rx ep cfg */
ep_rx->valid = 1;
ep_rx->client = rx_client;
result = ipa3_disable_data_path(ipa_ep_idx_rx);
if (result) {
IPAERR("disable data path failed res=%d clnt=%d.\n", result,
ipa_ep_idx_rx);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -EFAULT;
}
ep_rx->client_notify = in->notify;
ep_rx->priv = in->priv;
if (in->is_smmu_enabled == false)
memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg,
sizeof(ep_rx->cfg));
else
memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg,
sizeof(ep_rx->cfg));
if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) {
IPAERR("fail to setup rx pipe cfg\n");
result = -EFAULT;
goto fail;
}
IPADBG("ipa3_ctx->ipa_wdi3_over_gsi %d\n",
ipa3_ctx->ipa_wdi3_over_gsi);
/* setup RX gsi channel */
if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
&in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR,
ep_rx)) {
IPAERR("fail to setup wdi3 gsi rx channel\n");
result = -EFAULT;
goto fail;
}
if (gsi_query_channel_db_addr(ep_rx->gsi_chan_hdl,
&gsi_db_addr_low, &gsi_db_addr_high)) {
IPAERR("failed to query gsi rx db addr\n");
result = -EFAULT;
goto fail;
}
/* only 32 bit lsb is used */
out->rx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
IPADBG("out->rx_uc_db_pa %llu\n", out->rx_uc_db_pa);
ipa3_install_dflt_flt_rules(ipa_ep_idx_rx);
IPADBG("client %d (ep: %d) connected\n", rx_client,
ipa_ep_idx_rx);
/* setup tx ep cfg */
ep_tx->valid = 1;
ep_tx->client = tx_client;
result = ipa3_disable_data_path(ipa_ep_idx_tx);
if (result) {
IPAERR("disable data path failed res=%d ep=%d.\n", result,
ipa_ep_idx_tx);
result = -EFAULT;
goto fail;
}
if (in->is_smmu_enabled == false)
memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg,
sizeof(ep_tx->cfg));
else
memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg,
sizeof(ep_tx->cfg));
ep_tx->cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
ep_tx->cfg.aggr.aggr = IPA_GENERIC;
ep_tx->cfg.aggr.aggr_byte_limit = IPA_WLAN_AGGR_BYTE_LIMIT;
ep_tx->cfg.aggr.aggr_pkt_limit = IPA_WLAN_AGGR_PKT_LIMIT;
ep_tx->cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
if (ipa3_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) {
IPAERR("fail to setup tx pipe cfg\n");
result = -EFAULT;
goto fail;
}
/* setup TX gsi channel */
if (ipa3_setup_wdi3_gsi_channel(in->is_smmu_enabled,
&in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR,
ep_tx)) {
IPAERR("fail to setup wdi3 gsi tx channel\n");
result = -EFAULT;
goto fail;
}
if (gsi_query_channel_db_addr(ep_tx->gsi_chan_hdl,
&gsi_db_addr_low, &gsi_db_addr_high)) {
IPAERR("failed to query gsi tx db addr\n");
result = -EFAULT;
goto fail;
}
/* only 32 bit lsb is used */
out->tx_uc_db_pa = (phys_addr_t)(gsi_db_addr_low);
IPADBG("out->tx_uc_db_pa %llu\n", out->tx_uc_db_pa);
IPADBG("client %d (ep: %d) connected\n", tx_client,
ipa_ep_idx_tx);
/* ring initial event ring dbs */
gsi_query_evt_ring_db_addr(ep_rx->gsi_evt_ring_hdl,
&evt_ring_db_addr_low, &evt_ring_db_addr_high);
IPADBG("evt_ring_hdl %lu, db_addr_low %u db_addr_high %u\n",
ep_rx->gsi_evt_ring_hdl, evt_ring_db_addr_low,
evt_ring_db_addr_high);
/* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
/*
* IPA/GSI driver should ring the event DB once after
* initialization of the event, with a value that is
* outside of the ring range. Eg: ring base = 0x1000,
* ring size = 0x100 => AP can write value > 0x1100
* into the doorbell address. Eg: 0x 1110
*/
iowrite32(in->u_rx.rx.event_ring_size / 4 + 10, db_addr);
iounmap(db_addr);
gsi_query_evt_ring_db_addr(ep_tx->gsi_evt_ring_hdl,
&evt_ring_db_addr_low, &evt_ring_db_addr_high);
/* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
/*
* IPA/GSI driver should ring the event DB once after
* initialization of the event, with a value that is
* outside of the ring range. Eg: ring base = 0x1000,
* ring size = 0x100 => AP can write value > 0x1100
* into the doorbell address. Eg: 0x 1110
*/
iowrite32(in->u_tx.tx.event_ring_size / 4 + 10, db_addr);
iounmap(db_addr);
fail:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}
int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
{
struct ipa3_ep_context *ep_tx, *ep_rx;
int result = 0;
/* wdi3 is only supported over gsi */
if (!ipa3_ctx->ipa_wdi3_over_gsi) {
IPAERR("wdi3 over uc offload not supported");
WARN_ON(1);
return -EFAULT;
}
IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES ||
ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) {
IPAERR("invalid ipa ep index\n");
return -EINVAL;
}
ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
/* tear down tx pipe */
result = ipa3_reset_gsi_channel(ipa_ep_idx_tx);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to reset gsi channel: %d.\n", result);
goto exit;
}
result = gsi_reset_evt_ring(ep_tx->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to reset evt ring: %d.\n", result);
goto exit;
}
result = ipa3_release_gsi_channel(ipa_ep_idx_tx);
if (result) {
IPAERR("failed to release gsi channel: %d\n", result);
goto exit;
}
memset(ep_tx, 0, sizeof(struct ipa3_ep_context));
IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx);
/* tear down rx pipe */
result = ipa3_reset_gsi_channel(ipa_ep_idx_rx);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to reset gsi channel: %d.\n", result);
goto exit;
}
result = gsi_reset_evt_ring(ep_rx->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to reset evt ring: %d.\n", result);
goto exit;
}
result = ipa3_release_gsi_channel(ipa_ep_idx_rx);
if (result) {
IPAERR("failed to release gsi channel: %d\n", result);
goto exit;
}
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_WDI3);
ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx);
memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx);
exit:
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_by_pipe(ipa_ep_idx_tx));
return result;
}
int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
{
struct ipa3_ep_context *ep_tx, *ep_rx;
int result = 0;
/* wdi3 is only supported over gsi */
if (!ipa3_ctx->ipa_wdi3_over_gsi) {
IPAERR("wdi3 over uc offload not supported");
WARN_ON(1);
return -EFAULT;
}
IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
/* start gsi tx channel */
result = gsi_start_channel(ep_tx->gsi_chan_hdl);
if (result) {
IPAERR("failed to start gsi tx channel\n");
result = -EFAULT;
goto exit;
}
/* start gsi rx channel */
result = gsi_start_channel(ep_rx->gsi_chan_hdl);
if (result) {
IPAERR("failed to start gsi rx channel\n");
result = -EFAULT;
goto exit;
}
/* start uC gsi dbg stats monitor */
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
= ep_rx->gsi_chan_hdl;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
= DIR_PRODUCER;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
= ep_tx->gsi_chan_hdl;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
= DIR_CONSUMER;
ipa3_uc_debug_stats_alloc(
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
}
/* enable data path */
result = ipa3_enable_data_path(ipa_ep_idx_rx);
if (result) {
IPAERR("enable data path failed res=%d clnt=%d.\n", result,
ipa_ep_idx_rx);
result = -EFAULT;
goto exit;
}
result = ipa3_enable_data_path(ipa_ep_idx_tx);
if (result) {
IPAERR("enable data path failed res=%d clnt=%d.\n", result,
ipa_ep_idx_tx);
result = -EFAULT;
goto exit;
}
exit:
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(ipa_ep_idx_tx));
return result;
}
int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
{
int result = 0;
struct ipa3_ep_context *ep;
u32 source_pipe_bitmask = 0;
bool disable_force_clear = false;
struct ipahal_ep_cfg_ctrl_scnd ep_ctrl_scnd = { 0 };
	/* wdi3 is only supported over gsi */
if (!ipa3_ctx->ipa_wdi3_over_gsi) {
IPAERR("wdi3 over uc offload not supported");
WARN_ON(1);
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
/* disable tx data path */
result = ipa3_disable_data_path(ipa_ep_idx_tx);
if (result) {
IPAERR("enable data path failed res=%d clnt=%d.\n", result,
ipa_ep_idx_tx);
result = -EFAULT;
goto fail;
}
/* disable rx data path */
result = ipa3_disable_data_path(ipa_ep_idx_rx);
if (result) {
IPAERR("disable data path failed res=%d clnt=%d.\n", result,
ipa_ep_idx_rx);
result = -EFAULT;
goto fail;
}
/*
* For WDI 3.0 need to ensure pipe will be empty before suspend
* as IPA uC will fail to suspend the pipe otherwise.
*/
ep = &ipa3_ctx->ep[ipa_ep_idx_rx];
source_pipe_bitmask = 1 <<
ipa3_get_ep_mapping(ep->client);
result = ipa3_enable_force_clear(ipa_ep_idx_rx,
false, source_pipe_bitmask);
if (result) {
/*
* assuming here modem SSR, AP can remove
* the delay in this case
*/
IPAERR("failed to force clear %d\n", result);
IPAERR("remove delay from SCND reg\n");
ep_ctrl_scnd.endp_delay = false;
ipahal_write_reg_n_fields(
IPA_ENDP_INIT_CTRL_SCND_n, ipa_ep_idx_rx,
&ep_ctrl_scnd);
} else {
disable_force_clear = true;
}
/* stop gsi rx channel */
result = ipa3_stop_gsi_channel(ipa_ep_idx_rx);
if (result) {
IPAERR("failed to stop gsi rx channel\n");
result = -EFAULT;
goto fail;
}
/* stop gsi tx channel */
result = ipa3_stop_gsi_channel(ipa_ep_idx_tx);
if (result) {
IPAERR("failed to stop gsi tx channel\n");
result = -EFAULT;
goto fail;
}
/* stop uC gsi dbg stats monitor */
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].ch_id
= 0xff;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[0].dir
= DIR_PRODUCER;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].ch_id
= 0xff;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3].ch_id_info[1].dir
= DIR_CONSUMER;
ipa3_uc_debug_stats_alloc(
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_WDI3]);
}
if (disable_force_clear)
ipa3_disable_force_clear(ipa_ep_idx_rx);
fail:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}
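
/*
 * Illustrative lifecycle sketch (not part of this change): a WDI3
 * client would typically drive the helpers above in this order; pipe
 * connect/teardown happens elsewhere in the driver, and the function
 * name below is hypothetical.
 */
static void __maybe_unused wdi3_pipe_lifecycle_example(int tx, int rx)
{
	if (ipa3_enable_wdi3_pipes(tx, rx))
		return;
	/* ... data transfer ... */
	if (ipa3_disable_wdi3_pipes(tx, rx))
		return;
	ipa3_disconn_wdi3_pipes(tx, rx);
}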
int ipa3_write_qmapid_wdi3_gsi_pipe(u32 clnt_hdl, u8 qmap_id)
{
int result = 0;
struct ipa3_ep_context *ep;
union __packed gsi_channel_scratch ch_scratch;
memset(&ch_scratch, 0, sizeof(ch_scratch));
if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
ipa3_ctx->ep[clnt_hdl].valid == 0) {
IPAERR_RL("bad parm, %d\n", clnt_hdl);
return -EINVAL;
}
ep = &ipa3_ctx->ep[clnt_hdl];
IPA_ACTIVE_CLIENTS_INC_EP(ipa3_get_client_mapping(clnt_hdl));
result = gsi_read_channel_scratch(ep->gsi_chan_hdl, &ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to read channel scratch %d\n", result);
goto exit;
}
result = gsi_stop_channel(ep->gsi_chan_hdl);
if (result != GSI_STATUS_SUCCESS && result != -GSI_STATUS_AGAIN &&
result != -GSI_STATUS_TIMED_OUT) {
IPAERR("failed to stop gsi channel %d\n", result);
goto exit;
}
ch_scratch.wdi3.qmap_id = qmap_id;
result = gsi_write_channel_scratch(ep->gsi_chan_hdl,
ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write channel scratch %d\n", result);
goto exit;
}
result = gsi_start_channel(ep->gsi_chan_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to start gsi channel %d\n", result);
goto exit;
}
exit:
IPA_ACTIVE_CLIENTS_DEC_EP(ipa3_get_client_mapping(clnt_hdl));
return result;
}
/**
* ipa3_get_wdi3_gsi_stats() - Query WDI3 gsi stats from uc
* @stats: [inout] stats blob from client populated by driver
*
* Returns: 0 on success, negative on failure
*
* @note Cannot be called from atomic context
*
*/
int ipa3_get_wdi3_gsi_stats(struct ipa3_uc_dbg_ring_stats *stats)
{
int i;
if (!ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio) {
IPAERR("bad NULL parms for wdi3_gsi_stats\n");
return -EINVAL;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_WDI3_CHANNELS; i++) {
stats->ring[i].ringFull = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->ring[i].ringEmpty = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->ring[i].ringUsageHigh = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->ring[i].ringUsageLow = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->ring[i].RingUtilCount = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
}
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
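
/*
 * Debug sketch (illustrative, not part of this change): dump the
 * per-ring uC stats returned by ipa3_get_wdi3_gsi_stats(). The helper
 * name is hypothetical; pr_info() is standard kernel logging.
 */
static void __maybe_unused wdi3_dump_gsi_stats_example(void)
{
	struct ipa3_uc_dbg_ring_stats stats;
	int i;

	if (ipa3_get_wdi3_gsi_stats(&stats))
		return; /* stats MMIO not mapped yet */
	for (i = 0; i < MAX_WDI3_CHANNELS; i++)
		pr_info("wdi3 ring %d: full=%u empty=%u util=%u\n", i,
			stats.ring[i].ringFull, stats.ring[i].ringEmpty,
			stats.ring[i].RingUtilCount);
}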

1846 ipa/ipa_v3/ipa_wigig_i.c (new file)
Diff suppressed because the file is too large

5 ipa/ipa_v3/ipahal/Makefile (new file)

@@ -0,0 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_IPA3) += ipa_hal.o
ipa_hal-y := ipahal.o ipahal_reg.o ipahal_fltrt.o ipahal_hw_stats.o ipahal_nat.o

1616 ipa/ipa_v3/ipahal/ipahal.c (new file)
Diff suppressed because the file is too large

655 ipa/ipa_v3/ipahal/ipahal.h (new file)

@@ -0,0 +1,655 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPAHAL_H_
#define _IPAHAL_H_
#include "../ipa_defs.h"
#include "../../ipa_common_i.h"
/*
* Immediate command names
*
* NOTE:: Any change to this enum, need to change to ipahal_imm_cmd_name_to_str
* array as well.
*/
enum ipahal_imm_cmd_name {
IPA_IMM_CMD_IP_V4_FILTER_INIT,
IPA_IMM_CMD_IP_V6_FILTER_INIT,
IPA_IMM_CMD_IP_V4_NAT_INIT,
IPA_IMM_CMD_IP_V4_ROUTING_INIT,
IPA_IMM_CMD_IP_V6_ROUTING_INIT,
IPA_IMM_CMD_HDR_INIT_LOCAL,
IPA_IMM_CMD_HDR_INIT_SYSTEM,
IPA_IMM_CMD_REGISTER_WRITE,
IPA_IMM_CMD_NAT_DMA,
IPA_IMM_CMD_IP_PACKET_INIT,
IPA_IMM_CMD_DMA_SHARED_MEM,
IPA_IMM_CMD_IP_PACKET_TAG_STATUS,
IPA_IMM_CMD_DMA_TASK_32B_ADDR,
IPA_IMM_CMD_TABLE_DMA,
IPA_IMM_CMD_IP_V6_CT_INIT,
IPA_IMM_CMD_MAX,
};
/* Immediate commands abstracted structures */
/*
* struct ipahal_imm_cmd_ip_v4_filter_init - IP_V4_FILTER_INIT cmd payload
* Inits IPv4 filter block.
* @hash_rules_addr: Addr in sys mem where ipv4 hashable flt tbl starts
* @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v4_filter_init {
u64 hash_rules_addr;
u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
* struct ipahal_imm_cmd_ip_v6_filter_init - IP_V6_FILTER_INIT cmd payload
* Inits IPv6 filter block.
* @hash_rules_addr: Addr in sys mem where ipv6 hashable flt tbl starts
* @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v6_filter_init {
u64 hash_rules_addr;
u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
* struct ipahal_imm_cmd_nat_ipv6ct_init_common - NAT/IPv6CT table init command
* common part
* @base_table_addr: Address in sys/shared mem where base table start
* @expansion_table_addr: Address in sys/shared mem where expansion table
* starts. Entries that result in hash collision are located in this table.
* @base_table_addr_shared: base_table_addr in shared mem (if not, then sys)
* @expansion_table_addr_shared: expansion_rules_addr in
* shared mem (if not, then sys)
* @size_base_table: Num of entries in the base table
* @size_expansion_table: Num of entries in the expansion table
* @table_index: For future support of multiple tables
*/
struct ipahal_imm_cmd_nat_ipv6ct_init_common {
u64 base_table_addr;
u64 expansion_table_addr;
bool base_table_addr_shared;
bool expansion_table_addr_shared;
u16 size_base_table;
u16 size_expansion_table;
u8 table_index;
};
/*
* struct ipahal_imm_cmd_ip_v4_nat_init - IP_V4_NAT_INIT cmd payload
 * Inits IPv4 NAT block. Initiates the NAT table with its dimensions,
 * location, cache address and other related parameters.
* @table_init: table initialization parameters
* @index_table_addr: Addr in sys/shared mem where index table, which points
* to NAT table starts
* @index_table_expansion_addr: Addr in sys/shared mem where expansion index
* table starts
* @index_table_addr_shared: index_table_addr in shared mem (if not, then sys)
* @index_table_expansion_addr_shared: index_table_expansion_addr in
* shared mem (if not, then sys)
* @public_addr_info: Public IP addresses info suitable to the IPA H/W version
* IPA H/W >= 4.0 - PDN config table offset in SMEM
* IPA H/W < 4.0 - The public IP address
*/
struct ipahal_imm_cmd_ip_v4_nat_init {
struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init;
u64 index_table_addr;
u64 index_table_expansion_addr;
bool index_table_addr_shared;
bool index_table_expansion_addr_shared;
u32 public_addr_info;
};
/*
* struct ipahal_imm_cmd_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT cmd payload
 * Inits IPv6CT block. Initiates the IPv6CT table with its dimensions,
 * location, cache address and other related parameters.
* @table_init: table initialization parameters
*/
struct ipahal_imm_cmd_ip_v6_ct_init {
struct ipahal_imm_cmd_nat_ipv6ct_init_common table_init;
};
/*
* struct ipahal_imm_cmd_ip_v4_routing_init - IP_V4_ROUTING_INIT cmd payload
* Inits IPv4 routing table/structure - with the rules and other related params
* @hash_rules_addr: Addr in sys mem where ipv4 hashable rt tbl starts
* @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v4_routing_init {
u64 hash_rules_addr;
u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
* struct ipahal_imm_cmd_ip_v6_routing_init - IP_V6_ROUTING_INIT cmd payload
* Inits IPv6 routing table/structure - with the rules and other related params
* @hash_rules_addr: Addr in sys mem where ipv6 hashable rt tbl starts
* @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
* be copied to
*/
struct ipahal_imm_cmd_ip_v6_routing_init {
u64 hash_rules_addr;
u64 nhash_rules_addr;
u32 hash_rules_size;
u32 hash_local_addr;
u32 nhash_rules_size;
u32 nhash_local_addr;
};
/*
* struct ipahal_imm_cmd_hdr_init_local - HDR_INIT_LOCAL cmd payload
* Inits hdr table within local mem with the hdrs and their length.
* @hdr_table_addr: Word address in sys mem where the table starts (SRC)
* @size_hdr_table: Size of the above (in bytes)
* @hdr_addr: header address in IPA sram (used as DST for memory copy)
* @rsvd: reserved
*/
struct ipahal_imm_cmd_hdr_init_local {
u64 hdr_table_addr;
u32 size_hdr_table;
u32 hdr_addr;
};
/*
* struct ipahal_imm_cmd_hdr_init_system - HDR_INIT_SYSTEM cmd payload
* Inits hdr table within sys mem with the hdrs and their length.
* @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
*/
struct ipahal_imm_cmd_hdr_init_system {
u64 hdr_table_addr;
};
/*
* struct ipahal_imm_cmd_table_dma - TABLE_DMA cmd payload
* Perform DMA operation on NAT and IPV6 connection tracking related mem
* addresses. Copy data into different locations within IPv6CT and NAT
* associated tbls. (For add/remove NAT rules)
* @offset: offset in bytes from base addr to write 'data' to
* @data: data to be written
* @table_index: NAT tbl index. Defines the tbl on which to perform DMA op.
* @base_addr: Base addr to which the DMA operation should be performed.
*/
struct ipahal_imm_cmd_table_dma {
u32 offset;
u16 data;
u8 table_index;
u8 base_addr;
};
/*
* struct ipahal_imm_cmd_ip_packet_init - IP_PACKET_INIT cmd payload
* Configuration for specific IP pkt. Shall be called prior to an IP pkt
* data. Pkt will not go through IP pkt processing.
* @destination_pipe_index: Destination pipe index (in case routing
* is enabled, this field will overwrite the rt rule)
*/
struct ipahal_imm_cmd_ip_packet_init {
u32 destination_pipe_index;
};
/*
* enum ipa_pipeline_clear_option - Values for pipeline clear waiting options
* @IPAHAL_HPS_CLEAR: Wait for HPS clear. All queues except high priority queue
* shall not be serviced until HPS is clear of packets or immediate commands.
* The high priority Rx queue / Q6ZIP group shall still be serviced normally.
*
* @IPAHAL_SRC_GRP_CLEAR: Wait for originating source group to be clear
* (for no packet contexts allocated to the originating source group).
* The source group / Rx queue shall not be serviced until all previously
* allocated packet contexts are released. All other source groups/queues shall
* be serviced normally.
*
* @IPAHAL_FULL_PIPELINE_CLEAR: Wait for full pipeline to be clear.
* All groups / Rx queues shall not be serviced until IPA pipeline is fully
* clear. This should be used for debug only.
*/
enum ipahal_pipeline_clear_option {
IPAHAL_HPS_CLEAR,
IPAHAL_SRC_GRP_CLEAR,
IPAHAL_FULL_PIPELINE_CLEAR
};
/*
* struct ipahal_imm_cmd_register_write - REGISTER_WRITE cmd payload
* Write value to register. Allows reg changes to be synced with data packet
* and other immediate commands. Can be used to access the sram
* @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
* @value: value to write to register
* @value_mask: mask specifying which value bits to write to the register
 * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
 * @pipeline_clear_options: options for pipeline clear waiting
*/
struct ipahal_imm_cmd_register_write {
u32 offset;
u32 value;
u32 value_mask;
bool skip_pipeline_clear;
enum ipahal_pipeline_clear_option pipeline_clear_options;
};
/*
* struct ipahal_imm_cmd_dma_shared_mem - DMA_SHARED_MEM cmd payload
* Perform mem copy into or out of the SW area of IPA local mem
* @system_addr: Address in system memory
* @size: Size in bytes of data to copy. Expected size is up to 2K bytes
* @local_addr: Address in IPA local memory
 * @clear_after_read: Clear local memory at the end of a read operation;
 * allows an atomic read-and-clear if HPS is clear. Ignored for writes.
* @is_read: Read operation from local memory? If not, then write.
 * @skip_pipeline_clear: whether to skip pipeline clear waiting (don't wait)
 * @pipeline_clear_options: options for pipeline clear waiting
*/
struct ipahal_imm_cmd_dma_shared_mem {
u64 system_addr;
u32 size;
u32 local_addr;
bool clear_after_read;
bool is_read;
bool skip_pipeline_clear;
enum ipahal_pipeline_clear_option pipeline_clear_options;
};
/*
* struct ipahal_imm_cmd_ip_packet_tag_status - IP_PACKET_TAG_STATUS cmd payload
 * This cmd is used to allow SW to track HW processing by setting a TAG
* value that is passed back to SW inside Packet Status information.
* TAG info will be provided as part of Packet Status info generated for
* the next pkt transferred over the pipe.
* This immediate command must be followed by a packet in the same transfer.
* @tag: Tag that is provided back to SW
*/
struct ipahal_imm_cmd_ip_packet_tag_status {
u64 tag;
};
/*
* struct ipahal_imm_cmd_dma_task_32b_addr - IPA_DMA_TASK_32B_ADDR cmd payload
* Used by clients using 32bit addresses. Used to perform DMA operation on
* multiple descriptors.
 * The Opcode is dynamic, where it holds the number of buffers to process
* @cmplt: Complete flag: If true, IPA interrupt SW when the entire
* DMA related data was completely xfered to its destination.
 * @eof: End Of Frame flag: If true, IPA asserts the EOT to the
 * dest client. This is used for the aggr sequence
* @flsh: Flush flag: If true pkt will go through the IPA blocks but
* will not be xfered to dest client but rather will be discarded
* @lock: Lock pipe flag: If true, IPA will stop processing descriptors
* from other EPs in the same src grp (RX queue)
* @unlock: Unlock pipe flag: If true, IPA will stop exclusively
* servicing current EP out of the src EPs of the grp (RX queue)
* @size1: Size of buffer1 data
* @addr1: Pointer to buffer1 data
 * @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
* only the first one needs to have this field set. It will be ignored
* in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
* must contain this field (2 or more buffers) or EOT.
*/
struct ipahal_imm_cmd_dma_task_32b_addr {
bool cmplt;
bool eof;
bool flsh;
bool lock;
bool unlock;
u32 size1;
u32 addr1;
u32 packet_size;
};
/*
* struct ipahal_imm_cmd_pyld - Immediate cmd payload information
* @len: length of the buffer
* @opcode: opcode of the immediate command
* @data: buffer contains the immediate command payload. Buffer goes
* back to back with this structure
*/
struct ipahal_imm_cmd_pyld {
u16 len;
u16 opcode;
u8 data[0];
};
/* Immediate command Function APIs */
/*
* ipahal_imm_cmd_name_str() - returns string that represent the imm cmd
* @cmd_name: [in] Immediate command name
*/
const char *ipahal_imm_cmd_name_str(enum ipahal_imm_cmd_name cmd_name);
/*
 * ipahal_construct_imm_cmd() - Construct immediate command
 * This function builds an imm cmd bulk that can be sent to IPA
* The command will be allocated dynamically.
* After done using it, call ipahal_destroy_imm_cmd() to release it
*/
struct ipahal_imm_cmd_pyld *ipahal_construct_imm_cmd(
enum ipahal_imm_cmd_name cmd, const void *params, bool is_atomic_ctx);
/*
 * ipahal_construct_nop_imm_cmd() - Construct immediate command for NO-Op
 * Core driver may want functionality to inject NOP commands to IPA
 * to ensure e.g., PIPELINE clear before some other operation.
 * The functionality given by this function can be reached by
 * ipahal_construct_imm_cmd(). This function is a helper for the core driver
 * to reach this NOP functionality easily.
 * @skip_pipline_clear: whether to skip pipeline clear waiting (don't wait)
* @pipline_clr_opt: options for pipeline clear waiting
* @is_atomic_ctx: is called in atomic context or can sleep?
*/
struct ipahal_imm_cmd_pyld *ipahal_construct_nop_imm_cmd(
bool skip_pipline_clear,
enum ipahal_pipeline_clear_option pipline_clr_opt,
bool is_atomic_ctx);
/*
* ipahal_destroy_imm_cmd() - Destroy/Release bulk that was built
* by the construction functions
*/
static inline void ipahal_destroy_imm_cmd(struct ipahal_imm_cmd_pyld *pyld)
{
kfree(pyld);
}
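/*
 * Usage sketch (illustrative, not part of this change): build a
 * REGISTER_WRITE immediate command, hand pyld->data / pyld->len to the
 * transport (descriptor path not shown), then release the payload.
 * The helper name is hypothetical.
 */
static inline int ipahal_example_reg_write(u32 offset, u32 value)
{
	struct ipahal_imm_cmd_register_write cmd = { 0 };
	struct ipahal_imm_cmd_pyld *pyld;

	cmd.offset = offset;
	cmd.value = value;
	cmd.value_mask = ~0;
	cmd.pipeline_clear_options = IPAHAL_HPS_CLEAR;
	pyld = ipahal_construct_imm_cmd(IPA_IMM_CMD_REGISTER_WRITE,
		&cmd, false);
	if (!pyld)
		return -ENOMEM;
	/* ... queue pyld->data (pyld->len bytes) on a cmd descriptor ... */
	ipahal_destroy_imm_cmd(pyld);
	return 0;
}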
/* IPA Status packet Structures and Function APIs */
/*
* enum ipahal_pkt_status_opcode - Packet Status Opcode
* @IPAHAL_STATUS_OPCODE_PACKET_2ND_PASS: Packet Status generated as part of
* IPA second processing pass for a packet (i.e. IPA XLAT processing for
* the translated packet).
*/
enum ipahal_pkt_status_opcode {
IPAHAL_PKT_STATUS_OPCODE_PACKET = 0,
IPAHAL_PKT_STATUS_OPCODE_NEW_FRAG_RULE,
IPAHAL_PKT_STATUS_OPCODE_DROPPED_PACKET,
IPAHAL_PKT_STATUS_OPCODE_SUSPENDED_PACKET,
IPAHAL_PKT_STATUS_OPCODE_LOG,
IPAHAL_PKT_STATUS_OPCODE_DCMP,
IPAHAL_PKT_STATUS_OPCODE_PACKET_2ND_PASS,
};
/*
* enum ipahal_pkt_status_exception - Packet Status exception type
* @IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH: formerly IHL exception.
*
 * Note: IPTYPE, PACKET_LENGTH and PACKET_THRESHOLD exceptions mean that
* partial / no IP processing took place and corresponding Status Mask
* fields should be ignored. Flt and rt info is not valid.
*
* NOTE:: Any change to this enum, need to change to
* ipahal_pkt_status_exception_to_str array as well.
*/
enum ipahal_pkt_status_exception {
IPAHAL_PKT_STATUS_EXCEPTION_NONE = 0,
IPAHAL_PKT_STATUS_EXCEPTION_DEAGGR,
IPAHAL_PKT_STATUS_EXCEPTION_IPTYPE,
IPAHAL_PKT_STATUS_EXCEPTION_PACKET_LENGTH,
IPAHAL_PKT_STATUS_EXCEPTION_PACKET_THRESHOLD,
IPAHAL_PKT_STATUS_EXCEPTION_FRAG_RULE_MISS,
IPAHAL_PKT_STATUS_EXCEPTION_SW_FILT,
/*
* NAT and IPv6CT have the same value at HW.
* NAT for IPv4 and IPv6CT for IPv6 exceptions
*/
IPAHAL_PKT_STATUS_EXCEPTION_NAT,
IPAHAL_PKT_STATUS_EXCEPTION_IPV6CT,
IPAHAL_PKT_STATUS_EXCEPTION_CSUM,
IPAHAL_PKT_STATUS_EXCEPTION_MAX,
};
/*
* enum ipahal_pkt_status_mask - Packet Status bitmask shift values of
* the contained flags. This bitmask indicates flags on the properties of
 * the packet as well as IPA processing it may have had.
* @FRAG_PROCESS: Frag block processing flag: Was pkt processed by frag block?
* Also means the frag info is valid unless exception or first frag
* @FILT_PROCESS: Flt block processing flag: Was pkt processed by flt block?
* Also means that flt info is valid.
* @NAT_PROCESS: NAT block processing flag: Was pkt processed by NAT block?
* Also means that NAT info is valid, unless exception.
* @ROUTE_PROCESS: Rt block processing flag: Was pkt processed by rt block?
* Also means that rt info is valid, unless exception.
* @TAG_VALID: Flag specifying if TAG and TAG info valid?
* @FRAGMENT: Flag specifying if pkt is IP fragment.
* @FIRST_FRAGMENT: Flag specifying if pkt is first fragment. In this case, frag
* info is invalid
* @V4: Flag specifying pkt is IPv4 or IPv6
* @CKSUM_PROCESS: CSUM block processing flag: Was pkt processed by csum block?
* If so, csum trailer exists
* @AGGR_PROCESS: Aggr block processing flag: Was pkt processed by aggr block?
* @DEST_EOT: Flag specifying if EOT was asserted for the pkt on dest endp
* @DEAGGR_PROCESS: Deaggr block processing flag: Was pkt processed by deaggr
* block?
* @DEAGG_FIRST: Flag specifying if this is the first pkt in deaggr frame
* @SRC_EOT: Flag specifying if EOT asserted by src endp when sending the buffer
* @PREV_EOT: Flag specifying if EOT was sent just before the pkt as part of
* aggr hard-byte-limit
* @BYTE_LIMIT: Flag specifying if pkt is over a configured byte limit.
*/
enum ipahal_pkt_status_mask {
IPAHAL_PKT_STATUS_MASK_FRAG_PROCESS_SHFT = 0,
IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT,
IPAHAL_PKT_STATUS_MASK_NAT_PROCESS_SHFT,
IPAHAL_PKT_STATUS_MASK_ROUTE_PROCESS_SHFT,
IPAHAL_PKT_STATUS_MASK_TAG_VALID_SHFT,
IPAHAL_PKT_STATUS_MASK_FRAGMENT_SHFT,
IPAHAL_PKT_STATUS_MASK_FIRST_FRAGMENT_SHFT,
IPAHAL_PKT_STATUS_MASK_V4_SHFT,
IPAHAL_PKT_STATUS_MASK_CKSUM_PROCESS_SHFT,
IPAHAL_PKT_STATUS_MASK_AGGR_PROCESS_SHFT,
IPAHAL_PKT_STATUS_MASK_DEST_EOT_SHFT,
IPAHAL_PKT_STATUS_MASK_DEAGGR_PROCESS_SHFT,
IPAHAL_PKT_STATUS_MASK_DEAGG_FIRST_SHFT,
IPAHAL_PKT_STATUS_MASK_SRC_EOT_SHFT,
IPAHAL_PKT_STATUS_MASK_PREV_EOT_SHFT,
IPAHAL_PKT_STATUS_MASK_BYTE_LIMIT_SHFT,
};
/*
 * Returns a boolean value representing a property of the packet.
 * @__flag_shft: The shift value of the flag of the status bitmask of
 * the needed property. See enum ipahal_pkt_status_mask
 * @__status: Pointer to abstracted status structure
*/
#define IPAHAL_PKT_STATUS_MASK_FLAG_VAL(__flag_shft, __status) \
(((__status)->status_mask) & ((u32)0x1<<(__flag_shft)) ? true : false)
/*
* enum ipahal_pkt_status_nat_type - Type of NAT
*/
enum ipahal_pkt_status_nat_type {
IPAHAL_PKT_STATUS_NAT_NONE,
IPAHAL_PKT_STATUS_NAT_SRC,
IPAHAL_PKT_STATUS_NAT_DST,
};
/*
* struct ipahal_pkt_status - IPA status packet abstracted payload.
* This structure describes the status packet fields for the
* following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
* IPA_STATUS_SUSPENDED_PACKET.
 * Other status types have a different status packet structure.
* @tag_info: S/W defined value provided via immediate command
* @status_opcode: The Type of the status (Opcode).
* @exception: The first exception that took place.
* In case of exception, src endp and pkt len are always valid.
* @status_mask: Bit mask for flags on several properties on the packet
 * and the processing it may have passed in IPA. See enum ipahal_pkt_status_mask
* @pkt_len: Pkt pyld len including hdr and retained hdr if used. Does
* not include padding or checksum trailer len.
* @metadata: meta data value used by packet
 * @flt_local: Filter table location flag: Does the matching flt rule belong
 * to a flt tbl that resides in lcl memory? (if not, then system mem)
 * @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
 * @flt_global: Global filter rule flag: Does the matching flt rule belong to
 * the global flt tbl? (if not, then the per endp tables)
 * @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
 * specify to retain the header?
 * Starting IPA4.5, this will be true only if the packet has an L2 header.
 * @flt_miss: Filtering miss flag: Was there a filtering rule miss?
 * In case of a miss, all flt info is to be ignored
 * @rt_local: Route table location flag: Does the matching rt rule belong to
 * an rt tbl that resides in lcl memory? (if not, then system mem)
 * @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
 * @ucp: UC Processing flag
 * @rt_miss: Routing miss flag: Was there a routing rule miss?
 * @nat_hit: NAT hit flag: Was there a NAT hit?
 * @nat_type: Defines the type of the NAT operation
 * @time_of_day_ctr: running counter from IPA clock
 * @hdr_local: Header table location flag: In header insertion, was the header
 * taken from the table residing in local memory? (If not, then system mem)
 * @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
 * @flt_rule_id: The ID of the matching filter rule (if no miss).
 * This info can be combined with endp_src_idx to locate the exact rule.
 * @rt_rule_id: The ID of the matching rt rule (if no miss). This info
 * can be combined with rt_tbl_idx to locate the exact rule.
 * @nat_entry_idx: Index of the NAT entry used for NAT processing
* @hdr_offset: Offset of used header in the header table
* @endp_src_idx: Source end point index.
* @endp_dest_idx: Destination end point index.
* Not valid in case of exception
* @rt_tbl_idx: Index of rt tbl that contains the rule on which was a match
* @seq_num: Per source endp unique packet sequence number
* @frag_rule: Frag rule index in H/W frag table in case of frag hit
*/
struct ipahal_pkt_status {
u64 tag_info;
enum ipahal_pkt_status_opcode status_opcode;
enum ipahal_pkt_status_exception exception;
u32 status_mask;
u32 pkt_len;
u32 metadata;
bool flt_local;
bool flt_hash;
bool flt_global;
bool flt_ret_hdr;
bool flt_miss;
bool rt_local;
bool rt_hash;
bool ucp;
bool rt_miss;
bool nat_hit;
enum ipahal_pkt_status_nat_type nat_type;
u32 time_of_day_ctr;
bool hdr_local;
bool frag_hit;
u16 flt_rule_id;
u16 rt_rule_id;
u16 nat_entry_idx;
u16 hdr_offset;
u8 endp_src_idx;
u8 endp_dest_idx;
u8 rt_tbl_idx;
u8 seq_num;
u8 frag_rule;
};
/*
* ipahal_pkt_status_get_size() - Get H/W size of packet status
*/
u32 ipahal_pkt_status_get_size(void);
/*
* ipahal_pkt_status_parse() - Parse Packet Status payload to abstracted form
* @unparsed_status: Pointer to H/W format of the packet status as read from H/W
* @status: Pointer to pre-allocated buffer where the parsed info will be stored
*/
void ipahal_pkt_status_parse(const void *unparsed_status,
struct ipahal_pkt_status *status);
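/*
 * Usage sketch (illustrative, hypothetical helper): parse a raw H/W
 * status buffer and test one status_mask flag via
 * IPAHAL_PKT_STATUS_MASK_FLAG_VAL.
 */
static inline bool ipahal_example_pkt_was_filtered(const void *raw)
{
	struct ipahal_pkt_status status;

	ipahal_pkt_status_parse(raw, &status);
	return IPAHAL_PKT_STATUS_MASK_FLAG_VAL(
		IPAHAL_PKT_STATUS_MASK_FILT_PROCESS_SHFT, &status);
}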
/*
* ipahal_pkt_status_exception_str() - returns string represents exception type
* @exception: [in] The exception type
*/
const char *ipahal_pkt_status_exception_str(
enum ipahal_pkt_status_exception exception);
/*
* ipahal_cp_hdr_to_hw_buff() - copy header to hardware buffer according to
* base address and offset given.
* @base: dma base address
* @offset: offset from base address where the data will be copied
* @hdr: the header to be copied
* @hdr_len: the length of the header
*/
void ipahal_cp_hdr_to_hw_buff(void *base, u32 offset, u8 *hdr, u32 hdr_len);
/*
* ipahal_cp_proc_ctx_to_hw_buff() - copy processing context to
* base address and offset given.
* @type: type of header processing context
* @base: dma base address
* @offset: offset from base address where the data will be copied
* @hdr_len: the length of the header
* @is_hdr_proc_ctx: header is located in phys_base (true) or hdr_base_addr
* @phys_base: memory location in DDR
* @hdr_base_addr: base address in table
* @offset_entry: offset from hdr_base_addr in table
* @l2tp_params: l2tp parameters
* @is_64: Indicates whether header base address/dma base address is 64 bit.
*/
int ipahal_cp_proc_ctx_to_hw_buff(enum ipa_hdr_proc_type type,
void *base, u32 offset, u32 hdr_len,
bool is_hdr_proc_ctx, dma_addr_t phys_base,
u64 hdr_base_addr,
struct ipa_hdr_offset_entry *offset_entry,
struct ipa_l2tp_hdr_proc_ctx_params l2tp_params, bool is_64);
/*
* ipahal_get_proc_ctx_needed_len() - calculates the needed length for addition
* of header processing context according to the type of processing context
* @type: header processing context type (no processing context,
* IPA_HDR_PROC_ETHII_TO_ETHII etc.)
*/
int ipahal_get_proc_ctx_needed_len(enum ipa_hdr_proc_type type);
int ipahal_init(enum ipa_hw_type ipa_hw_type, void __iomem *base,
struct device *ipa_pdev);
void ipahal_destroy(void);
void ipahal_free_dma_mem(struct ipa_mem_buffer *mem);
#endif /* _IPAHAL_H_ */

4372 ipa/ipa_v3/ipahal/ipahal_fltrt.c (new file)
Diff suppressed because the file is too large

308 ipa/ipa_v3/ipahal/ipahal_fltrt.h (new file)

@@ -0,0 +1,308 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPAHAL_FLTRT_H_
#define _IPAHAL_FLTRT_H_
/*
* struct ipahal_fltrt_alloc_imgs_params - Params for tbls imgs allocations
* The allocation logic will allocate DMA memory representing the header.
 * If the bodies are local (SRAM), the allocation will allocate
 * DMA buffers that contain the content of these local tables in raw form
* @ipt: IP version type
* @tbls_num: Number of tables to represent by the header
* @num_lcl_hash_tbls: Number of local (sram) hashable tables
* @num_lcl_nhash_tbls: Number of local (sram) non-hashable tables
* @total_sz_lcl_hash_tbls: Total size of local hashable tables
* @total_sz_lcl_nhash_tbls: Total size of local non-hashable tables
* @hash_hdr/nhash_hdr: OUT params for the header structures
* @hash_bdy/nhash_bdy: OUT params for the local body structures
*/
struct ipahal_fltrt_alloc_imgs_params {
enum ipa_ip_type ipt;
u32 tbls_num;
u32 num_lcl_hash_tbls;
u32 num_lcl_nhash_tbls;
u32 total_sz_lcl_hash_tbls;
u32 total_sz_lcl_nhash_tbls;
/* OUT PARAMS */
struct ipa_mem_buffer hash_hdr;
struct ipa_mem_buffer nhash_hdr;
struct ipa_mem_buffer hash_bdy;
struct ipa_mem_buffer nhash_bdy;
};
/*
* enum ipahal_rt_rule_hdr_type - Header type used in rt rules
* @IPAHAL_RT_RULE_HDR_NONE: No header is used
* @IPAHAL_RT_RULE_HDR_RAW: Raw header is used
* @IPAHAL_RT_RULE_HDR_PROC_CTX: Header Processing context is used
*/
enum ipahal_rt_rule_hdr_type {
IPAHAL_RT_RULE_HDR_NONE,
IPAHAL_RT_RULE_HDR_RAW,
IPAHAL_RT_RULE_HDR_PROC_CTX,
};
/*
* struct ipahal_rt_rule_gen_params - Params for generating rt rule
* @ipt: IP family version
* @dst_pipe_idx: Destination pipe index
* @hdr_type: Header type to be used
 * @hdr_lcl: Is the header in the local or system table?
* @hdr_ofst: Offset of the header in the header table
* @priority: Rule priority
* @id: Rule ID
* @cnt_idx: Stats counter index
* @rule: Rule info
*/
struct ipahal_rt_rule_gen_params {
enum ipa_ip_type ipt;
int dst_pipe_idx;
enum ipahal_rt_rule_hdr_type hdr_type;
bool hdr_lcl;
u32 hdr_ofst;
u32 priority;
u32 id;
u8 cnt_idx;
const struct ipa_rt_rule_i *rule;
};
/*
* struct ipahal_rt_rule_entry - Rt rule info parsed from H/W
* @dst_pipe_idx: Destination pipe index
 * @hdr_lcl: Is the referenced header located in sram or system mem?
* @hdr_ofst: Offset of the header in the header table
* @hdr_type: Header type to be used
* @priority: Rule priority
* @retain_hdr: to retain the removed header in header removal
* @id: Rule ID
* @cnt_idx: stats counter index
* @eq_attrib: Equations and their params in the rule
* @rule_size: Rule size in memory
*/
struct ipahal_rt_rule_entry {
int dst_pipe_idx;
bool hdr_lcl;
u32 hdr_ofst;
enum ipahal_rt_rule_hdr_type hdr_type;
u32 priority;
bool retain_hdr;
u32 id;
u8 cnt_idx;
struct ipa_ipfltri_rule_eq eq_attrib;
u32 rule_size;
};
/*
* struct ipahal_flt_rule_gen_params - Params for generating flt rule
* @ipt: IP family version
* @rt_tbl_idx: Routing table the rule pointing to
* @priority: Rule priority
* @id: Rule ID
* @cnt_idx: Stats counter index
* @rule: Rule info
*/
struct ipahal_flt_rule_gen_params {
enum ipa_ip_type ipt;
u32 rt_tbl_idx;
u32 priority;
u32 id;
u8 cnt_idx;
const struct ipa_flt_rule_i *rule;
};
/*
* struct ipahal_flt_rule_entry - Flt rule info parsed from H/W
* @rule: Rule info
* @priority: Rule priority
* @id: Rule ID
* @cnt_idx: stats counter index
* @rule_size: Rule size in memory
*/
struct ipahal_flt_rule_entry {
struct ipa_flt_rule_i rule;
u32 priority;
u32 id;
u8 cnt_idx;
u32 rule_size;
};
/* Get the H/W table (flt/rt) header width */
u32 ipahal_get_hw_tbl_hdr_width(void);
/* Get the H/W local table (SRAM) address alignment
* Tables headers references to local tables via offsets in SRAM
* This function return the alignment of the offset that IPA expects
*/
u32 ipahal_get_lcl_tbl_addr_alignment(void);
/*
 * Rule priority is used to distinguish rule order
 * in the integrated table consisting of hashable and
 * non-hashable tables. A max-priority rule is used as soon as IPA
 * scans it; IPA will not look for further matching rules.
*/
int ipahal_get_rule_max_priority(void);
/* Given a priority, calc and return the next lower one if it is in
* legal range.
*/
int ipahal_rule_decrease_priority(int *prio);
/* Does the given ID represent a rule miss? */
bool ipahal_is_rule_miss_id(u32 id);
/* Get rule ID with high bit only asserted
* Used e.g. to create groups of IDs according to this bit
*/
u32 ipahal_get_rule_id_hi_bit(void);
/* Get the low value possible to be used for rule-id */
u32 ipahal_get_low_rule_id(void);
/*
* low value possible for counter hdl id
*/
u32 ipahal_get_low_hdl_id(void);
/*
* max counter hdl id for stats
*/
u32 ipahal_get_high_hdl_id(void);
/* used for query check and associated with rt/flt rules */
bool ipahal_is_rule_cnt_id_valid(u8 cnt_id);
/* max rule id for stats */
bool ipahal_get_max_stats_rule_id(void);
/*
* ipahal_rt_generate_empty_img() - Generate empty route image
* Creates routing header buffer for the given tables number.
* For each table, make it point to the empty table on DDR.
* @tbls_num: Number of tables. For each will have an entry in the header
* @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
* @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
* @mem: mem object that points to DMA mem representing the hdr structure
* @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_rt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
u32 nhash_hdr_size, struct ipa_mem_buffer *mem, bool atomic);
/*
* ipahal_flt_generate_empty_img() - Generate empty filter image
* Creates filter header buffer for the given tables number.
* For each table, make it point to the empty table on DDR.
* @tbls_num: Number of tables. For each will have an entry in the header
* @hash_hdr_size: SRAM buf size of the hash tbls hdr. Used for space check
* @nhash_hdr_size: SRAM buf size of the nhash tbls hdr. Used for space check
 * @ep_bitmap: Bitmap representing the EPs that have flt tables. The format
* should be: bit0->EP0, bit1->EP1
* @mem: mem object that points to DMA mem representing the hdr structure
* @atomic: should DMA allocation be executed with atomic flag
*/
int ipahal_flt_generate_empty_img(u32 tbls_num, u32 hash_hdr_size,
u32 nhash_hdr_size, u64 ep_bitmap, struct ipa_mem_buffer *mem,
bool atomic);
/*
* ipahal_fltrt_allocate_hw_tbl_imgs() - Allocate tbl images DMA structures
* Used usually during commit.
 * Allocates header structures and inits them to point to the empty DDR table
 * Allocates body structures for local body tables
* @params: Parameters for IN and OUT regard the allocation.
*/
int ipahal_fltrt_allocate_hw_tbl_imgs(
struct ipahal_fltrt_alloc_imgs_params *params);
/*
* ipahal_fltrt_allocate_hw_sys_tbl() - Allocate DMA mem for H/W flt/rt sys tbl
 * @tbl_mem: IN/OUT param: size holds the effective table size; the pointer
 * receives the allocated memory.
*
* The size is adapted for needed alignments/borders.
*/
int ipahal_fltrt_allocate_hw_sys_tbl(struct ipa_mem_buffer *tbl_mem);
/*
* ipahal_fltrt_write_addr_to_hdr() - Fill table header with table address
* Given table addr/offset, adapt it to IPA H/W format and write it
* to given header index.
* @addr: Address or offset to be used
* @hdr_base: base address of header structure to write the address
* @hdr_idx: index of the address in the header structure
* @is_sys: Is it system address or local offset
*/
int ipahal_fltrt_write_addr_to_hdr(u64 addr, void *hdr_base, u32 hdr_idx,
bool is_sys);
/*
 * ipahal_fltrt_read_addr_from_hdr() - Given an sram address, read its
* content (physical address or offset) and parse it.
* @hdr_base: base sram address of the header structure.
* @hdr_idx: index of the header entry line in the header structure.
* @addr: The parsed address - Out parameter
* @is_sys: Is this system or local address - Out parameter
*/
int ipahal_fltrt_read_addr_from_hdr(void *hdr_base, u32 hdr_idx, u64 *addr,
bool *is_sys);
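/*
 * Usage sketch (illustrative, hypothetical helper): write a system
 * address into header slot 0, then read it back in parsed form.
 */
static inline int ipahal_example_hdr_addr_roundtrip(void *hdr_base, u64 addr)
{
	u64 parsed = 0;
	bool is_sys = false;
	int rc;

	rc = ipahal_fltrt_write_addr_to_hdr(addr, hdr_base, 0, true);
	if (rc)
		return rc;
	return ipahal_fltrt_read_addr_from_hdr(hdr_base, 0, &parsed, &is_sys);
}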
/*
* ipahal_rt_generate_hw_rule() - generates the routing hardware rule.
* @params: Params for the rule creation.
* @hw_len: Size of the H/W rule to be returned
* @buf: Buffer to build the rule in. If buf is NULL, then the rule will
* be built in internal temp buf. This is used e.g. to get the rule size
* only.
*/
int ipahal_rt_generate_hw_rule(struct ipahal_rt_rule_gen_params *params,
u32 *hw_len, u8 *buf);
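/*
 * Usage sketch (illustrative, hypothetical helper): pass a NULL buf
 * first to learn the rule size, then emit the rule into the caller's
 * buffer.
 */
static inline int ipahal_example_build_rt_rule(
	struct ipahal_rt_rule_gen_params *params, u8 *buf, u32 buf_len)
{
	u32 len = 0;
	int rc;

	rc = ipahal_rt_generate_hw_rule(params, &len, NULL);
	if (rc)
		return rc;
	if (len > buf_len)
		return -ENOSPC;
	return ipahal_rt_generate_hw_rule(params, &len, buf);
}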
/*
* ipahal_flt_generate_hw_rule() - generates the filtering hardware rule.
* @params: Params for the rule creation.
* @hw_len: Size of the H/W rule to be returned
* @buf: Buffer to build the rule in. If buf is NULL, then the rule will
* be built in internal temp buf. This is used e.g. to get the rule size
* only.
*/
int ipahal_flt_generate_hw_rule(struct ipahal_flt_rule_gen_params *params,
u32 *hw_len, u8 *buf);
/*
* ipahal_flt_generate_equation() - generate flt rule in equation form
* Will build equation form flt rule from given info.
* @ipt: IP family
* @attrib: Rule attribute to be generated
* @eq_atrb: Equation form generated rule
* Note: Usage example: Pass the generated form to other sub-systems
 * for inter-subsystem rule exchange.
*/
int ipahal_flt_generate_equation(enum ipa_ip_type ipt,
const struct ipa_rule_attrib *attrib,
struct ipa_ipfltri_rule_eq *eq_atrb);
/*
 * ipahal_rt_parse_hw_rule() - Parse H/W formatted rt rule
* Given the rule address, read the rule info from H/W and parse it.
* @rule_addr: Rule address (virtual memory)
* @rule: Out parameter for parsed rule info
*/
int ipahal_rt_parse_hw_rule(u8 *rule_addr,
struct ipahal_rt_rule_entry *rule);
/*
 * ipahal_flt_parse_hw_rule() - Parse H/W formatted flt rule
* Given the rule address, read the rule info from H/W and parse it.
* @rule_addr: Rule address (virtual memory)
* @rule: Out parameter for parsed rule info
*/
int ipahal_flt_parse_hw_rule(u8 *rule_addr,
struct ipahal_flt_rule_entry *rule);
#endif /* _IPAHAL_FLTRT_H_ */

257 ipa/ipa_v3/ipahal/ipahal_fltrt_i.h (new file)

@@ -0,0 +1,257 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*/
#ifndef _IPAHAL_FLTRT_I_H_
#define _IPAHAL_FLTRT_I_H_
/*
* enum ipa_fltrt_equations - RULE equations
 * These are named values for the equations that can be used
 * The HAL layer holds the mapping between these names and the H/W
 * representation.
*/
enum ipa_fltrt_equations {
IPA_TOS_EQ,
IPA_PROTOCOL_EQ,
IPA_TC_EQ,
IPA_OFFSET_MEQ128_0,
IPA_OFFSET_MEQ128_1,
IPA_OFFSET_MEQ32_0,
IPA_OFFSET_MEQ32_1,
IPA_IHL_OFFSET_MEQ32_0,
IPA_IHL_OFFSET_MEQ32_1,
IPA_METADATA_COMPARE,
IPA_IHL_OFFSET_RANGE16_0,
IPA_IHL_OFFSET_RANGE16_1,
IPA_IHL_OFFSET_EQ_32,
IPA_IHL_OFFSET_EQ_16,
IPA_FL_EQ,
IPA_IS_FRAG,
IPA_IS_PURE_ACK,
IPA_EQ_MAX,
};
/* Width and Alignment values for H/W structures.
* Specific for IPA version.
*/
#define IPA3_0_HW_TBL_SYSADDR_ALIGNMENT (127)
#define IPA3_0_HW_TBL_LCLADDR_ALIGNMENT (7)
#define IPA3_0_HW_TBL_BLK_SIZE_ALIGNMENT (127)
#define IPA3_0_HW_TBL_WIDTH (8)
#define IPA3_0_HW_TBL_HDR_WIDTH (8)
#define IPA3_0_HW_TBL_ADDR_MASK (127)
#define IPA3_0_HW_RULE_BUF_SIZE (256)
#define IPA3_0_HW_RULE_START_ALIGNMENT (7)
/*
* Rules Priority.
 * Needed due to rule classification into hashable and non-hashable.
 * Higher priority means a lower number, i.e. 0 is the highest priority
*/
#define IPA3_0_RULE_MAX_PRIORITY (0)
#define IPA3_0_RULE_MIN_PRIORITY (1023)
/*
* RULE ID, bit length (e.g. 10 bits).
*/
#define IPA3_0_RULE_ID_BIT_LEN (10)
#define IPA3_0_LOW_RULE_ID (1)
/*
* COUNTER ID, LOW COUNTER ID.
*/
#define IPA4_5_LOW_CNT_ID (1)
/**
* struct ipa3_0_rt_rule_hw_hdr - HW header of IPA routing rule
* @word: routing rule header properties
* @en_rule: enable rule - Equation bit fields
* @pipe_dest_idx: destination pipe index
 * @system: Is the referenced header in lcl or sys memory?
* @hdr_offset: header offset
* @proc_ctx: whether hdr_offset points to header table or to
* header processing context table
 * @priority: Rule priority. Added to distinguish rule order
 * in the integrated table consisting of hashable and
 * non-hashable parts
* @rsvd1: reserved bits
* @retain_hdr: added to add back to the packet the header removed
* as part of header removal. This will be done as part of
* header insertion block.
* @rule_id: rule ID that will be returned in the packet status
* @rsvd2: reserved bits
*/
struct ipa3_0_rt_rule_hw_hdr {
union {
u64 word;
struct {
u64 en_rule:16;
u64 pipe_dest_idx:5;
u64 system:1;
u64 hdr_offset:9;
u64 proc_ctx:1;
u64 priority:10;
u64 rsvd1:5;
u64 retain_hdr:1;
u64 rule_id:10;
u64 rsvd2:6;
} hdr;
} u;
};
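/*
 * Illustrative packing note (assumes the usual little-endian bit-field
 * layout): en_rule occupies bits 0..15 and pipe_dest_idx bits 16..20,
 * so en_rule = 0x3 with pipe_dest_idx = 5 yields
 * u.word = 0x3 | (5ULL << 16).
 */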
/**
 * struct ipa4_5_rt_rule_hw_hdr - HW header of IPA routing rule
* @word: routing rule header properties
* @en_rule: enable rule - Equation bit fields
* @pipe_dest_idx: destination pipe index
 * @system: Is the referenced header in lcl or sys memory?
* @hdr_offset: header offset
* @proc_ctx: whether hdr_offset points to header table or to
* header processing context table
 * @priority: Rule priority. Added to distinguish rule order
 * in the integrated table consisting of hashable and
 * non-hashable parts
* @stats_cnt_idx_msb: stats cnt index msb
* @rsvd2: reserved bits
* @retain_hdr: added to add back to the packet the header removed
* as part of header removal. This will be done as part of
* header insertion block.
* @rule_id: rule ID that will be returned in the packet status
* @stats_cnt_idx_lsb: stats cnt index lsb
*/
struct ipa4_5_rt_rule_hw_hdr {
union {
u64 word;
struct {
u64 en_rule:16;
u64 pipe_dest_idx:5;
u64 system:1;
u64 hdr_offset:9;
u64 proc_ctx:1;
u64 priority:10;
u64 stats_cnt_idx_msb : 2;
u64 rsvd2 : 3;
u64 retain_hdr:1;
u64 rule_id:10;
u64 stats_cnt_idx_lsb : 6;
} hdr;
} u;
};
/**
* struct ipa3_0_flt_rule_hw_hdr - HW header of IPA filter rule
* @word: filtering rule properties
* @en_rule: enable rule
* @action: post filtering action
* @rt_tbl_idx: index in routing table
* @retain_hdr: added to add back to the packet the header removed
* as part of header removal. This will be done as part of
* header insertion block.
* @rsvd1: reserved bits
 * @priority: Rule priority. Added to distinguish rule order
 * in the integrated table consisting of hashable and
 * non-hashable parts
* @rsvd2: reserved bits
* @rule_id: rule ID that will be returned in the packet status
* @rsvd3: reserved bits
*/
struct ipa3_0_flt_rule_hw_hdr {
union {
u64 word;
struct {
u64 en_rule:16;
u64 action:5;
u64 rt_tbl_idx:5;
u64 retain_hdr:1;
u64 rsvd1:5;
u64 priority:10;
u64 rsvd2:6;
u64 rule_id:10;
u64 rsvd3:6;
} hdr;
} u;
};
/**
* struct ipa4_0_flt_rule_hw_hdr - HW header of IPA filter rule
* @word: filtering rule properties
* @en_rule: enable rule
* @action: post filtering action
* @rt_tbl_idx: index in routing table
* @retain_hdr: added to add back to the packet the header removed
* as part of header removal. This will be done as part of
* header insertion block.
 * @pdn_idx: in case of a go-to-src-NAT action, it is possible to input
 * the pdn index to the NAT block
 * @set_metadata: enable metadata replacement in the NAT block
 * @priority: Rule priority. Added to distinguish rule order
 * in the integrated table consisting of hashable and
 * non-hashable parts
* @rsvd2: reserved bits
* @rule_id: rule ID that will be returned in the packet status
* @rsvd3: reserved bits
*/
struct ipa4_0_flt_rule_hw_hdr {
union {
u64 word;
struct {
u64 en_rule : 16;
u64 action : 5;
u64 rt_tbl_idx : 5;
u64 retain_hdr : 1;
u64 pdn_idx : 4;
u64 set_metadata : 1;
u64 priority : 10;
u64 rsvd2 : 6;
u64 rule_id : 10;
u64 rsvd3 : 6;
} hdr;
} u;
};
/**
* struct ipa4_5_flt_rule_hw_hdr - HW header of IPA filter rule
* @word: filtering rule properties
* @en_rule: enable rule
* @action: post filtering action
* @rt_tbl_idx: index in routing table
* @retain_hdr: added to add back to the packet the header removed
* as part of header removal. This will be done as part of
* header insertion block.
 * @pdn_idx: in case of a go-to-src-NAT action, it is possible to input
 * the pdn index to the NAT block
 * @set_metadata: enable metadata replacement in the NAT block
 * @priority: Rule priority. Added to distinguish rule order
 * in the integrated table consisting of hashable and
 * non-hashable parts
* @stats_cnt_idx_msb: stats cnt index msb
* @rsvd2: reserved bits
* @rule_id: rule ID that will be returned in the packet status
* @stats_cnt_idx_lsb: stats cnt index lsb
*/
struct ipa4_5_flt_rule_hw_hdr {
union {
u64 word;
struct {
u64 en_rule : 16;
u64 action : 5;
u64 rt_tbl_idx : 5;
u64 retain_hdr : 1;
u64 pdn_idx : 4;
u64 set_metadata : 1;
u64 priority : 10;
u64 stats_cnt_idx_msb : 2;
u64 rsvd2 : 4;
u64 rule_id : 10;
u64 stats_cnt_idx_lsb : 6;
} hdr;
} u;
};
int ipahal_fltrt_init(enum ipa_hw_type ipa_hw_type);
void ipahal_fltrt_destroy(void);
#endif /* _IPAHAL_FLTRT_I_H_ */

634 ipa/ipa_v3/ipahal/ipahal_hw_stats.c (new file)

@@ -0,0 +1,634 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#include "ipahal.h"
#include "ipahal_hw_stats.h"
#include "ipahal_hw_stats_i.h"
#include "ipahal_i.h"
struct ipahal_hw_stats_obj {
struct ipahal_stats_init_pyld *(*generate_init_pyld)(void *params,
bool is_atomic_ctx);
int (*get_offset)(void *params, struct ipahal_stats_offset *out);
int (*parse_stats)(void *init_params, void *raw_stats,
void *parsed_stats);
};
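/*
 * Counts set bits using Kernighan's trick: number &= (number - 1)
 * clears the lowest set bit, so the loop runs once per set bit.
 */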
static int _count_ones(u32 number)
{
int count = 0;
while (number) {
count++;
number = number & (number - 1);
}
return count;
}
static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_quota(
void *params, bool is_atomic_ctx)
{
struct ipahal_stats_init_pyld *pyld;
struct ipahal_stats_init_quota *in =
(struct ipahal_stats_init_quota *)params;
int entries = _count_ones(in->enabled_bitmask);
IPAHAL_DBG_LOW("entries = %d\n", entries);
pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
entries * sizeof(struct ipahal_stats_quota_hw), is_atomic_ctx);
if (!pyld) {
IPAHAL_ERR("no mem\n");
return NULL;
}
pyld->len = entries * sizeof(struct ipahal_stats_quota_hw);
return pyld;
}
static int ipahal_get_offset_quota(void *params,
struct ipahal_stats_offset *out)
{
struct ipahal_stats_get_offset_quota *in =
(struct ipahal_stats_get_offset_quota *)params;
int entries = _count_ones(in->init.enabled_bitmask);
IPAHAL_DBG_LOW("\n");
out->offset = 0;
out->size = entries * sizeof(struct ipahal_stats_quota_hw);
return 0;
}
static int ipahal_parse_stats_quota(void *init_params, void *raw_stats,
void *parsed_stats)
{
struct ipahal_stats_init_quota *init =
(struct ipahal_stats_init_quota *)init_params;
struct ipahal_stats_quota_hw *raw_hw =
(struct ipahal_stats_quota_hw *)raw_stats;
struct ipahal_stats_quota_all *out =
(struct ipahal_stats_quota_all *)parsed_stats;
int stat_idx = 0;
int i;
memset(out, 0, sizeof(*out));
IPAHAL_DBG_LOW("\n");
for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
if (init->enabled_bitmask & (1 << i)) {
IPAHAL_DBG_LOW("pipe %d stat_idx %d\n", i, stat_idx);
out->stats[i].num_ipv4_bytes =
raw_hw[stat_idx].num_ipv4_bytes;
out->stats[i].num_ipv4_pkts =
raw_hw[stat_idx].num_ipv4_pkts;
out->stats[i].num_ipv6_pkts =
raw_hw[stat_idx].num_ipv6_pkts;
out->stats[i].num_ipv6_bytes =
raw_hw[stat_idx].num_ipv6_bytes;
stat_idx++;
}
}
return 0;
}
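/*
 * Worked example (illustrative): with enabled_bitmask = (1 << 1) | (1 << 4),
 * the H/W blob holds two ipahal_stats_quota_hw entries and the loop
 * above maps raw entry 0 to pipe 1 and raw entry 1 to pipe 4.
 */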
static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_tethering(
void *params, bool is_atomic_ctx)
{
struct ipahal_stats_init_pyld *pyld;
struct ipahal_stats_init_tethering *in =
(struct ipahal_stats_init_tethering *)params;
int hdr_entries = _count_ones(in->prod_bitmask);
int entries = 0;
int i;
void *pyld_ptr;
u32 incremental_offset;
IPAHAL_DBG_LOW("prod entries = %d\n", hdr_entries);
for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
if (in->prod_bitmask & (1 << i)) {
if (in->cons_bitmask[i] == 0) {
IPAHAL_ERR("no cons bitmask for prod %d\n", i);
return NULL;
}
entries += _count_ones(in->cons_bitmask[i]);
}
}
IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
entries * sizeof(struct ipahal_stats_tethering_hw),
is_atomic_ctx);
if (!pyld)
return NULL;
pyld->len = hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw) +
entries * sizeof(struct ipahal_stats_tethering_hw);
pyld_ptr = pyld->data;
incremental_offset =
(hdr_entries * sizeof(struct ipahal_stats_tethering_hdr_hw))
/ 8;
for (i = 0; i < sizeof(in->prod_bitmask) * 8; i++) {
if (in->prod_bitmask & (1 << i)) {
struct ipahal_stats_tethering_hdr_hw *hdr = pyld_ptr;
hdr->dst_mask = in->cons_bitmask[i];
hdr->offset = incremental_offset;
IPAHAL_DBG_LOW("hdr->dst_mask=0x%x\n", hdr->dst_mask);
IPAHAL_DBG_LOW("hdr->offset=0x%x\n", hdr->offset);
/* add the stats entry */
incremental_offset += _count_ones(in->cons_bitmask[i]) *
sizeof(struct ipahal_stats_tethering_hw) / 8;
pyld_ptr += sizeof(*hdr);
}
}
return pyld;
}
static int ipahal_get_offset_tethering(void *params,
struct ipahal_stats_offset *out)
{
struct ipahal_stats_get_offset_tethering *in =
(struct ipahal_stats_get_offset_tethering *)params;
int entries = 0;
int i;
for (i = 0; i < sizeof(in->init.prod_bitmask) * 8; i++) {
if (in->init.prod_bitmask & (1 << i)) {
if (in->init.cons_bitmask[i] == 0) {
IPAHAL_ERR("no cons bitmask for prod %d\n", i);
return -EPERM;
}
entries += _count_ones(in->init.cons_bitmask[i]);
}
}
IPAHAL_DBG_LOW("sum all entries = %d\n", entries);
/* skip the header */
out->offset = _count_ones(in->init.prod_bitmask) *
sizeof(struct ipahal_stats_tethering_hdr_hw);
out->size = entries * sizeof(struct ipahal_stats_tethering_hw);
return 0;
}
static int ipahal_parse_stats_tethering(void *init_params, void *raw_stats,
void *parsed_stats)
{
struct ipahal_stats_init_tethering *init =
(struct ipahal_stats_init_tethering *)init_params;
struct ipahal_stats_tethering_hw *raw_hw =
(struct ipahal_stats_tethering_hw *)raw_stats;
struct ipahal_stats_tethering_all *out =
(struct ipahal_stats_tethering_all *)parsed_stats;
int i, j;
int stat_idx = 0;
memset(out, 0, sizeof(*out));
IPAHAL_DBG_LOW("\n");
for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
for (j = 0; j < IPAHAL_MAX_PIPES; j++) {
if ((init->prod_bitmask & (1 << i)) &&
init->cons_bitmask[i] & (1 << j)) {
IPAHAL_DBG_LOW("prod %d cons %d\n", i, j);
IPAHAL_DBG_LOW("stat_idx %d\n", stat_idx);
out->stats[i][j].num_ipv4_bytes =
raw_hw[stat_idx].num_ipv4_bytes;
IPAHAL_DBG_LOW("num_ipv4_bytes %lld\n",
out->stats[i][j].num_ipv4_bytes);
out->stats[i][j].num_ipv4_pkts =
raw_hw[stat_idx].num_ipv4_pkts;
IPAHAL_DBG_LOW("num_ipv4_pkts %lld\n",
out->stats[i][j].num_ipv4_pkts);
out->stats[i][j].num_ipv6_pkts =
raw_hw[stat_idx].num_ipv6_pkts;
IPAHAL_DBG_LOW("num_ipv6_pkts %lld\n",
out->stats[i][j].num_ipv6_pkts);
out->stats[i][j].num_ipv6_bytes =
raw_hw[stat_idx].num_ipv6_bytes;
IPAHAL_DBG_LOW("num_ipv6_bytes %lld\n",
out->stats[i][j].num_ipv6_bytes);
stat_idx++;
}
}
}
return 0;
}
static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt_v4_5(
void *params, bool is_atomic_ctx)
{
struct ipahal_stats_init_pyld *pyld;
	int num = (int)(uintptr_t)params; /* count passed via void * */
if (num > IPA_MAX_FLT_RT_CNT_INDEX ||
num <= 0) {
IPAHAL_ERR("num %d not valid\n", num);
return NULL;
}
pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
num *
sizeof(struct ipahal_stats_flt_rt_v4_5_hw),
is_atomic_ctx);
if (!pyld)
return NULL;
pyld->len = num *
sizeof(struct ipahal_stats_flt_rt_v4_5_hw);
return pyld;
}
static int ipahal_get_offset_flt_rt_v4_5(void *params,
struct ipahal_stats_offset *out)
{
struct ipahal_stats_get_offset_flt_rt_v4_5 *in =
(struct ipahal_stats_get_offset_flt_rt_v4_5 *)params;
int num;
out->offset = (in->start_id - 1) *
sizeof(struct ipahal_stats_flt_rt_v4_5);
num = in->end_id - in->start_id + 1;
out->size = num * sizeof(struct ipahal_stats_flt_rt_v4_5);
return 0;
}
static int ipahal_parse_stats_flt_rt_v4_5(void *init_params,
void *raw_stats, void *parsed_stats)
{
struct ipahal_stats_flt_rt_v4_5_hw *raw_hw =
(struct ipahal_stats_flt_rt_v4_5_hw *)raw_stats;
struct ipa_ioc_flt_rt_query *query =
(struct ipa_ioc_flt_rt_query *)parsed_stats;
int num, i;
num = query->end_id - query->start_id + 1;
IPAHAL_DBG_LOW("\n");
for (i = 0; i < num; i++) {
((struct ipa_flt_rt_stats *)
query->stats)[i].num_bytes =
raw_hw[i].num_bytes;
((struct ipa_flt_rt_stats *)
query->stats)[i].num_pkts_hash =
raw_hw[i].num_packets_hash;
((struct ipa_flt_rt_stats *)
query->stats)[i].num_pkts =
raw_hw[i].num_packets;
}
return 0;
}
static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_flt_rt(
void *params, bool is_atomic_ctx)
{
struct ipahal_stats_init_pyld *pyld;
struct ipahal_stats_init_flt_rt *in =
(struct ipahal_stats_init_flt_rt *)params;
int hdr_entries;
int num_rules = 0;
int i, start_entry;
void *pyld_ptr;
u32 incremental_offset;
for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++)
num_rules += _count_ones(in->rule_id_bitmask[i]);
if (num_rules == 0) {
IPAHAL_ERR("no rule ids provided\n");
return NULL;
}
IPAHAL_DBG_LOW("num_rules = %d\n", num_rules);
hdr_entries = IPAHAL_MAX_RULE_ID_32;
for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
if (in->rule_id_bitmask[i] != 0)
break;
hdr_entries--;
}
start_entry = i;
for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= start_entry; i--) {
if (in->rule_id_bitmask[i] != 0)
break;
hdr_entries--;
}
IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
num_rules * sizeof(struct ipahal_stats_flt_rt_hw),
is_atomic_ctx);
if (!pyld) {
IPAHAL_ERR("no mem\n");
return NULL;
}
pyld->len = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw) +
num_rules * sizeof(struct ipahal_stats_flt_rt_hw);
pyld_ptr = pyld->data;
incremental_offset =
(hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw))
/ 8;
for (i = start_entry; i < hdr_entries; i++) {
struct ipahal_stats_flt_rt_hdr_hw *hdr = pyld_ptr;
hdr->en_mask = in->rule_id_bitmask[i];
hdr->cnt_offset = incremental_offset;
/* add the stats entry */
incremental_offset += _count_ones(in->rule_id_bitmask[i]) *
sizeof(struct ipahal_stats_flt_rt_hw) / 8;
pyld_ptr += sizeof(*hdr);
}
return pyld;
}
static int ipahal_get_offset_flt_rt(void *params,
struct ipahal_stats_offset *out)
{
struct ipahal_stats_get_offset_flt_rt *in =
(struct ipahal_stats_get_offset_flt_rt *)params;
int i;
int hdr_entries;
int skip_rules = 0;
int start_entry;
int rule_bit = in->rule_id % 32;
int rule_idx = in->rule_id / 32;
if (rule_idx >= IPAHAL_MAX_RULE_ID_32) {
IPAHAL_ERR("invalid rule_id %d\n", in->rule_id);
return -EPERM;
}
hdr_entries = IPAHAL_MAX_RULE_ID_32;
for (i = 0; i < IPAHAL_MAX_RULE_ID_32; i++) {
if (in->init.rule_id_bitmask[i] != 0)
break;
hdr_entries--;
}
if (hdr_entries == 0) {
IPAHAL_ERR("no rule ids provided\n");
return -EPERM;
}
start_entry = i;
for (i = IPAHAL_MAX_RULE_ID_32 - 1; i >= 0; i--) {
if (in->init.rule_id_bitmask[i] != 0)
break;
hdr_entries--;
}
IPAHAL_DBG_LOW("hdr_entries = %d\n", hdr_entries);
/* skip the header */
out->offset = hdr_entries * sizeof(struct ipahal_stats_flt_rt_hdr_hw);
/* skip the previous rules */
for (i = start_entry; i < rule_idx; i++)
skip_rules += _count_ones(in->init.rule_id_bitmask[i]);
for (i = 0; i < rule_bit; i++)
if (in->init.rule_id_bitmask[rule_idx] & (1 << i))
skip_rules++;
out->offset += skip_rules * sizeof(struct ipahal_stats_flt_rt_hw);
out->size = sizeof(struct ipahal_stats_flt_rt_hw);
return 0;
}
static int ipahal_parse_stats_flt_rt(void *init_params, void *raw_stats,
void *parsed_stats)
{
struct ipahal_stats_flt_rt_hw *raw_hw =
(struct ipahal_stats_flt_rt_hw *)raw_stats;
struct ipahal_stats_flt_rt *out =
(struct ipahal_stats_flt_rt *)parsed_stats;
memset(out, 0, sizeof(*out));
IPAHAL_DBG_LOW("\n");
out->num_packets = raw_hw->num_packets;
out->num_packets_hash = raw_hw->num_packets_hash;
return 0;
}
static struct ipahal_stats_init_pyld *ipahal_generate_init_pyld_drop(
void *params, bool is_atomic_ctx)
{
struct ipahal_stats_init_pyld *pyld;
struct ipahal_stats_init_drop *in =
(struct ipahal_stats_init_drop *)params;
int entries = _count_ones(in->enabled_bitmask);
IPAHAL_DBG_LOW("entries = %d\n", entries);
pyld = IPAHAL_MEM_ALLOC(sizeof(*pyld) +
entries * sizeof(struct ipahal_stats_drop_hw), is_atomic_ctx);
if (!pyld)
return NULL;
pyld->len = entries * sizeof(struct ipahal_stats_drop_hw);
return pyld;
}
static int ipahal_get_offset_drop(void *params,
struct ipahal_stats_offset *out)
{
struct ipahal_stats_get_offset_drop *in =
(struct ipahal_stats_get_offset_drop *)params;
int entries = _count_ones(in->init.enabled_bitmask);
IPAHAL_DBG_LOW("\n");
out->offset = 0;
out->size = entries * sizeof(struct ipahal_stats_drop_hw);
return 0;
}
static int ipahal_parse_stats_drop(void *init_params, void *raw_stats,
void *parsed_stats)
{
struct ipahal_stats_init_drop *init =
(struct ipahal_stats_init_drop *)init_params;
struct ipahal_stats_drop_hw *raw_hw =
(struct ipahal_stats_drop_hw *)raw_stats;
struct ipahal_stats_drop_all *out =
(struct ipahal_stats_drop_all *)parsed_stats;
int stat_idx = 0;
int i;
memset(out, 0, sizeof(*out));
IPAHAL_DBG_LOW("\n");
for (i = 0; i < IPAHAL_MAX_PIPES; i++) {
if (init->enabled_bitmask & (1 << i)) {
out->stats[i].drop_byte_cnt =
raw_hw[stat_idx].drop_byte_cnt;
out->stats[i].drop_packet_cnt =
raw_hw[stat_idx].drop_packet_cnt;
stat_idx++;
}
}
return 0;
}
static struct ipahal_hw_stats_obj
ipahal_hw_stats_objs[IPA_HW_MAX][IPAHAL_HW_STATS_MAX] = {
/* IPAv4 */
[IPA_HW_v4_0][IPAHAL_HW_STATS_QUOTA] = {
ipahal_generate_init_pyld_quota,
ipahal_get_offset_quota,
ipahal_parse_stats_quota
},
[IPA_HW_v4_0][IPAHAL_HW_STATS_TETHERING] = {
ipahal_generate_init_pyld_tethering,
ipahal_get_offset_tethering,
ipahal_parse_stats_tethering
},
[IPA_HW_v4_0][IPAHAL_HW_STATS_FNR] = {
ipahal_generate_init_pyld_flt_rt,
ipahal_get_offset_flt_rt,
ipahal_parse_stats_flt_rt
},
[IPA_HW_v4_0][IPAHAL_HW_STATS_DROP] = {
ipahal_generate_init_pyld_drop,
ipahal_get_offset_drop,
ipahal_parse_stats_drop
},
[IPA_HW_v4_5][IPAHAL_HW_STATS_QUOTA] = {
ipahal_generate_init_pyld_quota,
ipahal_get_offset_quota,
ipahal_parse_stats_quota
},
[IPA_HW_v4_5][IPAHAL_HW_STATS_FNR] = {
ipahal_generate_init_pyld_flt_rt_v4_5,
ipahal_get_offset_flt_rt_v4_5,
ipahal_parse_stats_flt_rt_v4_5
},
[IPA_HW_v4_5][IPAHAL_HW_STATS_TETHERING] = {
ipahal_generate_init_pyld_tethering,
ipahal_get_offset_tethering,
ipahal_parse_stats_tethering
},
[IPA_HW_v4_5][IPAHAL_HW_STATS_DROP] = {
ipahal_generate_init_pyld_drop,
ipahal_get_offset_drop,
ipahal_parse_stats_drop
},
};
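/*
* Rows for IPA versions not listed above (e.g. between v4_0 and v4_5)
* are zero and are filled in by ipahal_hw_stats_init() below, which
* copies each stat's callbacks forward from the previous IPA version
* unless explicitly overridden.
*/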
int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type)
{
int i;
int j;
struct ipahal_hw_stats_obj zero_obj;
struct ipahal_hw_stats_obj *hw_stat_ptr;
IPAHAL_DBG_LOW("Entry - HW_TYPE=%d\n", ipa_hw_type);
if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
return -EINVAL;
}
memset(&zero_obj, 0, sizeof(zero_obj));
for (i = IPA_HW_v4_0 ; i < ipa_hw_type ; i++) {
for (j = 0; j < IPAHAL_HW_STATS_MAX; j++) {
if (!memcmp(&ipahal_hw_stats_objs[i + 1][j], &zero_obj,
sizeof(struct ipahal_hw_stats_obj))) {
memcpy(&ipahal_hw_stats_objs[i + 1][j],
&ipahal_hw_stats_objs[i][j],
sizeof(struct ipahal_hw_stats_obj));
} else {
/*
* explicitly overridden stat.
* Check validity
*/
hw_stat_ptr = &ipahal_hw_stats_objs[i + 1][j];
if (!hw_stat_ptr->get_offset) {
IPAHAL_ERR(
"stat=%d get_offset null ver=%d\n",
j, i+1);
WARN_ON(1);
}
if (!hw_stat_ptr->parse_stats) {
IPAHAL_ERR(
"stat=%d parse_stats null ver=%d\n",
j, i + 1);
WARN_ON(1);
}
}
}
}
return 0;
}
int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
struct ipahal_stats_offset *out)
{
if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
IPAHAL_ERR("Invalid type stat=%d\n", type);
WARN_ON(1);
return -EFAULT;
}
if (!params || !out) {
IPAHAL_ERR("Null arg\n");
WARN_ON(1);
return -EFAULT;
}
return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].get_offset(
params, out);
}
struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx)
{
struct ipahal_hw_stats_obj *hw_obj_ptr;
if (type < 0 || type >= IPAHAL_HW_STATS_MAX) {
IPAHAL_ERR("Invalid type stat=%d\n", type);
WARN_ON(1);
return NULL;
}
hw_obj_ptr = &ipahal_hw_stats_objs[ipahal_ctx->hw_type][type];
return hw_obj_ptr->generate_init_pyld(params, is_atomic_ctx);
}
int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
void *raw_stats, void *parsed_stats)
{
if (WARN((type < 0 || type >= IPAHAL_HW_STATS_MAX),
"Invalid type stat = %d\n", type))
return -EFAULT;
if (WARN((!raw_stats || !parsed_stats), "Null arg\n"))
return -EFAULT;
return ipahal_hw_stats_objs[ipahal_ctx->hw_type][type].parse_stats(
init_params, raw_stats, parsed_stats);
}
void ipahal_set_flt_rt_sw_stats(void *raw_stats,
struct ipa_flt_rt_stats sw_stats)
{
struct ipahal_stats_flt_rt_v4_5_hw *raw_hw =
(struct ipahal_stats_flt_rt_v4_5_hw *)raw_stats;
IPAHAL_DBG_LOW("\n");
raw_hw->num_bytes = sw_stats.num_bytes;
raw_hw->num_packets_hash = sw_stats.num_pkts_hash;
raw_hw->num_packets = sw_stats.num_pkts;
}

273
ipa/ipa_v3/ipahal/ipahal_hw_stats.h Normal file
View file

@@ -0,0 +1,273 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPAHAL_HW_STATS_H_
#define _IPAHAL_HW_STATS_H_
#include <linux/ipa.h>
#define IPAHAL_MAX_PIPES 32
#define IPAHAL_MAX_RULE_ID_32 (1024 / 32) /* 10 bits of rule id */
enum ipahal_hw_stats_type {
IPAHAL_HW_STATS_QUOTA,
IPAHAL_HW_STATS_TETHERING,
IPAHAL_HW_STATS_FNR,
IPAHAL_HW_STATS_DROP,
IPAHAL_HW_STATS_MAX
};
/*
* struct ipahal_stats_init_pyld - Statistics initialization payload
* @len: length of payload
* @reserved: reserved
* @data: actual payload data
*/
struct ipahal_stats_init_pyld {
u16 len;
u16 reserved;
u8 data[0];
};
/*
* struct ipahal_stats_offset - Statistics offset parameters
* @offset: offset of the statistic from beginning of stats table
* @size: size of the statistics
*/
struct ipahal_stats_offset {
u32 offset;
u16 size;
};
/*
* struct ipahal_stats_init_quota - Initialization parameters for quota
* @enabled_bitmask: bit mask of pipes to be monitored
*/
struct ipahal_stats_init_quota {
u32 enabled_bitmask;
};
/*
* struct ipahal_stats_get_offset_quota - Get offset parameters for quota
* @init: initialization parameters used in initialization of stats
*/
struct ipahal_stats_get_offset_quota {
struct ipahal_stats_init_quota init;
};
/*
* struct ipahal_stats_quota - Quota statistics
* @num_ipv4_bytes: IPv4 bytes
* @num_ipv6_bytes: IPv6 bytes
* @num_ipv4_pkts: IPv4 packets
* @num_ipv6_pkts: IPv6 packets
*/
struct ipahal_stats_quota {
u64 num_ipv4_bytes;
u64 num_ipv6_bytes;
u64 num_ipv4_pkts;
u64 num_ipv6_pkts;
};
/*
* struct ipahal_stats_quota_all - Quota statistics for all pipes
* @stats: array of statistics per pipe
*/
struct ipahal_stats_quota_all {
struct ipahal_stats_quota stats[IPAHAL_MAX_PIPES];
};
/*
* struct ipahal_stats_init_tethering - Initialization parameters for tethering
* @prod_bitmask: bit mask of producer pipes to be monitored
* @cons_bitmask: bit mask of consumer pipes to be monitored per producer
*/
struct ipahal_stats_init_tethering {
u32 prod_bitmask;
u32 cons_bitmask[IPAHAL_MAX_PIPES];
};
/*
* struct ipahal_stats_get_offset_tethering - Get offset parameters for
* tethering
* @init: initialization parameters used in initialization of stats
*/
struct ipahal_stats_get_offset_tethering {
struct ipahal_stats_init_tethering init;
};
/*
* struct ipahal_stats_tethering - Tethering statistics
* @num_ipv4_bytes: IPv4 bytes
* @num_ipv6_bytes: IPv6 bytes
* @num_ipv4_pkts: IPv4 packets
* @num_ipv6_pkts: IPv6 packets
*/
struct ipahal_stats_tethering {
u64 num_ipv4_bytes;
u64 num_ipv6_bytes;
u64 num_ipv4_pkts;
u64 num_ipv6_pkts;
};
/*
* struct ipahal_stats_tethering_all - Tethering statistics for all pipes
* @stats: matrix of statistics per pair of pipes
*/
struct ipahal_stats_tethering_all {
struct ipahal_stats_tethering
stats[IPAHAL_MAX_PIPES][IPAHAL_MAX_PIPES];
};
/*
* struct ipahal_stats_init_flt_rt - Initialization parameters for flt_rt
* @rule_id_bitmask: array describing which rule ids to monitor.
* rule_id bit is determined by:
* index to the array => rule_id / 32
* bit to enable => rule_id % 32
*/
struct ipahal_stats_init_flt_rt {
u32 rule_id_bitmask[IPAHAL_MAX_RULE_ID_32];
};
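/*
* Example (illustrative only, not part of the API): to monitor rule_id 40,
* set bit 40 % 32 = 8 in word 40 / 32 = 1 of the bitmask:
*
*	struct ipahal_stats_init_flt_rt init = { { 0 } };
*
*	init.rule_id_bitmask[40 / 32] |= BIT(40 % 32);
*/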
/*
* struct ipahal_stats_get_offset_flt_rt - Get offset parameters for flt_rt
* @init: initialization parameters used in initialization of stats
* @rule_id: rule_id to get the offset for
*/
struct ipahal_stats_get_offset_flt_rt {
struct ipahal_stats_init_flt_rt init;
u32 rule_id;
};
/*
* struct ipahal_stats_flt_rt - flt_rt statistics
* @num_packets: Total number of packets that hit this rule
* @num_packets_hash: Total number of packets that hit this rule in hash table
*/
struct ipahal_stats_flt_rt {
u32 num_packets;
u32 num_packets_hash;
};
/*
* struct ipahal_stats_flt_rt_v4_5 - flt_rt statistics
* @num_packets: Total number of packets that hit this rule
* @num_packets_hash: Total number of packets that hit this rule in hash table
* @num_bytes: Total number of bytes that hit this rule
*/
struct ipahal_stats_flt_rt_v4_5 {
u32 num_packets;
u32 num_packets_hash;
u64 num_bytes;
};
/*
* struct ipahal_stats_get_offset_flt_rt_v4_5 - Get offset parameters for flt_rt
* @start_id: start_id to get the offset
* @end_id: end_id to get the offset
*/
struct ipahal_stats_get_offset_flt_rt_v4_5 {
u8 start_id;
u8 end_id;
};
/*
* struct ipahal_stats_init_drop - Initialization parameters for Drop
* @enabled_bitmask: bit mask of pipes to be monitored
*/
struct ipahal_stats_init_drop {
u32 enabled_bitmask;
};
/*
* struct ipahal_stats_get_offset_drop - Get offset parameters for Drop
* @init: initialization parameters used in initialization of stats
*/
struct ipahal_stats_get_offset_drop {
struct ipahal_stats_init_drop init;
};
/*
* struct ipahal_stats_drop - Packet Drop statistics
* @drop_packet_cnt: number of packets dropped
* @drop_byte_cnt: number of bytes dropped
*/
struct ipahal_stats_drop {
u32 drop_packet_cnt;
u32 drop_byte_cnt;
};
/*
* struct ipahal_stats_drop_all - Drop statistics for all pipes
* @stats: array of statistics per pipe
*/
struct ipahal_stats_drop_all {
struct ipahal_stats_drop stats[IPAHAL_MAX_PIPES];
};
/*
* ipahal_stats_generate_init_pyld - Generate the init payload for stats
* @type: type of stats
* @params: init_pyld parameters based on the stats type
* @is_atomic_ctx: is the calling context atomic?
*
* This function will generate the initialization payload for a particular
* statistic in hardware. IPA driver is expected to use this payload to
* initialize the SRAM.
*
* Return: pointer to ipahal_stats_init_pyld on success or NULL on failure.
*/
struct ipahal_stats_init_pyld *ipahal_stats_generate_init_pyld(
enum ipahal_hw_stats_type type, void *params, bool is_atomic_ctx);
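/*
* Usage sketch (illustrative only; the SRAM copy step is paraphrased
* rather than a real helper):
*
*	struct ipahal_stats_init_quota quota = { .enabled_bitmask = BIT(1) };
*	struct ipahal_stats_init_pyld *pyld;
*
*	pyld = ipahal_stats_generate_init_pyld(IPAHAL_HW_STATS_QUOTA,
*		&quota, false);
*	if (!pyld)
*		return -ENOMEM;
*	... copy pyld->len bytes from pyld->data into the stats SRAM ...
*	ipahal_destroy_stats_init_pyld(pyld);
*/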
/*
* ipahal_destroy_stats_init_pyld() - Destroy/Release the payload that was
* built by the ipahal_stats_generate_init_pyld function.
*/
static inline void ipahal_destroy_stats_init_pyld(
struct ipahal_stats_init_pyld *pyld)
{
kfree(pyld);
}
/*
* ipahal_stats_get_offset - Get the offset / size of payload for stats
* @type: type of stats
* @params: get_offset parameters based on the stats type
* @out: out parameter for the offset and size.
*
* This function will return the offset of the counter from the beginning of
* the table. The IPA driver is expected to read this portion of SRAM and pass
* it to ipahal_parse_stats() to interpret the stats.
*
* Return: 0 on success and negative on failure
*/
int ipahal_stats_get_offset(enum ipahal_hw_stats_type type, void *params,
struct ipahal_stats_offset *out);
/*
* ipahal_parse_stats - parse statistics
* @type: type of stats
* @init_params: init_pyld parameters used on init
* @raw_stats: stats read from IPA SRAM
* @parsed_stats: pointer to parsed stats based on type
*
* Return: 0 on success and negative on failure
*/
int ipahal_parse_stats(enum ipahal_hw_stats_type type, void *init_params,
void *raw_stats, void *parsed_stats);
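/*
* Combined usage sketch (illustrative only; quota_init and raw are assumed
* to be set up by the caller, and read_stats_sram() is a made-up
* placeholder for the driver's actual SRAM read):
*
*	struct ipahal_stats_get_offset_quota get = { .init = quota_init };
*	struct ipahal_stats_offset off;
*	struct ipahal_stats_quota_all out;
*
*	if (ipahal_stats_get_offset(IPAHAL_HW_STATS_QUOTA, &get, &off))
*		return -EINVAL;
*	read_stats_sram(raw, off.offset, off.size);
*	ipahal_parse_stats(IPAHAL_HW_STATS_QUOTA, &quota_init, raw, &out);
*/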
/*
* ipahal_set_flt_rt_sw_stats - set sw counter stats for FnR
* @raw_stats: stats buffer to be written to IPA SRAM
* @sw_stats: FnR sw stats to be written
*
* Return: None
*/
void ipahal_set_flt_rt_sw_stats(void *raw_stats,
struct ipa_flt_rt_stats sw_stats);
#endif /* _IPAHAL_HW_STATS_H_ */

54
ipa/ipa_v3/ipahal/ipahal_hw_stats_i.h Normal file
View file

@@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPAHAL_HW_STATS_I_H_
#define _IPAHAL_HW_STATS_I_H_
#include "ipahal_hw_stats.h"
int ipahal_hw_stats_init(enum ipa_hw_type ipa_hw_type);
struct ipahal_stats_quota_hw {
u64 num_ipv4_bytes;
u64 num_ipv4_pkts:32;
u64 num_ipv6_pkts:32;
u64 num_ipv6_bytes;
};
struct ipahal_stats_tethering_hdr_hw {
u64 dst_mask:32;
u64 offset:32;
};
struct ipahal_stats_tethering_hw {
u64 num_ipv4_bytes;
u64 num_ipv4_pkts:32;
u64 num_ipv6_pkts:32;
u64 num_ipv6_bytes;
};
struct ipahal_stats_flt_rt_hdr_hw {
u64 en_mask:32;
u64 reserved:16;
u64 cnt_offset:16;
};
struct ipahal_stats_flt_rt_hw {
u64 num_packets_hash:32;
u64 num_packets:32;
};
struct ipahal_stats_flt_rt_v4_5_hw {
u64 num_packets_hash:32;
u64 num_packets:32;
u64 num_bytes;
};
struct ipahal_stats_drop_hw {
u64 drop_byte_cnt:40;
u64 drop_packet_cnt:24;
};
#endif /* _IPAHAL_HW_STATS_I_H_ */

727
ipa/ipa_v3/ipahal/ipahal_i.h Normal file
View file

@@ -0,0 +1,727 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _IPAHAL_I_H_
#define _IPAHAL_I_H_
#include <linux/ipa.h>
#include "../../ipa_common_i.h"
#define IPAHAL_DRV_NAME "ipahal"
#define IPAHAL_DBG(fmt, args...) \
do { \
pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAHAL_DBG_LOW(fmt, args...) \
do { \
pr_debug(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAHAL_ERR(fmt, args...) \
do { \
pr_err(IPAHAL_DRV_NAME " %s:%d " fmt, __func__, __LINE__, \
## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAHAL_ERR_RL(fmt, args...) \
do { \
pr_err_ratelimited_ipa(IPAHAL_DRV_NAME " %s:%d " fmt, \
__func__, __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
IPAHAL_DRV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAHAL_DBG_REG(fmt, args...) \
do { \
pr_err(fmt, ## args); \
IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, \
" %s:%d " fmt, ## args); \
} while (0)
#define IPAHAL_DBG_REG_IPC_ONLY(fmt, args...) \
IPA_IPC_LOGGING(ipahal_ctx->regdumpbuf, " %s:%d " fmt, ## args)
#define IPAHAL_MEM_ALLOC(__size, __is_atomic_ctx) \
(kzalloc((__size), ((__is_atomic_ctx) ? GFP_ATOMIC : GFP_KERNEL)))
#define IPAHAL_IPC_LOG_PAGES 50
#define IPAHAL_PKT_STATUS_FLTRT_RULE_MISS_ID 0x3ff
/*
* struct ipahal_context - HAL global context data
* @hw_type: IPA H/W type/version.
* @base: Base address to be used for accessing IPA memory. This is
* I/O memory mapped address.
* Controlled by debugfs. default is off
* @dent: Debugfs folder dir entry
* @ipa_pdev: IPA Platform Device. Will be used for DMA memory
* @empty_fltrt_tbl: Empty table to be used at tables init.
*/
struct ipahal_context {
enum ipa_hw_type hw_type;
void __iomem *base;
struct dentry *dent;
struct device *ipa_pdev;
struct ipa_mem_buffer empty_fltrt_tbl;
void *regdumpbuf;
};
extern struct ipahal_context *ipahal_ctx;
/* Immediate commands H/W structures */
/*
* struct ipa_imm_cmd_hw_ip_v4_filter_init - IP_V4_FILTER_INIT command payload
* in H/W format.
* Inits IPv4 filter block.
* @hash_rules_addr: Addr in system mem where ipv4 hashable flt rules starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv4 hashable flt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv4 non-hashable flt tbl should
* be copied to
* @rsvd: reserved
* @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable flt tbl starts
*/
struct ipa_imm_cmd_hw_ip_v4_filter_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
* struct ipa_imm_cmd_hw_ip_v6_filter_init - IP_V6_FILTER_INIT command payload
* in H/W format.
* Inits IPv6 filter block.
* @hash_rules_addr: Addr in system mem where ipv6 hashable flt rules starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv6 hashable flt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv6 non-hashable flt tbl should
* be copied to
* @rsvd: reserved
* @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable flt tbl starts
*/
struct ipa_imm_cmd_hw_ip_v6_filter_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
* struct ipa_imm_cmd_hw_ip_v4_nat_init - IP_V4_NAT_INIT command payload
* in H/W format.
* Inits IPv4 NAT block. Initializes the NAT table with its dimensions,
* location, cache address, and other related parameters.
* @ipv4_rules_addr: Addr in sys/shared mem where ipv4 NAT rules start
* @ipv4_expansion_rules_addr: Addr in sys/shared mem where expansion NAT
* table starts. IPv4 NAT rules that result in hash collision are located
* in this table.
* @index_table_addr: Addr in sys/shared mem where index table, which points
* to NAT table starts
* @index_table_expansion_addr: Addr in sys/shared mem where expansion index
* table starts
* @table_index: For future support of multiple NAT tables
* @rsvd1: reserved
* @ipv4_rules_addr_type: ipv4_rules_addr in sys or shared mem
* @ipv4_expansion_rules_addr_type: ipv4_expansion_rules_addr in
* sys or shared mem
* @index_table_addr_type: index_table_addr in sys or shared mem
* @index_table_expansion_addr_type: index_table_expansion_addr in
* sys or shared mem
* @size_base_tables: Num of entries in NAT tbl and idx tbl (each)
* @size_expansion_tables: Num of entries in NAT expansion tbl and expansion
* idx tbl (each)
* @rsvd2: reserved
* @public_addr_info: Public IP addresses info suitable to the IPA H/W version
* IPA H/W >= 4.0 - PDN config table offset in SMEM
* IPA H/W < 4.0 - The public IP address
*/
struct ipa_imm_cmd_hw_ip_v4_nat_init {
u64 ipv4_rules_addr:64;
u64 ipv4_expansion_rules_addr:64;
u64 index_table_addr:64;
u64 index_table_expansion_addr:64;
u64 table_index:3;
u64 rsvd1:1;
u64 ipv4_rules_addr_type:1;
u64 ipv4_expansion_rules_addr_type:1;
u64 index_table_addr_type:1;
u64 index_table_expansion_addr_type:1;
u64 size_base_tables:12;
u64 size_expansion_tables:10;
u64 rsvd2:2;
u64 public_addr_info:32;
};
/*
* struct ipa_imm_cmd_hw_ip_v6_ct_init - IP_V6_CONN_TRACK_INIT command payload
* in H/W format.
* Inits IPv6CT block. Initializes the IPv6CT table with its dimensions,
* location, cache address, and other related parameters.
* @table_addr: Address in sys/shared mem where IPv6CT rules start
* @expansion_table_addr: Address in sys/shared mem where IPv6CT expansion
* table starts. IPv6CT rules that result in hash collision are located
* in this table.
* @table_index: For future support of multiple IPv6CT tables
* @rsvd1: reserved
* @table_addr_type: table_addr in sys or shared mem
* @expansion_table_addr_type: expansion_table_addr in sys or shared mem
* @rsvd2: reserved
* @size_base_tables: Number of entries in IPv6CT table
* @size_expansion_tables: Number of entries in IPv6CT expansion table
* @rsvd3: reserved
*/
struct ipa_imm_cmd_hw_ip_v6_ct_init {
u64 table_addr:64;
u64 expansion_table_addr:64;
u64 table_index:3;
u64 rsvd1:1;
u64 table_addr_type:1;
u64 expansion_table_addr_type:1;
u64 rsvd2:2;
u64 size_base_table:12;
u64 size_expansion_table:10;
u64 rsvd3:34;
};
/*
* struct ipa_imm_cmd_hw_ip_v4_routing_init - IP_V4_ROUTING_INIT command payload
* in H/W format.
* Inits IPv4 routing table/structure - with the rules and other related params
* @hash_rules_addr: Addr in system mem where ipv4 hashable rt rules starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv4 hashable rt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv4 non-hashable rt tbl should
* be copied to
* @rsvd: reserved
* @nhash_rules_addr: Addr in sys mem where ipv4 non-hashable rt tbl starts
*/
struct ipa_imm_cmd_hw_ip_v4_routing_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
* struct ipa_imm_cmd_hw_ip_v6_routing_init - IP_V6_ROUTING_INIT command payload
* in H/W format.
* Inits IPv6 routing table/structure - with the rules and other related params
* @hash_rules_addr: Addr in system mem where ipv6 hashable rt rules starts
* @hash_rules_size: Size in bytes of the hashable tbl to cpy to local mem
* @hash_local_addr: Addr in shared mem where ipv6 hashable rt tbl should
* be copied to
* @nhash_rules_size: Size in bytes of the non-hashable tbl to cpy to local mem
* @nhash_local_addr: Addr in shared mem where ipv6 non-hashable rt tbl should
* be copied to
* @rsvd: reserved
* @nhash_rules_addr: Addr in sys mem where ipv6 non-hashable rt tbl starts
*/
struct ipa_imm_cmd_hw_ip_v6_routing_init {
u64 hash_rules_addr:64;
u64 hash_rules_size:12;
u64 hash_local_addr:16;
u64 nhash_rules_size:12;
u64 nhash_local_addr:16;
u64 rsvd:8;
u64 nhash_rules_addr:64;
};
/*
* struct ipa_imm_cmd_hw_hdr_init_local - HDR_INIT_LOCAL command payload
* in H/W format.
* Inits hdr table within local mem with the hdrs and their length.
* @hdr_table_addr: Word address in sys mem where the table starts (SRC)
* @size_hdr_table: Size of the above (in bytes)
* @hdr_addr: header address in IPA sram (used as DST for memory copy)
* @rsvd: reserved
*/
struct ipa_imm_cmd_hw_hdr_init_local {
u64 hdr_table_addr:64;
u64 size_hdr_table:12;
u64 hdr_addr:16;
u64 rsvd:4;
};
/*
* struct ipa_imm_cmd_hw_nat_dma - NAT_DMA command payload
* in H/W format
* Perform DMA operation on NAT related mem addresses. Copy data into
* different locations within NAT associated tbls. (For add/remove NAT rules)
* @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
* @rsvd1: reserved
* @base_addr: Base addr to which the DMA operation should be performed.
* @rsvd2: reserved
* @offset: offset in bytes from base addr to write 'data' to
* @data: data to be written
* @rsvd3: reserved
*/
struct ipa_imm_cmd_hw_nat_dma {
u64 table_index:3;
u64 rsvd1:1;
u64 base_addr:2;
u64 rsvd2:2;
u64 offset:32;
u64 data:16;
u64 rsvd3:8;
};
/*
* struct ipa_imm_cmd_hw_table_dma_ipav4 - TABLE_DMA command payload
* in H/W format
* Perform DMA operation on NAT and ipv6 connection tracking related mem
* addresses. Copy data into different locations within NAT associated tbls
* (For add/remove NAT rules)
* @table_index: NAT tbl index. Defines the NAT tbl on which to perform DMA op.
* @rsvd1: reserved
* @base_addr: Base addr to which the DMA operation should be performed.
* @rsvd2: reserved
* @offset: offset in bytes from base addr to write 'data' to
* @data: data to be written
* @rsvd3: reserved
*/
struct ipa_imm_cmd_hw_table_dma_ipav4 {
u64 table_index : 3;
u64 rsvd1 : 1;
u64 base_addr : 3;
u64 rsvd2 : 1;
u64 offset : 32;
u64 data : 16;
u64 rsvd3 : 8;
};
/*
* struct ipa_imm_cmd_hw_hdr_init_system - HDR_INIT_SYSTEM command payload
* in H/W format.
* Inits hdr table within sys mem with the hdrs and their length.
* @hdr_table_addr: Word address in system memory where the hdrs tbl starts.
*/
struct ipa_imm_cmd_hw_hdr_init_system {
u64 hdr_table_addr:64;
};
/*
* struct ipa_imm_cmd_hw_ip_packet_init - IP_PACKET_INIT command payload
* in H/W format.
* Configuration for a specific IP pkt. Shall be sent prior to the IP pkt
* data. The pkt will not go through IP pkt processing.
* @destination_pipe_index: Destination pipe index (in case routing
* is enabled, this field will overwrite the rt rule)
* @rsv1: reserved
*/
struct ipa_imm_cmd_hw_ip_packet_init {
u64 destination_pipe_index:5;
u64 rsv1:59;
};
/*
* struct ipa_imm_cmd_hw_register_write - REGISTER_WRITE command payload
* in H/W format.
* Write value to register. Allows reg changes to be synced with data packet
* and other immediate command. Can be used to access the sram
* @sw_rsvd: Ignored by H/W. May be used by S/W
* @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
* @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
* @value: value to write to register
* @value_mask: mask specifying which value bits to write to the register
* @pipeline_clear_options: options for pipeline to clear
* 0: HPS - no pkt inside HPS (not grp specific)
* 1: source group - The immediate cmd src grp does not use any pkt ctxs
* 2: Wait until no pkt reside inside IPA pipeline
* 3: reserved
* @rsvd: reserved - should be set to zero
*/
struct ipa_imm_cmd_hw_register_write {
u64 sw_rsvd:15;
u64 skip_pipeline_clear:1;
u64 offset:16;
u64 value:32;
u64 value_mask:32;
u64 pipeline_clear_options:2;
u64 rsvd:30;
};
/*
* struct ipa_imm_cmd_hw_register_write_v_4_0 - REGISTER_WRITE command payload
* in H/W format.
* Write value to register. Allows reg changes to be synced with data packet
* and other immediate command. Can be used to access the sram
* @sw_rsvd: Ignored by H/W. May be used by S/W
* @offset_high: high bits of the Offset field - bits 17-20
* @rsvd: reserved - should be set to zero
* @offset: offset from IPA base address - Lower 16bit of the IPA reg addr
* @value: value to write to register
* @value_mask: mask specifying which value bits to write to the register
* @rsvd2: reserved - should be set to zero
*/
struct ipa_imm_cmd_hw_register_write_v_4_0 {
u64 sw_rsvd:11;
u64 offset_high:4;
u64 rsvd:1;
u64 offset:16;
u64 value:32;
u64 value_mask:32;
u64 rsvd2:32;
};
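/*
* Example (illustrative): with this layout a 20-bit register offset such as
* 0x4C0A4 is split as offset = 0xC0A4 (low 16 bits) and offset_high = 0x4
* (the next 4 bits).
*/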
/*
* struct ipa_imm_cmd_hw_dma_shared_mem - DMA_SHARED_MEM command payload
* in H/W format.
* Perform mem copy into or out of the SW area of IPA local mem
* @sw_rsvd: Ignored by H/W. May be used by S/W
* @size: Size in bytes of data to copy. Expected size is up to 2K bytes
* @local_addr: Address in IPA local memory
* @direction: Read or write?
* 0: IPA write, Write to local address from system address
* 1: IPA read, Read from local address to system address
* @skip_pipeline_clear: 0 to wait until IPA pipeline is clear. 1 don't wait
* @pipeline_clear_options: options for pipeline to clear
* 0: HPS - no pkt inside HPS (not grp specific)
* 1: source group - The immediate cmd src grp does not use any pkt ctxs
* 2: Wait until no pkt reside inside IPA pipeline
* 3: reserved
* @rsvd: reserved - should be set to zero
* @system_addr: Address in system memory
*/
struct ipa_imm_cmd_hw_dma_shared_mem {
u64 sw_rsvd:16;
u64 size:16;
u64 local_addr:16;
u64 direction:1;
u64 skip_pipeline_clear:1;
u64 pipeline_clear_options:2;
u64 rsvd:12;
u64 system_addr:64;
};
/*
* struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 - DMA_SHARED_MEM command payload
* in H/W format.
* Perform mem copy into or out of the SW area of IPA local mem
* @sw_rsvd: Ignored by H/W. May be used by S/W
* @size: Size in bytes of data to copy. Expected size is up to 2K bytes
* @clear_after_read: Clearing local memory at the end of a read operation
* allows an atomic read-and-clear if HPS is clear. Ignored for writes.
* @local_addr: Address in IPA local memory
* @direction: Read or write?
* 0: IPA write, Write to local address from system address
* 1: IPA read, Read from local address to system address
* @rsvd: reserved - should be set to zero
* @system_addr: Address in system memory
*/
struct ipa_imm_cmd_hw_dma_shared_mem_v_4_0 {
u64 sw_rsvd:15;
u64 clear_after_read:1;
u64 size:16;
u64 local_addr:16;
u64 direction:1;
u64 rsvd:15;
u64 system_addr:64;
};
/*
* struct ipa_imm_cmd_hw_ip_packet_tag_status -
* IP_PACKET_TAG_STATUS command payload in H/W format.
* This cmd is used to allow SW to track HW processing by setting a TAG
* value that is passed back to SW inside Packet Status information.
* TAG info will be provided as part of Packet Status info generated for
* the next pkt transferred over the pipe.
* This immediate command must be followed by a packet in the same transfer.
* @sw_rsvd: Ignored by H/W. May be used by S/W
* @tag: Tag that is provided back to SW
*/
struct ipa_imm_cmd_hw_ip_packet_tag_status {
u64 sw_rsvd:16;
u64 tag:48;
};
/*
* struct ipa_imm_cmd_hw_dma_task_32b_addr -
* IPA_DMA_TASK_32B_ADDR command payload in H/W format.
* Used by clients using 32bit addresses. Used to perform DMA operation on
* multiple descriptors.
* The Opcode is dynamic, where it holds the number of buffer to process
* @sw_rsvd: Ignored by H/W. May be used by S/W
* @cmplt: Complete flag: When asserted IPA will interrupt SW when the entire
* DMA related data was completely xfered to its destination.
* @eof: End Of Frame flag: When asserted IPA will assert the EOT to the
* dest client. This is used for the aggr sequence
* @flsh: Flush flag: When asserted, the pkt will go through the IPA blocks
* but will not be xfered to the dest client; rather it will be discarded
* @lock: Lock pipe flag: When asserted, IPA will stop processing descriptors
* from other EPs in the same src grp (RX queue)
* @unlock: Unlock pipe flag: When asserted, IPA will stop exclusively
* servicing current EP out of the src EPs of the grp (RX queue)
* @size1: Size of buffer1 data
* @addr1: Pointer to buffer1 data
* @packet_size: Total packet size. If a pkt is sent using multiple DMA_TASKs,
* only the first one needs to have this field set. It will be ignored
* in subsequent DMA_TASKs until the packet ends (EOT). First DMA_TASK
* must contain this field (2 or more buffers) or EOT.
*/
struct ipa_imm_cmd_hw_dma_task_32b_addr {
u64 sw_rsvd:11;
u64 cmplt:1;
u64 eof:1;
u64 flsh:1;
u64 lock:1;
u64 unlock:1;
u64 size1:16;
u64 addr1:32;
u64 packet_size:16;
};
/* IPA Status packet H/W structures and info */
/*
* struct ipa_status_pkt_hw - IPA status packet payload in H/W format.
* This structure describes the status packet H/W structure for the
* following statuses: IPA_STATUS_PACKET, IPA_STATUS_DROPPED_PACKET,
* IPA_STATUS_SUSPENDED_PACKET.
* Other status types have a different status packet structure.
* @status_opcode: The Type of the status (Opcode).
* @exception: (not bitmask) - the first exception that took place.
* In case of exception, src endp and pkt len are always valid.
* @status_mask: Bit mask specifying on which H/W blocks the pkt was processed.
* @pkt_len: Pkt pyld len including hdr, including the retained hdr if used. Does
* not include padding or checksum trailer len.
* @endp_src_idx: Source end point index.
* @rsvd1: reserved
* @endp_dest_idx: Destination end point index.
* Not valid in case of exception
* @rsvd2: reserved
* @metadata: meta data value used by packet
* @flt_local: Filter table location flag: Does the matching flt rule belong
* to a flt tbl that resides in lcl memory? (if not, then system mem)
* @flt_hash: Filter hash hit flag: Was the matching flt rule in the hash tbl?
* @flt_global: Global filter rule flag: Does the matching flt rule belong to
* the global flt tbl? (if not, then the per endp tables)
* @flt_ret_hdr: Retain header in filter rule flag: Does the matching flt rule
* specify to retain the header?
* Starting IPA4.5, this will be true only if the packet has an L2 header.
* @flt_rule_id: The ID of the matching filter rule. This info can be combined
* with endp_src_idx to locate the exact rule. ID=0x3FF is reserved to specify
* a flt miss. In case of a miss, all flt info is to be ignored
* @rt_local: Route table location flag: Does the matching rt rule belong to
* an rt tbl that resides in lcl memory? (if not, then system mem)
* @rt_hash: Route hash hit flag: Was the matching rt rule in the hash tbl?
* @ucp: UC Processing flag.
* @rt_tbl_idx: Index of the rt tbl that contains the rule on which there was
* a match
* @rt_rule_id: The ID of the matching rt rule. This info can be combined
* with rt_tbl_idx to locate the exact rule. ID=0x3FF is reserved to specify
* an rt miss. In case of a miss, all rt info is to be ignored
* @nat_hit: NAT hit flag: Was there a NAT hit?
* @nat_entry_idx: Index of the NAT entry used for NAT processing
* @nat_type: Defines the type of the NAT operation:
* 00: No NAT
* 01: Source NAT
* 10: Destination NAT
* 11: Reserved
* @tag_info: S/W defined value provided via immediate command
* @seq_num: Per source endp unique packet sequence number
* @time_of_day_ctr: running counter from IPA clock
* @hdr_local: Header table location flag: In header insertion, was the header
* taken from the table that resides in local memory? (If not, then system mem)
* @hdr_offset: Offset of the used header in the header table
* @frag_hit: Frag hit flag: Was there a frag rule hit in the H/W frag table?
* @frag_rule: Frag rule index in H/W frag table in case of frag hit
* @hw_specific: H/W specific reserved value
*/
struct ipa_pkt_status_hw {
u64 status_opcode:8;
u64 exception:8;
u64 status_mask:16;
u64 pkt_len:16;
u64 endp_src_idx:5;
u64 rsvd1:3;
u64 endp_dest_idx:5;
u64 rsvd2:3;
u64 metadata:32;
u64 flt_local:1;
u64 flt_hash:1;
u64 flt_global:1;
u64 flt_ret_hdr:1;
u64 flt_rule_id:10;
u64 rt_local:1;
u64 rt_hash:1;
u64 ucp:1;
u64 rt_tbl_idx:5;
u64 rt_rule_id:10;
u64 nat_hit:1;
u64 nat_entry_idx:13;
u64 nat_type:2;
u64 tag_info:48;
u64 seq_num:8;
u64 time_of_day_ctr:24;
u64 hdr_local:1;
u64 hdr_offset:10;
u64 frag_hit:1;
u64 frag_rule:4;
u64 hw_specific:16;
};
/* Size of H/W Packet Status */
#define IPA3_0_PKT_STATUS_SIZE 32
/* Headers and processing context H/W structures and definitions */
/* uCP command numbers */
#define IPA_HDR_UCP_802_3_TO_802_3 6
#define IPA_HDR_UCP_802_3_TO_ETHII 7
#define IPA_HDR_UCP_ETHII_TO_802_3 8
#define IPA_HDR_UCP_ETHII_TO_ETHII 9
#define IPA_HDR_UCP_L2TP_HEADER_ADD 10
#define IPA_HDR_UCP_L2TP_HEADER_REMOVE 11
/* Processing context TLV type */
#define IPA_PROC_CTX_TLV_TYPE_END 0
#define IPA_PROC_CTX_TLV_TYPE_HDR_ADD 1
#define IPA_PROC_CTX_TLV_TYPE_PROC_CMD 3
/**
* struct ipa_hw_hdr_proc_ctx_tlv -
* HW structure of IPA processing context header - TLV part
* @type: 0 - end type
* 1 - header addition type
* 3 - processing command type
* @length: number of bytes after tlv
* for type:
* 0 - needs to be 0
* 1 - header addition length
* 3 - number of 32B including type and length.
* @value: specific value for type
* for type:
* 0 - needs to be 0
* 1 - header length
* 3 - command ID (see IPA_HDR_UCP_* definitions)
*/
struct ipa_hw_hdr_proc_ctx_tlv {
u32 type:8;
u32 length:8;
u32 value:16;
};
/**
* struct ipa_hw_hdr_proc_ctx_hdr_add -
* HW structure of IPA processing context - add header tlv
* @tlv: IPA processing context TLV
* @hdr_addr: processing context header address
*/
struct ipa_hw_hdr_proc_ctx_hdr_add {
struct ipa_hw_hdr_proc_ctx_tlv tlv;
u32 hdr_addr;
u32 hdr_addr_hi;
};
/**
* struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr -
* HW structure of IPA processing context - add l2tp header tlv
* @tlv: IPA processing context TLV
* @l2tp_params: l2tp parameters
*/
struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr {
struct ipa_hw_hdr_proc_ctx_tlv tlv;
struct ipa_l2tp_header_add_procparams l2tp_params;
};
/**
* struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr -
* HW structure of IPA processing context - remove l2tp header tlv
* @tlv: IPA processing context TLV
* @l2tp_params: l2tp parameters
*/
struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr {
struct ipa_hw_hdr_proc_ctx_tlv tlv;
struct ipa_l2tp_header_remove_procparams l2tp_params;
};
/**
* struct ipa_hw_hdr_proc_ctx_add_hdr_seq -
* IPA processing context header - add header sequence
* @hdr_add: add header command
* @end: tlv end command (cmd.type must be 0)
*/
struct ipa_hw_hdr_proc_ctx_add_hdr_seq {
struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
struct ipa_hw_hdr_proc_ctx_tlv end;
};
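/*
* Illustrative sketch (field values are placeholders, not taken from the
* driver): building an add-header sequence per the TLV rules documented
* above.
*
*	struct ipa_hw_hdr_proc_ctx_add_hdr_seq seq;
*
*	memset(&seq, 0, sizeof(seq));
*	seq.hdr_add.tlv.type = IPA_PROC_CTX_TLV_TYPE_HDR_ADD;
*	seq.hdr_add.tlv.length = hdr_len;	(header addition length, assumed)
*	seq.hdr_add.tlv.value = hdr_len;	(header length, assumed)
*	seq.hdr_add.hdr_addr = lower_32_bits(hdr_dma_addr);
*	seq.hdr_add.hdr_addr_hi = upper_32_bits(hdr_dma_addr);
*	seq.end.type = IPA_PROC_CTX_TLV_TYPE_END;	(end TLV type must be 0)
*/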
/**
* struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq -
* IPA processing context header - process command sequence
* @hdr_add: add header command
* @cmd: tlv processing command (cmd.type must be 3)
* @end: tlv end command (cmd.type must be 0)
*/
struct ipa_hw_hdr_proc_ctx_add_hdr_cmd_seq {
struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
struct ipa_hw_hdr_proc_ctx_tlv cmd;
struct ipa_hw_hdr_proc_ctx_tlv end;
};
/**
* struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq -
* IPA processing context header - process command sequence
* @hdr_add: add header command
* @l2tp_params: l2tp params for header addition
* @end: tlv end command (cmd.type must be 0)
*/
struct ipa_hw_hdr_proc_ctx_add_l2tp_hdr_cmd_seq {
struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
struct ipa_hw_hdr_proc_ctx_l2tp_add_hdr l2tp_params;
struct ipa_hw_hdr_proc_ctx_tlv end;
};
/**
* struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq -
* IPA processing context header - process command sequence
* @hdr_add: add header command
* @l2tp_params: l2tp params for header removal
* @end: tlv end command (cmd.type must be 0)
*/
struct ipa_hw_hdr_proc_ctx_remove_l2tp_hdr_cmd_seq {
struct ipa_hw_hdr_proc_ctx_hdr_add hdr_add;
struct ipa_hw_hdr_proc_ctx_l2tp_remove_hdr l2tp_params;
struct ipa_hw_hdr_proc_ctx_tlv end;
};
#endif /* _IPAHAL_I_H_ */

510
ipa/ipa_v3/ipahal/ipahal_nat.c Normal file
View file

@@ -0,0 +1,510 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/debugfs.h>
#include "ipahal_nat.h"
#include "ipahal_nat_i.h"
#include "ipahal_i.h"
#define IPA_64_LOW_32_MASK (0xFFFFFFFF)
#define IPA_64_HIGH_32_MASK (0xFFFFFFFF00000000ULL)
static const char *ipahal_nat_type_to_str[IPA_NAT_MAX] = {
__stringify(IPAHAL_NAT_IPV4),
__stringify(IPAHAL_NAT_IPV4_INDEX),
__stringify(IPAHAL_NAT_IPV4_PDN),
__stringify(IPAHAL_NAT_IPV6CT)
};
static size_t ipa_nat_ipv4_entry_size_v_3_0(void)
{
return sizeof(struct ipa_nat_hw_ipv4_entry);
}
static size_t ipa_nat_ipv4_index_entry_size_v_3_0(void)
{
return sizeof(struct ipa_nat_hw_indx_entry);
}
static size_t ipa_nat_ipv4_pdn_entry_size_v_4_0(void)
{
return sizeof(struct ipa_nat_hw_pdn_entry);
}
static size_t ipa_nat_ipv6ct_entry_size_v_4_0(void)
{
return sizeof(struct ipa_nat_hw_ipv6ct_entry);
}
static bool ipa_nat_ipv4_is_entry_zeroed_v_3_0(const void *entry)
{
struct ipa_nat_hw_ipv4_entry zero_entry = { 0 };
return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
}
static bool ipa_nat_ipv4_is_index_entry_zeroed_v_3_0(const void *entry)
{
struct ipa_nat_hw_indx_entry zero_entry = { 0 };
return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
}
static bool ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0(const void *entry)
{
struct ipa_nat_hw_pdn_entry zero_entry = { 0 };
return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
}
static bool ipa_nat_ipv6ct_is_entry_zeroed_v_4_0(const void *entry)
{
struct ipa_nat_hw_ipv6ct_entry zero_entry = { 0 };
return (memcmp(&zero_entry, entry, sizeof(zero_entry))) ? false : true;
}
static bool ipa_nat_ipv4_is_entry_valid_v_3_0(const void *entry)
{
struct ipa_nat_hw_ipv4_entry *hw_entry =
(struct ipa_nat_hw_ipv4_entry *)entry;
return hw_entry->enable &&
hw_entry->protocol != IPAHAL_NAT_INVALID_PROTOCOL;
}
static bool ipa_nat_ipv4_is_index_entry_valid_v_3_0(const void *entry)
{
struct ipa_nat_hw_indx_entry *hw_entry =
(struct ipa_nat_hw_indx_entry *)entry;
return hw_entry->tbl_entry != 0;
}
static bool ipa_nat_ipv4_is_pdn_entry_valid_v_4_0(const void *entry)
{
struct ipa_nat_hw_pdn_entry *hw_entry =
(struct ipa_nat_hw_pdn_entry *)entry;
return hw_entry->public_ip != 0;
}
static bool ipa_nat_ipv6ct_is_entry_valid_v_4_0(const void *entry)
{
struct ipa_nat_hw_ipv6ct_entry *hw_entry =
(struct ipa_nat_hw_ipv6ct_entry *)entry;
return hw_entry->enable &&
hw_entry->protocol != IPAHAL_NAT_INVALID_PROTOCOL;
}
static int ipa_nat_ipv4_stringify_entry_v_3_0(const void *entry,
char *buff, size_t buff_size)
{
const struct ipa_nat_hw_ipv4_entry *nat_entry =
(const struct ipa_nat_hw_ipv4_entry *)entry;
return scnprintf(buff, buff_size,
"\t\tPrivate_IP=%pI4h Target_IP=%pI4h\n"
"\t\tNext_Index=%d Public_Port=%d\n"
"\t\tPrivate_Port=%d Target_Port=%d\n"
"\t\tIP_CKSM_delta=0x%x Enable=%s Redirect=%s\n"
"\t\tTime_stamp=0x%x Proto=%d\n"
"\t\tPrev_Index=%d Indx_tbl_entry=%d\n"
"\t\tTCP_UDP_cksum_delta=0x%x\n",
&nat_entry->private_ip, &nat_entry->target_ip,
nat_entry->next_index, nat_entry->public_port,
nat_entry->private_port, nat_entry->target_port,
nat_entry->ip_chksum,
(nat_entry->enable) ? "true" : "false",
(nat_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route",
nat_entry->time_stamp, nat_entry->protocol,
nat_entry->prev_index, nat_entry->indx_tbl_entry,
nat_entry->tcp_udp_chksum);
}
static int ipa_nat_ipv4_stringify_entry_v_4_0(const void *entry,
char *buff, size_t buff_size)
{
int length;
const struct ipa_nat_hw_ipv4_entry *nat_entry =
(const struct ipa_nat_hw_ipv4_entry *)entry;
length = ipa_nat_ipv4_stringify_entry_v_3_0(entry, buff, buff_size);
length += scnprintf(buff + length, buff_size - length,
"\t\tPDN_Index=%d\n", nat_entry->pdn_index);
return length;
}
static int ipa_nat_ipv4_index_stringify_entry_v_3_0(const void *entry,
char *buff, size_t buff_size)
{
const struct ipa_nat_hw_indx_entry *index_entry =
(const struct ipa_nat_hw_indx_entry *)entry;
return scnprintf(buff, buff_size,
"\t\tTable_Entry=%d Next_Index=%d\n",
index_entry->tbl_entry, index_entry->next_index);
}
static int ipa_nat_ipv4_pdn_stringify_entry_v_4_0(const void *entry,
char *buff, size_t buff_size)
{
const struct ipa_nat_hw_pdn_entry *pdn_entry =
(const struct ipa_nat_hw_pdn_entry *)entry;
return scnprintf(buff, buff_size,
"ip=%pI4h src_metadata=0x%X, dst_metadata=0x%X\n",
&pdn_entry->public_ip,
pdn_entry->src_metadata, pdn_entry->dst_metadata);
}
static inline int ipa_nat_ipv6_stringify_addr(char *buff, size_t buff_size,
const char *msg, u64 lsb, u64 msb)
{
struct in6_addr addr;
addr.s6_addr32[0] = cpu_to_be32((msb & IPA_64_HIGH_32_MASK) >> 32);
addr.s6_addr32[1] = cpu_to_be32(msb & IPA_64_LOW_32_MASK);
addr.s6_addr32[2] = cpu_to_be32((lsb & IPA_64_HIGH_32_MASK) >> 32);
addr.s6_addr32[3] = cpu_to_be32(lsb & IPA_64_LOW_32_MASK);
return scnprintf(buff, buff_size,
"\t\t%s_IPv6_Addr=%pI6c\n", msg, &addr);
}
static int ipa_nat_ipv6ct_stringify_entry_v_4_0(const void *entry,
char *buff, size_t buff_size)
{
int length = 0;
const struct ipa_nat_hw_ipv6ct_entry *ipv6ct_entry =
(const struct ipa_nat_hw_ipv6ct_entry *)entry;
length += ipa_nat_ipv6_stringify_addr(
buff + length,
buff_size - length,
"Src",
ipv6ct_entry->src_ipv6_lsb,
ipv6ct_entry->src_ipv6_msb);
length += ipa_nat_ipv6_stringify_addr(
buff + length,
buff_size - length,
"Dest",
ipv6ct_entry->dest_ipv6_lsb,
ipv6ct_entry->dest_ipv6_msb);
length += scnprintf(buff + length, buff_size - length,
"\t\tEnable=%s Redirect=%s Time_Stamp=0x%x Proto=%d\n"
"\t\tNext_Index=%d Dest_Port=%d Src_Port=%d\n"
"\t\tDirection Settings: Out=%s In=%s\n"
"\t\tPrev_Index=%d\n",
(ipv6ct_entry->enable) ? "true" : "false",
(ipv6ct_entry->redirect) ? "Direct_To_APPS" : "Fwd_to_route",
ipv6ct_entry->time_stamp,
ipv6ct_entry->protocol,
ipv6ct_entry->next_index,
ipv6ct_entry->dest_port,
ipv6ct_entry->src_port,
(ipv6ct_entry->out_allowed) ? "Allow" : "Deny",
(ipv6ct_entry->in_allowed) ? "Allow" : "Deny",
ipv6ct_entry->prev_index);
return length;
}
static void ipa_nat_ipv4_pdn_construct_entry_v_4_0(const void *fields,
u32 *address)
{
const struct ipahal_nat_pdn_entry *pdn_entry =
(const struct ipahal_nat_pdn_entry *)fields;
struct ipa_nat_hw_pdn_entry *pdn_entry_address =
(struct ipa_nat_hw_pdn_entry *)address;
memset(pdn_entry_address, 0, sizeof(struct ipa_nat_hw_pdn_entry));
pdn_entry_address->public_ip = pdn_entry->public_ip;
pdn_entry_address->src_metadata = pdn_entry->src_metadata;
pdn_entry_address->dst_metadata = pdn_entry->dst_metadata;
}
static void ipa_nat_ipv4_pdn_parse_entry_v_4_0(void *fields,
const u32 *address)
{
struct ipahal_nat_pdn_entry *pdn_entry =
(struct ipahal_nat_pdn_entry *)fields;
const struct ipa_nat_hw_pdn_entry *pdn_entry_address =
(const struct ipa_nat_hw_pdn_entry *)address;
pdn_entry->public_ip = pdn_entry_address->public_ip;
pdn_entry->src_metadata = pdn_entry_address->src_metadata;
pdn_entry->dst_metadata = pdn_entry_address->dst_metadata;
}
/*
* struct ipahal_nat_obj - H/W information for specific IPA version
* @entry_size - CB to get the size of the entry
* @is_entry_zeroed - CB to determine whether an entry is definitely zero
* @is_entry_valid - CB to determine whether an entry is valid
* The validity criterion depends on the entry type. E.g. a NAT base table
* entry needs to have a valid protocol and be enabled.
* @stringify_entry - CB to create string that represents an entry
* @construct_entry - CB to create NAT entry using the given fields
* @parse_entry - CB to parse NAT entry to the given fields structure
*/
struct ipahal_nat_obj {
size_t (*entry_size)(void);
bool (*is_entry_zeroed)(const void *entry);
bool (*is_entry_valid)(const void *entry);
int (*stringify_entry)(const void *entry, char *buff, size_t buff_size);
void (*construct_entry)(const void *fields, u32 *address);
void (*parse_entry)(void *fields, const u32 *address);
};
/*
* This table contains the info regarding each NAT type for IPAv3 and later:
* information like the entry size getter and the stringify entry functions.
* All the information on all the NAT types for IPAv3 is statically
* defined below. If information is missing regarding some NAT type on some
* IPA version, the init function will fill it in with the information from
* the previous IPA version.
* Information is considered missing if all of the fields are 0.
*/
static struct ipahal_nat_obj ipahal_nat_objs[IPA_HW_MAX][IPA_NAT_MAX] = {
/* IPAv3 */
[IPA_HW_v3_0][IPAHAL_NAT_IPV4] = {
ipa_nat_ipv4_entry_size_v_3_0,
ipa_nat_ipv4_is_entry_zeroed_v_3_0,
ipa_nat_ipv4_is_entry_valid_v_3_0,
ipa_nat_ipv4_stringify_entry_v_3_0
},
[IPA_HW_v3_0][IPAHAL_NAT_IPV4_INDEX] = {
ipa_nat_ipv4_index_entry_size_v_3_0,
ipa_nat_ipv4_is_index_entry_zeroed_v_3_0,
ipa_nat_ipv4_is_index_entry_valid_v_3_0,
ipa_nat_ipv4_index_stringify_entry_v_3_0
},
/* IPAv4 */
[IPA_HW_v4_0][IPAHAL_NAT_IPV4] = {
ipa_nat_ipv4_entry_size_v_3_0,
ipa_nat_ipv4_is_entry_zeroed_v_3_0,
ipa_nat_ipv4_is_entry_valid_v_3_0,
ipa_nat_ipv4_stringify_entry_v_4_0
},
[IPA_HW_v4_0][IPAHAL_NAT_IPV4_PDN] = {
ipa_nat_ipv4_pdn_entry_size_v_4_0,
ipa_nat_ipv4_is_pdn_entry_zeroed_v_4_0,
ipa_nat_ipv4_is_pdn_entry_valid_v_4_0,
ipa_nat_ipv4_pdn_stringify_entry_v_4_0,
ipa_nat_ipv4_pdn_construct_entry_v_4_0,
ipa_nat_ipv4_pdn_parse_entry_v_4_0
},
[IPA_HW_v4_0][IPAHAL_NAT_IPV6CT] = {
ipa_nat_ipv6ct_entry_size_v_4_0,
ipa_nat_ipv6ct_is_entry_zeroed_v_4_0,
ipa_nat_ipv6ct_is_entry_valid_v_4_0,
ipa_nat_ipv6ct_stringify_entry_v_4_0
}
};
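/*
* For example, on an IPA_HW_v4_5 target the [IPA_HW_v4_5] row above is
* all-zero, so ipahal_nat_init() below copies the IPA_HW_v4_0 callbacks
* forward; the v4_0 IPAHAL_NAT_IPV4 entry in turn differs from v3_0 only
* in its stringify_entry callback.
*/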
static void ipahal_nat_check_obj(struct ipahal_nat_obj *obj,
int nat_type, int ver)
{
WARN(obj->entry_size == NULL, "%s missing entry_size for version %d\n",
ipahal_nat_type_str(nat_type), ver);
WARN(obj->is_entry_zeroed == NULL,
"%s missing is_entry_zeroed for version %d\n",
ipahal_nat_type_str(nat_type), ver);
WARN(obj->stringify_entry == NULL,
"%s missing stringify_entry for version %d\n",
ipahal_nat_type_str(nat_type), ver);
}
/*
* ipahal_nat_init() - Build the NAT information table
* See ipahal_nat_objs[][] comments
*/
int ipahal_nat_init(enum ipa_hw_type ipa_hw_type)
{
int i;
int j;
struct ipahal_nat_obj zero_obj, *next_obj;
IPAHAL_DBG("Entry - HW_TYPE=%d\n", ipa_hw_type);
memset(&zero_obj, 0, sizeof(zero_obj));
if ((ipa_hw_type < 0) || (ipa_hw_type >= IPA_HW_MAX)) {
IPAHAL_ERR("invalid IPA HW type (%d)\n", ipa_hw_type);
return -EINVAL;
}
for (i = IPA_HW_v3_0 ; i < ipa_hw_type ; ++i) {
for (j = 0; j < IPA_NAT_MAX; ++j) {
next_obj = &ipahal_nat_objs[i + 1][j];
if (!memcmp(next_obj, &zero_obj, sizeof(*next_obj))) {
memcpy(next_obj, &ipahal_nat_objs[i][j],
sizeof(*next_obj));
} else {
ipahal_nat_check_obj(next_obj, j, i + 1);
}
}
}
return 0;
}
const char *ipahal_nat_type_str(enum ipahal_nat_type nat_type)
{
if (nat_type < 0 || nat_type >= IPA_NAT_MAX) {
IPAHAL_ERR("requested NAT type %d is invalid\n", nat_type);
return "Invalid NAT type";
}
return ipahal_nat_type_to_str[nat_type];
}
int ipahal_nat_entry_size(enum ipahal_nat_type nat_type, size_t *entry_size)
{
if (WARN(entry_size == NULL, "entry_size is NULL\n"))
return -EINVAL;
if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
"requested NAT type %d is invalid\n", nat_type))
return -EINVAL;
IPAHAL_DBG("Get the entry size for NAT type=%s\n",
ipahal_nat_type_str(nat_type));
*entry_size =
ipahal_nat_objs[ipahal_ctx->hw_type][nat_type].entry_size();
IPAHAL_DBG("The entry size is %zu\n", *entry_size);
return 0;
}
int ipahal_nat_is_entry_zeroed(enum ipahal_nat_type nat_type, void *entry,
bool *entry_zeroed)
{
struct ipahal_nat_obj *nat_ptr;
if (WARN(entry == NULL || entry_zeroed == NULL,
"NULL pointer received\n"))
return -EINVAL;
if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
"requested NAT type %d is invalid\n", nat_type))
return -EINVAL;
IPAHAL_DBG("Determine whether the entry is zeroed for NAT type=%s\n",
ipahal_nat_type_str(nat_type));
nat_ptr =
&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
*entry_zeroed = nat_ptr->is_entry_zeroed(entry);
IPAHAL_DBG("The entry is %szeroed\n", (*entry_zeroed) ? "" : "not ");
return 0;
}
int ipahal_nat_is_entry_valid(enum ipahal_nat_type nat_type, void *entry,
bool *entry_valid)
{
struct ipahal_nat_obj *nat_obj;
if (WARN(entry == NULL || entry_valid == NULL,
"NULL pointer received\n"))
return -EINVAL;
if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
"requested NAT type %d is invalid\n", nat_type))
return -EINVAL;
IPAHAL_DBG("Determine whether the entry is valid for NAT type=%s\n",
ipahal_nat_type_str(nat_type));
nat_obj = &ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
*entry_valid = nat_obj->is_entry_valid(entry);
IPAHAL_DBG("The entry is %svalid\n", (*entry_valid) ? "" : "not ");
return 0;
}
int ipahal_nat_stringify_entry(enum ipahal_nat_type nat_type, void *entry,
char *buff, size_t buff_size)
{
int result;
struct ipahal_nat_obj *nat_obj_ptr;
if (WARN(entry == NULL || buff == NULL, "NULL pointer received\n"))
return -EINVAL;
if (WARN(!buff_size, "The output buff size is zero\n"))
return -EINVAL;
if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
"requested NAT type %d is invalid\n", nat_type))
return -EINVAL;
nat_obj_ptr =
&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
IPAHAL_DBG("Create the string for the entry of NAT type=%s\n",
ipahal_nat_type_str(nat_type));
result = nat_obj_ptr->stringify_entry(entry, buff, buff_size);
IPAHAL_DBG("The string successfully created with length %d\n",
result);
return result;
}
int ipahal_nat_construct_entry(enum ipahal_nat_type nat_type,
const void *fields,
void *address)
{
struct ipahal_nat_obj *nat_obj_ptr;
if (WARN(address == NULL || fields == NULL, "NULL pointer received\n"))
return -EINVAL;
if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
"requested NAT type %d is invalid\n", nat_type))
return -EINVAL;
IPAHAL_DBG("Create %s entry using given fields\n",
ipahal_nat_type_str(nat_type));
nat_obj_ptr =
&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
nat_obj_ptr->construct_entry(fields, address);
return 0;
}
int ipahal_nat_parse_entry(enum ipahal_nat_type nat_type, void *fields,
const void *address)
{
struct ipahal_nat_obj *nat_obj_ptr;
if (WARN(address == NULL || fields == NULL, "NULL pointer received\n"))
return -EINVAL;
if (WARN(nat_type < 0 || nat_type >= IPA_NAT_MAX,
"requested NAT type %d is invalid\n", nat_type))
return -EINVAL;
IPAHAL_DBG("Get the parsed values for NAT type=%s\n",
ipahal_nat_type_str(nat_type));
nat_obj_ptr =
&ipahal_nat_objs[ipahal_ctx->hw_type][nat_type];
nat_obj_ptr->parse_entry(fields, address);
return 0;
}
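/*
* Usage sketch (illustrative only; pdn_table_base is a placeholder for a
* pointer to the first entry of the mapped PDN table):
*
*	struct ipahal_nat_pdn_entry pdn = {
*		.public_ip = 0xC0A80101,	(192.168.1.1, example value)
*		.src_metadata = 0,
*		.dst_metadata = 0,
*	};
*	u32 *slot = pdn_table_base;
*
*	ipahal_nat_construct_entry(IPAHAL_NAT_IPV4_PDN, &pdn, slot);
*	ipahal_nat_parse_entry(IPAHAL_NAT_IPV4_PDN, &pdn, slot);
*/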

Some files were not shown because too many files changed in this diff.