Merge "msm: ipa: create ipa_client for ethernet"

This commit is contained in:
qctecmdr
2020-11-17 15:46:06 -08:00
committed by Gerrit - the friendly Code Review server
21 changed files with 2532 additions and 93 deletions

View File

@@ -2391,6 +2391,7 @@ static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
break;
case GSI_CHAN_PROT_AQC:
case GSI_CHAN_PROT_11AD:
case GSI_CHAN_PROT_RTK:
ch_k_cntxt_0.chtype_protocol_msb = 1;
break;
default:
@@ -4596,6 +4597,37 @@ void gsi_wdi3_write_evt_ring_db(unsigned long evt_ring_hdl,
}
EXPORT_SYMBOL(gsi_wdi3_write_evt_ring_db);
int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
{
if (is_rp) {
return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_READ_PTR,
gsi_ctx->per.ee, chan_hdl);
} else {
return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_RE_FETCH_WRITE_PTR,
gsi_ctx->per.ee, chan_hdl);
}
}
EXPORT_SYMBOL(gsi_get_refetch_reg);
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id)
{
/* RTK use scratch 5 */
if (scratch_id == 5) {
/*
* Each channel context is 6 lines of 8 bytes, but n in SHRAM_n
* is in 4-byte offsets, so multiplying ep_id by 6*2=12 gives
* the beginning of the required channel context. The channel
* context layout starts with the ring rbase (8 bytes) followed
* by channel scratch 0-4 (20 bytes), so an additional
* 28/4 = 7 words gets to scratch 5 of the required channel.
*/
return gsihal_read_reg_n(GSI_GSI_SHRAM_n, ep_id * 12 + 7);
}
return 0;
}
EXPORT_SYMBOL(gsi_get_drop_stats);
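As a worked example of the SHRAM index arithmetic in the comment above (a sketch under the stated 48-byte channel-context assumption; the helper name is illustrative, not part of the driver):

/* Hypothetical helper: SHRAM_n word index of channel scratch 5 for an EP */
static inline unsigned int gsi_shram_scratch5_index(unsigned long ep_id)
{
	/* 48-byte context = 12 words of 4 bytes; scratch 5 is 7 words in */
	return ep_id * 12 + 7;	/* e.g. ep_id 10 -> SHRAM word 127 */
}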
void gsi_wdi3_dump_register(unsigned long chan_hdl)
{
uint32_t val;

View File

@@ -142,6 +142,7 @@ enum gsi_evt_chtype {
GSI_EVT_CHTYPE_MHIP_EV = 0x7,
GSI_EVT_CHTYPE_AQC_EV = 0x8,
GSI_EVT_CHTYPE_11AD_EV = 0x9,
GSI_EVT_CHTYPE_RTK_EV = 0xC,
};
enum gsi_evt_ring_elem_size {
@@ -222,6 +223,9 @@ enum gsi_chan_prot {
GSI_CHAN_PROT_MHIP = 0x7,
GSI_CHAN_PROT_AQC = 0x8,
GSI_CHAN_PROT_11AD = 0x9,
GSI_CHAN_PROT_MHIC = 0xA,
GSI_CHAN_PROT_QDSS = 0xB,
GSI_CHAN_PROT_RTK = 0xC,
};
enum gsi_max_prefetch {
@@ -951,6 +955,28 @@ union __packed gsi_wdi3_channel_scratch2_reg {
} data;
};
/**
* gsi_rtk_channel_scratch - Realtek SW config area of
* channel scratch
*
* @rtk_bar_low: Realtek BAR address LSB
* @rtk_bar_high: Realtek BAR address MSB
* @queue_number: DMA channel number in the RTK device
* @fix_buff_size: fixed buffer size (log2-encoded, per the setup code)
* @rtk_buff_addr_high: MSB of the descriptor buffer address the TRE points to
* @rtk_buff_addr_low: LSB of the descriptor buffer address the TRE points to
*/
struct __packed gsi_rtk_channel_scratch {
uint32_t rtk_bar_low;
uint32_t rtk_bar_high : 9;
uint32_t queue_number : 5;
uint32_t fix_buff_size : 4;
uint32_t reserved1 : 6;
uint32_t rtk_buff_addr_high : 8;
uint32_t rtk_buff_addr_low;
uint32_t reserved2;
};
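A condensed sketch of how a client fills this scratch area before starting the channel (it mirrors ipa_eth_setup_rtk_gsi_channel() added later in this change; parameter names are illustrative and ilog2() assumes <linux/log2.h>):

/* Illustrative only: program the RTK channel scratch via the GSI API */
static int rtk_fill_ch_scratch(unsigned long chan_hdl, u64 bar_addr,
	u32 queue_num, u32 buff_size, u64 buff_iova)
{
	union gsi_channel_scratch scr = { };

	scr.rtk.rtk_bar_low = lower_32_bits(bar_addr);
	scr.rtk.rtk_bar_high = upper_32_bits(bar_addr);	/* only 9 bits kept */
	scr.rtk.queue_number = queue_num;
	scr.rtk.fix_buff_size = ilog2(buff_size);	/* power-of-two size */
	scr.rtk.rtk_buff_addr_low = lower_32_bits(buff_iova);
	scr.rtk.rtk_buff_addr_high = upper_32_bits(buff_iova); /* 8 bits kept */

	return gsi_write_channel_scratch(chan_hdl, scr);
}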
/**
* gsi_channel_scratch - channel scratch SW config area
@@ -967,6 +993,7 @@ union __packed gsi_channel_scratch {
struct __packed gsi_wdi3_channel_scratch wdi3;
struct __packed gsi_mhip_channel_scratch mhip;
struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
struct __packed gsi_rtk_channel_scratch rtk;
struct __packed {
uint32_t word1;
uint32_t word2;
@@ -1101,6 +1128,17 @@ struct __packed gsi_wdi3_evt_scratch {
uint32_t reserved2;
};
/**
* gsi_rtk_evt_scratch - realtek protocol SW config area of
* event scratch
* @reserved1: reserve bit.
* @reserved2: reserve bit.
*/
struct __packed gsi_rtk_evt_scratch {
uint32_t reserved1;
uint32_t reserved2;
};
/**
* gsi_evt_scratch - event scratch SW config area
*
@@ -1112,6 +1150,7 @@ union __packed gsi_evt_scratch {
struct __packed gsi_11ad_evt_scratch w11ad;
struct __packed gsi_wdi3_evt_scratch wdi3;
struct __packed gsi_mhip_evt_scratch mhip;
struct __packed gsi_rtk_evt_scratch rtk;
struct __packed {
uint32_t word1;
uint32_t word2;
@@ -2068,6 +2107,21 @@ int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
void gsi_wdi3_write_evt_ring_db(unsigned long chan_hdl, uint32_t db_addr_low,
uint32_t db_addr_high);
/**
* gsi_get_refetch_reg - get WP/RP value from re_fetch register
*
* @chan_hdl: gsi channel handle
* @is_rp: true to read the re-fetch read pointer (RP), false for the write pointer (WP)
*/
int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp);
/**
* gsi_get_drop_stats - get drop stats by GSI
*
* @ep_id: ep index
* @scratch_id: scratch register that holds the drop stats
*/
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id);
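A minimal usage sketch for the two new accessors (hypothetical caller; the handles are assumed to come from the IPA driver context, and ipa3_eth_get_status() later in this change follows the same pattern):

static void dump_gsi_ch_state(unsigned long chan_hdl, unsigned long ep_id)
{
	int rp = gsi_get_refetch_reg(chan_hdl, true);	/* re-fetch read ptr */
	int wp = gsi_get_refetch_reg(chan_hdl, false);	/* re-fetch write ptr */
	int drops = gsi_get_drop_stats(ep_id, 5);	/* RTK keeps drops in scratch 5 */

	pr_info("gsi ch %lu: rp=0x%x wp=0x%x drops=0x%x\n", chan_hdl, rp, wp, drops);
}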
/**
* gsi_wdi3_dump_register - dump wdi3 related gsi registers

View File

@@ -161,6 +161,7 @@ static const char *gsireg_name_to_str[GSI_REG_MAX] = {
__stringify(GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_k),
__stringify(GSI_INTER_EE_n_SRC_EV_CH_IRQ_k),
__stringify(GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_k),
__stringify(GSI_GSI_SHRAM_n),
};
/*
@@ -1661,6 +1662,9 @@ static struct gsihal_reg_obj gsihal_reg_objs[GSI_VER_MAX][GSI_REG_MAX] = {
[GSI_VER_2_5][GSI_GSI_INST_RAM_n] = {
gsireg_construct_dummy, gsireg_parse_dummy,
0x0001b000, GSI_GSI_INST_RAM_n_WORD_SZ, 0},
[GSI_VER_2_5][GSI_GSI_SHRAM_n] = {
gsireg_construct_dummy, gsireg_parse_dummy,
0x00002000, GSI_GSI_SHRAM_n_WORD_SZ, 0 },
/* GSIv2_9 */
[GSI_VER_2_9][GSI_EE_n_EV_CH_k_CNTXT_1] = {

View File

@@ -151,6 +151,7 @@ enum gsihal_reg_name {
GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_k,
GSI_INTER_EE_n_SRC_EV_CH_IRQ_k,
GSI_INTER_EE_n_SRC_EV_CH_IRQ_CLR_k,
GSI_GSI_SHRAM_n,
GSI_REG_MAX
};

View File

@@ -15,6 +15,9 @@
/* GSI_GSI_INST_RAM_n */
#define GSI_GSI_INST_RAM_n_WORD_SZ 0x4
/* GSI_GSI_SHRAM_n */
#define GSI_GSI_SHRAM_n_WORD_SZ 0x4
#define GSI_GSI_INST_RAM_n_MAXn 4095
#define GSI_V2_0_GSI_INST_RAM_n_MAXn 6143
#define GSI_V2_2_GSI_INST_RAM_n_MAXn 4095

View File

@@ -31,6 +31,7 @@ ipam-y += \
ipa_v3/ipahal/ipahal_fltrt.o \
ipa_v3/ipahal/ipahal_hw_stats.o \
ipa_v3/ipahal/ipahal_nat.o \
ipa_v3/ipa_eth_i.o \
ipam-$(CONFIG_RMNET_IPA3) += ipa_v3/rmnet_ipa.o ipa_v3/ipa_qmi_service_v01.o \
ipa_v3/ipa_qmi_service.o ipa_v3/rmnet_ctl_ipa.o \

View File

@@ -7,4 +7,4 @@ obj-$(CONFIG_ECM_IPA) += ecmipam.o
ecmipam-objs := ecm_ipa.o
obj-$(CONFIG_IPA_CLIENTS_MANAGER) += ipa_clientsm.o
ipa_clientsm-objs := ipa_clients_manager.o ipa_usb.o ipa_wdi3.o ipa_gsb.o ipa_uc_offload.o ipa_wigig.o ipa_mhi_client.o
ipa_clientsm-objs := ipa_clients_manager.o ipa_usb.o ipa_wdi3.o ipa_gsb.o ipa_uc_offload.o ipa_wigig.o ipa_mhi_client.o ipa_eth.o

View File

@@ -21,4 +21,6 @@ void ipa_mhi_register(void);
void ipa_wigig_register(void);
void ipa_eth_register(void);
#endif /* _IPA_CLIENTS_I_H */

View File

@@ -26,6 +26,8 @@ static int __init ipa_clients_manager_init(void)
ipa_wigig_register();
ipa_eth_register();
ipa3_notify_clients_registered();
return 0;

File diff suppressed because it is too large

View File

@@ -13,6 +13,7 @@
#include <linux/ipa_uc_offload.h>
#include <linux/ipa_wdi3.h>
#include <linux/ipa_wigig.h>
#include <linux/ipa_eth.h>
#include <linux/ratelimit.h>
#include "gsi.h"
@@ -420,7 +421,11 @@ struct IpaHwOffloadStatsAllocCmdData_t {
* @ch_num: number of ch supported for given protocol
*/
struct ipa_uc_dbg_ring_stats {
struct IpaHwRingStats_t ring[IPA_MAX_CH_STATS_SUPPORTED];
union {
struct IpaHwRingStats_t ring[IPA_MAX_CH_STATS_SUPPORTED];
struct ipa_uc_dbg_rtk_ring_stats
rtk[IPA_MAX_CH_STATS_SUPPORTED];
} u;
u8 num_ch;
};
@@ -772,4 +777,32 @@ int ipa3_get_smmu_params(struct ipa_smmu_in_params *in,
*/
int ipa3_tz_unlock_reg(struct ipa_tz_unlock_reg_info *reg_info, u16 num_regs);
int ipa_eth_rtk_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa_eth_aqc_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa_eth_emac_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa_eth_rtk_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa_eth_aqc_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa_eth_emac_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa_eth_client_conn_evt(struct ipa_ecm_msg *msg);
int ipa_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
#endif /* _IPA_COMMON_I_H_ */

View File

@@ -762,23 +762,23 @@ int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_USB_CHANNELS; i++) {
stats->ring[i].ringFull = ioread32(
stats->u.ring[i].ringFull = ioread32(
ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->ring[i].ringEmpty = ioread32(
stats->u.ring[i].ringEmpty = ioread32(
ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->ring[i].ringUsageHigh = ioread32(
stats->u.ring[i].ringUsageHigh = ioread32(
ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->ring[i].ringUsageLow = ioread32(
stats->u.ring[i].ringUsageLow = ioread32(
ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->ring[i].RingUtilCount = ioread32(
stats->u.ring[i].RingUtilCount = ioread32(
ipa3_ctx->usb_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
@@ -1939,23 +1939,23 @@ int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_AQC_CHANNELS; i++) {
stats->ring[i].ringFull = ioread32(
stats->u.ring[i].ringFull = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->ring[i].ringEmpty = ioread32(
stats->u.ring[i].ringEmpty = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->ring[i].ringUsageHigh = ioread32(
stats->u.ring[i].ringUsageHigh = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->ring[i].ringUsageLow = ioread32(
stats->u.ring[i].ringUsageLow = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->ring[i].RingUtilCount = ioread32(
stats->u.ring[i].RingUtilCount = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
@@ -1963,5 +1963,70 @@ int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}
/**
* ipa3_get_rtk_gsi_stats() - Query RTK gsi stats from uc
* @stats: [inout] stats blob from client populated by driver
*
* Returns: 0 on success, negative on failure
*
* @note Cannot be called from atomic context
*
*/
int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
{
int i;
u64 low, high;
if (!ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio) {
IPAERR("bad parms NULL eth_gsi_stats_mmio\n");
return -EINVAL;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_RTK_CHANNELS; i++) {
stats->u.rtk[i].commStats.ringFull = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->u.rtk[i].commStats.ringEmpty = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->u.rtk[i].commStats.ringUsageHigh = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->u.rtk[i].commStats.ringUsageLow = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->u.rtk[i].commStats.RingUtilCount = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
stats->u.rtk[i].trCount = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_TRCOUNT_OFF);
stats->u.rtk[i].erCount = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_ERCOUNT_OFF);
stats->u.rtk[i].totalAosCount = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_AOSCOUNT_OFF);
low = ioread32(ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_BUSYTIME_OFF);
high = ioread32(ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_BUSYTIME_OFF + sizeof(u32));
stats->u.rtk[i].busyTime = low | (high << 32);
}
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
}

View File

@@ -34,6 +34,14 @@ struct ipa3_debugfs_file {
const struct file_operations fops;
};
static const char * const ipa_eth_clients_strings[] = {
__stringify(AQC107),
__stringify(AQC113),
__stringify(RTK8111K),
__stringify(RTK8125B),
__stringify(NTN),
__stringify(EMAC),
};
const char *ipa3_event_name[IPA_EVENT_MAX_NUM] = {
__stringify(WLAN_CLIENT_CONNECT),
@@ -102,6 +110,7 @@ const char *ipa3_hdr_proc_type_name[] = {
};
static struct dentry *dent;
static struct dentry *dent_eth;
static char dbg_buff[IPA_MAX_MSG_LEN + 1];
static char *active_clients_buf;
@@ -2327,11 +2336,11 @@ static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
"TX ringUsageHigh=%u\n"
"TX ringUsageLow=%u\n"
"TX RingUtilCount=%u\n",
stats.ring[1].ringFull,
stats.ring[1].ringEmpty,
stats.ring[1].ringUsageHigh,
stats.ring[1].ringUsageLow,
stats.ring[1].RingUtilCount);
stats.u.ring[1].ringFull,
stats.u.ring[1].ringEmpty,
stats.u.ring[1].ringUsageHigh,
stats.u.ring[1].ringUsageLow,
stats.u.ring[1].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"RX ringFull=%u\n"
@@ -2339,11 +2348,11 @@ static ssize_t ipa3_read_wdi_gsi_stats(struct file *file,
"RX ringUsageHigh=%u\n"
"RX ringUsageLow=%u\n"
"RX RingUtilCount=%u\n",
stats.ring[0].ringFull,
stats.ring[0].ringEmpty,
stats.ring[0].ringUsageHigh,
stats.ring[0].ringUsageLow,
stats.ring[0].RingUtilCount);
stats.u.ring[0].ringFull,
stats.u.ring[0].ringEmpty,
stats.u.ring[0].ringUsageHigh,
stats.u.ring[0].ringUsageLow,
stats.u.ring[0].RingUtilCount);
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2374,11 +2383,11 @@ static ssize_t ipa3_read_wdi3_gsi_stats(struct file *file,
"TX ringUsageHigh=%u\n"
"TX ringUsageLow=%u\n"
"TX RingUtilCount=%u\n",
stats.ring[1].ringFull,
stats.ring[1].ringEmpty,
stats.ring[1].ringUsageHigh,
stats.ring[1].ringUsageLow,
stats.ring[1].RingUtilCount);
stats.u.ring[1].ringFull,
stats.u.ring[1].ringEmpty,
stats.u.ring[1].ringUsageHigh,
stats.u.ring[1].ringUsageLow,
stats.u.ring[1].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"RX ringFull=%u\n"
@@ -2386,11 +2395,11 @@ static ssize_t ipa3_read_wdi3_gsi_stats(struct file *file,
"RX ringUsageHigh=%u\n"
"RX ringUsageLow=%u\n"
"RX RingUtilCount=%u\n",
stats.ring[0].ringFull,
stats.ring[0].ringEmpty,
stats.ring[0].ringUsageHigh,
stats.ring[0].ringUsageLow,
stats.ring[0].RingUtilCount);
stats.u.ring[0].ringFull,
stats.u.ring[0].ringEmpty,
stats.u.ring[0].ringUsageHigh,
stats.u.ring[0].ringUsageLow,
stats.u.ring[0].RingUtilCount);
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2439,11 +2448,11 @@ static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
"TX ringUsageHigh=%u\n"
"TX ringUsageLow=%u\n"
"TX RingUtilCount=%u\n",
stats.ring[1].ringFull,
stats.ring[1].ringEmpty,
stats.ring[1].ringUsageHigh,
stats.ring[1].ringUsageLow,
stats.ring[1].RingUtilCount);
stats.u.ring[1].ringFull,
stats.u.ring[1].ringEmpty,
stats.u.ring[1].ringUsageHigh,
stats.u.ring[1].ringUsageLow,
stats.u.ring[1].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"RX ringFull=%u\n"
@@ -2451,11 +2460,11 @@ static ssize_t ipa3_read_aqc_gsi_stats(struct file *file,
"RX ringUsageHigh=%u\n"
"RX ringUsageLow=%u\n"
"RX RingUtilCount=%u\n",
stats.ring[0].ringFull,
stats.ring[0].ringEmpty,
stats.ring[0].ringUsageHigh,
stats.ring[0].ringUsageLow,
stats.ring[0].RingUtilCount);
stats.u.ring[0].ringFull,
stats.u.ring[0].ringEmpty,
stats.u.ring[0].ringUsageHigh,
stats.u.ring[0].ringUsageLow,
stats.u.ring[0].RingUtilCount);
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2486,11 +2495,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageHigh=%u\n"
"IPA_CLIENT_MHI_PRIME_TETH_CONS ringUsageLow=%u\n"
"IPA_CLIENT_MHI_PRIME_TETH_CONS RingUtilCount=%u\n",
stats.ring[1].ringFull,
stats.ring[1].ringEmpty,
stats.ring[1].ringUsageHigh,
stats.ring[1].ringUsageLow,
stats.ring[1].RingUtilCount);
stats.u.ring[1].ringFull,
stats.u.ring[1].ringEmpty,
stats.u.ring[1].ringUsageHigh,
stats.u.ring[1].ringUsageLow,
stats.u.ring[1].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"IPA_CLIENT_MHI_PRIME_TETH_PROD ringFull=%u\n"
@@ -2498,11 +2507,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageHigh=%u\n"
"IPA_CLIENT_MHI_PRIME_TETH_PROD ringUsageLow=%u\n"
"IPA_CLIENT_MHI_PRIME_TETH_PROD RingUtilCount=%u\n",
stats.ring[0].ringFull,
stats.ring[0].ringEmpty,
stats.ring[0].ringUsageHigh,
stats.ring[0].ringUsageLow,
stats.ring[0].RingUtilCount);
stats.u.ring[0].ringFull,
stats.u.ring[0].ringEmpty,
stats.u.ring[0].ringUsageHigh,
stats.u.ring[0].ringUsageLow,
stats.u.ring[0].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringFull=%u\n"
@@ -2510,11 +2519,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageHigh=%u\n"
"IPA_CLIENT_MHI_PRIME_RMNET_CONS ringUsageLow=%u\n"
"IPA_CLIENT_MHI_PRIME_RMNET_CONS RingUtilCount=%u\n",
stats.ring[3].ringFull,
stats.ring[3].ringEmpty,
stats.ring[3].ringUsageHigh,
stats.ring[3].ringUsageLow,
stats.ring[3].RingUtilCount);
stats.u.ring[3].ringFull,
stats.u.ring[3].ringEmpty,
stats.u.ring[3].ringUsageHigh,
stats.u.ring[3].ringUsageLow,
stats.u.ring[3].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringFull=%u\n"
@@ -2522,11 +2531,11 @@ static ssize_t ipa3_read_mhip_gsi_stats(struct file *file,
"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageHigh=%u\n"
"IPA_CLIENT_MHI_PRIME_RMNET_PROD ringUsageLow=%u\n"
"IPA_CLIENT_MHI_PRIME_RMNET_PROD RingUtilCount=%u\n",
stats.ring[2].ringFull,
stats.ring[2].ringEmpty,
stats.ring[2].ringUsageHigh,
stats.ring[2].ringUsageLow,
stats.ring[2].RingUtilCount);
stats.u.ring[2].ringFull,
stats.u.ring[2].ringEmpty,
stats.u.ring[2].ringUsageHigh,
stats.u.ring[2].ringUsageLow,
stats.u.ring[2].RingUtilCount);
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2558,11 +2567,11 @@ static ssize_t ipa3_read_usb_gsi_stats(struct file *file,
"TX ringUsageHigh=%u\n"
"TX ringUsageLow=%u\n"
"TX RingUtilCount=%u\n",
stats.ring[1].ringFull,
stats.ring[1].ringEmpty,
stats.ring[1].ringUsageHigh,
stats.ring[1].ringUsageLow,
stats.ring[1].RingUtilCount);
stats.u.ring[1].ringFull,
stats.u.ring[1].ringEmpty,
stats.u.ring[1].ringUsageHigh,
stats.u.ring[1].ringUsageLow,
stats.u.ring[1].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"RX ringFull=%u\n"
@@ -2570,11 +2579,11 @@ static ssize_t ipa3_read_usb_gsi_stats(struct file *file,
"RX ringUsageHigh=%u\n"
"RX ringUsageLow=%u\n"
"RX RingUtilCount=%u\n",
stats.ring[0].ringFull,
stats.ring[0].ringEmpty,
stats.ring[0].ringUsageHigh,
stats.ring[0].ringUsageLow,
stats.ring[0].RingUtilCount);
stats.u.ring[0].ringFull,
stats.u.ring[0].ringEmpty,
stats.u.ring[0].ringUsageHigh,
stats.u.ring[0].ringUsageLow,
stats.u.ring[0].RingUtilCount);
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
@@ -2905,7 +2914,7 @@ void ipa3_debugfs_init(void)
size_t i;
struct dentry *file;
dent = debugfs_create_dir("ipa", 0);
dent = debugfs_create_dir("ipa", NULL);
if (IS_ERR(dent)) {
IPAERR("fail to create folder in debug_fs.\n");
return;
@@ -3011,6 +3020,338 @@ struct dentry *ipa_debugfs_get_root(void)
}
EXPORT_SYMBOL(ipa_debugfs_get_root);
static ssize_t ipa3_eth_read_status(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
int nbytes;
int cnt = 0;
int i, j, k, type;
struct ipa3_eth_info eth_info;
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"This feature only support on IPA4.5+\n");
cnt += nbytes;
goto done;
}
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"%15s|%10s|%10s|%30s|%10s|%10s\n", "protocol",
"instance", "pipe_hdl", "pipe_enum",
"pipe_id", "ch_id");
cnt += nbytes;
for (i = 0; i < IPA_ETH_CLIENT_MAX; i++) {
for (j = 0; j < IPA_ETH_INST_ID_MAX; j++) {
eth_info = ipa3_ctx->eth_info[i][j];
for (k = 0; k < eth_info.num_ch; k++) {
if (eth_info.map[k].valid) {
type = eth_info.map[k].type;
nbytes = scnprintf(dbg_buff + cnt,
IPA_MAX_MSG_LEN - cnt,
"%15s|%10d|%10d|%30s|%10d|%10d\n",
ipa_eth_clients_strings[i],
j,
eth_info.map[k].pipe_hdl,
ipa_clients_strings[type],
eth_info.map[k].pipe_id,
eth_info.map[k].ch_id);
cnt += nbytes;
}
}
}
}
done:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
static const struct file_operations fops_ipa_eth_status = {
.read = ipa3_eth_read_status,
};
void ipa3_eth_debugfs_init(void)
{
struct dentry *file;
if (IS_ERR_OR_NULL(dent)) {
IPAERR("debugs root not created\n");
return;
}
dent_eth = debugfs_create_dir("eth", dent);
if (IS_ERR(dent_eth)) {
IPAERR("fail to create folder in debug_fs.\n");
return;
}
file = debugfs_create_file("status", IPA_READ_ONLY_MODE,
dent_eth, NULL, &fops_ipa_eth_status);
if (!file) {
IPAERR("could not create status\n");
goto fail;
}
return;
fail:
debugfs_remove_recursive(dent_eth);
}
static ssize_t ipa3_eth_read_perf_status(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
int nbytes;
int cnt = 0;
struct ipa_eth_client *client;
struct ipa_uc_dbg_ring_stats stats;
int tx_ep, rx_ep;
int ret;
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5
&& (ipa3_ctx->ipa_hw_type != IPA_HW_v4_1
|| ipa3_ctx->platform_type != IPA_PLAT_TYPE_APQ)) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"This feature only support on IPA4.5+\n");
cnt += nbytes;
goto done;
}
client = (struct ipa_eth_client *)file->private_data;
switch (client->client_type) {
case IPA_ETH_CLIENT_AQC107:
case IPA_ETH_CLIENT_AQC113:
ret = ipa3_get_aqc_gsi_stats(&stats);
tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
if (!ret) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"%s_ringFull=%u\n"
"%s_ringEmpty=%u\n"
"%s_ringUsageHigh=%u\n"
"%s_ringUsageLow=%u\n"
"%s_RingUtilCount=%u\n",
ipa_clients_strings[tx_ep],
stats.u.ring[1].ringFull,
ipa_clients_strings[tx_ep],
stats.u.ring[1].ringEmpty,
ipa_clients_strings[tx_ep],
stats.u.ring[1].ringUsageHigh,
ipa_clients_strings[tx_ep],
stats.u.ring[1].ringUsageLow,
ipa_clients_strings[tx_ep],
stats.u.ring[1].RingUtilCount);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt,
IPA_MAX_MSG_LEN - cnt,
"%s_ringFull=%u\n"
"%s_ringEmpty=%u\n"
"%s_ringUsageHigh=%u\n"
"%s_ringUsageLow=%u\n"
"%s_RingUtilCount=%u\n",
ipa_clients_strings[rx_ep],
stats.u.ring[0].ringFull,
ipa_clients_strings[rx_ep],
stats.u.ring[0].ringEmpty,
ipa_clients_strings[rx_ep],
stats.u.ring[0].ringUsageHigh,
ipa_clients_strings[rx_ep],
stats.u.ring[0].ringUsageLow,
ipa_clients_strings[rx_ep],
stats.u.ring[0].RingUtilCount);
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff,
IPA_MAX_MSG_LEN,
"Fail to read AQC GSI stats\n");
cnt += nbytes;
}
break;
case IPA_ETH_CLIENT_RTK8111K:
case IPA_ETH_CLIENT_RTK8125B:
ret = ipa3_get_rtk_gsi_stats(&stats);
tx_ep = IPA_CLIENT_RTK_ETHERNET_CONS;
rx_ep = IPA_CLIENT_RTK_ETHERNET_PROD;
if (!ret) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"%s_ringFull=%u\n"
"%s_ringEmpty=%u\n"
"%s_ringUsageHigh=%u\n"
"%s_ringUsageLow=%u\n"
"%s_RingUtilCount=%u\n"
"%s_trCount=%u\n"
"%s_erCound=%u\n"
"%s_totalAoSCount=%u\n"
"%s_busytime=%llu\n",
ipa_clients_strings[tx_ep],
stats.u.rtk[1].commStats.ringFull,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].commStats.ringEmpty,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].commStats.ringUsageHigh,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].commStats.ringUsageLow,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].commStats.RingUtilCount,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].trCount,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].erCount,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].totalAosCount,
ipa_clients_strings[tx_ep],
stats.u.rtk[1].busyTime);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt,
IPA_MAX_MSG_LEN - cnt,
"%s_ringFull=%u\n"
"%s_ringEmpty=%u\n"
"%s_ringUsageHigh=%u\n"
"%s_ringUsageLow=%u\n"
"%s_RingUtilCount=%u\n"
"%s_trCount=%u\n"
"%s_erCount=%u\n"
"%s_totalAoSCount=%u\n"
"%s_busytime=%llu\n",
ipa_clients_strings[rx_ep],
stats.u.rtk[0].commStats.ringFull,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].commStats.ringEmpty,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].commStats.ringUsageHigh,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].commStats.ringUsageLow,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].commStats.RingUtilCount,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].trCount,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].erCount,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].totalAosCount,
ipa_clients_strings[rx_ep],
stats.u.rtk[0].busyTime);
cnt += nbytes;
} else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"Fail to read AQC GSI stats\n");
cnt += nbytes;
}
break;
default:
ret = -EFAULT;
}
done:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
static ssize_t ipa3_eth_read_err_status(struct file *file,
char __user *ubuf, size_t count, loff_t *ppos)
{
int nbytes;
int cnt = 0;
struct ipa_eth_client *client;
int tx_ep, rx_ep;
struct ipa3_eth_error_stats tx_stats;
struct ipa3_eth_error_stats rx_stats;
if (ipa3_ctx->ipa_hw_type < IPA_HW_v4_5
&& (ipa3_ctx->ipa_hw_type != IPA_HW_v4_1
|| ipa3_ctx->platform_type != IPA_PLAT_TYPE_APQ)) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"This feature only support on IPA4.5+\n");
cnt += nbytes;
goto done;
}
client = (struct ipa_eth_client *)file->private_data;
switch (client->client_type) {
case IPA_ETH_CLIENT_AQC107:
case IPA_ETH_CLIENT_AQC113:
tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
break;
case IPA_ETH_CLIENT_RTK8111K:
case IPA_ETH_CLIENT_RTK8125B:
tx_ep = IPA_CLIENT_RTK_ETHERNET_CONS;
rx_ep = IPA_CLIENT_RTK_ETHERNET_PROD;
ipa3_eth_get_status(tx_ep, 5, &tx_stats);
ipa3_eth_get_status(rx_ep, 5, &rx_stats);
break;
default:
IPAERR("Not supported\n");
return 0;
}
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"%s_RP=0x%x\n"
"%s_WP=0x%x\n"
"%s_SCRATCH5=0x%x\n",
ipa_clients_strings[tx_ep],
tx_stats.rp,
ipa_clients_strings[tx_ep],
tx_stats.wp,
ipa_clients_strings[tx_ep],
tx_stats.err);
cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"%s_RP=0x%x\n"
"%s_WP=0x%x\n"
"%s_SCRATCH5=0x%x\n"
"%s_err:%u\n",
ipa_clients_strings[rx_ep],
rx_stats.rp,
ipa_clients_strings[rx_ep],
rx_stats.wp,
ipa_clients_strings[rx_ep],
rx_stats.err,
ipa_clients_strings[rx_ep],
rx_stats.err & 0xff);
cnt += nbytes;
done:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);
}
static const struct file_operations fops_ipa_eth_stats = {
.read = ipa3_eth_read_perf_status,
.open = ipa3_open_dbg,
};
static const struct file_operations fops_ipa_eth_client_status = {
.read = ipa3_eth_read_err_status,
.open = ipa3_open_dbg,
};
void ipa3_eth_debugfs_add_node(struct ipa_eth_client *client)
{
struct dentry *file;
int type, inst_id;
char name[IPA_RESOURCE_NAME_MAX];
if (IS_ERR_OR_NULL(dent_eth)) {
IPAERR("debugs eth root not created\n");
return;
}
if (client == NULL) {
IPAERR_RL("invalid input\n");
return;
}
type = client->client_type;
inst_id = client->inst_id;
snprintf(name, IPA_RESOURCE_NAME_MAX,
"%s_%d_stats", ipa_eth_clients_strings[type], inst_id);
file = debugfs_create_file(name, IPA_READ_ONLY_MODE,
dent_eth, (void *)client, &fops_ipa_eth_stats);
if (!file) {
IPAERR("could not create hw_type file\n");
return;
}
snprintf(name, IPA_RESOURCE_NAME_MAX,
"%s_%d_status", ipa_eth_clients_strings[type], inst_id);
file = debugfs_create_file(name, IPA_READ_ONLY_MODE,
dent_eth, (void *)client, &fops_ipa_eth_client_status);
if (!file) {
IPAERR("could not create hw_type file\n");
goto fail;
}
return;
fail:
debugfs_remove_recursive(dent_eth);
}
#else /* !CONFIG_DEBUG_FS */
#define INVALID_NO_OF_CHAR (-1)
void ipa3_debugfs_init(void) {}
@@ -3023,4 +3364,6 @@ int _ipa_read_ep_reg_v4_0(char *buf, int max_len, int pipe)
{
return INVALID_NO_OF_CHAR;
}
void ipa3_eth_debugfs_init(void) {}
void ipa3_eth_debugfs_add(struct ipa_eth_client *client) {}
#endif

View File

@@ -0,0 +1,698 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*/
#include "ipa_i.h"
#include <linux/if_vlan.h>
#include <linux/ipa_eth.h>
#include <linux/log2.h>
#define IPA_ETH_RTK_MODT (32)
#define IPA_ETH_RTK_MODC (128)
#define IPA_ETH_AGGR_PKT_LIMIT 1
#define IPA_ETH_AGGR_BYTE_LIMIT 2 /* 2 Kbytes aggregation hard byte limit */
#define IPA_ETH_MBOX_M (1)
#define IPA_ETH_RX_MBOX_N (20)
#define IPA_ETH_TX_MBOX_N (21)
#define IPA_ETH_RX_MBOX_VAL (1)
#define IPA_ETH_TX_MBOX_VAL (2)
#define IPA_ETH_PCIE_MASK BIT_ULL(40)
#define IPA_ETH_PCIE_SET(val) (val | IPA_ETH_PCIE_MASK)
enum ipa_eth_dir {
IPA_ETH_RX = 0,
IPA_ETH_TX = 1,
};
static void ipa3_eth_save_client_mapping(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type type, int id,
int pipe_id, int ch_id)
{
struct ipa_eth_client *client_info;
enum ipa_eth_client_type client_type;
u8 inst_id, pipe_hdl;
struct ipa3_eth_info *eth_info;
client_info = pipe->client_info;
client_type = client_info->client_type;
inst_id = client_info->inst_id;
pipe_hdl = pipe->pipe_hdl;
eth_info = &ipa3_ctx->eth_info[client_type][inst_id];
if (!eth_info->map[id].valid) {
eth_info->num_ch++;
eth_info->map[id].type = type;
eth_info->map[id].pipe_id = pipe_id;
eth_info->map[id].ch_id = ch_id;
eth_info->map[id].valid = true;
eth_info->map[id].pipe_hdl = pipe_hdl;
}
}
static int ipa3_eth_config_uc(bool init,
u8 protocol,
u8 dir,
u8 gsi_ch)
{
struct ipa_mem_buffer cmd;
enum ipa_cpu_2_hw_offload_commands command;
int result;
IPADBG("config uc %s\n", init ? "init" : "Deinit");
if (init) {
struct IpaHwOffloadSetUpCmdData_t_v4_0 *cmd_data;
cmd.size = sizeof(*cmd_data);
cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
&cmd.phys_base, GFP_KERNEL);
if (cmd.base == NULL) {
IPAERR("fail to get DMA memory.\n");
return -ENOMEM;
}
cmd_data =
(struct IpaHwOffloadSetUpCmdData_t_v4_0 *)cmd.base;
cmd_data->protocol = protocol;
switch (protocol) {
case IPA_HW_PROTOCOL_AQC:
/* TODO: add support for AQC */
break;
case IPA_HW_PROTOCOL_RTK:
cmd_data->SetupCh_params.RtkSetupCh_params.dir =
dir;
cmd_data->SetupCh_params.RtkSetupCh_params.gsi_ch =
gsi_ch;
break;
default:
IPAERR("invalid protocol%d\n", protocol);
}
command = IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP;
} else {
struct IpaHwOffloadCommonChCmdData_t_v4_0 *cmd_data;
cmd.size = sizeof(*cmd_data);
cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
&cmd.phys_base, GFP_KERNEL);
if (cmd.base == NULL) {
IPAERR("fail to get DMA memory.\n");
return -ENOMEM;
}
cmd_data =
(struct IpaHwOffloadCommonChCmdData_t_v4_0 *)cmd.base;
cmd_data->protocol = protocol;
switch (protocol) {
case IPA_HW_PROTOCOL_AQC:
/* TODO: add support for AQC */
break;
case IPA_HW_PROTOCOL_RTK:
cmd_data->CommonCh_params.RtkCommonCh_params.gsi_ch =
gsi_ch;
break;
default:
IPAERR("invalid protocol%d\n", protocol);
}
cmd_data->CommonCh_params.RtkCommonCh_params.gsi_ch = gsi_ch;
command = IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
command,
IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
false, 10 * HZ);
if (result) {
IPAERR("fail to %s uc for %s gsi channel %d\n",
init ? "init" : "deinit",
dir == IPA_ETH_RX ? "Rx" : "Tx", gsi_ch);
}
dma_free_coherent(ipa3_ctx->uc_pdev,
cmd.size, cmd.base, cmd.phys_base);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
IPADBG("exit\n");
return result;
}
static void ipa_eth_gsi_evt_ring_err_cb(struct gsi_evt_err_notify *notify)
{
switch (notify->evt_id) {
case GSI_EVT_OUT_OF_BUFFERS_ERR:
IPAERR("Got GSI_EVT_OUT_OF_BUFFERS_ERR\n");
break;
case GSI_EVT_OUT_OF_RESOURCES_ERR:
IPAERR("Got GSI_EVT_OUT_OF_RESOURCES_ERR\n");
break;
case GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR:
IPAERR("Got GSI_EVT_UNSUPPORTED_INTER_EE_OP_ERR\n");
break;
case GSI_EVT_EVT_RING_EMPTY_ERR:
IPAERR("Got GSI_EVT_EVT_RING_EMPTY_ERR\n");
break;
default:
IPAERR("Unexpected err evt: %d\n", notify->evt_id);
}
ipa_assert();
}
static void ipa_eth_gsi_chan_err_cb(struct gsi_chan_err_notify *notify)
{
switch (notify->evt_id) {
case GSI_CHAN_INVALID_TRE_ERR:
IPAERR("Got GSI_CHAN_INVALID_TRE_ERR\n");
break;
case GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR:
IPAERR("Got GSI_CHAN_NON_ALLOCATED_EVT_ACCESS_ERR\n");
break;
case GSI_CHAN_OUT_OF_BUFFERS_ERR:
IPAERR("Got GSI_CHAN_OUT_OF_BUFFERS_ERR\n");
break;
case GSI_CHAN_OUT_OF_RESOURCES_ERR:
IPAERR("Got GSI_CHAN_OUT_OF_RESOURCES_ERR\n");
break;
case GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR:
IPAERR("Got GSI_CHAN_UNSUPPORTED_INTER_EE_OP_ERR\n");
break;
case GSI_CHAN_HWO_1_ERR:
IPAERR("Got GSI_CHAN_HWO_1_ERR\n");
break;
default:
IPAERR("Unexpected err evt: %d\n", notify->evt_id);
}
ipa_assert();
}
static int ipa_eth_setup_rtk_gsi_channel(
struct ipa_eth_client_pipe_info *pipe,
struct ipa3_ep_context *ep)
{
struct gsi_evt_ring_props gsi_evt_ring_props;
struct gsi_chan_props gsi_channel_props;
union __packed gsi_channel_scratch ch_scratch;
union __packed gsi_evt_scratch evt_scratch;
const struct ipa_gsi_ep_config *gsi_ep_info;
int result, len;
int queue_number;
u64 bar_addr;
if (unlikely(!pipe->info.is_transfer_ring_valid)) {
IPAERR("RTK transfer ring invalid\n");
ipa_assert();
return -EFAULT;
}
/* setup event ring */
bar_addr =
IPA_ETH_PCIE_SET(pipe->info.client_info.rtk.bar_addr);
memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_RTK_EV;
gsi_evt_ring_props.intr = GSI_INTR_MSI;
gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
if (pipe->dir == IPA_ETH_PIPE_DIR_TX) {
gsi_evt_ring_props.int_modt = IPA_ETH_RTK_MODT;
gsi_evt_ring_props.int_modc = IPA_ETH_RTK_MODC;
}
gsi_evt_ring_props.exclusive = true;
gsi_evt_ring_props.err_cb = ipa_eth_gsi_evt_ring_err_cb;
gsi_evt_ring_props.user_data = NULL;
gsi_evt_ring_props.msi_addr =
bar_addr +
pipe->info.client_info.rtk.dest_tail_ptr_offs;
len = pipe->info.transfer_ring_size;
gsi_evt_ring_props.ring_len = len;
gsi_evt_ring_props.ring_base_addr =
(u64)pipe->info.transfer_ring_base;
result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
ipa3_ctx->gsi_dev_hdl,
&ep->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("fail to alloc RX event ring\n");
return -EFAULT;
}
ep->gsi_mem_info.evt_ring_len =
gsi_evt_ring_props.ring_len;
ep->gsi_mem_info.evt_ring_base_addr =
gsi_evt_ring_props.ring_base_addr;
/* setup channel ring */
memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
gsi_channel_props.prot = GSI_CHAN_PROT_RTK;
if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
else
gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
if (!gsi_ep_info) {
IPAERR("Failed getting GSI EP info for client=%d\n",
ep->client);
result = -EINVAL;
goto fail_get_gsi_ep_info;
} else
gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
gsi_channel_props.prefetch_mode =
gsi_ep_info->prefetch_mode;
gsi_channel_props.empty_lvl_threshold =
gsi_ep_info->prefetch_threshold;
gsi_channel_props.low_weight = 1;
gsi_channel_props.err_cb = ipa_eth_gsi_chan_err_cb;
gsi_channel_props.ring_len = len;
gsi_channel_props.ring_base_addr =
(u64)pipe->info.transfer_ring_base;
result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
&ep->gsi_chan_hdl);
if (result != GSI_STATUS_SUCCESS)
goto fail_get_gsi_ep_info;
ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
ep->gsi_mem_info.chan_ring_base_addr =
gsi_channel_props.ring_base_addr;
/* write event scratch */
memset(&evt_scratch, 0, sizeof(evt_scratch));
/* nothing is needed for RTK event scratch */
/* write ch scratch */
queue_number = pipe->info.client_info.rtk.queue_number;
memset(&ch_scratch, 0, sizeof(ch_scratch));
ch_scratch.rtk.rtk_bar_low =
(u32)bar_addr;
ch_scratch.rtk.rtk_bar_high =
(u32)((u64)(bar_addr) >> 32);
/*
* RX: Queue Number will be as is received from RTK
* (Range 0 - 15).
* TX: Queue Number will be configured to be
* either 16 or 18.
* (For TX Queue 0: Configure 16)
* (For TX Queue 1: Configure 18)
*/
ch_scratch.rtk.queue_number =
(pipe->dir == IPA_ETH_PIPE_DIR_RX) ?
pipe->info.client_info.rtk.queue_number :
(queue_number == 0) ? 16 : 18;
ch_scratch.rtk.fix_buff_size =
ilog2(pipe->info.fix_buffer_size);
ch_scratch.rtk.rtk_buff_addr_low =
(u32)pipe->info.data_buff_list[0].iova;
ch_scratch.rtk.rtk_buff_addr_high =
(u32)((u64)(pipe->info.data_buff_list[0].iova) >> 32);
result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write evt ring scratch\n");
goto fail_write_scratch;
}
return 0;
fail_write_scratch:
gsi_dealloc_channel(ep->gsi_chan_hdl);
ep->gsi_chan_hdl = ~0;
fail_get_gsi_ep_info:
gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ep->gsi_evt_ring_hdl = ~0;
return result;
}
static int ipa3_smmu_map_rtk_pipes(struct ipa_eth_client_pipe_info *pipe,
bool map)
{
struct iommu_domain *smmu_domain;
int result;
int i;
u64 iova;
phys_addr_t pa;
u64 iova_p;
phys_addr_t pa_p;
u32 size_p;
if (pipe->info.fix_buffer_size > PAGE_SIZE) {
IPAERR("invalid data buff size\n");
return -EINVAL;
}
if (ipa3_ctx->s1_bypass_arr[IPA_SMMU_CB_AP]) {
IPADBG("AP SMMU is set to s1 bypass\n");
return 0;
}
smmu_domain = ipa3_get_smmu_domain();
if (!smmu_domain) {
IPAERR("invalid smmu domain\n");
return -EINVAL;
}
result = ipa3_smmu_map_peer_buff(
(u64)pipe->info.transfer_ring_base,
pipe->info.transfer_ring_size,
map,
pipe->info.transfer_ring_sgt,
IPA_SMMU_CB_AP);
if (result) {
IPAERR("failed to %s ntn ring %d\n",
map ? "map" : "unmap", result);
return -EINVAL;
}
for (i = 0; i < pipe->info.data_buff_list_size; i++) {
iova = (u64)pipe->info.data_buff_list[i].iova;
pa = (phys_addr_t)pipe->info.data_buff_list[i].pa;
IPA_SMMU_ROUND_TO_PAGE(iova, pa, pipe->info.fix_buffer_size,
iova_p, pa_p, size_p);
IPADBG_LOW("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
"unmapping", iova_p, &pa_p, size_p);
if (map) {
result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
size_p, IOMMU_READ | IOMMU_WRITE);
if (result)
IPAERR("Fail to map 0x%llx\n", iova);
} else {
result = iommu_unmap(smmu_domain, iova_p, size_p);
if (result != size_p) {
IPAERR("Fail to unmap 0x%llx\n", iova);
goto fail_map_buffer_smmu_enabled;
}
}
}
return 0;
fail_map_buffer_smmu_enabled:
ipa3_smmu_map_peer_buff(
(u64)pipe->info.transfer_ring_base,
pipe->info.transfer_ring_size,
!map,
pipe->info.transfer_ring_sgt,
IPA_SMMU_CB_AP);
return result;
}
int ipa3_eth_rtk_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type)
{
struct ipa3_ep_context *ep;
int ep_idx;
bool vlan_mode;
int result = 0;
u32 gsi_db_addr_low, gsi_db_addr_high;
void __iomem *db_addr;
u32 evt_ring_db_addr_low, evt_ring_db_addr_high, db_val = 0;
int id;
ep_idx = ipa_get_ep_mapping(client_type);
if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) {
IPAERR("undefined client_type\n");
return -EFAULT;
}
/* need enhancement for vlan support on multiple attach */
result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
if (result) {
IPAERR("Could not determine IPA VLAN mode\n");
return result;
}
result = ipa3_smmu_map_rtk_pipes(pipe, true);
if (result) {
IPAERR("failed to map SMMU %d\n", result);
return result;
}
ep = &ipa3_ctx->ep[ep_idx];
memset(ep, 0, offsetof(struct ipa3_ep_context, sys));
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ep->valid = 1;
ep->client = client_type;
result = ipa3_disable_data_path(ep_idx);
if (result) {
IPAERR("disable data path failed res=%d clnt=%d.\n", result,
ep_idx);
goto disable_data_path_fail;
}
ep->cfg.nat.nat_en = IPA_CLIENT_IS_PROD(client_type) ?
IPA_SRC_NAT : IPA_BYPASS_NAT;
ep->cfg.hdr.hdr_len = vlan_mode ? VLAN_ETH_HLEN : ETH_HLEN;
ep->cfg.mode.mode = IPA_BASIC;
if (IPA_CLIENT_IS_CONS(client_type)) {
ep->cfg.aggr.aggr_en = IPA_ENABLE_AGGR;
ep->cfg.aggr.aggr = IPA_GENERIC;
ep->cfg.aggr.aggr_byte_limit = IPA_ETH_AGGR_BYTE_LIMIT;
ep->cfg.aggr.aggr_pkt_limit = IPA_ETH_AGGR_PKT_LIMIT;
ep->cfg.aggr.aggr_hard_byte_limit_en = IPA_ENABLE_AGGR;
} else {
ep->client_notify = pipe->info.notify;
ep->priv = pipe->info.priv;
}
if (ipa3_cfg_ep(ep_idx, &ep->cfg)) {
IPAERR("fail to setup rx pipe cfg\n");
goto cfg_ep_fail;
}
if (IPA_CLIENT_IS_PROD(client_type))
ipa3_install_dflt_flt_rules(ep_idx);
IPADBG("client %d (ep: %d) connected\n", client_type,
ep_idx);
if (ipa_eth_setup_rtk_gsi_channel(pipe, ep)) {
IPAERR("fail to setup eth gsi rx channel\n");
result = -EFAULT;
goto setup_rtk_gsi_ch_fail;
}
if (gsi_query_channel_db_addr(ep->gsi_chan_hdl,
&gsi_db_addr_low, &gsi_db_addr_high)) {
IPAERR("failed to query gsi rx db addr\n");
result = -EFAULT;
goto query_ch_db_fail;
}
/* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
if (IPA_CLIENT_IS_PROD(client_type)) {
/* Rx: Initialize to ring base (i.e point 6) */
db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
} else {
/* TX: Initialize to end of ring */
db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
db_val += (u32)ep->gsi_mem_info.chan_ring_len;
}
iowrite32(db_val, db_addr);
iounmap(db_addr);
gsi_query_evt_ring_db_addr(ep->gsi_evt_ring_hdl,
&evt_ring_db_addr_low, &evt_ring_db_addr_high);
IPADBG("evt_ring_hdl %lu, db_addr_low %u db_addr_high %u\n",
ep->gsi_evt_ring_hdl, evt_ring_db_addr_low,
evt_ring_db_addr_high);
/* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
/*
* IPA/GSI driver should ring the event DB once after
* initialization of the event, with a value that is
* outside of the ring range. Eg: ring base = 0x1000,
* ring size = 0x100 => AP can write value > 0x1100
* into the doorbell address. Eg: 0x 1110.
* Use event ring base addr + event ring size + 1 element size.
*/
db_val = (u32)ep->gsi_mem_info.evt_ring_base_addr;
db_val += (u32)ep->gsi_mem_info.evt_ring_len;
db_val += GSI_EVT_RING_RE_SIZE_16B;
iowrite32(db_val, db_addr);
iounmap(db_addr);
if (IPA_CLIENT_IS_PROD(client_type)) {
/* RX mailbox */
pipe->info.db_pa = ipa3_ctx->ipa_wrapper_base +
ipahal_get_reg_base() +
ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_ETH_MBOX_M,
IPA_ETH_RX_MBOX_N);
pipe->info.db_val = IPA_ETH_RX_MBOX_VAL;
} else {
/* TX mailbox */
pipe->info.db_pa = ipa3_ctx->ipa_wrapper_base +
ipahal_get_reg_base() +
ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
IPA_ETH_MBOX_M,
IPA_ETH_TX_MBOX_N);
pipe->info.db_val = IPA_ETH_TX_MBOX_VAL;
}
/* enable data path */
result = ipa3_enable_data_path(ep_idx);
if (result) {
IPAERR("enable data path failed res=%d clnt=%d\n", result,
ep_idx);
goto enable_data_path_fail;
}
/* start gsi channel */
result = gsi_start_channel(ep->gsi_chan_hdl);
if (result) {
IPAERR("failed to start gsi tx channel\n");
goto start_channel_fail;
}
id = (pipe->dir == IPA_ETH_PIPE_DIR_TX) ? 1 : 0;
/* start uC gsi dbg stats monitor */
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].ch_id
= ep->gsi_chan_hdl;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].dir
= pipe->dir;
ipa3_uc_debug_stats_alloc(
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK]);
}
ipa3_eth_save_client_mapping(pipe, client_type,
id, ep_idx, ep->gsi_chan_hdl);
result = ipa3_eth_config_uc(true,
IPA_HW_PROTOCOL_RTK,
(pipe->dir == IPA_ETH_PIPE_DIR_TX)
? IPA_ETH_TX : IPA_ETH_RX,
ep->gsi_chan_hdl);
if (result) {
IPAERR("failed to config uc\n");
goto config_uc_fail;
}
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0;
config_uc_fail:
/* stop uC gsi dbg stats monitor */
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].ch_id
= 0xff;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].dir
= pipe->dir;
ipa3_uc_debug_stats_alloc(
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK]);
}
ipa3_stop_gsi_channel(ep_idx);
start_channel_fail:
ipa3_disable_data_path(ep_idx);
enable_data_path_fail:
query_ch_db_fail:
setup_rtk_gsi_ch_fail:
cfg_ep_fail:
disable_data_path_fail:
ipa3_smmu_map_rtk_pipes(pipe, false);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}
EXPORT_SYMBOL(ipa3_eth_rtk_connect);
int ipa3_eth_aqc_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type)
{
return 0;
}
EXPORT_SYMBOL(ipa3_eth_aqc_connect);
int ipa3_eth_emac_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type)
{
return 0;
}
EXPORT_SYMBOL(ipa3_eth_emac_connect);
int ipa3_eth_rtk_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type)
{
int result = 0;
struct ipa3_ep_context *ep;
int ep_idx;
int id;
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ep_idx = ipa_get_ep_mapping(client_type);
if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) {
IPAERR("undefined client_type\n");
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -EFAULT;
}
ep = &ipa3_ctx->ep[ep_idx];
/* disable data path */
result = ipa3_disable_data_path(ep_idx);
if (result) {
IPAERR("enable data path failed res=%d clnt=%d.\n", result,
ep_idx);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return -EFAULT;
}
id = (pipe->dir == IPA_ETH_PIPE_DIR_TX) ? 1 : 0;
/* stop uC gsi dbg stats monitor */
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5) {
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].ch_id
= 0xff;
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK].ch_id_info[id].dir
= pipe->dir;
ipa3_uc_debug_stats_alloc(
ipa3_ctx->gsi_info[IPA_HW_PROTOCOL_RTK]);
}
/* stop gsi channel */
result = ipa3_stop_gsi_channel(ep_idx);
if (result) {
IPAERR("failed to stop gsi channel %d\n", ep_idx);
result = -EFAULT;
ipa_assert();
goto fail;
}
result = ipa3_eth_config_uc(false,
IPA_HW_PROTOCOL_RTK,
(pipe->dir == IPA_ETH_PIPE_DIR_TX)
? IPA_ETH_TX : IPA_ETH_RX,
ep->gsi_chan_hdl);
if (result)
IPAERR("failed to config uc\n");
/* tear down pipe */
result = ipa3_reset_gsi_channel(ep_idx);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to reset gsi channel: %d.\n", result);
ipa_assert();
goto fail;
}
result = gsi_reset_evt_ring(ep->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to reset evt ring: %d.\n", result);
ipa_assert();
goto fail;
}
result = ipa3_release_gsi_channel(ep_idx);
if (result) {
IPAERR("failed to release gsi channel: %d\n", result);
ipa_assert();
goto fail;
}
memset(ep, 0, sizeof(struct ipa3_ep_context));
IPADBG("client (ep: %d) disconnected\n", ep_idx);
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_5)
ipa3_uc_debug_stats_dealloc(IPA_HW_PROTOCOL_RTK);
if (IPA_CLIENT_IS_PROD(client_type))
ipa3_delete_dflt_flt_rules(ep_idx);
/* unmap the pipe */
result = ipa3_smmu_map_rtk_pipes(pipe, false);
if (result)
IPAERR("failed to unmap SMMU %d\n", result);
fail:
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return result;
}
EXPORT_SYMBOL(ipa3_eth_rtk_disconnect);
int ipa3_eth_aqc_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type)
{
return 0;
}
EXPORT_SYMBOL(ipa3_eth_aqc_disconnect);
int ipa3_eth_emac_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type)
{
return 0;
}
EXPORT_SYMBOL(ipa3_eth_emac_disconnect);

View File

@@ -1442,6 +1442,12 @@ struct ipa3_stats {
#define IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF (12)
#define IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF (16)
#define IPA3_UC_DEBUG_STATS_OFF (20)
#define IPA3_UC_DEBUG_STATS_TRCOUNT_OFF (20)
#define IPA3_UC_DEBUG_STATS_ERCOUNT_OFF (24)
#define IPA3_UC_DEBUG_STATS_AOSCOUNT_OFF (28)
#define IPA3_UC_DEBUG_STATS_BUSYTIME_OFF (32)
#define IPA3_UC_DEBUG_STATS_RTK_OFF (40)
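Taken together, the offsets above imply a 40-byte per-channel debug-stats block for RTK; an illustrative layout follows (an assumption for readability only, since the authoritative layout is defined by the uC firmware):

struct rtk_uc_dbg_stats_layout {	/* hypothetical mirror of the offsets above */
	u32 ringFull;		/* +0 */
	u32 ringEmpty;		/* +4 */
	u32 ringUsageHigh;	/* +8 */
	u32 ringUsageLow;	/* +12 */
	u32 RingUtilCount;	/* +16 */
	u32 trCount;		/* +20 */
	u32 erCount;		/* +24 */
	u32 totalAosCount;	/* +28 */
	u32 busyTimeLow;	/* +32, combined into a u64 by the reader */
	u32 busyTimeHigh;	/* +36 */
};				/* sizeof() == IPA3_UC_DEBUG_STATS_RTK_OFF (40) */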
/**
* struct ipa3_uc_dbg_stats - uC dbg stats for offloading
@@ -1655,6 +1661,12 @@ struct ipa3_aqc_ctx {
struct ipa3_uc_dbg_stats dbg_stats;
};
/**
* struct ipa3_rtk_ctx - IPA rtk context
*/
struct ipa3_rtk_ctx {
struct ipa3_uc_dbg_stats dbg_stats;
};
/**
* struct ipa3_transport_pm - transport power management related members
@@ -1830,6 +1842,26 @@ struct ipa3_app_clock_vote {
u32 cnt;
};
struct ipa_eth_client_mapping {
enum ipa_client_type type;
int pipe_id;
int pipe_hdl;
int ch_id;
bool valid;
};
struct ipa3_eth_info {
u8 num_ch;
struct ipa_eth_client_mapping map[IPA_MAX_CH_STATS_SUPPORTED];
};
struct ipa3_eth_error_stats {
int rp;
int wp;
u32 err;
};
/**
* struct ipa3_context - IPA context
* @cdev: cdev context
@@ -1937,6 +1969,7 @@ struct ipa3_app_clock_vote {
* @rmnet_ctl_enable: enable pipe support fow low latency data
* @gsi_fw_file_name: GSI IPA fw file name
* @uc_fw_file_name: uC IPA fw file name
* @eth_info: ethernet client mapping
*/
struct ipa3_context {
struct ipa3_char_device_context cdev;
@@ -2098,6 +2131,7 @@ struct ipa3_context {
struct ipa3_usb_ctx usb_ctx;
struct ipa3_mhip_ctx mhip_ctx;
struct ipa3_aqc_ctx aqc_ctx;
struct ipa3_rtk_ctx rtk_ctx;
atomic_t ipa_clk_vote;
int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock);
@@ -2127,6 +2161,8 @@ struct ipa3_context {
bool rmnet_ctl_enable;
char *gsi_fw_file_name;
char *uc_fw_file_name;
struct ipa3_eth_info
eth_info[IPA_ETH_CLIENT_MAX][IPA_ETH_INST_ID_MAX];
};
struct ipa3_plat_drv_res {
@@ -2670,6 +2706,7 @@ int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa3_get_smem_restr_bytes(void);
int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);
@@ -2810,6 +2847,8 @@ struct ipa3_rt_tbl *__ipa3_find_rt_tbl(enum ipa_ip_type ip, const char *name);
int ipa3_set_single_ndp_per_mbim(bool enable);
void ipa3_debugfs_init(void);
void ipa3_debugfs_remove(void);
void ipa3_eth_debugfs_init(void);
void ipa3_eth_debugfs_add(struct ipa_eth_client *client);
void ipa3_dump_buff_internal(void *base, dma_addr_t phy_base, u32 size);
#ifdef IPA_DEBUG
@@ -3116,8 +3155,30 @@ void ipa_eth_exit(void);
#else
static inline int ipa_eth_init(void) { return 0; }
static inline void ipa_eth_exit(void) { }
#endif // CONFIG_IPA_ETH
#endif
void ipa3_eth_debugfs_add_node(struct ipa_eth_client *client);
int ipa3_eth_rtk_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa3_eth_aqc_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa3_eth_emac_connect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa3_eth_rtk_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa3_eth_aqc_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa3_eth_emac_disconnect(
struct ipa_eth_client_pipe_info *pipe,
enum ipa_client_type client_type);
int ipa3_eth_client_conn_evt(struct ipa_ecm_msg *msg);
int ipa3_eth_client_disconn_evt(struct ipa_ecm_msg *msg);
void ipa3_eth_get_status(u32 client, int scratch_id,
struct ipa3_eth_error_stats *stats);
int ipa3_get_gsi_chan_info(struct gsi_chan_info *gsi_chan_info,
unsigned long chan_hdl);

View File

@@ -3239,23 +3239,23 @@ int ipa3_get_mhip_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_MHIP_CHANNELS; i++) {
stats->ring[i].ringFull = ioread32(
stats->u.ring[i].ringFull = ioread32(
ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->ring[i].ringEmpty = ioread32(
stats->u.ring[i].ringEmpty = ioread32(
ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->ring[i].ringUsageHigh = ioread32(
stats->u.ring[i].ringUsageHigh = ioread32(
ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->ring[i].ringUsageLow = ioread32(
stats->u.ring[i].ringUsageLow = ioread32(
ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->ring[i].RingUtilCount = ioread32(
stats->u.ring[i].RingUtilCount = ioread32(
ipa3_ctx->mhip_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);

View File

@@ -409,6 +409,17 @@ static void ipa3_uc_save_dbg_stats(u32 size)
} else
goto unmap;
break;
case IPA_HW_PROTOCOL_RTK:
if (!ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio) {
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_size =
size;
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_ofst =
addr_offset;
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio =
mmio;
} else
goto unmap;
break;
case IPA_HW_PROTOCOL_WDI:
if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size =
@@ -1484,6 +1495,10 @@ int ipa3_uc_debug_stats_dealloc(uint32_t prot_id)
iounmap(ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio);
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
break;
case IPA_HW_PROTOCOL_RTK:
iounmap(ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio);
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
break;
case IPA_HW_PROTOCOL_WDI:
iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio);
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;

View File

@@ -24,6 +24,7 @@
#define DIR_PRODUCER 1
#define MAX_AQC_CHANNELS 2
#define MAX_RTK_CHANNELS 2
#define MAX_11AD_CHANNELS 5
#define MAX_WDI2_CHANNELS 2
#define MAX_WDI3_CHANNELS 2
@@ -81,6 +82,7 @@ enum ipa3_hw_features {
* @IPA_HW_PROTOCOL_ETH : protocol related to ETH operation in IPA HW
* @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW
* @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW
* @IPA_HW_PROTOCOL_RTK : protocol related to RTK operation in IPA HW
*/
enum ipa4_hw_protocol {
IPA_HW_PROTOCOL_COMMON = 0x0,
@@ -91,6 +93,7 @@ enum ipa4_hw_protocol {
IPA_HW_PROTOCOL_ETH = 0x5,
IPA_HW_PROTOCOL_MHIP = 0x6,
IPA_HW_PROTOCOL_USB = 0x7,
IPA_HW_PROTOCOL_RTK = 0x9,
IPA_HW_PROTOCOL_MAX
};
@@ -589,7 +592,75 @@ struct IpaHw11adInitCmdData_t {
*/
struct IpaHw11adDeinitCmdData_t {
u32 reserved;
};
} __packed;
/**
* struct IpaHwRtkSetupCmdData_t - rtk setup channel command data
* @dir: Direction RX/TX
* @gsi_ch: GSI Channel number
* @reserved: 16 bits padding
*/
struct IpaHwRtkSetupCmdData_t {
uint8_t dir;
uint8_t gsi_ch;
uint16_t reserved;
} __packed;
/**
* struct IpaHwRtkCommonChCmdData_t - rtk tear down channel command data
* @gsi_ch: GSI Channel number
* @reserved_0: padding
* @reserved_1: padding
*/
struct IpaHwRtkCommonChCmdData_t {
uint8_t gsi_ch;
uint8_t reserved_0;
uint16_t reserved_1;
} __packed;
/**
* struct IpaHwAQCInitCmdData_t - AQC peripheral init command data
* @periph_baddr_lsb: Peripheral Base Address LSB (pa/IOVA)
* @periph_baddr_msb: Peripheral Base Address MSB (pa/IOVA)
*/
struct IpaHwAQCInitCmdData_t {
u32 periph_baddr_lsb;
u32 periph_baddr_msb;
} __packed;
/**
* struct IpaHwAQCDeinitCmdData_t - AQC peripheral deinit command data
* @reserved: Reserved for future
*/
struct IpaHwAQCDeinitCmdData_t {
u32 reserved;
} __packed;
/**
* struct IpaHwAQCSetupCmdData_t - AQC setup channel command data
* @dir: Direction RX/TX
* @aqc_ch: aqc channel number
* @gsi_ch: GSI Channel number
* @reserved: 8 bits padding
*/
struct IpaHwAQCSetupCmdData_t {
u8 dir;
u8 aqc_ch;
u8 gsi_ch;
u8 reserved;
} __packed;
/**
* struct IpaHwAQCCommonChCmdData_t - AQC tear down channel command data
* @gsi_ch: GSI Channel number
* @reserved_0: padding
* @reserved_1: padding
*/
struct IpaHwAQCCommonChCmdData_t {
u8 gsi_ch;
u8 reserved_0;
u16 reserved_1;
} __packed;
/**
* struct IpaHwSetUpCmd - Structure holding the parameters
@@ -599,7 +670,9 @@ struct IpaHw11adDeinitCmdData_t {
*/
union IpaHwSetUpCmd {
struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
struct IpaHw11adSetupCmdData_t W11AdSetupCh_params;
struct IpaHwAQCSetupCmdData_t AqcSetupCh_params;
struct IpaHw11adSetupCmdData_t W11AdSetupCh_params;
struct IpaHwRtkSetupCmdData_t RtkSetupCh_params;
} __packed;
struct IpaHwOffloadSetUpCmdData_t {
@@ -662,6 +735,8 @@ struct IpaHwOffloadSetUpCmdData_t_v4_0 {
*/
union IpaHwCommonChCmd {
union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
struct IpaHwAQCCommonChCmdData_t AqcCommonCh_params;
struct IpaHwRtkCommonChCmdData_t RtkCommonCh_params;
struct IpaHw11adCommonChCmdData_t W11AdCommonCh_params;
} __packed;
@@ -743,6 +818,7 @@ struct IpaHwOffloadCommonChCmdData_t_v4_0 {
*/
union IpaHwPeripheralInitCmd {
struct IpaHw11adInitCmdData_t W11AdInit_params;
struct IpaHwAQCInitCmdData_t AqcInit_params;
} __packed;
struct IpaHwPeripheralInitCmdData_t {
@@ -757,6 +833,7 @@ struct IpaHwPeripheralInitCmdData_t {
*/
union IpaHwPeripheralDeinitCmd {
struct IpaHw11adDeinitCmdData_t W11AdDeinit_params;
struct IpaHwAQCDeinitCmdData_t AqcDeinit_params;
} __packed;
struct IpaHwPeripheralDeinitCmdData_t {

View File

@@ -430,23 +430,23 @@ int ipa3_get_wdi_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_WDI2_CHANNELS; i++) {
stats->ring[i].ringFull = ioread32(
stats->u.ring[i].ringFull = ioread32(
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->ring[i].ringEmpty = ioread32(
stats->u.ring[i].ringEmpty = ioread32(
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->ring[i].ringUsageHigh = ioread32(
stats->u.ring[i].ringUsageHigh = ioread32(
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->ring[i].ringUsageLow = ioread32(
stats->u.ring[i].ringUsageLow = ioread32(
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->ring[i].RingUtilCount = ioread32(
stats->u.ring[i].RingUtilCount = ioread32(
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);

View File

@@ -2341,6 +2341,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 8, 2, 27, 32, IPA_EE_Q6, GSI_FREE_PRE_FETCH, 3 }, IPA_TX_INSTANCE_NA },
[IPA_4_5][IPA_CLIENT_RTK_ETHERNET_PROD] = {
true, IPA_v4_5_GROUP_UL_DL,
true,
IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
QMB_MASTER_SELECT_DDR,
{ 10, 13, 8, 16, IPA_EE_AP, GSI_ESCAPE_BUF_ONLY, 0 }, IPA_TX_INSTANCE_NA },
/* Only for test purpose */
[IPA_4_5][IPA_CLIENT_TEST_PROD] = {
true, IPA_v4_5_GROUP_UL_DL,
@@ -2457,6 +2463,12 @@ static const struct ipa_ep_configuration ipa3_ep_mapping
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 18, 4, 9, 9, IPA_EE_Q6, GSI_ESCAPE_BUF_ONLY, 0 }, IPA_TX_INSTANCE_NA },
[IPA_4_5][IPA_CLIENT_RTK_ETHERNET_CONS] = {
true, IPA_v4_5_GROUP_UL_DL,
false,
IPA_DPS_HPS_SEQ_TYPE_INVALID,
QMB_MASTER_SELECT_DDR,
{ 23, 8, 9, 9, IPA_EE_AP, GSI_SMART_PRE_FETCH, 4 }, IPA_TX_INSTANCE_NA },
/* Only for test purpose */
/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
[IPA_4_5][IPA_CLIENT_TEST_CONS] = {
@@ -7004,7 +7016,9 @@ int ipa3_write_qmap_id(struct ipa_ioc_write_qmapid *param_in)
param_in->client == IPA_CLIENT_HSIC1_PROD ||
param_in->client == IPA_CLIENT_ODU_PROD ||
param_in->client == IPA_CLIENT_ETHERNET_PROD ||
param_in->client == IPA_CLIENT_WIGIG_PROD) {
param_in->client == IPA_CLIENT_WIGIG_PROD ||
param_in->client == IPA_CLIENT_AQC_ETHERNET_PROD ||
param_in->client == IPA_CLIENT_RTK_ETHERNET_PROD) {
result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
} else if (param_in->client == IPA_CLIENT_WLAN1_PROD ||
param_in->client == IPA_CLIENT_WLAN2_PROD) {
@@ -10250,6 +10264,10 @@ void ipa3_get_gsi_stats(int prot_id,
stats->num_ch = MAX_AQC_CHANNELS;
ipa3_get_aqc_gsi_stats(stats);
break;
case IPA_HW_PROTOCOL_RTK:
stats->num_ch = MAX_RTK_CHANNELS;
ipa3_get_rtk_gsi_stats(stats);
break;
case IPA_HW_PROTOCOL_11ad:
break;
case IPA_HW_PROTOCOL_WDI:
@@ -10371,6 +10389,10 @@ int ipa3_get_prot_id(enum ipa_client_type client)
case IPA_CLIENT_AQC_ETHERNET_PROD:
prot_id = IPA_HW_PROTOCOL_AQC;
break;
case IPA_CLIENT_RTK_ETHERNET_CONS:
case IPA_CLIENT_RTK_ETHERNET_PROD:
prot_id = IPA_HW_PROTOCOL_RTK;
break;
case IPA_CLIENT_MHI_PRIME_TETH_PROD:
case IPA_CLIENT_MHI_PRIME_TETH_CONS:
case IPA_CLIENT_MHI_PRIME_RMNET_PROD:
@@ -10407,3 +10429,21 @@ int ipa3_get_prot_id(enum ipa_client_type client)
return prot_id;
}
void ipa3_eth_get_status(u32 client, int scratch_id,
struct ipa3_eth_error_stats *stats)
{
int ch_id;
int ipa_ep_idx;
ipa_ep_idx = ipa3_get_ep_mapping(client);
if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
return;
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
stats->rp = gsi_get_refetch_reg(ch_id, true);
stats->wp = gsi_get_refetch_reg(ch_id, false);
stats->err = gsi_get_drop_stats(ipa_ep_idx, scratch_id);
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
}

View File

@@ -994,23 +994,23 @@ int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_WDI3_CHANNELS; i++) {
stats->ring[i].ringFull = ioread32(
stats->u.ring[i].ringFull = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->ring[i].ringEmpty = ioread32(
stats->u.ring[i].ringEmpty = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->ring[i].ringUsageHigh = ioread32(
stats->u.ring[i].ringUsageHigh = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->ring[i].ringUsageLow = ioread32(
stats->u.ring[i].ringUsageLow = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->ring[i].RingUtilCount = ioread32(
stats->u.ring[i].RingUtilCount = ioread32(
ipa3_ctx->wdi3_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);