Merge "msm: ipa: add unit tests for NTN3 offload"

This commit is contained in:
qctecmdr
2021-05-04 19:03:59 -07:00
committed by Gerrit - the friendly Code Review server
12 changed files with 1993 additions and 146 deletions

View File

@@ -2416,6 +2416,7 @@ static void gsi_program_chan_ctx(struct gsi_chan_props *props, unsigned int ee,
case GSI_CHAN_PROT_11AD: case GSI_CHAN_PROT_11AD:
case GSI_CHAN_PROT_RTK: case GSI_CHAN_PROT_RTK:
case GSI_CHAN_PROT_QDSS: case GSI_CHAN_PROT_QDSS:
case GSI_CHAN_PROT_NTN:
ch_k_cntxt_0.chtype_protocol_msb = 1; ch_k_cntxt_0.chtype_protocol_msb = 1;
break; break;
default: default:
@@ -4936,25 +4937,81 @@ int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp)
} }
EXPORT_SYMBOL(gsi_get_refetch_reg); EXPORT_SYMBOL(gsi_get_refetch_reg);
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id) int gsi_get_drop_stats(unsigned long ep_id, int scratch_id,
unsigned long chan_hdl)
{ {
/* RTK use scratch 5 */ #define GSI_RTK_ERR_STATS_MASK 0xFFFF
if (scratch_id == 5) { #define GSI_NTN_ERR_STATS_MASK 0xFFFFFFFF
/* #define GSI_AQC_RX_STATUS_MASK 0x1FFF
* each channel context is 6 lines of 8 bytes, but n in SHRAM_n #define GSI_AQC_RX_STATUS_SHIFT 0
* is in 4 bytes offsets, so multiplying ep_id by 6*2=12 will #define GSI_AQC_RDM_ERR_MASK 0x1FFF0000
* give the beginning of the required channel context, and then #define GSI_AQC_RDM_ERR_SHIFT 16
* need to add 7 since the channel context layout has the ring
* rbase (8 bytes) + channel scratch 0-4 (20 bytes) so adding uint16_t rx_status;
* additional 28/4 = 7 to get to scratch 5 of the required uint16_t rdm_err;
* channel. uint32_t val;
*/
gsihal_read_reg_n(GSI_GSI_SHRAM_n, ep_id * 12 + 7); /* on newer versions we can read the ch scratch directly from reg */
if (gsi_ctx->per.ver >= GSI_VER_3_0) {
switch (scratch_id) {
case 5:
return gsihal_read_reg_nk(
GSI_EE_n_GSI_CH_k_SCRATCH_5,
gsi_ctx->per.ee,
chan_hdl) & GSI_RTK_ERR_STATS_MASK;
break;
case 6:
return gsihal_read_reg_nk(
GSI_EE_n_GSI_CH_k_SCRATCH_6,
gsi_ctx->per.ee,
chan_hdl) & GSI_NTN_ERR_STATS_MASK;
break;
case 7:
val = gsihal_read_reg_nk(
GSI_EE_n_GSI_CH_k_SCRATCH_7,
gsi_ctx->per.ee,
chan_hdl);
rx_status = (val & GSI_AQC_RX_STATUS_MASK)
>> GSI_AQC_RX_STATUS_SHIFT;
rdm_err = (val & GSI_AQC_RDM_ERR_MASK)
>> (GSI_AQC_RDM_ERR_SHIFT);
return rx_status + rdm_err;
break;
default:
GSIERR("invalid scratch id %d\n", scratch_id);
return 0;
}
/* on older versions we need to read the scratch from SHRAM */
} else {
/* RTK use scratch 5 */
if (scratch_id == 5) {
/*
* each channel context is 6 lines of 8 bytes, but n in
* SHRAM_n is in 4 bytes offsets, so multiplying ep_id
* by 6*2=12 will give the beginning of the required
* channel context, and then need to add 7 since the
* channel context layout has the ring rbase (8 bytes)
* + channel scratch 0-4 (20 bytes) so adding
* additional 28/4 = 7 to get to scratch 5 of the
* required channel.
*/
return gsihal_read_reg_n(
GSI_GSI_SHRAM_n,
ep_id * 12 + 7) & GSI_RTK_ERR_STATS_MASK;
}
} }
return 0; return 0;
} }
EXPORT_SYMBOL(gsi_get_drop_stats); EXPORT_SYMBOL(gsi_get_drop_stats);
/*
 * gsi_get_wp() - read the channel write pointer (CH_k_CNTXT_6 register)
 * for the given channel handle; exported for clients collecting stats.
 */
int gsi_get_wp(unsigned long chan_hdl)
{
return gsihal_read_reg_nk(GSI_EE_n_GSI_CH_k_CNTXT_6, gsi_ctx->per.ee,
chan_hdl);
}
EXPORT_SYMBOL(gsi_get_wp);
void gsi_wdi3_dump_register(unsigned long chan_hdl) void gsi_wdi3_dump_register(unsigned long chan_hdl)
{ {
uint32_t val; uint32_t val;

View File

@@ -144,6 +144,7 @@ enum gsi_evt_chtype {
GSI_EVT_CHTYPE_AQC_EV = 0x8, GSI_EVT_CHTYPE_AQC_EV = 0x8,
GSI_EVT_CHTYPE_11AD_EV = 0x9, GSI_EVT_CHTYPE_11AD_EV = 0x9,
GSI_EVT_CHTYPE_RTK_EV = 0xC, GSI_EVT_CHTYPE_RTK_EV = 0xC,
GSI_EVT_CHTYPE_NTN_EV = 0xD,
}; };
enum gsi_evt_ring_elem_size { enum gsi_evt_ring_elem_size {
@@ -227,6 +228,7 @@ enum gsi_chan_prot {
GSI_CHAN_PROT_MHIC = 0xA, GSI_CHAN_PROT_MHIC = 0xA,
GSI_CHAN_PROT_QDSS = 0xB, GSI_CHAN_PROT_QDSS = 0xB,
GSI_CHAN_PROT_RTK = 0xC, GSI_CHAN_PROT_RTK = 0xC,
GSI_CHAN_PROT_NTN = 0xD,
}; };
enum gsi_max_prefetch { enum gsi_max_prefetch {
@@ -1029,6 +1031,26 @@ union __packed gsi_wdi3_channel_scratch2_reg {
uint32_t reserved2 : 23; uint32_t reserved2 : 23;
}; };
/**
 * gsi_ntn_channel_scratch - NTN SW config area of
 * channel scratch
 *
 * @buff_addr_lsb: NTN buffer address LSB (lower 32 bits)
 * @buff_addr_msb: NTN buffer address MSB (upper 8 bits of the address)
 * @fix_buff_size: fixed buffer size, expressed as log2 of the size
 * @ioc_mod_threshold: the threshold for IOC moderation (TX direction only)
 *
 * reserved1/2/3/4 pad the layout to the full scratch size; callers zero
 * the whole scratch (memset) before programming these fields.
 */
struct __packed gsi_ntn_channel_scratch {
uint32_t buff_addr_lsb;
uint32_t buff_addr_msb : 8;
uint32_t fix_buff_size : 4;
uint32_t reserved1 : 20;
uint32_t ioc_mod_threshold : 16;
uint32_t reserved2 : 16;
uint32_t reserved3;
uint32_t reserved4;
};
/** /**
* gsi_channel_scratch - channel scratch SW config area * gsi_channel_scratch - channel scratch SW config area
* *
@@ -1046,6 +1068,7 @@ union __packed gsi_channel_scratch {
struct __packed gsi_wdi2_channel_scratch_new wdi2_new; struct __packed gsi_wdi2_channel_scratch_new wdi2_new;
struct __packed gsi_aqc_channel_scratch aqc; struct __packed gsi_aqc_channel_scratch aqc;
struct __packed gsi_rtk_channel_scratch rtk; struct __packed gsi_rtk_channel_scratch rtk;
struct __packed gsi_ntn_channel_scratch ntn;
struct __packed gsi_qdss_channel_scratch qdss; struct __packed gsi_qdss_channel_scratch qdss;
struct __packed { struct __packed {
uint32_t word1; uint32_t word1;
@@ -2238,8 +2261,17 @@ int gsi_get_refetch_reg(unsigned long chan_hdl, bool is_rp);
* *
* @ep_id: ep index * @ep_id: ep index
* @scratch_id: drop stats on which scratch register * @scratch_id: drop stats on which scratch register
* @chan_hdl: gsi channel handle
*/ */
int gsi_get_drop_stats(unsigned long ep_id, int scratch_id); int gsi_get_drop_stats(unsigned long ep_id, int scratch_id,
unsigned long chan_hdl);
/**
* gsi_get_wp - get channel write pointer for stats
*
* @chan_hdl: gsi channel handle
*/
int gsi_get_wp(unsigned long chan_hdl);
/** /**
* gsi_wdi3_dump_register - dump wdi3 related gsi registers * gsi_wdi3_dump_register - dump wdi3 related gsi registers

View File

@@ -47,7 +47,7 @@ ipam-$(CONFIG_IPA3_REGDUMP) += ipa_v3/dump/ipa_reg_dump.o
ipam-$(CONFIG_IPA_UT) += test/ipa_ut_framework.o test/ipa_test_example.o \ ipam-$(CONFIG_IPA_UT) += test/ipa_ut_framework.o test/ipa_test_example.o \
test/ipa_test_mhi.o test/ipa_test_dma.o \ test/ipa_test_mhi.o test/ipa_test_dma.o \
test/ipa_test_hw_stats.o test/ipa_pm_ut.o \ test/ipa_test_hw_stats.o test/ipa_pm_ut.o \
test/ipa_test_wdi3.o test/ipa_test_wdi3.o test/ipa_test_ntn.o
ipanetm-y += ipa_v3/ipa_net.o ipanetm-y += ipa_v3/ipa_net.o

View File

@@ -376,7 +376,7 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
enum ipa_smmu_cb_type cb_type) enum ipa_smmu_cb_type cb_type)
{ {
struct iommu_domain *smmu_domain; struct iommu_domain *smmu_domain;
int res; int res, ret = 0;
phys_addr_t phys; phys_addr_t phys;
unsigned long va; unsigned long va;
struct scatterlist *sg; struct scatterlist *sg;
@@ -417,7 +417,8 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
res = ipa3_iommu_map(smmu_domain, va, phys, res = ipa3_iommu_map(smmu_domain, va, phys,
len, IOMMU_READ | IOMMU_WRITE); len, IOMMU_READ | IOMMU_WRITE);
if (res) { if (res) {
IPAERR("Fail to map pa=%pa\n", &phys); IPAERR("Fail to map pa=%pa, va 0x%X\n",
&phys, va);
return -EINVAL; return -EINVAL;
} }
va += len; va += len;
@@ -437,18 +438,39 @@ int ipa3_smmu_map_peer_buff(u64 iova, u32 size, bool map, struct sg_table *sgt,
} }
} }
} else { } else {
res = iommu_unmap(smmu_domain, if (sgt != NULL) {
rounddown(iova, PAGE_SIZE), va = rounddown(iova, PAGE_SIZE);
roundup(size + iova - rounddown(iova, PAGE_SIZE), for_each_sg(sgt->sgl, sg, sgt->nents, i)
PAGE_SIZE)); {
if (res != roundup(size + iova - rounddown(iova, PAGE_SIZE), page = sg_page(sg);
PAGE_SIZE)) { phys = page_to_phys(page);
IPAERR("Fail to unmap 0x%llx\n", iova); len = PAGE_ALIGN(sg->offset + sg->length);
return -EINVAL; res = iommu_unmap(smmu_domain, va, len);
if (res != len) {
IPAERR(
"Fail to unmap pa=%pa, va 0x%X, res %d\n"
, &phys, va, res);
ret = -EINVAL;
}
va += len;
count++;
}
} else {
res = iommu_unmap(smmu_domain,
rounddown(iova, PAGE_SIZE),
roundup(
size + iova - rounddown(iova, PAGE_SIZE),
PAGE_SIZE));
if (res != roundup(
size + iova - rounddown(iova, PAGE_SIZE),
PAGE_SIZE)) {
IPAERR("Fail to unmap 0x%llx\n", iova);
return -EINVAL;
}
} }
} }
IPADBG("Peer buff %s 0x%llx\n", map ? "map" : "unmap", iova); IPADBG("Peer buff %s 0x%llx\n", map ? "map" : "unmap", iova);
return 0; return ret;
} }
EXPORT_SYMBOL(ipa3_smmu_map_peer_buff); EXPORT_SYMBOL(ipa3_smmu_map_peer_buff);
@@ -1992,6 +2014,32 @@ int ipa3_clear_endpoint_delay(u32 clnt_hdl)
return 0; return 0;
} }
/*
 * ipa3_get_gsi_ring_stats() - read one ring's debug counters from the
 * uC debug-stats mmio area into @ring.
 * @ring: output stats structure
 * @ctx_stats: protocol context holding the mapped uc_dbg_stats_mmio area
 * @idx: ring index within the debug-stats area
 */
static void ipa3_get_gsi_ring_stats(struct IpaHwRingStats_t *ring,
	struct ipa3_uc_dbg_stats *ctx_stats, int idx)
{
	/* byte offset of ring #idx inside the debug-stats area */
	const int ring_off = idx * IPA3_UC_DEBUG_STATS_OFF;

	ring->ringFull = ioread32(ctx_stats->uc_dbg_stats_mmio +
		ring_off + IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
	ring->ringEmpty = ioread32(ctx_stats->uc_dbg_stats_mmio +
		ring_off + IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
	ring->ringUsageHigh = ioread32(ctx_stats->uc_dbg_stats_mmio +
		ring_off + IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
	ring->ringUsageLow = ioread32(ctx_stats->uc_dbg_stats_mmio +
		ring_off + IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
	ring->RingUtilCount = ioread32(ctx_stats->uc_dbg_stats_mmio +
		ring_off + IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
}
/** /**
* ipa3_get_aqc_gsi_stats() - Query AQC gsi stats from uc * ipa3_get_aqc_gsi_stats() - Query AQC gsi stats from uc
* @stats: [inout] stats blob from client populated by driver * @stats: [inout] stats blob from client populated by driver
@@ -2011,32 +2059,43 @@ int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
} }
IPA_ACTIVE_CLIENTS_INC_SIMPLE(); IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_AQC_CHANNELS; i++) { for (i = 0; i < MAX_AQC_CHANNELS; i++) {
stats->u.ring[i].ringFull = ioread32( ipa3_get_gsi_ring_stats(stats->u.ring + i,
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio &ipa3_ctx->aqc_ctx.dbg_stats, i);
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->u.ring[i].ringEmpty = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->u.ring[i].ringUsageHigh = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->u.ring[i].ringUsageLow = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->u.ring[i].RingUtilCount = ioread32(
ipa3_ctx->aqc_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
} }
IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
return 0; return 0;
} }
/**
 * ipa3_get_ntn_gsi_stats() - Query NTN gsi stats from uc
 * @stats: [inout] stats blob from client populated by driver
 *
 * Returns: 0 on success, negative on failure
 *
 * @note Cannot be called from atomic context
 */
int ipa3_get_ntn_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
{
	int ch;

	/* the uC debug-stats area must have been mapped at init time */
	if (!ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio) {
		IPAERR("bad parms NULL ntn_gsi_stats_mmio\n");
		return -EINVAL;
	}

	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
	for (ch = 0; ch < MAX_NTN_CHANNELS; ch++)
		ipa3_get_gsi_ring_stats(&stats->u.ring[ch],
			&ipa3_ctx->ntn_ctx.dbg_stats, ch);
	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();

	return 0;
}
/** /**
* ipa3_get_rtk_gsi_stats() - Query RTK gsi stats from uc * ipa3_get_rtk_gsi_stats() - Query RTK gsi stats from uc
* @stats: [inout] stats blob from client populated by driver * @stats: [inout] stats blob from client populated by driver
@@ -2057,26 +2116,8 @@ int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats)
} }
IPA_ACTIVE_CLIENTS_INC_SIMPLE(); IPA_ACTIVE_CLIENTS_INC_SIMPLE();
for (i = 0; i < MAX_RTK_CHANNELS; i++) { for (i = 0; i < MAX_RTK_CHANNELS; i++) {
stats->u.rtk[i].commStats.ringFull = ioread32( ipa3_get_gsi_ring_stats(&stats->u.rtk[i].commStats,
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio &ipa3_ctx->rtk_ctx.dbg_stats, i);
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGFULL_OFF);
stats->u.rtk[i].commStats.ringEmpty = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGEMPTY_OFF);
stats->u.rtk[i].commStats.ringUsageHigh = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGEHIGH_OFF);
stats->u.rtk[i].commStats.ringUsageLow = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGUSAGELOW_OFF);
stats->u.rtk[i].commStats.RingUtilCount = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF +
IPA3_UC_DEBUG_STATS_RINGUTILCOUNT_OFF);
stats->u.rtk[i].trCount = ioread32( stats->u.rtk[i].trCount = ioread32(
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio
+ i * IPA3_UC_DEBUG_STATS_RTK_OFF + + i * IPA3_UC_DEBUG_STATS_RTK_OFF +

View File

@@ -3281,9 +3281,16 @@ static ssize_t ipa3_eth_read_perf_status(struct file *file,
switch (client->client_type) { switch (client->client_type) {
case IPA_ETH_CLIENT_AQC107: case IPA_ETH_CLIENT_AQC107:
case IPA_ETH_CLIENT_AQC113: case IPA_ETH_CLIENT_AQC113:
ret = ipa3_get_aqc_gsi_stats(&stats); case IPA_ETH_CLIENT_NTN:
tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS; if (client->client_type == IPA_ETH_CLIENT_NTN) {
rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD; ret = ipa3_get_ntn_gsi_stats(&stats);
tx_ep = IPA_CLIENT_ETHERNET_CONS;
rx_ep = IPA_CLIENT_ETHERNET_PROD;
} else {
ret = ipa3_get_aqc_gsi_stats(&stats);
tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
}
if (!ret) { if (!ret) {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"%s_ringFull=%u\n" "%s_ringFull=%u\n"
@@ -3323,7 +3330,9 @@ static ssize_t ipa3_eth_read_perf_status(struct file *file,
} else { } else {
nbytes = scnprintf(dbg_buff, nbytes = scnprintf(dbg_buff,
IPA_MAX_MSG_LEN, IPA_MAX_MSG_LEN,
"Fail to read AQC GSI stats\n"); "Fail to read [%s][%s] GSI stats\n",
ipa_clients_strings[rx_ep],
ipa_clients_strings[tx_ep]);
cnt += nbytes; cnt += nbytes;
} }
break; break;
@@ -3394,7 +3403,7 @@ static ssize_t ipa3_eth_read_perf_status(struct file *file,
cnt += nbytes; cnt += nbytes;
} else { } else {
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"Fail to read AQC GSI stats\n"); "Fail to read RTK GSI stats\n");
cnt += nbytes; cnt += nbytes;
} }
break; break;
@@ -3415,6 +3424,7 @@ static ssize_t ipa3_eth_read_err_status(struct file *file,
int tx_ep, rx_ep; int tx_ep, rx_ep;
struct ipa3_eth_error_stats tx_stats; struct ipa3_eth_error_stats tx_stats;
struct ipa3_eth_error_stats rx_stats; struct ipa3_eth_error_stats rx_stats;
int scratch_num;
memset(&tx_stats, 0, sizeof(struct ipa3_eth_error_stats)); memset(&tx_stats, 0, sizeof(struct ipa3_eth_error_stats));
memset(&rx_stats, 0, sizeof(struct ipa3_eth_error_stats)); memset(&rx_stats, 0, sizeof(struct ipa3_eth_error_stats));
@@ -3433,42 +3443,45 @@ static ssize_t ipa3_eth_read_err_status(struct file *file,
case IPA_ETH_CLIENT_AQC113: case IPA_ETH_CLIENT_AQC113:
tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS; tx_ep = IPA_CLIENT_AQC_ETHERNET_CONS;
rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD; rx_ep = IPA_CLIENT_AQC_ETHERNET_PROD;
break; scratch_num = 7;
case IPA_ETH_CLIENT_RTK8111K: case IPA_ETH_CLIENT_RTK8111K:
case IPA_ETH_CLIENT_RTK8125B: case IPA_ETH_CLIENT_RTK8125B:
tx_ep = IPA_CLIENT_RTK_ETHERNET_CONS; tx_ep = IPA_CLIENT_RTK_ETHERNET_CONS;
rx_ep = IPA_CLIENT_RTK_ETHERNET_PROD; rx_ep = IPA_CLIENT_RTK_ETHERNET_PROD;
ipa3_eth_get_status(tx_ep, 5, &tx_stats); scratch_num = 5;
ipa3_eth_get_status(rx_ep, 5, &rx_stats);
break; break;
case IPA_ETH_CLIENT_NTN:
tx_ep = IPA_CLIENT_ETHERNET_CONS;
rx_ep = IPA_CLIENT_ETHERNET_PROD;
scratch_num = 6;
default: default:
IPAERR("Not supported\n"); IPAERR("Not supported\n");
return 0; return 0;
} }
ipa3_eth_get_status(tx_ep, scratch_num, &tx_stats);
ipa3_eth_get_status(rx_ep, scratch_num, &rx_stats);
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN, nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"%s_RP=0x%x\n" "%s_RP=0x%x\n"
"%s_WP=0x%x\n" "%s_WP=0x%x\n"
"%s_SCRATCH5=0x%x\n", "%s_err:%u (scratch %d)\n",
ipa_clients_strings[tx_ep], ipa_clients_strings[tx_ep],
tx_stats.rp, tx_stats.rp,
ipa_clients_strings[tx_ep], ipa_clients_strings[tx_ep],
tx_stats.wp, tx_stats.wp,
ipa_clients_strings[tx_ep], ipa_clients_strings[tx_ep],
tx_stats.err); tx_stats.err, scratch_num);
cnt += nbytes; cnt += nbytes;
nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt, nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
"%s_RP=0x%x\n" "%s_RP=0x%x\n"
"%s_WP=0x%x\n" "%s_WP=0x%x\n"
"%s_SCRATCH5=0x%x\n" "%s_err:%u (scratch %d)\n",
"%s_err:%u\n",
ipa_clients_strings[rx_ep], ipa_clients_strings[rx_ep],
rx_stats.rp, rx_stats.rp,
ipa_clients_strings[rx_ep], ipa_clients_strings[rx_ep],
rx_stats.wp, rx_stats.wp,
ipa_clients_strings[rx_ep], ipa_clients_strings[rx_ep],
rx_stats.err, rx_stats.err, scratch_num);
ipa_clients_strings[rx_ep],
rx_stats.err & 0xff);
cnt += nbytes; cnt += nbytes;
done: done:
return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt); return simple_read_from_buffer(ubuf, count, ppos, dbg_buff, cnt);

View File

@@ -17,9 +17,13 @@
#define IPA_ETH_AQC_MODC_FACTOR (10) #define IPA_ETH_AQC_MODC_FACTOR (10)
#define AQC_WRB_MODC_FACTOR (10) #define AQC_WRB_MODC_FACTOR (10)
#define IPA_ETH_NTN_MODT (32)
#define IPA_ETH_NTN_MODC (128)
#define NTN_BUFFER_SIZE 2048 /* 2K */
#define IPA_ETH_AGGR_PKT_LIMIT 1 #define IPA_ETH_AGGR_PKT_LIMIT 1
#define IPA_ETH_AGGR_BYTE_LIMIT 2 /*2 Kbytes Agger hard byte limit*/ #define IPA_ETH_AGGR_BYTE_LIMIT 2 /* 2 Kbytes Agger hard byte limit */
#define IPA_ETH_MBOX_M (1) #define IPA_ETH_MBOX_M (1)
@@ -454,6 +458,7 @@ static struct iommu_domain *ipa_eth_get_smmu_domain(
return ipa3_get_eth_smmu_domain(); return ipa3_get_eth_smmu_domain();
if (IPA_CLIENT_IS_SMMU_ETH1_INSTANCE(client_type)) if (IPA_CLIENT_IS_SMMU_ETH1_INSTANCE(client_type))
return ipa3_get_eth1_smmu_domain(); return ipa3_get_eth1_smmu_domain();
return NULL; return NULL;
} }
@@ -495,7 +500,9 @@ static int ipa3_smmu_map_eth_pipes(struct ipa_eth_client_pipe_info *pipe,
enum ipa_smmu_cb_type cb_type; enum ipa_smmu_cb_type cb_type;
if (pipe->info.fix_buffer_size > PAGE_SIZE) { if (pipe->info.fix_buffer_size > PAGE_SIZE) {
IPAERR("invalid data buff size\n"); IPAERR("%s: invalid data buff size %d\n",
pipe->dir == IPA_ETH_PIPE_DIR_TX ? "TX" : "RX",
pipe->info.fix_buffer_size);
return -EINVAL; return -EINVAL;
} }
@@ -511,7 +518,7 @@ static int ipa3_smmu_map_eth_pipes(struct ipa_eth_client_pipe_info *pipe,
pipe->info.transfer_ring_sgt, pipe->info.transfer_ring_sgt,
IPA_SMMU_CB_AP); IPA_SMMU_CB_AP);
if (result) { if (result) {
IPAERR("failed to %s ntn ring %d\n", IPAERR("failed to %s ring %d\n",
map ? "map" : "unmap", result); map ? "map" : "unmap", result);
return -EINVAL; return -EINVAL;
} }
@@ -536,41 +543,58 @@ map_buffer:
"SMMU cb %d is not shared, continue to map buffers\n", cb_type); "SMMU cb %d is not shared, continue to map buffers\n", cb_type);
} }
smmu_domain = ipa_eth_get_smmu_domain(client_type); if (pipe->info.is_buffer_pool_valid) {
if (!smmu_domain) { IPADBG("buffer pool valid\n");
IPAERR("invalid smmu domain\n"); result = ipa3_smmu_map_peer_buff(
result = -EINVAL; (u64)pipe->info.buffer_pool_base_addr,
goto fail_map_buffer_smmu_enabled; pipe->info.fix_buffer_size,
} map,
pipe->info.buffer_pool_base_sgt,
prev_iova_p = 0; cb_type);
for (i = 0; i < pipe->info.data_buff_list_size; i++) { if (result) {
iova = (u64)pipe->info.data_buff_list[i].iova; IPAERR("failed to %s buffer %d cb_type %d\n",
pa = (phys_addr_t)pipe->info.data_buff_list[i].pa; map ? "map" : "unmap", result, cb_type);
IPA_SMMU_ROUND_TO_PAGE(iova, pa, pipe->info.fix_buffer_size, goto fail_map_buffer_smmu_enabled;
iova_p, pa_p, size_p);
/* Add check on every 2nd buffer for AQC smmu-dup issue */
if (prev_iova_p == iova_p) {
IPADBG_LOW(
"current buffer and previous are on the same page, skip page mapping\n"
);
continue;
} }
prev_iova_p = iova_p; } else {
IPADBG_LOW("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" : IPADBG("buffer pool not valid\n");
"unmapping", iova_p, &pa_p, size_p); smmu_domain = ipa_eth_get_smmu_domain(client_type);
if (map) { if (!smmu_domain) {
result = ipa3_iommu_map(smmu_domain, iova_p, pa_p, IPAERR("invalid smmu domain\n");
size_p, IOMMU_READ | IOMMU_WRITE); result = -EINVAL;
if (result) goto fail_map_buffer_smmu_enabled;
IPAERR("Fail to map 0x%llx\n", iova); }
} else {
result = iommu_unmap(smmu_domain, iova_p, size_p); prev_iova_p = 0;
if (result != size_p) { for (i = 0; i < pipe->info.data_buff_list_size; i++) {
IPAERR("Fail to unmap 0x%llx\n", iova); iova = (u64)pipe->info.data_buff_list[i].iova;
pa = (phys_addr_t)pipe->info.data_buff_list[i].pa;
IPA_SMMU_ROUND_TO_PAGE(iova, pa, pipe->info.fix_buffer_size,
iova_p, pa_p, size_p);
/* Add check on every 2nd buffer for AQC smmu-dup issue */
if (prev_iova_p == iova_p) {
IPADBG_LOW(
"current buffer and previous are on the same page, skip page mapping\n"
);
continue;
}
prev_iova_p = iova_p;
IPADBG_LOW("%s 0x%llx to 0x%pa size %d\n", map ? "mapping" :
"unmapping", iova_p, &pa_p, size_p);
if (map) {
result = ipa3_iommu_map(smmu_domain, iova_p, pa_p,
size_p, IOMMU_READ | IOMMU_WRITE);
if (result)
IPAERR("Fail to map 0x%llx\n", iova);
} else {
result = iommu_unmap(smmu_domain, iova_p, size_p);
if (result != size_p) {
IPAERR("Fail to unmap 0x%llx\n", iova);
}
} }
} }
} }
return 0; return 0;
fail_map_buffer_smmu_enabled: fail_map_buffer_smmu_enabled:
@@ -709,6 +733,139 @@ fail_get_gsi_ep_info:
return result; return result;
} }
static int ipa_eth_setup_ntn_gsi_channel(
struct ipa_eth_client_pipe_info *pipe,
struct ipa3_ep_context *ep)
{
struct gsi_evt_ring_props gsi_evt_ring_props;
struct gsi_chan_props gsi_channel_props;
union __packed gsi_channel_scratch ch_scratch;
union __packed gsi_evt_scratch evt_scratch;
const struct ipa_gsi_ep_config *gsi_ep_info;
int result, len;
u64 bar_addr;
if (unlikely(!pipe->info.is_transfer_ring_valid)) {
IPAERR("NTN transfer ring invalid\n");
ipa_assert();
return -EFAULT;
}
/* don't assert bit 40 in test mode as we emulate regs on DDR not
* on PICE address space */
bar_addr = pipe->client_info->test ?
pipe->info.client_info.ntn.bar_addr :
IPA_ETH_PCIE_SET(pipe->info.client_info.ntn.bar_addr);
/* setup event ring */
memset(&gsi_evt_ring_props, 0, sizeof(gsi_evt_ring_props));
gsi_evt_ring_props.intf = GSI_EVT_CHTYPE_NTN_EV;
gsi_evt_ring_props.re_size = GSI_EVT_RING_RE_SIZE_16B;
gsi_evt_ring_props.intr = GSI_INTR_MSI;
gsi_evt_ring_props.int_modt = IPA_ETH_NTN_MODT;
gsi_evt_ring_props.int_modc = IPA_ETH_NTN_MODC;
gsi_evt_ring_props.exclusive = true;
gsi_evt_ring_props.err_cb = ipa_eth_gsi_evt_ring_err_cb;
gsi_evt_ring_props.user_data = NULL;
gsi_evt_ring_props.msi_addr =
bar_addr +
pipe->info.client_info.ntn.tail_ptr_offs;
len = pipe->info.transfer_ring_size;
gsi_evt_ring_props.ring_len = len;
gsi_evt_ring_props.ring_base_addr =
(u64)pipe->info.transfer_ring_base;
result = gsi_alloc_evt_ring(&gsi_evt_ring_props,
ipa3_ctx->gsi_dev_hdl,
&ep->gsi_evt_ring_hdl);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("fail to alloc RX event ring\n");
result = -EFAULT;
}
ep->gsi_mem_info.evt_ring_len =
gsi_evt_ring_props.ring_len;
ep->gsi_mem_info.evt_ring_base_addr =
gsi_evt_ring_props.ring_base_addr;
/* setup channel ring */
memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
gsi_channel_props.prot = GSI_CHAN_PROT_NTN;
if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
gsi_channel_props.dir = GSI_CHAN_DIR_FROM_GSI;
else
gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
gsi_ep_info = ipa3_get_gsi_ep_info(ep->client);
if (!gsi_ep_info) {
IPAERR("Failed getting GSI EP info for client=%d\n",
ep->client);
result = -EINVAL;
goto fail_get_gsi_ep_info;
} else
gsi_channel_props.ch_id = gsi_ep_info->ipa_gsi_chan_num;
gsi_channel_props.evt_ring_hdl = ep->gsi_evt_ring_hdl;
gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
else
gsi_channel_props.use_db_eng = GSI_CHAN_DIRECT_MODE;
gsi_channel_props.db_in_bytes = 1;
gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
gsi_channel_props.prefetch_mode =
gsi_ep_info->prefetch_mode;
gsi_channel_props.empty_lvl_threshold =
gsi_ep_info->prefetch_threshold;
gsi_channel_props.low_weight = 1;
gsi_channel_props.err_cb = ipa_eth_gsi_chan_err_cb;
gsi_channel_props.ring_len = len;
gsi_channel_props.ring_base_addr =
(u64)pipe->info.transfer_ring_base;
result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
&ep->gsi_chan_hdl);
if (result != GSI_STATUS_SUCCESS)
goto fail_get_gsi_ep_info;
ep->gsi_mem_info.chan_ring_len = gsi_channel_props.ring_len;
ep->gsi_mem_info.chan_ring_base_addr =
gsi_channel_props.ring_base_addr;
/* write event scratch */
memset(&evt_scratch, 0, sizeof(evt_scratch));
/* nothing is needed for NTN event scratch */
/* write ch scratch */
memset(&ch_scratch, 0, sizeof(ch_scratch));
ch_scratch.ntn.fix_buff_size =
ilog2(pipe->info.fix_buffer_size);
if (pipe->info.is_buffer_pool_valid) {
ch_scratch.ntn.buff_addr_lsb =
(u32)pipe->info.buffer_pool_base_addr;
ch_scratch.ntn.buff_addr_msb =
(u32)((u64)(pipe->info.buffer_pool_base_addr) >> 32);
}
else {
ch_scratch.ntn.buff_addr_lsb =
(u32)pipe->info.data_buff_list[0].iova;
ch_scratch.ntn.buff_addr_msb =
(u32)((u64)(pipe->info.data_buff_list[0].iova) >> 32);
}
if (pipe->dir == IPA_ETH_PIPE_DIR_TX)
ch_scratch.ntn.ioc_mod_threshold = IPA_ETH_NTN_MODT;
result = gsi_write_channel_scratch(ep->gsi_chan_hdl, ch_scratch);
if (result != GSI_STATUS_SUCCESS) {
IPAERR("failed to write evt ring scratch\n");
goto fail_write_scratch;
}
return 0;
fail_write_scratch:
gsi_dealloc_channel(ep->gsi_chan_hdl);
ep->gsi_chan_hdl = ~0;
fail_get_gsi_ep_info:
gsi_dealloc_evt_ring(ep->gsi_evt_ring_hdl);
ep->gsi_evt_ring_hdl = ~0;
return result;
}
static int ipa3_eth_get_prot(struct ipa_eth_client_pipe_info *pipe, static int ipa3_eth_get_prot(struct ipa_eth_client_pipe_info *pipe,
enum ipa4_hw_protocol *prot) enum ipa4_hw_protocol *prot)
{ {
@@ -724,6 +881,8 @@ static int ipa3_eth_get_prot(struct ipa_eth_client_pipe_info *pipe,
*prot = IPA_HW_PROTOCOL_RTK; *prot = IPA_HW_PROTOCOL_RTK;
break; break;
case IPA_ETH_CLIENT_NTN: case IPA_ETH_CLIENT_NTN:
*prot = IPA_HW_PROTOCOL_NTN3;
break;
case IPA_ETH_CLIENT_EMAC: case IPA_ETH_CLIENT_EMAC:
*prot = IPA_HW_PROTOCOL_ETH; *prot = IPA_HW_PROTOCOL_ETH;
break; break;
@@ -758,6 +917,21 @@ int ipa3_eth_connect(
IPAERR("undefined client_type\n"); IPAERR("undefined client_type\n");
return -EFAULT; return -EFAULT;
} }
/* currently all protocols require valid transfer ring */
if (!pipe->info.is_transfer_ring_valid) {
IPAERR("transfer ring not valid!\n");
return -EINVAL;
}
if (pipe->client_info->client_type == IPA_ETH_CLIENT_NTN) {
if (pipe->info.fix_buffer_size != NTN_BUFFER_SIZE) {
IPAERR("fix buffer size %u not valid for NTN, use 2K\n"
, pipe->info.fix_buffer_size);
return -EINVAL;
}
}
/* need enhancement for vlan support on multiple attach */ /* need enhancement for vlan support on multiple attach */
result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode); result = ipa3_is_vlan_mode(IPA_VLAN_IF_ETH, &vlan_mode);
if (result) { if (result) {
@@ -771,11 +945,6 @@ int ipa3_eth_connect(
return result; return result;
} }
if (prot == IPA_HW_PROTOCOL_ETH) {
IPAERR("EMAC\\NTN still not supported using this framework\n");
return -EFAULT;
}
result = ipa3_smmu_map_eth_pipes(pipe, client_type, true); result = ipa3_smmu_map_eth_pipes(pipe, client_type, true);
if (result) { if (result) {
IPAERR("failed to map SMMU %d\n", result); IPAERR("failed to map SMMU %d\n", result);
@@ -817,19 +986,26 @@ int ipa3_eth_connect(
IPADBG("client %d (ep: %d) connected\n", client_type, IPADBG("client %d (ep: %d) connected\n", client_type,
ep_idx); ep_idx);
if (prot == IPA_HW_PROTOCOL_RTK) { switch (prot) {
if (ipa_eth_setup_rtk_gsi_channel(pipe, ep)) { case IPA_HW_PROTOCOL_RTK:
IPAERR("fail to setup eth gsi rx channel\n"); result = ipa_eth_setup_rtk_gsi_channel(pipe, ep);
result = -EFAULT; break;
goto setup_gsi_ch_fail; case IPA_HW_PROTOCOL_AQC:
} result = ipa_eth_setup_aqc_gsi_channel(pipe, ep);
} else if (prot == IPA_HW_PROTOCOL_AQC) { break;
if (ipa_eth_setup_aqc_gsi_channel(pipe, ep)) { case IPA_HW_PROTOCOL_NTN3:
IPAERR("fail to setup eth gsi rx channel\n"); result = ipa_eth_setup_ntn_gsi_channel(pipe, ep);
result = -EFAULT; break;
goto setup_gsi_ch_fail; default:
} IPAERR("unknown protocol %d\n", prot);
result = -EINVAL;
} }
if (result) {
IPAERR("fail to setup eth gsi rx channel\n");
result = -EFAULT;
goto setup_gsi_ch_fail;
}
if (gsi_query_channel_db_addr(ep->gsi_chan_hdl, if (gsi_query_channel_db_addr(ep->gsi_chan_hdl,
&gsi_db_addr_low, &gsi_db_addr_high)) { &gsi_db_addr_low, &gsi_db_addr_high)) {
IPAERR("failed to query gsi rx db addr\n"); IPAERR("failed to query gsi rx db addr\n");
@@ -837,7 +1013,8 @@ int ipa3_eth_connect(
goto query_ch_db_fail; goto query_ch_db_fail;
} }
if (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0) { if (ipa3_ctx->ipa_hw_type >= IPA_HW_v5_0) {
if (prot == IPA_HW_PROTOCOL_AQC) { switch (prot) {
case IPA_HW_PROTOCOL_AQC:
if (IPA_CLIENT_IS_PROD(client_type)) { if (IPA_CLIENT_IS_PROD(client_type)) {
if (gsi_query_msi_addr(ep->gsi_chan_hdl, if (gsi_query_msi_addr(ep->gsi_chan_hdl,
&pipe->info.db_pa)) { &pipe->info.db_pa)) {
@@ -850,13 +1027,19 @@ int ipa3_eth_connect(
pipe->info.db_val = 0; pipe->info.db_val = 0;
/* only 32 bit lsb is used */ /* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4); db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
if (!db_addr) {
IPAERR("ioremap failed\n");
result = -EFAULT;
goto ioremap_fail;
}
/* TX: Initialize to end of ring */ /* TX: Initialize to end of ring */
db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr; db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
db_val += (u32)ep->gsi_mem_info.chan_ring_len; db_val += (u32)ep->gsi_mem_info.chan_ring_len;
iowrite32(db_val, db_addr); iowrite32(db_val, db_addr);
iounmap(db_addr); iounmap(db_addr);
} }
} else if (prot == IPA_HW_PROTOCOL_RTK) { break;
case IPA_HW_PROTOCOL_RTK:
if (gsi_query_msi_addr(ep->gsi_chan_hdl, if (gsi_query_msi_addr(ep->gsi_chan_hdl,
&pipe->info.db_pa)) { &pipe->info.db_pa)) {
result = -EFAULT; result = -EFAULT;
@@ -865,11 +1048,45 @@ int ipa3_eth_connect(
if (IPA_CLIENT_IS_CONS(client_type)) { if (IPA_CLIENT_IS_CONS(client_type)) {
/* only 32 bit lsb is used */ /* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(pipe->info.db_pa), 4); db_addr = ioremap((phys_addr_t)(pipe->info.db_pa), 4);
if (!db_addr) {
IPAERR("ioremap failed\n");
result = -EFAULT;
goto ioremap_fail;
}
/* TX: ring MSI doorbell */ /* TX: ring MSI doorbell */
db_val = IPA_ETH_MSI_DB_VAL; db_val = IPA_ETH_MSI_DB_VAL;
iowrite32(db_val, db_addr); iowrite32(db_val, db_addr);
iounmap(db_addr); iounmap(db_addr);
} }
break;
case IPA_HW_PROTOCOL_NTN3:
pipe->info.db_pa = gsi_db_addr_low;
pipe->info.db_val = 0;
/* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
if (!db_addr) {
IPAERR("ioremap failed\n");
result = -EFAULT;
goto ioremap_fail;
}
if (IPA_CLIENT_IS_PROD(client_type)) {
/* Rx: Initialize to ring base (i.e point 6) */
db_val =
(u32)ep->gsi_mem_info.chan_ring_base_addr;
} else {
/* TX: Initialize to end of ring */
db_val =
(u32)ep->gsi_mem_info.chan_ring_base_addr;
db_val +=
(u32)ep->gsi_mem_info.chan_ring_len;
}
iowrite32(db_val, db_addr);
iounmap(db_addr);
break;
default:
/* we can't really get here as we checked prot before */
IPAERR("unknown protocol %d\n", prot);
} }
} else { } else {
if (IPA_CLIENT_IS_PROD(client_type)) { if (IPA_CLIENT_IS_PROD(client_type)) {
@@ -891,6 +1108,11 @@ int ipa3_eth_connect(
} }
/* only 32 bit lsb is used */ /* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4); db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
if (!db_addr) {
IPAERR("ioremap failed\n");
result = -EFAULT;
goto ioremap_fail;
}
/* Rx: Initialize to ring base (i.e point 6) */ /* Rx: Initialize to ring base (i.e point 6) */
db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr; db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
iowrite32(db_val, db_addr); iowrite32(db_val, db_addr);
@@ -910,6 +1132,11 @@ int ipa3_eth_connect(
} }
/* only 32 bit lsb is used */ /* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4); db_addr = ioremap((phys_addr_t)(gsi_db_addr_low), 4);
if (!db_addr) {
IPAERR("ioremap failed\n");
result = -EFAULT;
goto ioremap_fail;
}
/* TX: Initialize to end of ring */ /* TX: Initialize to end of ring */
db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr; db_val = (u32)ep->gsi_mem_info.chan_ring_base_addr;
db_val += (u32)ep->gsi_mem_info.chan_ring_len; db_val += (u32)ep->gsi_mem_info.chan_ring_len;
@@ -924,6 +1151,11 @@ int ipa3_eth_connect(
evt_ring_db_addr_high); evt_ring_db_addr_high);
/* only 32 bit lsb is used */ /* only 32 bit lsb is used */
db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4); db_addr = ioremap((phys_addr_t)(evt_ring_db_addr_low), 4);
if (!db_addr) {
IPAERR("ioremap failed\n");
result = -EFAULT;
goto ioremap_fail;
}
/* /*
* IPA/GSI driver should ring the event DB once after * IPA/GSI driver should ring the event DB once after
* initialization of the event, with a value that is * initialization of the event, with a value that is
@@ -996,7 +1228,7 @@ int ipa3_eth_connect(
ipa3_eth_save_client_mapping(pipe, client_type, ipa3_eth_save_client_mapping(pipe, client_type,
id, ep_idx, ep->gsi_chan_hdl); id, ep_idx, ep->gsi_chan_hdl);
if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_5) || if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_5) ||
(prot != IPA_HW_PROTOCOL_AQC)) { (prot == IPA_HW_PROTOCOL_RTK)) {
result = ipa3_eth_config_uc(true, result = ipa3_eth_config_uc(true,
prot, prot,
(pipe->dir == IPA_ETH_PIPE_DIR_TX) (pipe->dir == IPA_ETH_PIPE_DIR_TX)
@@ -1026,6 +1258,7 @@ uc_init_peripheral_fail:
start_channel_fail: start_channel_fail:
ipa3_disable_data_path(ep_idx); ipa3_disable_data_path(ep_idx);
enable_data_path_fail: enable_data_path_fail:
ioremap_fail:
query_msi_fail: query_msi_fail:
query_ch_db_fail: query_ch_db_fail:
setup_gsi_ch_fail: setup_gsi_ch_fail:
@@ -1053,11 +1286,6 @@ int ipa3_eth_disconnect(
return result; return result;
} }
if (prot == IPA_HW_PROTOCOL_ETH) {
IPAERR("EMAC\\NTN still not supported using this framework\n");
return -EFAULT;
}
IPA_ACTIVE_CLIENTS_INC_SIMPLE(); IPA_ACTIVE_CLIENTS_INC_SIMPLE();
ep_idx = ipa_get_ep_mapping(client_type); ep_idx = ipa_get_ep_mapping(client_type);
if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) { if (ep_idx == -1 || ep_idx >= IPA3_MAX_NUM_PIPES) {
@@ -1095,7 +1323,7 @@ int ipa3_eth_disconnect(
} }
if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_5) || if ((ipa3_ctx->ipa_hw_type == IPA_HW_v4_5) ||
(prot != IPA_HW_PROTOCOL_AQC)) { (prot == IPA_HW_PROTOCOL_RTK)) {
result = ipa3_eth_config_uc(false, result = ipa3_eth_config_uc(false,
prot, prot,
(pipe->dir == IPA_ETH_PIPE_DIR_TX) (pipe->dir == IPA_ETH_PIPE_DIR_TX)

View File

@@ -1740,6 +1740,13 @@ struct ipa3_rtk_ctx {
struct ipa3_uc_dbg_stats dbg_stats; struct ipa3_uc_dbg_stats dbg_stats;
}; };
/**
* struct ipa3_ntn_ctx - IPA ntn context
*/
struct ipa3_ntn_ctx {
struct ipa3_uc_dbg_stats dbg_stats;
};
/** /**
* struct ipa3_transport_pm - transport power management related members * struct ipa3_transport_pm - transport power management related members
* @transport_pm_mutex: Mutex to protect the transport_pm functionality. * @transport_pm_mutex: Mutex to protect the transport_pm functionality.
@@ -2210,6 +2217,7 @@ struct ipa3_context {
struct ipa3_mhip_ctx mhip_ctx; struct ipa3_mhip_ctx mhip_ctx;
struct ipa3_aqc_ctx aqc_ctx; struct ipa3_aqc_ctx aqc_ctx;
struct ipa3_rtk_ctx rtk_ctx; struct ipa3_rtk_ctx rtk_ctx;
struct ipa3_ntn_ctx ntn_ctx;
atomic_t ipa_clk_vote; atomic_t ipa_clk_vote;
int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock); int (*client_lock_unlock[IPA_MAX_CLNT])(bool is_lock);
@@ -2829,6 +2837,7 @@ int ipa3_get_wdi3_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats); int ipa3_get_usb_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats); int ipa3_get_aqc_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats); int ipa3_get_rtk_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_ntn_gsi_stats(struct ipa_uc_dbg_ring_stats *stats);
int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats); int ipa3_get_wdi_stats(struct IpaHwStatsWDIInfoData_t *stats);
u16 ipa3_get_smem_restr_bytes(void); u16 ipa3_get_smem_restr_bytes(void);
int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes); int ipa3_broadcast_wdi_quota_reach_ind(uint32_t fid, uint64_t num_bytes);

View File

@@ -440,6 +440,17 @@ static void ipa3_uc_save_dbg_stats(u32 size)
} else } else
goto unmap; goto unmap;
break; break;
case IPA_HW_PROTOCOL_NTN3:
if (!ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio) {
ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_size =
size;
ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_ofst =
addr_offset;
ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio =
mmio;
} else
goto unmap;
break;
case IPA_HW_PROTOCOL_WDI: case IPA_HW_PROTOCOL_WDI:
if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) { if (!ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio) {
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size = ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_size =
@@ -1627,6 +1638,10 @@ int ipa3_uc_debug_stats_dealloc(uint32_t prot_id)
iounmap(ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio); iounmap(ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio);
ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio = NULL; ipa3_ctx->rtk_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
break; break;
case IPA_HW_PROTOCOL_NTN3:
iounmap(ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio);
ipa3_ctx->ntn_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;
break;
case IPA_HW_PROTOCOL_WDI: case IPA_HW_PROTOCOL_WDI:
iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio); iounmap(ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio);
ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL; ipa3_ctx->wdi2_ctx.dbg_stats.uc_dbg_stats_mmio = NULL;

View File

@@ -25,6 +25,7 @@
#define MAX_AQC_CHANNELS 2 #define MAX_AQC_CHANNELS 2
#define MAX_RTK_CHANNELS 2 #define MAX_RTK_CHANNELS 2
#define MAX_NTN_CHANNELS 2
#define MAX_11AD_CHANNELS 5 #define MAX_11AD_CHANNELS 5
#define MAX_WDI2_CHANNELS 2 #define MAX_WDI2_CHANNELS 2
#define MAX_WDI3_CHANNELS 3 #define MAX_WDI3_CHANNELS 3
@@ -83,6 +84,7 @@ enum ipa3_hw_features {
* @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW * @IPA_HW_PROTOCOL_MHIP: protocol related to MHIP operation in IPA HW
* @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW * @IPA_HW_PROTOCOL_USB : protocol related to USB operation in IPA HW
* @IPA_HW_PROTOCOL_RTK : protocol related to RTK operation in IPA HW * @IPA_HW_PROTOCOL_RTK : protocol related to RTK operation in IPA HW
* @IPA_HW_PROTOCOL_NTN3 : protocol related to NTN3 operation in IPA HW
*/ */
enum ipa4_hw_protocol { enum ipa4_hw_protocol {
IPA_HW_PROTOCOL_COMMON = 0x0, IPA_HW_PROTOCOL_COMMON = 0x0,
@@ -94,6 +96,7 @@ enum ipa4_hw_protocol {
IPA_HW_PROTOCOL_MHIP = 0x6, IPA_HW_PROTOCOL_MHIP = 0x6,
IPA_HW_PROTOCOL_USB = 0x7, IPA_HW_PROTOCOL_USB = 0x7,
IPA_HW_PROTOCOL_RTK = 0x9, IPA_HW_PROTOCOL_RTK = 0x9,
IPA_HW_PROTOCOL_NTN3 = 0xA,
IPA_HW_PROTOCOL_MAX IPA_HW_PROTOCOL_MAX
}; };

View File

@@ -11758,7 +11758,9 @@ void ipa3_get_gsi_stats(int prot_id,
stats->num_ch = MAX_WDI3_CHANNELS; stats->num_ch = MAX_WDI3_CHANNELS;
ipa3_get_wdi3_gsi_stats(stats); ipa3_get_wdi3_gsi_stats(stats);
break; break;
case IPA_HW_PROTOCOL_ETH: case IPA_HW_PROTOCOL_NTN3:
stats->num_ch = MAX_NTN_CHANNELS;
ipa3_get_ntn_gsi_stats(stats);
break; break;
case IPA_HW_PROTOCOL_MHIP: case IPA_HW_PROTOCOL_MHIP:
stats->num_ch = MAX_MHIP_CHANNELS; stats->num_ch = MAX_MHIP_CHANNELS;
@@ -11914,6 +11916,10 @@ int ipa3_get_prot_id(enum ipa_client_type client)
void ipa3_eth_get_status(u32 client, int scratch_id, void ipa3_eth_get_status(u32 client, int scratch_id,
struct ipa3_eth_error_stats *stats) struct ipa3_eth_error_stats *stats)
{ {
#define RTK_GSI_SCRATCH_ID 5
#define AQC_GSI_SCRATCH_ID 7
#define NTN_GSI_SCRATCH_ID 6
int ch_id; int ch_id;
int ipa_ep_idx; int ipa_ep_idx;
@@ -11922,9 +11928,43 @@ void ipa3_eth_get_status(u32 client, int scratch_id,
if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED)
return; return;
ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl; ch_id = ipa3_ctx->ep[ipa_ep_idx].gsi_chan_hdl;
stats->rp = gsi_get_refetch_reg(ch_id, true);
stats->wp = gsi_get_refetch_reg(ch_id, false); /*
stats->err = gsi_get_drop_stats(ipa_ep_idx, scratch_id); * drop stats sometimes exist for RX and sometimes for Tx,
* wp sometimes acquired from ch_cntxt_6 and sometimes from refetch,
* depending on protocol.
*/
stats->err = 0;
switch (client) {
case IPA_CLIENT_RTK_ETHERNET_PROD:
stats->err = gsi_get_drop_stats(ipa_ep_idx, RTK_GSI_SCRATCH_ID,
ch_id);
case IPA_CLIENT_RTK_ETHERNET_CONS:
stats->wp = gsi_get_refetch_reg(ch_id, false);
stats->rp = gsi_get_refetch_reg(ch_id, true);
break;
case IPA_CLIENT_AQC_ETHERNET_PROD:
stats->err = gsi_get_drop_stats(ipa_ep_idx, AQC_GSI_SCRATCH_ID,
ch_id);
stats->wp = gsi_get_wp(ch_id);
stats->rp = gsi_get_refetch_reg(ch_id, true);
break;
case IPA_CLIENT_AQC_ETHERNET_CONS:
stats->wp = gsi_get_refetch_reg(ch_id, false);
stats->rp = gsi_get_refetch_reg(ch_id, true);
break;
case IPA_CLIENT_ETHERNET_PROD:
stats->wp = gsi_get_wp(ch_id);
stats->rp = gsi_get_refetch_reg(ch_id, true);
break;
case IPA_CLIENT_ETHERNET_CONS:
stats->err = gsi_get_drop_stats(ipa_ep_idx, NTN_GSI_SCRATCH_ID,
ch_id);
stats->wp = gsi_get_refetch_reg(ch_id, false);
stats->rp = gsi_get_refetch_reg(ch_id, true);
break;
}
IPA_ACTIVE_CLIENTS_DEC_SIMPLE(); IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
} }

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */ /* SPDX-License-Identifier: GPL-2.0-only */
/* /*
* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. * Copyright (c) 2016-2019, 2021 The Linux Foundation. All rights reserved.
*/ */
#ifndef _IPA_UT_SUITE_LIST_H_ #ifndef _IPA_UT_SUITE_LIST_H_
@@ -19,6 +19,7 @@ IPA_UT_DECLARE_SUITE(pm);
IPA_UT_DECLARE_SUITE(example); IPA_UT_DECLARE_SUITE(example);
IPA_UT_DECLARE_SUITE(hw_stats); IPA_UT_DECLARE_SUITE(hw_stats);
IPA_UT_DECLARE_SUITE(wdi3); IPA_UT_DECLARE_SUITE(wdi3);
IPA_UT_DECLARE_SUITE(ntn);
/** /**
@@ -33,6 +34,7 @@ IPA_UT_DEFINE_ALL_SUITES_START
IPA_UT_REGISTER_SUITE(example), IPA_UT_REGISTER_SUITE(example),
IPA_UT_REGISTER_SUITE(hw_stats), IPA_UT_REGISTER_SUITE(hw_stats),
IPA_UT_REGISTER_SUITE(wdi3), IPA_UT_REGISTER_SUITE(wdi3),
IPA_UT_REGISTER_SUITE(ntn),
} IPA_UT_DEFINE_ALL_SUITES_END; } IPA_UT_DEFINE_ALL_SUITES_END;
#endif /* _IPA_UT_SUITE_LIST_H_ */ #endif /* _IPA_UT_SUITE_LIST_H_ */