qcacmn: Defer IPA SMMU mapping to OPT_DP reserve

Currently, IPA SMMU map/unmap is called as part
of init. This causes every nbuf to be mapped
to IPA in the Rx path, causing throughput
to drop. This change resolves the problem by
deferring the IPA SMMU map/unmap calls to
OPT_DP filter reserve/release, since nbufs
need to be mapped to IPA only in that scenario.

Change-Id: If198a6c5f22af58fdaf9d9c020c74b1f76002e37
CRs-Fixed: 3496679
This commit is contained in:
Namita Nair
2023-06-20 15:40:49 -07:00
committed by Rahul Choudhary
parent 7830b92b9d
commit b50ceeee79
12 changed files with 247 additions and 38 deletions

View File

@@ -743,6 +743,64 @@ cdp_ipa_tx_buf_smmu_unmapping(ol_txrx_soc_handle soc, uint8_t pdev_id,
return QDF_STATUS_SUCCESS;
}
/**
 * cdp_ipa_rx_buf_smmu_pool_mapping() - Create SMMU mappings for Rx pool
 * @soc: data path soc handle
 * @pdev_id: pdev id
 * @create: true to create the IPA SMMU mapping, false to release it
 * @func: caller function name, for mapping-debug logging
 * @line: caller line number, for mapping-debug logging
 *
 * Create or release SMMU map/unmap for the Rx buffers allocated to IPA.
 * Deferred to OPT_DP filter reserve/release so that Rx nbufs are mapped
 * to IPA only while a filter is reserved (see commit message above).
 *
 * Return: QDF_STATUS_E_FAILURE when soc/ops/ipa_ops is NULL; otherwise
 *         the registered op's status, or QDF_STATUS_SUCCESS when no
 *         ipa_rx_buf_smmu_pool_mapping op is registered.
 */
static inline QDF_STATUS
cdp_ipa_rx_buf_smmu_pool_mapping(ol_txrx_soc_handle soc, uint8_t pdev_id,
				 bool create, const char *func, uint32_t line)
{
	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (soc->ops->ipa_ops->ipa_rx_buf_smmu_pool_mapping)
		return soc->ops->ipa_ops->ipa_rx_buf_smmu_pool_mapping(soc,
				pdev_id, create, func, line);

	return QDF_STATUS_SUCCESS;
}
/**
 * cdp_ipa_set_smmu_mapped() - Set IPA SMMU mapped value
 * @soc: data path soc handle
 * @val: new value of the SMMU mapped flag
 *
 * Return: QDF_STATUS_E_FAILURE when soc/ops/ipa_ops is NULL; otherwise
 *         the registered op's status, or QDF_STATUS_SUCCESS when no
 *         ipa_set_smmu_mapped op is registered.
 */
static inline QDF_STATUS cdp_ipa_set_smmu_mapped(ol_txrx_soc_handle soc,
						 int val)
{
	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	/* Treat a missing op as a harmless no-op */
	if (!soc->ops->ipa_ops->ipa_set_smmu_mapped)
		return QDF_STATUS_SUCCESS;

	return soc->ops->ipa_ops->ipa_set_smmu_mapped(soc, val);
}
/**
 * cdp_ipa_get_smmu_mapped() - Get IPA SMMU mapped value
 * @soc: data path soc handle
 *
 * Return: value from the registered ipa_get_smmu_mapped op, or
 *         QDF_STATUS_SUCCESS (0) when no op is registered.
 *         NOTE(review): on an invalid soc/ops this returns
 *         QDF_STATUS_E_FAILURE even though the return type is int;
 *         a caller treating the result as a boolean "mapped" flag
 *         would read that failure as "mapped" — consider returning 0
 *         here instead. TODO confirm intended caller semantics.
 */
static inline int cdp_ipa_get_smmu_mapped(ol_txrx_soc_handle soc)
{
	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
			  "%s invalid instance", __func__);
		return QDF_STATUS_E_FAILURE;
	}

	if (soc->ops->ipa_ops->ipa_get_smmu_mapped)
		return soc->ops->ipa_ops->ipa_get_smmu_mapped(soc);

	return QDF_STATUS_SUCCESS;
}
#ifdef IPA_WDS_EASYMESH_FEATURE
/**
* cdp_ipa_ast_create() - Create/update AST entry in AST table

View File

@@ -2156,6 +2156,9 @@ struct cdp_throttle_ops {
* @ipa_tx_buf_smmu_mapping: Create SMMU mappings for Tx
* @ipa_tx_buf_smmu_unmapping: Release SMMU mappings for Tx
* buffers to IPA
* @ipa_rx_buf_smmu_pool_mapping: Create SMMU mapping for Rx
* @ipa_set_smmu_mapped: Set IPA SMMU mapped value
* @ipa_get_smmu_mapped: Get IPA SMMU mapped value
* @ipa_rx_super_rule_setup: Setup cce super rules based on filter tuple
* @ipa_ast_create: Create/Update ast entry
* @ipa_get_wdi_version: Get WDI version
@@ -2251,6 +2254,15 @@ struct cdp_ipa_ops {
uint8_t pdev_id,
const char *func,
uint32_t line);
QDF_STATUS (*ipa_rx_buf_smmu_pool_mapping)(
struct cdp_soc_t *soc_hdl,
uint8_t pdev_id,
bool create,
const char *func,
uint32_t line);
QDF_STATUS (*ipa_set_smmu_mapped)(struct cdp_soc_t *soc_hdl, int val);
int (*ipa_get_smmu_mapped)(struct cdp_soc_t *soc_hdl);
#ifdef IPA_OPT_WIFI_DP
QDF_STATUS (*ipa_rx_super_rule_setup)(struct cdp_soc_t *soc_hdl,
void *flt_params);

View File

@@ -378,6 +378,21 @@ static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(
}
#endif /* RX_DESC_MULTI_PAGE_ALLOC */
/**
 * dp_ipa_set_smmu_mapped() - Record whether Rx buffers are IPA SMMU mapped
 * @soc_hdl: data path soc handle
 * @val: new flag value (0 = unmapped, non-zero = mapped)
 *
 * Atomically updates soc->ipa_mapped, which the Rx replenish/unmap
 * paths consult before doing per-nbuf IPA SMMU map/unmap.
 *
 * Return: QDF_STATUS_SUCCESS
 */
QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc_hdl, int val)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	qdf_atomic_set(&soc->ipa_mapped, val);
	return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_get_smmu_mapped() - Read the IPA SMMU mapped flag
 * @soc_hdl: data path soc handle
 *
 * Return: current atomic value of soc->ipa_mapped
 */
int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc_hdl)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);

	return qdf_atomic_read(&soc->ipa_mapped);
}
static QDF_STATUS dp_ipa_get_shared_mem_info(qdf_device_t osdev,
qdf_shared_mem_t *shared_mem,
void *cpu_addr,
@@ -3448,8 +3463,12 @@ QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
qdf_atomic_set(&soc->ipa_pipes_enabled, 1);
DP_IPA_EP_SET_TX_DB_PA(soc, ipa_res);
if (!ipa_config_is_opt_wifi_dp_enabled()) {
dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, true,
__func__, __LINE__);
qdf_atomic_set(&soc->ipa_mapped, 1);
}
result = qdf_ipa_wdi_enable_pipes(hdl);
if (result) {
@@ -3458,8 +3477,9 @@ QDF_STATUS dp_ipa_enable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
__func__, result);
qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
DP_IPA_RESET_TX_DB_PA(soc, ipa_res);
dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false,
__func__, __LINE__);
if (qdf_atomic_read(&soc->ipa_mapped))
dp_ipa_handle_rx_buf_pool_smmu_mapping(
soc, pdev, false, __func__, __LINE__);
return QDF_STATUS_E_FAILURE;
}
@@ -3505,6 +3525,8 @@ QDF_STATUS dp_ipa_disable_pipes(struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
}
qdf_atomic_set(&soc->ipa_pipes_enabled, 0);
if (qdf_atomic_read(&soc->ipa_mapped))
dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, false,
__func__, __LINE__);
@@ -3915,6 +3937,31 @@ QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(
struct dp_pdev *pdev =
dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
dp_debug("SMMU S1 disabled");
return QDF_STATUS_SUCCESS;
}
if (!pdev) {
dp_err("Invalid pdev instance pdev_id:%d", pdev_id);
return QDF_STATUS_E_FAILURE;
}
if (__dp_ipa_tx_buf_smmu_mapping(soc, pdev, false, func, line) ||
dp_ipa_tx_alt_buf_smmu_mapping(soc, pdev, false, func, line))
return QDF_STATUS_E_FAILURE;
return QDF_STATUS_SUCCESS;
}
/**
 * dp_ipa_rx_buf_pool_smmu_mapping() - Create/release IPA SMMU mappings
 *                                     for the whole Rx buffer pool
 * @soc_hdl: data path soc handle
 * @pdev_id: pdev id
 * @create: true to create the mappings, false to release them
 * @func: caller function name, for mapping-debug logging
 * @line: caller line number, for mapping-debug logging
 *
 * NOTE(review): the diff rendering interleaved stale context from
 * dp_ipa_tx_buf_smmu_unmapping() here; this is the coherent
 * post-change function body (pdev check, SMMU S1 gate, pool map).
 *
 * Return: QDF_STATUS_E_FAILURE on invalid pdev; QDF_STATUS_SUCCESS
 *         otherwise (including when SMMU S1 is disabled, where the
 *         call is a no-op).
 */
QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(
	struct cdp_soc_t *soc_hdl, uint8_t pdev_id,
	bool create, const char *func, uint32_t line)
{
	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
	struct dp_pdev *pdev =
		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);

	if (!pdev) {
		dp_err("Invalid instance");
		return QDF_STATUS_E_FAILURE;
	}

	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
		dp_debug("SMMU S1 disabled");
		return QDF_STATUS_SUCCESS;
	}

	dp_ipa_handle_rx_buf_pool_smmu_mapping(soc, pdev, create, func, line);

	return QDF_STATUS_SUCCESS;
}
#ifdef IPA_WDS_EASYMESH_FEATURE
QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
qdf_ipa_ast_info_type_t *data)

View File

@@ -409,7 +409,6 @@ QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
bool create,
const char *func,
uint32_t line);
/**
* dp_ipa_tx_buf_smmu_mapping() - Create SMMU mappings for IPA
* allocated TX buffers
@@ -437,6 +436,13 @@ QDF_STATUS dp_ipa_tx_buf_smmu_mapping(struct cdp_soc_t *soc_hdl,
QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
uint8_t pdev_id, const char *func,
uint32_t line);
QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(struct cdp_soc_t *soc_hdl,
uint8_t pdev_id,
bool create,
const char *func,
uint32_t line);
QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc, int val);
int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc);
#ifndef QCA_OL_DP_SRNG_LOCK_LESS_ACCESS
static inline void
@@ -679,6 +685,26 @@ static inline QDF_STATUS dp_ipa_tx_buf_smmu_unmapping(struct cdp_soc_t *soc_hdl,
return QDF_STATUS_SUCCESS;
}
/*
 * Stub for builds where the real implementation is compiled out
 * (presumably the !IPA_OFFLOAD branch — the enclosing #ifdef is
 * outside this view; confirm). All parameters are ignored.
 */
static inline QDF_STATUS dp_ipa_rx_buf_pool_smmu_mapping(
	struct cdp_soc_t *soc_hdl,
	uint8_t pdev_id,
	bool create,
	const char *func,
	uint32_t line)
{
	return QDF_STATUS_SUCCESS;
}
/*
 * Stub for builds where the real implementation is compiled out;
 * the mapped flag is never tracked in this configuration.
 */
static inline QDF_STATUS dp_ipa_set_smmu_mapped(struct cdp_soc_t *soc, int val)
{
	return QDF_STATUS_SUCCESS;
}
/*
 * Stub for builds where the real implementation is compiled out:
 * report "not mapped". Return 0 directly — the previous code
 * returned QDF_STATUS_SUCCESS from this int-typed getter, which
 * was only correct because QDF_STATUS_SUCCESS happens to be 0.
 */
static inline int dp_ipa_get_smmu_mapped(struct cdp_soc_t *soc)
{
	return 0;
}
#ifdef IPA_WDS_EASYMESH_FEATURE
static inline QDF_STATUS dp_ipa_ast_create(struct cdp_soc_t *soc_hdl,
qdf_ipa_ast_info_type_t *data)

View File

@@ -12061,6 +12061,9 @@ static struct cdp_ipa_ops dp_ops_ipa = {
.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping,
.ipa_tx_buf_smmu_unmapping = dp_ipa_tx_buf_smmu_unmapping,
.ipa_rx_buf_smmu_pool_mapping = dp_ipa_rx_buf_pool_smmu_mapping,
.ipa_set_smmu_mapped = dp_ipa_set_smmu_mapped,
.ipa_get_smmu_mapped = dp_ipa_get_smmu_mapped,
#ifdef QCA_ENHANCED_STATS_SUPPORT
.ipa_update_peer_rx_stats = dp_ipa_update_peer_rx_stats,
#endif

View File

@@ -365,6 +365,7 @@ dp_pdev_nbuf_alloc_and_map_replenish(struct dp_soc *dp_soc,
nbuf_frag_info_t->paddr =
qdf_nbuf_get_frag_paddr((nbuf_frag_info_t->virt_addr).nbuf, 0);
if (qdf_atomic_read(&dp_soc->ipa_mapped))
dp_ipa_handle_rx_buf_smmu_mapping(dp_soc, (qdf_nbuf_t)(
(nbuf_frag_info_t->virt_addr).nbuf),
rx_desc_pool->buf_size,
@@ -3160,6 +3161,7 @@ dp_pdev_rx_buffers_attach(struct dp_soc *dp_soc, uint32_t mac_id,
desc_list->rx_desc.cookie,
rx_desc_pool->owner);
if (qdf_atomic_read(&dp_soc->ipa_mapped))
dp_ipa_handle_rx_buf_smmu_mapping(
dp_soc, nbuf,
rx_desc_pool->buf_size, true,

View File

@@ -2742,6 +2742,7 @@ void dp_rx_nbuf_unmap(struct dp_soc *soc,
QDF_NBUF_CB_PADDR(rx_desc->nbuf),
rx_desc_pool->buf_size);
if (qdf_atomic_read(&soc->ipa_mapped))
dp_ipa_handle_rx_buf_smmu_mapping(soc, rx_desc->nbuf,
rx_desc_pool->buf_size,
false, __func__, __LINE__);
@@ -2760,7 +2761,9 @@ void dp_rx_nbuf_unmap_pool(struct dp_soc *soc,
{
dp_audio_smmu_unmap(soc->osdev, QDF_NBUF_CB_PADDR(nbuf),
rx_desc_pool->buf_size);
dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, rx_desc_pool->buf_size,
if (qdf_atomic_read(&soc->ipa_mapped))
dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf,
rx_desc_pool->buf_size,
false, __func__, __LINE__);
qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf, QDF_DMA_FROM_DEVICE,
rx_desc_pool->buf_size);

View File

@@ -1307,8 +1307,10 @@ static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_txrx_peer *txrx_peer,
return QDF_STATUS_E_FAILURE;
}
dp_ipa_handle_rx_buf_smmu_mapping(soc, head, rx_desc_pool->buf_size,
true, __func__, __LINE__);
if (qdf_atomic_read(&soc->ipa_mapped))
dp_ipa_handle_rx_buf_smmu_mapping(soc, head,
rx_desc_pool->buf_size, true,
__func__, __LINE__);
dp_audio_smmu_map(soc->osdev,
qdf_mem_paddr_from_dmaaddr(soc->osdev,

View File

@@ -193,9 +193,13 @@ static void dp_rx_desc_nbuf_cleanup(struct dp_soc *soc,
QDF_NBUF_CB_PADDR(nbuf),
buf_size);
if (dp_ipa_handle_rx_buf_smmu_mapping(soc, nbuf, buf_size,
false, __func__, __LINE__))
if (qdf_atomic_read(&soc->ipa_mapped)) {
if (dp_ipa_handle_rx_buf_smmu_mapping(
soc, nbuf, buf_size,
false, __func__,
__LINE__))
dp_info_rl("Unable to unmap nbuf: %pK", nbuf);
}
qdf_nbuf_unmap_nbytes_single(soc->osdev, nbuf,
QDF_DMA_BIDIRECTIONAL, buf_size);
dp_rx_nbuf_free(nbuf);

View File

@@ -2912,6 +2912,7 @@ struct dp_soc {
struct htt_t2h_stats htt_stats;
void *external_txrx_handle; /* External data path handle */
qdf_atomic_t ipa_mapped;
#ifdef IPA_OFFLOAD
struct ipa_dp_tx_rsc ipa_uc_tx_rsc;
#ifdef IPA_WDI3_TX_TWO_PIPES

View File

@@ -123,6 +123,8 @@
* @WLAN_IPA_UC_OPCODE_UC_READY: IPA UC ready indication
* @WLAN_IPA_FILTER_RSV_NOTIFY: OPT WIFI DP filter reserve notification
* @WLAN_IPA_FILTER_REL_NOTIFY: OPT WIFI DP filter release notification
* @WLAN_IPA_SMMU_MAP: IPA SMMU map call
* @WLAN_IPA_SMMU_UNMAP: IPA SMMU unmap call
* @WLAN_IPA_UC_OPCODE_MAX: IPA UC max operation code
*/
enum wlan_ipa_uc_op_code {
@@ -139,6 +141,8 @@ enum wlan_ipa_uc_op_code {
WLAN_IPA_UC_OPCODE_UC_READY = 8,
WLAN_IPA_FILTER_RSV_NOTIFY = 9,
WLAN_IPA_FILTER_REL_NOTIFY = 10,
WLAN_IPA_SMMU_MAP = 11,
WLAN_IPA_SMMU_UNMAP = 12,
/* keep this last */
WLAN_IPA_UC_OPCODE_MAX
};

View File

@@ -4353,6 +4353,8 @@ QDF_STATUS wlan_ipa_setup(struct wlan_ipa_priv *ipa_ctx,
qdf_mutex_create(&ipa_ctx->ipa_lock);
qdf_atomic_init(&ipa_ctx->deinit_in_prog);
cdp_ipa_set_smmu_mapped(ipa_ctx->dp_soc, 0);
status = wlan_ipa_wdi_setup_rm(ipa_ctx);
if (status != QDF_STATUS_SUCCESS)
goto fail_setup_rm;
@@ -4765,6 +4767,20 @@ static void wlan_ipa_uc_op_cb(struct op_msg_type *op_msg,
qdf_ipa_wdi_opt_dpath_notify_flt_rlsd_per_inst(ipa_ctx->hdl,
msg->rsvd);
qdf_mutex_release(&ipa_ctx->ipa_lock);
} else if (msg->op_code == WLAN_IPA_SMMU_MAP) {
ipa_info("opt_dp: IPA smmu pool map");
qdf_mutex_acquire(&ipa_ctx->ipa_lock);
cdp_ipa_rx_buf_smmu_pool_mapping(ipa_ctx->dp_soc,
ipa_ctx->dp_pdev_id, true,
__func__, __LINE__);
qdf_mutex_release(&ipa_ctx->ipa_lock);
} else if (msg->op_code == WLAN_IPA_SMMU_UNMAP) {
ipa_info("opt_dp: IPA smmu pool unmap");
qdf_mutex_acquire(&ipa_ctx->ipa_lock);
cdp_ipa_rx_buf_smmu_pool_mapping(ipa_ctx->dp_soc,
ipa_ctx->dp_pdev_id,
false, __func__, __LINE__);
qdf_mutex_release(&ipa_ctx->ipa_lock);
} else if (wlan_ipa_uc_op_metering(ipa_ctx, op_msg)) {
ipa_err("Invalid message: op_code=%d, reason=%d",
msg->op_code, ipa_ctx->stat_req_reason);
@@ -5167,16 +5183,30 @@ void wlan_ipa_flush_pending_vdev_events(struct wlan_ipa_priv *ipa_ctx,
/**
 * wlan_ipa_wdi_opt_dpath_notify_flt_rsvd() - Notify IPA of OPT_DP filter
 *                                            reserve result
 * @response: true when the filter reservation succeeded
 *
 * On success, schedules a WLAN_IPA_SMMU_MAP work item so the Rx pool is
 * mapped to IPA, and records the mapped state via cdp_ipa_set_smmu_mapped().
 * In all cases, schedules a WLAN_IPA_FILTER_RSV_NOTIFY work item carrying
 * @response back to IPA.
 *
 * Fix: the previous code allocated smmu_msg unconditionally but only
 * handed it to the work queue when @response was true, leaking it on
 * every failed reservation. Allocate it only when it will be consumed.
 */
void wlan_ipa_wdi_opt_dpath_notify_flt_rsvd(bool response)
{
	struct wlan_ipa_priv *ipa_ctx = gp_ipa;
	struct op_msg_type *smmu_msg;
	struct op_msg_type *notify_msg;
	struct uc_op_work_struct *uc_op_work;

	if (response) {
		smmu_msg = qdf_mem_malloc(sizeof(*smmu_msg));
		if (!smmu_msg)
			return;

		smmu_msg->op_code = WLAN_IPA_SMMU_MAP;
		uc_op_work = &ipa_ctx->uc_op_work[WLAN_IPA_SMMU_MAP];
		uc_op_work->msg = smmu_msg;
		qdf_sched_work(0, &uc_op_work->work);
		cdp_ipa_set_smmu_mapped(ipa_ctx->dp_soc, 1);
	}

	notify_msg = qdf_mem_malloc(sizeof(*notify_msg));
	if (!notify_msg)
		return;

	notify_msg->op_code = WLAN_IPA_FILTER_RSV_NOTIFY;
	notify_msg->rsvd = response;
	uc_op_work = &ipa_ctx->uc_op_work[WLAN_IPA_FILTER_RSV_NOTIFY];
	uc_op_work->msg = notify_msg;
	qdf_sched_work(0, &uc_op_work->work);
}
@@ -5467,10 +5497,14 @@ void wlan_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt, int flt1_rslt)
{
struct wifi_dp_flt_setup *dp_flt_params = NULL;
struct wlan_ipa_priv *ipa_ctx = gp_ipa;
struct op_msg_type *msg;
struct wlan_objmgr_pdev *pdev;
struct op_msg_type *smmu_msg;
struct op_msg_type *notify_msg;
struct uc_op_work_struct *uc_op_work;
bool result = false;
bool val = false;
pdev = ipa_ctx->pdev;
dp_flt_params = &(ipa_ctx->dp_cce_super_rule_flt_param);
if ((dp_flt_params->flt_addr_params[0].ipa_flt_in_use == true &&
@@ -5484,14 +5518,31 @@ void wlan_ipa_wdi_opt_dpath_notify_flt_rlsd(int flt0_rslt, int flt1_rslt)
result = true;
}
msg = qdf_mem_malloc(sizeof(*msg));
if (!msg)
smmu_msg = qdf_mem_malloc(sizeof(*smmu_msg));
if (!smmu_msg)
return;
msg->op_code = WLAN_IPA_FILTER_REL_NOTIFY;
msg->rsvd = result;
uc_op_work = &ipa_ctx->uc_op_work[WLAN_IPA_FILTER_REL_NOTIFY];
uc_op_work->msg = msg;
val = cdp_ipa_get_smmu_mapped(ipa_ctx->dp_soc);
if (val) {
smmu_msg->op_code = WLAN_IPA_SMMU_UNMAP;
uc_op_work = &ipa_ctx->uc_op_work[WLAN_IPA_SMMU_UNMAP];
uc_op_work->msg = smmu_msg;
qdf_sched_work(0, &uc_op_work->work);
cdp_ipa_set_smmu_mapped(ipa_ctx->dp_soc, 0);
} else {
ipa_err("IPA SMMU not mapped!!");
}
notify_msg = qdf_mem_malloc(sizeof(*notify_msg));
if (!notify_msg)
return;
notify_msg->op_code = WLAN_IPA_FILTER_REL_NOTIFY;
notify_msg->rsvd = result;
uc_op_work = &ipa_ctx->uc_op_work[WLAN_IPA_FILTER_REL_NOTIFY];
uc_op_work->msg = notify_msg;
qdf_sched_work(0, &uc_op_work->work);
qdf_wake_lock_release(&ipa_ctx->opt_dp_wake_lock,
WIFI_POWER_EVENT_WAKELOCK_OPT_WIFI_DP);
ipa_info("opt_dp: Wakelock released");