Merge "qcacmn: Map IPA tx buffers as part of IPA ready callback"

Author: Linux Build Service Account
Date: 2020-09-02 16:36:34 -07:00
Committed by: Gerrit - the friendly Code Review server
Commit: d1c20a8502
5 changed files with 98 additions and 9 deletions
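The change defers SMMU mapping of the preallocated IPA Tx buffer pool until the IPA driver reports ready, instead of mapping at attach time. A minimal sketch of the intended flow, assuming the host registers a ready callback through qdf_ipa_register_ipa_ready_cb(); the callback and context names below are hypothetical and not part of this commit:

/* Hypothetical host-side IPA ready callback; illustrates when the
 * new cdp_ipa_tx_buf_smmu_mapping() op is meant to run.
 */
static void host_ipa_ready_cb(void *user_data)
{
	struct host_ipa_ctx *ctx = user_data;	/* assumed context struct */

	/* IPA is now up: create SMMU mappings for the Tx buffer pool */
	if (cdp_ipa_tx_buf_smmu_mapping(ctx->cdp_soc, ctx->pdev_id) !=
	    QDF_STATUS_SUCCESS)
		qdf_err("IPA Tx buf SMMU mapping failed");
}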

View file

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2016-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2020 The Linux Foundation. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -609,6 +609,30 @@ cdp_ipa_rx_intrabss_fwd(ol_txrx_soc_handle soc, uint8_t vdev_id,
 	return false;
 }
+
+/**
+ * cdp_ipa_tx_buf_smmu_mapping() - Create SMMU mappings for Tx
+ * buffers allocated to IPA
+ * @soc: data path soc handle
+ * @pdev_id: device instance id
+ *
+ * Create SMMU mappings for Tx buffers allocated to IPA
+ *
+ * Return: QDF_STATUS_SUCCESS
+ */
+static inline QDF_STATUS
+cdp_ipa_tx_buf_smmu_mapping(ol_txrx_soc_handle soc, uint8_t pdev_id)
+{
+	if (!soc || !soc->ops || !soc->ops->ipa_ops) {
+		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_FATAL,
+			  "%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (soc->ops->ipa_ops->ipa_tx_buf_smmu_mapping)
+		return soc->ops->ipa_ops->ipa_tx_buf_smmu_mapping(soc,
+								  pdev_id);
+
+	return QDF_STATUS_SUCCESS;
+}
+
 #endif /* IPA_OFFLOAD */
 #endif /* _CDP_TXRX_IPA_H_ */
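A hedged usage sketch of the new wrapper from a caller that already holds an ol_txrx_soc_handle; the pdev id value is illustrative:

	QDF_STATUS status;

	status = cdp_ipa_tx_buf_smmu_mapping(soc, 0 /* pdev_id */);
	if (QDF_IS_STATUS_ERROR(status))
		QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
			  "IPA Tx buf SMMU mapping failed");

The wrapper returns QDF_STATUS_E_FAILURE on a bad soc/ops pointer and QDF_STATUS_SUCCESS when the op is simply not registered, so callers need no extra NULL checks of their own.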

View file

@@ -1493,6 +1493,8 @@ struct cdp_throttle_ops {
  * @ipa_register_op_cb:
  * @ipa_get_stat:
  * @ipa_tx_data_frame:
+ * @ipa_tx_buf_smmu_mapping: Provide SMMU mappings for Tx
+ * buffers to IPA
  */
 struct cdp_ipa_ops {
 	QDF_STATUS (*ipa_get_resource)(struct cdp_soc_t *soc_hdl,
@@ -1556,6 +1558,8 @@ struct cdp_ipa_ops {
 				       uint32_t max_supported_bw_mbps);
 	bool (*ipa_rx_intrabss_fwd)(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 				    qdf_nbuf_t nbuf, bool *fwd_success);
+	QDF_STATUS (*ipa_tx_buf_smmu_mapping)(struct cdp_soc_t *soc_hdl,
+					      uint8_t pdev_id);
 };
 #endif

View file

@@ -98,6 +98,9 @@ static QDF_STATUS __dp_ipa_handle_buf_smmu_mapping(struct dp_soc *soc,
 {
 	qdf_mem_info_t mem_map_table = {0};
 
+	if (!qdf_ipa_is_ready())
+		return QDF_STATUS_SUCCESS;
+
 	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
 				 qdf_nbuf_get_frag_paddr(nbuf, 0),
 				 size);
@@ -155,6 +158,29 @@ QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
 	return __dp_ipa_handle_buf_smmu_mapping(soc, nbuf, size, create);
 }
 
+static QDF_STATUS __dp_ipa_tx_buf_smmu_mapping(
+	struct dp_soc *soc,
+	struct dp_pdev *pdev,
+	bool create)
+{
+	uint32_t index;
+	QDF_STATUS ret = QDF_STATUS_SUCCESS;
+	uint32_t tx_buffer_cnt = soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt;
+	qdf_nbuf_t nbuf;
+
+	for (index = 0; index < tx_buffer_cnt; index++) {
+		nbuf = (qdf_nbuf_t)
+			soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[index];
+		if (!nbuf)
+			continue;
+
+		ret = __dp_ipa_handle_buf_smmu_mapping(
+				soc, nbuf,
+				skb_end_pointer(nbuf) - nbuf->data,
+				create);
+	}
+
+	return ret;
+}
+
 #ifdef RX_DESC_MULTI_PAGE_ALLOC
 static QDF_STATUS dp_ipa_handle_rx_buf_pool_smmu_mapping(struct dp_soc *soc,
 							 struct dp_pdev *pdev,
@@ -268,6 +294,7 @@ static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
 	int idx;
 	qdf_nbuf_t nbuf;
 	struct dp_ipa_resources *ipa_res;
+	bool is_ipa_ready = qdf_ipa_is_ready();
 
 	for (idx = 0; idx < soc->ipa_uc_tx_rsc.alloc_tx_buf_cnt; idx++) {
 		nbuf = (qdf_nbuf_t)
@@ -275,7 +302,7 @@ static void dp_tx_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
 		if (!nbuf)
 			continue;
 
-		if (qdf_mem_smmu_s1_enabled(soc->osdev))
+		if (qdf_mem_smmu_s1_enabled(soc->osdev) && is_ipa_ready)
 			__dp_ipa_handle_buf_smmu_mapping(
 					soc, nbuf,
 					skb_end_pointer(nbuf) - nbuf->data,
@@ -424,12 +451,6 @@ static int dp_tx_ipa_uc_attach(struct dp_soc *soc, struct dp_pdev *pdev)
 		soc->ipa_uc_tx_rsc.tx_buf_pool_vaddr_unaligned[tx_buffer_count]
 			= (void *)nbuf;
-
-		if (qdf_mem_smmu_s1_enabled(soc->osdev))
-			__dp_ipa_handle_buf_smmu_mapping(
-					soc, nbuf,
-					skb_end_pointer(nbuf) - nbuf->data,
-					true);
 	}
 
 	hal_srng_access_end_unlocked(soc->hal_soc,
@@ -1967,4 +1988,26 @@ qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
 	return dp_ipa_frag_nbuf_linearize(soc, nbuf);
 }
 
+QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
+	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+{
+	QDF_STATUS ret;
+	struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
+	struct dp_pdev *pdev =
+		dp_get_pdev_from_soc_pdev_id_wifi3(soc, pdev_id);
+
+	if (!pdev) {
+		dp_err("%s invalid instance", __func__);
+		return QDF_STATUS_E_FAILURE;
+	}
+
+	if (!qdf_mem_smmu_s1_enabled(soc->osdev)) {
+		dp_debug("SMMU S1 disabled");
+		return QDF_STATUS_SUCCESS;
+	}
+
+	ret = __dp_ipa_tx_buf_smmu_mapping(soc, pdev, true);
+
+	return ret;
+}
+
 #endif
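For context, the per-buffer helper __dp_ipa_handle_buf_smmu_mapping() that the loop above relies on is only partially visible in this diff. Below is a sketch of the pattern it follows, assuming the qdf_ipa_wdi_create/release_smmu_mapping() wrappers are what sit underneath; the standalone function name here is hypothetical:

/* Sketch: build a one-entry mem map table for the nbuf's first
 * fragment and hand it to IPA for SMMU map or unmap.
 */
static QDF_STATUS map_one_buf(struct dp_soc *soc, qdf_nbuf_t nbuf,
			      uint32_t size, bool create)
{
	qdf_mem_info_t mem_map_table = {0};

	/* guard added by this change: do nothing until IPA is ready */
	if (!qdf_ipa_is_ready())
		return QDF_STATUS_SUCCESS;

	qdf_update_mem_map_table(soc->osdev, &mem_map_table,
				 qdf_nbuf_get_frag_paddr(nbuf, 0), size);

	if (create)
		qdf_ipa_wdi_create_smmu_mapping(1, &mem_map_table);
	else
		qdf_ipa_wdi_release_smmu_mapping(1, &mem_map_table);

	return QDF_STATUS_SUCCESS;
}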

View file

@@ -248,6 +248,17 @@ bool dp_ipa_is_mdm_platform(void);
 qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf);
+
+/**
+ * dp_ipa_tx_buf_smmu_mapping() - Create SMMU mappings for IPA
+ * allocated TX buffers
+ * @soc_hdl: handle to the soc
+ * @pdev_id: pdev id number, to get the handle
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
+	struct cdp_soc_t *soc_hdl, uint8_t pdev_id);
+
 #else
 static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
 {
@@ -279,5 +290,11 @@ static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc,
 	return nbuf;
 }
 
+static inline QDF_STATUS dp_ipa_tx_buf_smmu_mapping(
+	struct cdp_soc_t *soc_hdl, uint8_t pdev_id)
+{
+	return QDF_STATUS_SUCCESS;
+}
+
 #endif
 #endif /* _DP_IPA_H_ */

View file

@@ -11119,7 +11119,8 @@ static struct cdp_ipa_ops dp_ops_ipa = {
 	.ipa_enable_pipes = dp_ipa_enable_pipes,
 	.ipa_disable_pipes = dp_ipa_disable_pipes,
 	.ipa_set_perf_level = dp_ipa_set_perf_level,
-	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd
+	.ipa_rx_intrabss_fwd = dp_ipa_rx_intrabss_fwd,
+	.ipa_tx_buf_smmu_mapping = dp_ipa_tx_buf_smmu_mapping
 };
 #endif