qcacmn: Properly handle RX REO reinject packets
Currently, for the REO reinject path the first fragment sits in the linear part of the skb buffer while the remaining fragments are appended to the skb as non-linear paged data. In addition, a REO reinject buffer carries no l3_header_padding, i.e. the ethernet header immediately follows struct rx_pkt_tlvs.

This implementation breaks when the WLAN IPA path is enabled. First, IPA assumes data buffers are linear, so the skb must be linearized before it is reinjected into REO. Second, when WLAN sets up the IPA pipe connection, the RX packet offset is hard-coded to RX_PKT_TLVS_LEN + L3_HEADER_PADDING, so L3_HEADER_PADDING bytes of padding must be inserted after struct rx_pkt_tlvs and before the ethernet header.

Change-Id: I36d41bc91d28c2580775a1d2e431e139ff02e19e
CRs-Fixed: 2469315
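For illustration, a minimal sketch (not part of the change) of the buffer layout the patch below produces; dp_ipa_rx_pkt_offset_example() is a hypothetical helper name, while RX_PKT_TLVS_LEN and L3_HEADER_PADDING are the macros the patch itself references:

/*
 * Illustrative sketch only. After dp_ipa_handle_rx_reo_reinject() the
 * reinjected buffer is a single linear region laid out as
 *
 *   | rx_pkt_tlvs (RX_PKT_TLVS_LEN) | L3_HEADER_PADDING | eth header | payload |
 *
 * which matches the RX packet offset hard-coded at IPA pipe setup.
 */
static inline unsigned int dp_ipa_rx_pkt_offset_example(void)
{
        /* Hypothetical helper: the offset the IPA RX pipe assumes. */
        return RX_PKT_TLVS_LEN + L3_HEADER_PADDING;
}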
@@ -1759,4 +1759,56 @@ bool dp_ipa_is_mdm_platform(void)
 }
 #endif
+
+/**
+ * dp_ipa_handle_rx_reo_reinject - Handle RX REO reinject skb buffer
+ * @soc: DP SoC handle
+ * @nbuf: skb to be reinjected
+ *
+ * Return: nbuf on success, otherwise NULL
+ */
+qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf)
+{
+        uint8_t *rx_pkt_tlvs;
+        bool reo_remapped;
+
+        if (!wlan_cfg_is_ipa_enabled(soc->wlan_cfg_ctx))
+                return nbuf;
+
+        qdf_spin_lock_bh(&soc->remap_lock);
+        reo_remapped = soc->reo_remapped;
+        qdf_spin_unlock_bh(&soc->remap_lock);
+
+        /* WLAN IPA is run-time disabled */
+        if (!reo_remapped)
+                return nbuf;
+
+        /* Linearize the skb since IPA assumes linear buffers */
+        if (qdf_likely(qdf_nbuf_is_frag(nbuf))) {
+                if (qdf_nbuf_linearize(nbuf)) {
+                        dp_err_rl("nbuf linearize failed");
+                        return NULL;
+                }
+        }
+
+        rx_pkt_tlvs = qdf_mem_malloc(RX_PKT_TLVS_LEN);
+        if (!rx_pkt_tlvs) {
+                dp_err_rl("rx_pkt_tlvs alloc failed");
+                return NULL;
+        }
+
+        qdf_mem_copy(rx_pkt_tlvs, qdf_nbuf_data(nbuf), RX_PKT_TLVS_LEN);
+
+        /* Pad L3_HEADER_PADDING before ethhdr and after rx_pkt_tlvs */
+        qdf_nbuf_push_head(nbuf, L3_HEADER_PADDING);
+
+        qdf_mem_copy(qdf_nbuf_data(nbuf), rx_pkt_tlvs, RX_PKT_TLVS_LEN);
+
+        /* L3_HEADER_PADDING is not accounted for in the real skb length */
+        qdf_nbuf_set_len(nbuf, qdf_nbuf_len(nbuf) - L3_HEADER_PADDING);
+
+        qdf_mem_free(rx_pkt_tlvs);
+
+        return nbuf;
+}
 #endif
@@ -113,6 +113,8 @@ bool dp_reo_remap_config(struct dp_soc *soc, uint32_t *remap1,
                         uint32_t *remap2);
 bool dp_ipa_is_mdm_platform(void);
+
+qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc, qdf_nbuf_t nbuf);
 #else
 static inline int dp_ipa_uc_detach(struct dp_soc *soc, struct dp_pdev *pdev)
 {
@@ -136,5 +138,12 @@ static inline QDF_STATUS dp_ipa_handle_rx_buf_smmu_mapping(struct dp_soc *soc,
 {
         return QDF_STATUS_SUCCESS;
 }
+
+static inline qdf_nbuf_t dp_ipa_handle_rx_reo_reinject(struct dp_soc *soc,
+                                                        qdf_nbuf_t nbuf)
+{
+        return nbuf;
+}
+
 #endif
 #endif /* _DP_IPA_H_ */
@@ -1006,8 +1006,8 @@ dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc,
  *
  * Returns: QDF_STATUS
  */
 static QDF_STATUS dp_rx_defrag_reo_reinject(struct dp_peer *peer,
-                                        unsigned tid, qdf_nbuf_t head)
+                                        unsigned int tid, qdf_nbuf_t head)
 {
         struct dp_pdev *pdev = peer->vdev->pdev;
         struct dp_soc *soc = pdev->soc;
@@ -1025,6 +1025,12 @@ dp_rx_defrag_nwifi_to_8023(struct dp_soc *soc,
         hal_ring_handle_t hal_srng = soc->reo_reinject_ring.hal_srng;
         struct dp_rx_desc *rx_desc = peer->rx_tid[tid].head_frag_desc;
 
+        head = dp_ipa_handle_rx_reo_reinject(soc, head);
+        if (qdf_unlikely(!head)) {
+                dp_err_rl("IPA RX REO reinject failed");
+                return QDF_STATUS_E_FAILURE;
+        }
+
         ent_ring_desc = hal_srng_src_get_next(soc->hal_soc, hal_srng);
         if (!ent_ring_desc) {
                 QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,