qcacmn: Add frame vdev id check in tx

Drop the frame in the Tx path if it is not destined for the VAP to
which the client is connected.
Enable this check only for AP mode.

Change-Id: I33d7eb79267490bdb8697e4e45f789e9f6279f0e
CRs-Fixed: 2756304
Author: Ankit Kumar
Date: 2020-08-14 16:48:28 +05:30
Committed by: snandini
Parent: c886b4c527
Commit: bdddeb7c22
6 changed files with 173 additions and 7 deletions
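
In essence, the new per-packet check lets an AP vdev transmit a unicast frame only when the destination client's AST entry resolves on that same vdev; multicast and broadcast frames always pass. A minimal standalone sketch of that rule follows (the types and helpers here, such as model_ast_entry and model_ast_find_by_vdevid, are illustrative stand-ins, not the driver's AST code):

/* Standalone model of the drop rule (not driver code): a unicast frame is
 * allowed only if its destination resolves to an AST entry learned on the
 * same vdev; multicast/broadcast frames always pass. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct model_ast_entry {
    uint8_t mac[6];
    uint8_t vdev_id;
};

/* Toy AST table: one client learned on vdev 0, one on vdev 1. */
static const struct model_ast_entry ast_table[] = {
    { {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, 0 },
    { {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee}, 1 },
};

static bool is_mcast_or_bcast(const uint8_t *da)
{
    return (da[0] & 0x01) != 0;    /* group bit also covers broadcast */
}

static const struct model_ast_entry *
model_ast_find_by_vdevid(const uint8_t *da, uint8_t vdev_id)
{
    size_t i;

    for (i = 0; i < sizeof(ast_table) / sizeof(ast_table[0]); i++)
        if (!memcmp(ast_table[i].mac, da, 6) &&
            ast_table[i].vdev_id == vdev_id)
            return &ast_table[i];
    return NULL;
}

/* Mirrors the intent of dp_tx_per_pkt_vdev_id_check(): true = transmit. */
static bool model_vdev_id_check(const uint8_t *da, uint8_t tx_vdev_id)
{
    if (is_mcast_or_bcast(da))
        return true;
    return model_ast_find_by_vdevid(da, tx_vdev_id) != NULL;
}

int main(void)
{
    const uint8_t client_on_vdev1[6] = {0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee};

    printf("send on vdev 1: %s\n",
           model_vdev_id_check(client_on_vdev1, 1) ? "pass" : "drop");
    printf("send on vdev 0: %s\n",
           model_vdev_id_check(client_on_vdev1, 0) ? "pass" : "drop");
    return 0;
}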


@@ -1187,6 +1187,7 @@ struct cdp_rx_stats {
  * @enqueue_fail: hw enqueue fail
  * @dma_error: dma fail
  * @res_full: Resource Full: Congestion Control
+ * @fail_per_pkt_vdev_id_check: Per pkt vdev id check
  * @exception_fw: packets sent to fw
  * @completion_fw: packets completions received from fw
  * @cce_classified: Number of packets classified by CCE
@@ -1244,6 +1245,7 @@ struct cdp_tx_ingress_stats {
         uint32_t res_full;
         /* headroom insufficient */
         uint32_t headroom_insufficient;
+        uint32_t fail_per_pkt_vdev_id_check;
     } dropped;

     /* Mesh packets info */


@@ -730,6 +730,7 @@ static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
     DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.dma_error);
     DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.ring_full);
     DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.enqueue_fail);
+    DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.fail_per_pkt_vdev_id_check);
     DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.desc_na.num);
     DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.res_full);
     DP_STATS_AGGR(tgtobj, srcobj, tx_i.dropped.headroom_insufficient);
@@ -743,6 +744,7 @@ static inline void dp_update_pdev_ingress_stats(struct dp_pdev *tgtobj,
         tgtobj->stats.tx_i.dropped.dma_error +
         tgtobj->stats.tx_i.dropped.ring_full +
         tgtobj->stats.tx_i.dropped.enqueue_fail +
+        tgtobj->stats.tx_i.dropped.fail_per_pkt_vdev_id_check +
         tgtobj->stats.tx_i.dropped.desc_na.num +
         tgtobj->stats.tx_i.dropped.res_full;


@@ -5362,6 +5362,38 @@ fail0:
     return QDF_STATUS_E_FAILURE;
 }

+/**
+ * dp_vdev_register_tx_handler() - Register Tx handler
+ * @vdev: struct dp_vdev *
+ * @soc: struct dp_soc *
+ * @txrx_ops: struct ol_txrx_ops *
+ */
+static inline void dp_vdev_register_tx_handler(struct dp_vdev *vdev,
+                                               struct dp_soc *soc,
+                                               struct ol_txrx_ops *txrx_ops)
+{
+    /* Enable vdev_id check only for ap, if flag is enabled */
+    if (vdev->mesh_vdev)
+        txrx_ops->tx.tx = dp_tx_send_mesh;
+    else if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
+             (vdev->opmode == wlan_op_mode_ap))
+        txrx_ops->tx.tx = dp_tx_send_vdev_id_check;
+    else
+        txrx_ops->tx.tx = dp_tx_send;
+
+    /* Avoid check in regular exception path */
+    if ((wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx)) &&
+        (vdev->opmode == wlan_op_mode_ap))
+        txrx_ops->tx.tx_exception = dp_tx_send_exception_vdev_id_check;
+    else
+        txrx_ops->tx.tx_exception = dp_tx_send_exception;
+
+    dp_alert("Configure tx_vdev_id_chk_handler Feature Flag: %d and mode:%d for vdev_id:%d",
+             wlan_cfg_is_tx_per_pkt_vdev_id_check_enabled(soc->wlan_cfg_ctx),
+             vdev->opmode, vdev->vdev_id);
+}
+
 /**
  * dp_vdev_register_wifi3() - Register VDEV operations from osif layer
  * @soc: Datapath soc handle
@@ -5406,13 +5438,7 @@ static QDF_STATUS dp_vdev_register_wifi3(struct cdp_soc_t *soc_hdl,
 #endif
     vdev->me_convert = txrx_ops->me_convert;

-    /* TODO: Enable the following once Tx code is integrated */
-    if (vdev->mesh_vdev)
-        txrx_ops->tx.tx = dp_tx_send_mesh;
-    else
-        txrx_ops->tx.tx = dp_tx_send;
-
-    txrx_ops->tx.tx_exception = dp_tx_send_exception;
+    dp_vdev_register_tx_handler(vdev, soc, txrx_ops);

     QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
               "DP Vdev Register success");


@@ -5894,6 +5894,8 @@ void dp_txrx_path_stats(struct dp_soc *soc)
                        pdev->stats.tx_i.dropped.ring_full);
         DP_PRINT_STATS("Enqueue fail: %u",
                        pdev->stats.tx_i.dropped.enqueue_fail);
+        DP_PRINT_STATS("Pkt dropped in vdev-id check: %u",
+                       pdev->stats.tx_i.dropped.fail_per_pkt_vdev_id_check);
         DP_PRINT_STATS("DMA Error: %u",
                        pdev->stats.tx_i.dropped.dma_error);
@@ -6135,6 +6137,8 @@ dp_print_pdev_tx_stats(struct dp_pdev *pdev)
                pdev->stats.tx_i.dropped.desc_na.num);
     DP_PRINT_STATS(" HW enqueue failed= %d",
                    pdev->stats.tx_i.dropped.enqueue_fail);
+    DP_PRINT_STATS(" Pkt dropped in vdev-id check= %d",
+                   pdev->stats.tx_i.dropped.fail_per_pkt_vdev_id_check);
     DP_PRINT_STATS(" Resources Full = %d",
                    pdev->stats.tx_i.dropped.res_full);
     DP_PRINT_STATS(" FW removed Pkts = %u",


@@ -2365,6 +2365,42 @@ static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
     return true;
 }

+/**
+ * dp_tx_per_pkt_vdev_id_check() - vdev id check for frame
+ * @nbuf: qdf_nbuf_t
+ * @vdev: struct dp_vdev *
+ *
+ * Allow the packet for processing only if it is destined for a peer client
+ * connected to the same vap. Drop the packet if the client is connected to
+ * a different vap.
+ *
+ * Return: QDF_STATUS
+ */
+static inline QDF_STATUS
+dp_tx_per_pkt_vdev_id_check(qdf_nbuf_t nbuf, struct dp_vdev *vdev)
+{
+    struct dp_ast_entry *dst_ast_entry = NULL;
+    qdf_ether_header_t *eh = (qdf_ether_header_t *)qdf_nbuf_data(nbuf);
+
+    if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost) ||
+        DP_FRAME_IS_BROADCAST((eh)->ether_dhost))
+        return QDF_STATUS_SUCCESS;
+
+    qdf_spin_lock_bh(&vdev->pdev->soc->ast_lock);
+    dst_ast_entry = dp_peer_ast_hash_find_by_vdevid(vdev->pdev->soc,
+                                                    eh->ether_dhost,
+                                                    vdev->vdev_id);
+
+    /* If there is no ast entry, return failure */
+    if (qdf_unlikely(!dst_ast_entry)) {
+        qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
+        return QDF_STATUS_E_FAILURE;
+    }
+    qdf_spin_unlock_bh(&vdev->pdev->soc->ast_lock);
+
+    return QDF_STATUS_SUCCESS;
+}
+
 /**
  * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
  * @soc: DP soc handle
@@ -2489,6 +2525,50 @@ fail:
     return nbuf;
 }

+/**
+ * dp_tx_send_exception_vdev_id_check() - Transmit a frame on a given VAP
+ *  in exception path, in a special case to avoid the regular exception
+ *  path check.
+ * @soc_hdl: DP soc handle
+ * @vdev_id: id of DP vdev handle
+ * @nbuf: skb
+ * @tx_exc_metadata: Handle that holds exception path meta data
+ *
+ * Entry point for Core Tx layer (DP_TX) invoked from
+ * hard_start_xmit in OSIF/HDD to transmit frames through fw
+ *
+ * Return: NULL on success,
+ *         nbuf when it fails to send
+ */
+qdf_nbuf_t
+dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc_hdl,
+                                   uint8_t vdev_id, qdf_nbuf_t nbuf,
+                                   struct cdp_tx_exception_metadata *tx_exc_metadata)
+{
+    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
+    struct dp_vdev *vdev = dp_vdev_get_ref_by_id(soc, vdev_id,
+                                                 DP_MOD_ID_TX_EXCEPTION);
+
+    if (qdf_unlikely(!vdev))
+        goto fail;
+
+    if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
+                     == QDF_STATUS_E_FAILURE)) {
+        DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
+        goto fail;
+    }
+
+    /* Drop the ref count here; it is taken again inside dp_tx_send_exception */
+    dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
+
+    return dp_tx_send_exception(soc_hdl, vdev_id, nbuf, tx_exc_metadata);
+
+fail:
+    if (vdev)
+        dp_vdev_unref_delete(soc, vdev, DP_MOD_ID_TX_EXCEPTION);
+    dp_verbose_debug("pkt send failed");
+    return nbuf;
+}
+
 /**
  * dp_tx_send_mesh() - Transmit mesh frame on a given VAP
  * @soc: DP soc handle
@@ -2851,6 +2931,49 @@ send_multiple:
     return nbuf;
 }

+/**
+ * dp_tx_send_vdev_id_check() - Transmit a frame on a given VAP in a special
+ *  case to avoid the check in the per-pkt path.
+ * @soc_hdl: DP soc handle
+ * @vdev_id: id of DP vdev handle
+ * @nbuf: skb
+ *
+ * Entry point for Core Tx layer (DP_TX) invoked from
+ * hard_start_xmit in OSIF/HDD to transmit packets through dp_tx_send
+ * with a special condition to avoid the per pkt check in dp_tx_send
+ *
+ * Return: NULL on success,
+ *         nbuf when it fails to send
+ */
+qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc_hdl,
+                                    uint8_t vdev_id, qdf_nbuf_t nbuf)
+{
+    struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
+    struct dp_vdev *vdev = NULL;
+
+    if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
+        return nbuf;
+
+    /*
+     * dp_vdev_get_ref_by_id does an atomic operation; avoid using
+     * it in this per-packet path.
+     *
+     * In this path the vdev memory is already protected by the
+     * netdev tx lock.
+     */
+    vdev = soc->vdev_id_map[vdev_id];
+    if (qdf_unlikely(!vdev))
+        return nbuf;
+
+    if (qdf_unlikely(dp_tx_per_pkt_vdev_id_check(nbuf, vdev)
+                     == QDF_STATUS_E_FAILURE)) {
+        DP_STATS_INC(vdev, tx_i.dropped.fail_per_pkt_vdev_id_check, 1);
+        return nbuf;
+    }
+
+    return dp_tx_send(soc_hdl, vdev_id, nbuf);
+}
+
 /**
  * dp_tx_reinject_handler() - Tx Reinject Handler
  * @soc: datapath soc handle
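
For context on how these entry points are reached: whichever dp_tx_send_* variant dp_vdev_register_tx_handler() installs as txrx_ops->tx.tx is what the OSIF/HDD hard_start_xmit path invokes per packet. A hypothetical caller-side sketch, assuming illustrative OSIF glue (struct osif_dev, tx_fn and dummy_tx are stand-ins, not real driver symbols):

/* Hypothetical OSIF-side transmit wrapper; field and type names are
 * illustrative only, and qdf_nbuf_t is modelled as an opaque pointer. */
#include <stdint.h>
#include <stdio.h>

typedef void *qdf_nbuf_t;    /* stand-in for the real nbuf/skb type */

struct osif_dev {
    void *dp_soc;        /* cdp soc handle */
    uint8_t vdev_id;
    /* matches the shape of txrx_ops->tx.tx registered by the DP layer */
    qdf_nbuf_t (*tx_fn)(void *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);
};

/* Called from hard_start_xmit; a non-NULL return means the frame was not
 * consumed (e.g. the vdev-id check dropped it) and must be freed here. */
static int osif_xmit(struct osif_dev *dev, qdf_nbuf_t nbuf)
{
    qdf_nbuf_t ret = dev->tx_fn(dev->dp_soc, dev->vdev_id, nbuf);

    return ret ? -1 : 0;
}

/* Dummy handler standing in for dp_tx_send_vdev_id_check() in this sketch. */
static qdf_nbuf_t dummy_tx(void *soc, uint8_t vdev_id, qdf_nbuf_t nbuf)
{
    (void)soc;
    return vdev_id == 0 ? NULL : nbuf;    /* pretend vdev 0 always passes */
}

int main(void)
{
    struct osif_dev dev = { .dp_soc = NULL, .vdev_id = 0, .tx_fn = dummy_tx };
    char frame[64] = {0};

    printf("xmit result: %d\n", osif_xmit(&dev, frame));
    return 0;
}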


@@ -228,9 +228,18 @@ QDF_STATUS dp_tx_pdev_init(struct dp_pdev *pdev);
 qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc, uint8_t vdev_id, qdf_nbuf_t nbuf);

+qdf_nbuf_t dp_tx_send_vdev_id_check(struct cdp_soc_t *soc, uint8_t vdev_id,
+                                    qdf_nbuf_t nbuf);
+
 qdf_nbuf_t dp_tx_send_exception(struct cdp_soc_t *soc, uint8_t vdev_id,
                                 qdf_nbuf_t nbuf,
                                 struct cdp_tx_exception_metadata *tx_exc);
+
+qdf_nbuf_t dp_tx_send_exception_vdev_id_check(struct cdp_soc_t *soc,
+                                              uint8_t vdev_id,
+                                              qdf_nbuf_t nbuf,
+                                              struct cdp_tx_exception_metadata *tx_exc);
+
 qdf_nbuf_t dp_tx_send_mesh(struct cdp_soc_t *soc, uint8_t vdev_id,
                            qdf_nbuf_t nbuf);

 qdf_nbuf_t