qcacmn: Remove unnecessary checks in Tx path
1. Remove prints in the per-packet path
2. Add prefetch for skb shinfo
3. Avoid qdf_get_cpu() in DP tx

Change-Id: I86d196cc7865a2d5b3a5a6098f6f7d84397a60d0
CRs-Fixed: 3227568
This commit is contained in:

committed by:
Madan Koyyalamudi

parent
29073d2cf5
commit
bd0375c2de
@@ -637,11 +637,6 @@ dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
 	dp_tx_set_min_rates_for_critical_frames(soc, hal_tx_desc_cached,
 						tx_desc->nbuf);
 	dp_tx_desc_set_ktimestamp(vdev, tx_desc);
-	dp_verbose_debug("length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
-			 tx_desc->length,
-			 (tx_desc->flags & DP_TX_DESC_FLAG_FRAG),
-			 (uint64_t)tx_desc->dma_addr, tx_desc->pkt_offset,
-			 tx_desc->id);
 
 	hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, ring_id);
 
@@ -833,6 +833,19 @@ void dp_classify_critical_pkts(struct dp_soc *soc, struct dp_vdev *vdev,
 }
 #endif
 
+#ifdef QCA_OL_TX_MULTIQ_SUPPORT
+static inline
+void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
+{
+	qdf_nbuf_set_queue_mapping(nbuf, ring_id);
+}
+#else
+static inline
+void dp_rx_nbuf_queue_mapping_set(qdf_nbuf_t nbuf, uint8_t ring_id)
+{
+}
+#endif
+
 /*
  * dp_rx_intrabss_mcbc_fwd() - Does intrabss forward for mcast packets
  *
@@ -850,6 +863,7 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 {
 	uint16_t len;
 	qdf_nbuf_t nbuf_copy;
+	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
 
 	if (dp_rx_intrabss_eapol_drop_check(soc, ta_peer, rx_tlv_hdr,
 					    nbuf))
@@ -870,14 +884,14 @@ bool dp_rx_intrabss_mcbc_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 
 	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
 
 	qdf_mem_set(nbuf_copy->cb, 0x0, sizeof(nbuf_copy->cb));
 	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf_copy);
+
+	dp_rx_nbuf_queue_mapping_set(nbuf_copy, ring_id);
 	if (soc->arch_ops.dp_rx_intrabss_handle_nawds(soc, ta_peer, nbuf_copy,
 						      tid_stats))
 		return false;
 
-	/* set TX notify flag 0 to avoid unnecessary TX comp callback */
-	qdf_nbuf_tx_notify_comp_set(nbuf_copy, 0);
 	if (dp_tx_send((struct cdp_soc_t *)soc,
 		       ta_peer->vdev->vdev_id, nbuf_copy)) {
 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.fail, 1,
@@ -910,6 +924,7 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 			      struct cdp_tid_rx_stats *tid_stats)
 {
 	uint16_t len;
+	uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
 
 	len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
 
@@ -935,8 +950,10 @@ bool dp_rx_intrabss_ucast_fwd(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
 		}
 	}
 
 	qdf_mem_set(nbuf->cb, 0x0, sizeof(nbuf->cb));
 	dp_classify_critical_pkts(soc, ta_peer->vdev, nbuf);
+
+	dp_rx_nbuf_queue_mapping_set(nbuf, ring_id);
 	if (!dp_tx_send((struct cdp_soc_t *)soc,
 			tx_vdev_id, nbuf)) {
 		DP_PEER_PER_PKT_STATS_INC_PKT(ta_peer, rx.intra_bss.pkts, 1,
@@ -3327,15 +3327,12 @@ qdf_nbuf_t dp_tx_send(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
 	if (qdf_unlikely(!vdev))
 		return nbuf;
 
-	dp_verbose_debug("skb "QDF_MAC_ADDR_FMT,
-			 QDF_MAC_ADDR_REF(nbuf->data));
-
 	/*
 	 * Set Default Host TID value to invalid TID
 	 * (TID override disabled)
 	 */
 	msdu_info.tid = HTT_TX_EXT_TID_INVALID;
-	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
+	DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_headlen(nbuf));
 
 	if (qdf_unlikely(vdev->mesh_vdev)) {
 		qdf_nbuf_t nbuf_mesh = dp_tx_extract_mesh_meta_data(vdev, nbuf,
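Editor's note on the hunk above: the byte count fed to the tx_i.rcvd statistic switches from qdf_nbuf_len() to qdf_nbuf_headlen(). In the Linux qdf shim the latter reduces to skb_headlen() (skb->len - skb->data_len), while qdf_nbuf_len() additionally folds in extra Tx fragment length tracked in the skb cb, so the switch presumably drops that per-packet bookkeeping from the stats path. A small stand-alone model of the length relationship; struct skb_model and model_headlen() below are illustrative names, not qdf APIs:

#include <stdint.h>
#include <stdio.h>

/* Illustrative model of the two lengths involved; field names mirror
 * sk_buff, but this is not driver code. */
struct skb_model {
	uint32_t len;           /* total bytes: linear + paged */
	uint32_t data_len;      /* paged (non-linear) bytes only */
};

/* What skb_headlen() computes: the linear part only. */
static uint32_t model_headlen(const struct skb_model *skb)
{
	return skb->len - skb->data_len;
}

int main(void)
{
	struct skb_model skb = { .len = 1500, .data_len = 0 };

	/* For a linear frame the two lengths agree, so the cheaper
	 * headlen is enough for the tx_i.rcvd byte counter. */
	printf("len=%u headlen=%u\n", skb.len, model_headlen(&skb));
	return 0;
}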
@@ -4863,6 +4860,7 @@ void dp_tx_prefetch_next_nbuf_data(struct dp_tx_desc_s *next)
 		/* prefetch skb fields present in different cachelines */
 		qdf_prefetch(&nbuf->len);
 		qdf_prefetch(&nbuf->users);
+		qdf_prefetch(skb_end_pointer(nbuf));
 	}
 }
 #else
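Editor's note on the hunk above: skb_end_pointer() returns the end of the skb's linear data area, which is also where struct skb_shared_info lives (the kernel's skb_shinfo() is defined in terms of skb_end_pointer()), so the added prefetch warms the cache line holding the shared info before it is read later in descriptor processing; this corresponds to the "prefetch for skb shinfo" item in the commit message. A stand-alone sketch of the same layout idea using GCC/Clang __builtin_prefetch; shinfo_model, buf_model and prefetch_shinfo() are hypothetical stand-ins, not sk_buff or qdf code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical shared-info block placed right after the data area,
 * mirroring how struct skb_shared_info sits at skb_end_pointer(). */
struct shinfo_model {
	uint8_t  nr_frags;
	uint32_t dataref;
};

struct buf_model {
	uint8_t *head;          /* start of the linear data area */
	uint8_t *end;           /* end of data == start of shared info */
};

static struct shinfo_model *buf_shinfo(const struct buf_model *b)
{
	return (struct shinfo_model *)b->end;
}

/* Counterpart of the added qdf_prefetch(skb_end_pointer(nbuf)) line:
 * warm the cache line that holds the shared info before it is read. */
static void prefetch_shinfo(const struct buf_model *b)
{
	__builtin_prefetch(buf_shinfo(b));
}

int main(void)
{
	size_t data_sz = 2048;
	uint8_t *mem = malloc(data_sz + sizeof(struct shinfo_model));
	struct buf_model b;

	if (!mem)
		return 1;
	memset(mem, 0, data_sz + sizeof(struct shinfo_model));
	b.head = mem;
	b.end = mem + data_sz;

	prefetch_shinfo(&b);
	/* Later reads of the shared info now hit an already-warm line. */
	printf("nr_frags %u\n", buf_shinfo(&b)->nr_frags);
	free(mem);
	return 0;
}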
@@ -521,11 +521,7 @@ static inline void dp_tx_get_queue(struct dp_vdev *vdev,
 					       DP_TX_QUEUE_MASK;
 
 	queue->desc_pool_id = queue_offset;
-	queue->ring_id = qdf_get_cpu();
-
-	dp_tx_debug("pool_id:%d ring_id: %d",
-		    queue->desc_pool_id, queue->ring_id);
-
+	queue->ring_id = qdf_nbuf_get_queue_mapping(nbuf);
 }
 
 /*
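Editor's note on the hunk above: together with the Rx hunks earlier (which stamp QDF_NBUF_CB_RX_CTX_ID into the nbuf via dp_rx_nbuf_queue_mapping_set()) and the qdf_nbuf_set_queue_mapping() helpers added in the remaining hunks, this removes the per-packet current-CPU lookup from dp_tx_get_queue(): the ring id is taken from the queue mapping already recorded on the buffer. A minimal userspace model of that producer/consumer handoff; pkt_t, tx_queue_t and the two functions below are illustrative stand-ins, not the driver's dp/qdf structures:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for qdf_nbuf_t, keeping only the field this change relies on. */
typedef struct pkt {
	uint16_t queue_mapping;         /* stamped on Rx, consumed on Tx */
} pkt_t;

typedef struct tx_queue {
	uint8_t desc_pool_id;
	uint8_t ring_id;
} tx_queue_t;

/* Rx side: record the ring/CPU context that handled the frame
 * (the role dp_rx_nbuf_queue_mapping_set() plays in the diff). */
static void rx_stamp_queue_mapping(pkt_t *pkt, uint8_t ring_id)
{
	pkt->queue_mapping = ring_id;
}

/* Tx side: reuse the stamped value instead of querying the current CPU
 * (the role of the reworked dp_tx_get_queue()). */
static void tx_get_queue(const pkt_t *pkt, tx_queue_t *queue,
			 uint8_t queue_offset)
{
	queue->desc_pool_id = queue_offset;
	queue->ring_id = (uint8_t)pkt->queue_mapping;
}

int main(void)
{
	pkt_t pkt = { 0 };
	tx_queue_t q;

	rx_stamp_queue_mapping(&pkt, 2);        /* frame arrived on ring 2 */
	tx_get_queue(&pkt, &q, 1);
	printf("pool %u ring %u\n", q.desc_pool_id, q.ring_id);
	return 0;
}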
@@ -4258,6 +4258,12 @@ qdf_nbuf_get_queue_mapping(qdf_nbuf_t buf)
 	return __qdf_nbuf_get_queue_mapping(buf);
 }
 
+static inline void
+qdf_nbuf_set_queue_mapping(qdf_nbuf_t buf, uint16_t val)
+{
+	__qdf_nbuf_set_queue_mapping(buf, val);
+}
+
 static inline uint8_t *
 qdf_nbuf_get_priv_ptr(qdf_nbuf_t buf)
 {
@@ -2252,6 +2252,19 @@ __qdf_nbuf_get_queue_mapping(struct sk_buff *skb)
 	return skb->queue_mapping;
 }
 
+/**
+ * __qdf_nbuf_set_queue_mapping() - get the queue mapping set by linux kernel
+ *
+ * @buf: sk buff
+ * @val: queue_id
+ *
+ */
+static inline void
+__qdf_nbuf_set_queue_mapping(struct sk_buff *skb, uint16_t val)
+{
+	skb_set_queue_mapping(skb, val);
+}
+
 /**
  * __qdf_nbuf_set_timestamp() - set the timestamp for frame
  *