qcacmn: create qdf_nbuf_cb definition for win and mcc

WIN and MCC shared a common definition of qdf_nbuf_cb.
Considering that several fields are not common between
the two, it was decided to have two different definitions,
one for WIN and one for MCC. For WIN, the per-packet
cycles in the RX path are significantly reduced.

CRs-Fixed: 3257879
Change-Id: I5d3fa7c91592afb905702f2dca49972be89664e3
Author: Tallapragada Kalyan
Date: 2022-11-14 14:49:25 +05:30
Committed by: Madan Koyyalamudi
parent 28a0436d35
commit 44e4444e61
6 changed files with 1553 additions and 796 deletions
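The practical effect of the split is visible in the dp_be_rx header below, keyed off CONFIG_NBUF_AP_PLATFORM: WIN (AP platform) builds cache the raw REO descriptor words in nbuf->cb and unpack fields on demand, while MCC builds keep parsing fields out of the descriptor at reap time. A condensed sketch of the pattern (not the verbatim source; the full hunks follow):

/* Condensed illustration of the per-platform split in this commit */
#ifndef CONFIG_NBUF_AP_PLATFORM
/* MCC: peer id was already parsed into nbuf->cb at reap time */
static inline uint16_t dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
	return QDF_NBUF_CB_RX_PEER_ID(nbuf);
}
#else
/* WIN: the raw peer-metadata dword is cached; unpack lazily */
static inline uint16_t dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
	uint32_t peer_metadata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);

	return (peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
	       DP_BE_PEER_METADATA_PEER_ID_SHIFT;
}
#endif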


@@ -29,7 +29,6 @@
#include "hal_api.h"
#include "hal_be_api.h"
#include "qdf_nbuf.h"
#include "hal_be_rx_tlv.h"
#ifdef MESH_MODE_SUPPORT
#include "if_meta_hdr.h"
#endif
@@ -63,9 +62,11 @@ dp_rx_wds_learn(struct dp_soc *soc,
struct dp_vdev *vdev,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *txrx_peer,
qdf_nbuf_t nbuf,
struct hal_rx_msdu_metadata msdu_metadata)
qdf_nbuf_t nbuf)
{
struct hal_rx_msdu_metadata msdu_metadata;
hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
/* WDS Source Port Learning */
if (qdf_likely(vdev->wds_enabled))
dp_rx_wds_srcport_learn(soc,
@@ -138,29 +139,12 @@ dp_rx_wds_learn(struct dp_soc *soc,
struct dp_vdev *vdev,
uint8_t *rx_tlv_hdr,
struct dp_txrx_peer *ta_txrx_peer,
qdf_nbuf_t nbuf,
struct hal_rx_msdu_metadata msdu_metadata)
qdf_nbuf_t nbuf)
{
dp_wds_ext_peer_learn_be(soc, ta_txrx_peer, rx_tlv_hdr, nbuf);
}
#endif
#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
uint8_t lmac_id;
lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif
uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
hal_ring_handle_t hal_ring_hdl, uint8_t reo_ring_num,
uint32_t quota)
@@ -183,10 +167,7 @@ uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
dp_txrx_ref_handle txrx_ref_handle = NULL;
struct dp_vdev *vdev;
uint32_t pkt_len = 0;
struct hal_rx_mpdu_desc_info mpdu_desc_info;
struct hal_rx_msdu_desc_info msdu_desc_info;
enum hal_reo_error_status error;
uint32_t peer_mdata;
uint8_t *rx_tlv_hdr;
uint32_t rx_bufs_reaped[WLAN_MAX_MLO_CHIPS][MAX_PDEV_CNT];
uint8_t mac_id = 0;
@@ -208,7 +189,6 @@ uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
uint32_t num_entries_avail = 0;
uint32_t rx_ol_pkt_cnt = 0;
uint32_t num_entries = 0;
struct hal_rx_msdu_metadata msdu_metadata;
QDF_STATUS status;
qdf_nbuf_t ebuf_head;
qdf_nbuf_t ebuf_tail;
@@ -221,6 +201,7 @@ uint32_t dp_rx_process_be(struct dp_intr *int_ctx,
uint32_t old_tid;
uint32_t peer_ext_stats;
uint32_t dsf;
uint32_t l3_pad;
DP_HIST_INIT();
@@ -248,8 +229,6 @@ more_data:
max_reap_limit = dp_rx_get_loop_pkt_limit(soc);
qdf_mem_zero(rx_bufs_reaped, sizeof(rx_bufs_reaped));
qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
qdf_mem_zero(head, sizeof(head));
qdf_mem_zero(tail, sizeof(tail));
old_tid = 0xff;
@@ -376,18 +355,12 @@ more_data:
ring_desc, rx_desc);
}
/* Get MPDU DESC info */
hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);
pkt_capture_offload =
dp_rx_copy_desc_info_in_nbuf_cb(soc, ring_desc,
rx_desc->nbuf,
reo_ring_num);
/* Get MSDU DESC info */
hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);
/* Set the end bit to identify the last buffer in MPDU */
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
qdf_nbuf_set_rx_chfrag_end(rx_desc->nbuf, 1);
if (qdf_unlikely(msdu_desc_info.msdu_flags &
HAL_MSDU_F_MSDU_CONTINUATION)) {
if (qdf_unlikely(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf))) {
/* In dp_rx_sg_create() until the last buffer,
* end bit should not be set. As continuation bit set,
* this is not a last buffer.
@@ -408,7 +381,7 @@ more_data:
* available and number of buffers needed to
* reap this MPDU
*/
if ((msdu_desc_info.msdu_len /
if ((QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) /
(RX_DATA_BUFFER_SIZE -
soc->rx_pkt_tlv_size) + 1) >
num_pending) {
@@ -431,69 +404,12 @@ more_data:
}
}
if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
qdf_nbuf_set_rx_retry_flag(rx_desc->nbuf, 1);
if (qdf_unlikely(mpdu_desc_info.mpdu_flags &
HAL_MPDU_F_RAW_AMPDU))
qdf_nbuf_set_raw_frame(rx_desc->nbuf, 1);
if (!is_prev_msdu_last &&
!(msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION))
!(qdf_nbuf_is_rx_chfrag_cont(rx_desc->nbuf)))
is_prev_msdu_last = true;
rx_bufs_reaped[rx_desc->chip_id][rx_desc->pool_id]++;
peer_mdata = mpdu_desc_info.peer_meta_data;
QDF_NBUF_CB_RX_PEER_ID(rx_desc->nbuf) =
dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
QDF_NBUF_CB_RX_VDEV_ID(rx_desc->nbuf) =
dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
dp_rx_set_msdu_lmac_id(rx_desc->nbuf, peer_mdata);
/* to indicate whether this msdu is rx offload */
pkt_capture_offload =
DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);
/*
* save msdu flags first, last and continuation msdu in
* nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
* length to nbuf->cb. This ensures the info required for
* per pkt processing is always in the same cache line.
* This helps in improving throughput for smaller pkt
* sizes.
*/
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
qdf_nbuf_set_rx_chfrag_start(rx_desc->nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
qdf_nbuf_set_rx_chfrag_cont(rx_desc->nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
qdf_nbuf_set_da_mcbc(rx_desc->nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
qdf_nbuf_set_da_valid(rx_desc->nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
qdf_nbuf_set_sa_valid(rx_desc->nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
qdf_nbuf_set_intra_bss(rx_desc->nbuf, 1);
if (qdf_likely(mpdu_desc_info.mpdu_flags &
HAL_MPDU_F_QOS_CONTROL_VALID))
qdf_nbuf_set_tid_val(rx_desc->nbuf, mpdu_desc_info.tid);
/* set sw exception */
qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
rx_desc->nbuf,
hal_rx_sw_exception_get_be(ring_desc));
QDF_NBUF_CB_RX_PKT_LEN(rx_desc->nbuf) = msdu_desc_info.msdu_len;
QDF_NBUF_CB_RX_CTX_ID(rx_desc->nbuf) = reo_ring_num;
/*
* move unmap after scattered msdu waiting break logic
* in case double skb unmap happened.
@@ -580,7 +496,7 @@ done:
rx_tlv_hdr = qdf_nbuf_data(nbuf);
vdev_id = QDF_NBUF_CB_RX_VDEV_ID(nbuf);
peer_id = QDF_NBUF_CB_RX_PEER_ID(nbuf);
peer_id = dp_rx_get_peer_id_be(nbuf);
if (dp_rx_is_list_ready(deliver_list_head, vdev, txrx_peer,
peer_id, vdev_id)) {
@@ -702,8 +618,6 @@ done:
* This is the most likely case, we receive 802.3 pkts
* decapsulated by HW, here we need to set the pkt length.
*/
hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr,
&msdu_metadata);
if (qdf_unlikely(qdf_nbuf_is_frag(nbuf))) {
bool is_mcbc, is_sa_vld, is_da_vld;
@@ -743,13 +657,12 @@ done:
}
}
} else {
l3_pad = hal_rx_get_l3_pad_bytes_be(nbuf, rx_tlv_hdr);
msdu_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf);
pkt_len = msdu_len +
msdu_metadata.l3_hdr_pad +
soc->rx_pkt_tlv_size;
pkt_len = msdu_len + l3_pad + soc->rx_pkt_tlv_size;
qdf_nbuf_set_pktlen(nbuf, pkt_len);
dp_rx_skip_tlvs(soc, nbuf, msdu_metadata.l3_hdr_pad);
dp_rx_skip_tlvs(soc, nbuf, l3_pad);
}
dp_rx_send_pktlog(soc, rx_pdev, nbuf, QDF_TX_RX_STATUS_OK);
@@ -832,8 +745,7 @@ done:
dp_rx_wds_learn(soc, vdev,
rx_tlv_hdr,
txrx_peer,
nbuf,
msdu_metadata);
nbuf);
}
if (qdf_unlikely(vdev->mesh_vdev)) {
@@ -865,8 +777,7 @@ done:
if (dp_rx_check_ap_bridge(vdev))
if (dp_rx_intrabss_fwd_be(soc, txrx_peer,
rx_tlv_hdr,
nbuf,
msdu_metadata)) {
nbuf)) {
nbuf = next;
tid_stats->intrabss_cnt++;
continue; /* Get next desc */
@@ -1695,8 +1606,7 @@ dp_rx_intrabss_mcast_handler_be(struct dp_soc *soc,
}
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
struct hal_rx_msdu_metadata msdu_metadata)
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf)
{
uint8_t tid = qdf_nbuf_get_tid_val(nbuf);
uint8_t ring_id = QDF_NBUF_CB_RX_CTX_ID(nbuf);
@@ -1704,6 +1614,7 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
tid_stats.tid_rx_stats[ring_id][tid];
bool ret = false;
struct dp_be_intrabss_params params;
struct hal_rx_msdu_metadata msdu_metadata;
/* if it is a broadcast pkt (eg: ARP) and it is not its own
* source, then clone the pkt and send the cloned pkt for
@@ -1722,6 +1633,7 @@ bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
nbuf))
return true;
hal_rx_msdu_packet_metadata_get_generic_be(rx_tlv_hdr, &msdu_metadata);
params.dest_soc = soc;
if (dp_rx_intrabss_ucast_check_be(nbuf, ta_peer, rx_tlv_hdr,
&msdu_metadata, &params)) {


@@ -25,6 +25,7 @@
#include "dp_peer.h"
#include <dp_rx.h>
#include "hal_be_rx.h"
#include "hal_be_rx_tlv.h"
/*
* dp_be_intrabss_params
@@ -43,17 +44,16 @@ struct dp_be_intrabss_params {
* dp_rx_intrabss_fwd_be() - API for intrabss fwd. For EAPOL
* pkt with DA not equal to vdev mac addr, fwd is not allowed.
* @soc: core txrx main context
* @ta_peer: source peer entry
* @ta_txrx_peer: source peer entry
* @rx_tlv_hdr: start address of rx tlvs
* @nbuf: nbuf that has to be intrabss forwarded
* @msdu_metadata: msdu metadata
*
* Return: true if it is forwarded else false
*/
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc, struct dp_txrx_peer *ta_peer,
uint8_t *rx_tlv_hdr, qdf_nbuf_t nbuf,
struct hal_rx_msdu_metadata msdu_metadata);
bool dp_rx_intrabss_fwd_be(struct dp_soc *soc,
struct dp_txrx_peer *ta_txrx_peer,
uint8_t *rx_tlv_hdr,
qdf_nbuf_t nbuf);
#endif
/**
@@ -611,4 +611,173 @@ dp_rx_null_q_desc_handle_be(struct dp_soc *soc, qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr, uint8_t pool_id,
struct dp_txrx_peer *txrx_peer,
bool is_reo_exception);
#if defined(DP_PKT_STATS_PER_LMAC) && defined(WLAN_FEATURE_11BE_MLO)
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
uint8_t lmac_id;
lmac_id = dp_rx_peer_metadata_lmac_id_get_be(peer_mdata);
qdf_nbuf_set_lmac_id(nbuf, lmac_id);
}
#else
static inline void
dp_rx_set_msdu_lmac_id(qdf_nbuf_t nbuf, uint32_t peer_mdata)
{
}
#endif
#ifndef CONFIG_NBUF_AP_PLATFORM
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
return QDF_NBUF_CB_RX_PEER_ID(nbuf);
}
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
uint32_t mpdu_desc_info,
uint32_t peer_mdata,
uint32_t msdu_desc_info)
{
}
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
qdf_nbuf_t nbuf,
uint8_t reo_ring_num)
{
struct hal_rx_mpdu_desc_info mpdu_desc_info;
struct hal_rx_msdu_desc_info msdu_desc_info;
uint8_t pkt_capture_offload = 0;
uint32_t peer_mdata = 0;
qdf_mem_zero(&mpdu_desc_info, sizeof(mpdu_desc_info));
qdf_mem_zero(&msdu_desc_info, sizeof(msdu_desc_info));
/* Get MPDU DESC info */
hal_rx_mpdu_desc_info_get_be(ring_desc, &mpdu_desc_info);
/* Get MSDU DESC info */
hal_rx_msdu_desc_info_get_be(ring_desc, &msdu_desc_info);
/* Set the end bit to identify the last buffer in MPDU */
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_LAST_MSDU_IN_MPDU)
qdf_nbuf_set_rx_chfrag_end(nbuf, 1);
if (mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RETRY_BIT)
qdf_nbuf_set_rx_retry_flag(nbuf, 1);
if (qdf_unlikely(mpdu_desc_info.mpdu_flags & HAL_MPDU_F_RAW_AMPDU))
qdf_nbuf_set_raw_frame(nbuf, 1);
peer_mdata = mpdu_desc_info.peer_meta_data;
QDF_NBUF_CB_RX_PEER_ID(nbuf) =
dp_rx_peer_metadata_peer_id_get_be(soc, peer_mdata);
QDF_NBUF_CB_RX_VDEV_ID(nbuf) =
dp_rx_peer_metadata_vdev_id_get_be(soc, peer_mdata);
dp_rx_set_msdu_lmac_id(nbuf, peer_mdata);
/* to indicate whether this msdu is rx offload */
pkt_capture_offload =
DP_PEER_METADATA_OFFLOAD_GET_BE(peer_mdata);
/*
* save msdu flags first, last and continuation msdu in
* nbuf->cb, also save mcbc, is_da_valid, is_sa_valid and
* length to nbuf->cb. This ensures the info required for
* per pkt processing is always in the same cache line.
* This helps in improving throughput for smaller pkt
* sizes.
*/
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_FIRST_MSDU_IN_MPDU)
qdf_nbuf_set_rx_chfrag_start(nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_MSDU_CONTINUATION)
qdf_nbuf_set_rx_chfrag_cont(nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_MCBC)
qdf_nbuf_set_da_mcbc(nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_DA_IS_VALID)
qdf_nbuf_set_da_valid(nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_SA_IS_VALID)
qdf_nbuf_set_sa_valid(nbuf, 1);
if (msdu_desc_info.msdu_flags & HAL_MSDU_F_INTRA_BSS)
qdf_nbuf_set_intra_bss(nbuf, 1);
if (qdf_likely(mpdu_desc_info.mpdu_flags &
HAL_MPDU_F_QOS_CONTROL_VALID))
qdf_nbuf_set_tid_val(nbuf, mpdu_desc_info.tid);
/* set sw exception */
qdf_nbuf_set_rx_reo_dest_ind_or_sw_excpt(
nbuf,
hal_rx_sw_exception_get_be(ring_desc));
QDF_NBUF_CB_RX_PKT_LEN(nbuf) = msdu_desc_info.msdu_len;
QDF_NBUF_CB_RX_CTX_ID(nbuf) = reo_ring_num;
return pkt_capture_offload;
}
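/*
 * A hedged illustration of the cache-line comment above: the whole
 * 48-byte qdf_nbuf_cb fits in a single line on CPUs with 64-byte
 * cache lines, so the fields saved here can be read back with one
 * line fill. The 64 below is an assumption, not a driver define.
 */
QDF_COMPILE_TIME_ASSERT(rx_cb_single_cache_line,
			sizeof(struct qdf_nbuf_cb) <= 64);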
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr)
{
return HAL_RX_TLV_L3_HEADER_PADDING_GET(rx_tlv_hdr);
}
#else
static inline uint16_t
dp_rx_get_peer_id_be(qdf_nbuf_t nbuf)
{
uint32_t peer_metadata = QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf);
return ((peer_metadata & DP_BE_PEER_METADATA_PEER_ID_MASK) >>
DP_BE_PEER_METADATA_PEER_ID_SHIFT);
}
static inline void
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(qdf_nbuf_t nbuf,
uint32_t mpdu_desc_info,
uint32_t peer_mdata,
uint32_t msdu_desc_info)
{
QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(nbuf) = mpdu_desc_info;
QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(nbuf) = peer_mdata;
QDF_NBUF_CB_RX_MSDU_DESC_INFO(nbuf) = msdu_desc_info;
}
static inline uint8_t dp_rx_copy_desc_info_in_nbuf_cb(struct dp_soc *soc,
hal_ring_desc_t ring_desc,
qdf_nbuf_t nbuf,
uint8_t reo_ring_num)
{
uint32_t mpdu_desc_info = 0;
uint32_t msdu_desc_info = 0;
uint32_t peer_mdata = 0;
/* get REO mpdu & msdu desc info */
hal_rx_get_mpdu_msdu_desc_info_be(ring_desc,
&mpdu_desc_info,
&peer_mdata,
&msdu_desc_info);
dp_rx_set_mpdu_msdu_desc_info_in_nbuf(nbuf,
mpdu_desc_info,
peer_mdata,
msdu_desc_info);
return 0;
}
static inline uint8_t hal_rx_get_l3_pad_bytes_be(qdf_nbuf_t nbuf,
uint8_t *rx_tlv_hdr)
{
return QDF_NBUF_CB_RX_L3_PAD_MSB(nbuf) ? 2 : 0;
}
#endif
#endif
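Note the asymmetry in hal_rx_get_l3_pad_bytes_be() above: the non-AP build reads the pad straight out of the rx TLVs, while the AP build keeps only a single pad bit in nbuf->cb and maps it back to the 0- or 2-byte padding values. A sketch of how a consumer reconstructs the packet length from the cached fields, mirroring the pkt_len math in the dp_be.c hunk above (variable names assumed from that hunk):

	l3_pad = hal_rx_get_l3_pad_bytes_be(nbuf, rx_tlv_hdr);
	pkt_len = QDF_NBUF_CB_RX_PKT_LEN(nbuf) + l3_pad +
		  soc->rx_pkt_tlv_size;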


@@ -1,6 +1,6 @@
/*
* Copyright (c) 2016-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -538,4 +538,38 @@ hal_rx_tlv_get_dest_chip_pmac_id(uint8_t *buf,
*d_chip_pmac_id = HAL_RX_TLV_DEST_CHIP_PMAC_ID_GET(rx_pkt_tlvs);
}
#endif /* INTRA_BSS_FWD_OFFLOAD */
static inline uint8_t hal_rx_get_reo_push_rsn(void *desc_addr)
{
struct reo_destination_ring *reo_dst_ring;
reo_dst_ring = (struct reo_destination_ring *)desc_addr;
return reo_dst_ring->reo_push_reason;
}
/**
* hal_rx_get_mpdu_msdu_desc_info_be() - get msdu, mpdu, peer meta data info
* from HAL Desc.
* @desc_addr: REO ring descriptor addr
* @mpdu_info: pointer to MPDU info
* @peer_mdata: pointer to peer meta data info
* @msdu_info: pointer to msdu info
*
* Return: void
*/
static inline void
hal_rx_get_mpdu_msdu_desc_info_be(void *desc_addr,
uint32_t *mpdu_info,
uint32_t *peer_mdata,
uint32_t *msdu_info)
{
struct reo_destination_ring *reo_dst_ring;
reo_dst_ring = (struct reo_destination_ring *)desc_addr;
*mpdu_info = *(uint32_t *)(&reo_dst_ring->rx_mpdu_desc_info_details);
*peer_mdata = *((uint32_t *)
&reo_dst_ring->rx_mpdu_desc_info_details + 1);
*msdu_info = *(uint32_t *)(&reo_dst_ring->rx_msdu_desc_info_details);
}
#endif /* _HAL_BE_RX_H_ */
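hal_rx_get_mpdu_msdu_desc_info_be() copies raw dwords rather than parsing bit fields, and the "+ 1" pointer arithmetic is only valid if rx_mpdu_desc_info_details spans at least two dwords. A hedged build-time guard would make that dependency explicit (the member sizes are an assumption inferred from the casts above, not something this commit asserts):

QDF_COMPILE_TIME_ASSERT(mpdu_desc_info_size,
			sizeof(((struct reo_destination_ring *)0)
			       ->rx_mpdu_desc_info_details) >=
			2 * sizeof(uint32_t));
QDF_COMPILE_TIME_ASSERT(msdu_desc_info_size,
			sizeof(((struct reo_destination_ring *)0)
			       ->rx_msdu_desc_info_details) >=
			sizeof(uint32_t));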


@@ -121,516 +121,6 @@ typedef union {
qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
/**
* struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
* - data passed between layers of the driver.
*
* Notes:
* 1. Hard limited to 48 bytes. Please count your bytes
* 2. The size of this structure has to be easily calculable and
* consistently so: do not use any conditional compile flags
* 3. Split into a common part followed by a tx/rx overlay
* 4. There is only one extra frag, which represents the HTC/HTT header
* 5. "ext_cb_pt" must be the first member in both TX and RX unions
* for the priv_cb_w since it must be at same offset for both
* TX and RX union
* 6. "ipa.owned" bit must be first member in both TX and RX unions
* for the priv_cb_m since it must be at same offset for both
* TX and RX union.
*
* @paddr : physical address retrieved by dma_map of nbuf->data
* @u: union of rx and tx data
* @u.rx: rx data
* @u.rx.dev: union of priv_cb_w and priv_cb_m
*
* @u.rx.dev.priv_cb_w:
* @u.rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
* @u.rx.dev.priv_cb_w.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
* @u.rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_w.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet
* type
* @u.rx.dev.priv_cb_w.flow_idx_valid: flow entry is found
* @u.rx.dev.priv_cb_w.flow_idx_timeout: flow entry search timed out
* @u.rx.dev.priv_cb_w.rsvd: reserved bits
* @u.rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
*
* @u.rx.dev.priv_cb_m:
* @u.rx.dev.priv_cb_m.ipa.owned: packet owned by IPA
* @u.rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
* @u.rx.dev.priv_cb_m.flush_ind: flush indication
* @u.rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
* @u.rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
* @u.rx.dev.priv_cb_m.exc_frm: exception frame
* @u.rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
* sw exception bit from ring desc
* @u.rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
* @u.rx.dev.priv_cb_m.reserved1: reserved bits
* @u.rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
* @u.rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
* @u.rx.dev.priv_cb_m.dp: Union of wifi3 and wifi2 structs
* @u.rx.dev.priv_cb_m.dp.wifi3: wifi3 data
* @u.rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_m.dp.wifi2: wifi2 data
* @u.rx.dev.priv_cb_m.dp.wifi2.map_index:
* @u.rx.dev.priv_cb_m.lro_ctx: LRO context
*
* @u.rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
* @u.rx.tcp_proto: L4 protocol is TCP
* @u.rx.tcp_pure_ack: A TCP ACK packet with no payload
* @u.rx.ipv6_proto: L3 protocol is IPV6
* @u.rx.ip_offset: offset to IP header
* @u.rx.tcp_offset: offset to TCP header
* @u.rx.rx_ctx_id: Rx context id
* @u.rx.fcs_err: FCS error
* @u.rx.is_raw_frame: RAW frame
* @u.rx.num_elements_in_list: number of elements in the nbuf list
*
* @u.rx.tcp_udp_chksum: L4 payload checksum
* @u.rx.tcp_win: TCP window size
*
* @u.rx.flow_id: 32bit flow id
*
* @u.rx.flag_chfrag_start: first MSDU in an AMSDU
* @u.rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.rx.flag_chfrag_end: last MSDU in an AMSDU
* @u.rx.flag_retry: flag to indicate MSDU is retried
* @u.rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
* @u.rx.flag_da_valid: flag to indicate DA is valid for RX packet
* @u.rx.flag_sa_valid: flag to indicate SA is valid for RX packet
* @u.rx.flag_is_frag: flag to indicate skb has frag list
*
* @u.rx.trace: combined structure for DP and protocol trace
* @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.rx.trace.dp_trace: flag (Datapath trace)
* @u.rx.trace.packet_track: RX_DATA packet
* @u.rx.trace.rsrvd: reserved bits
*
* @u.rx.vdev_id: vdev_id for RX pkt
* @u.rx.tid_val: tid value
* @u.rx.ftype: mcast2ucast, TSO, SG, MESH
*
* @u.tx: tx data
* @u.tx.dev: union of priv_cb_w and priv_cb_m
*
* @u.tx.dev.priv_cb_w:
* @u.tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
*
* @u.tx.dev.priv_cb_m:
* @u.tx.dev.priv_cb_m.ipa: IPA-specific data
* @u.tx.dev.priv_cb_m.ipa.ipa.owned: packet owned by IPA
* @u.tx.dev.priv_cb_m.ipa.ipa.priv: private data, used by IPA
* @u.tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
* + (1) CE classification enablement bit
* + (2) packet type (802.3 or Ethernet type II)
* + (3) packet offset (usually length of HTC/HTT descr)
* @u.tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
* @u.tx.dev.priv_cb_m.dma_option: DMA options
* @u.tx.dev.priv_cb_m.dma_option.mgmt_desc_id: mgmt descriptor for tx
* completion cb
* @u.tx.dev.priv_cb_m.dma_option.dma_option.bi_map: flag to do bi-direction
* dma map
* @u.tx.dev.priv_cb_m.dma_option.dma_option.reserved: reserved bits for future
* use
* @u.tx.dev.priv_cb_m.flag_notify_comp: reserved
* @u.tx.dev.priv_cb_m.rsvd: reserved
* @u.tx.dev.priv_cb_m.reserved: reserved
*
* @u.tx.ftype: mcast2ucast, TSO, SG, MESH
* @u.tx.vdev_id: vdev (for protocol trace)
* @u.tx.len: length of efrag pointed by the above pointers
*
* @u.tx.flags: union of flag representations
* @u.tx.flags.bits: flags represent as individual bitmasks
* @u.tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
* @u.tx.flags.bits.num: number of extra frags ( 0 or 1)
* @u.tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
* @u.tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
* @u.tx.flags.bits.flag_ext_header: extended flags
* @u.tx.flags.bits.is_critical: flag indicating a critical frame
* @u.tx.flags.u8: flags as a single u8
* @u.tx.trace: combined structure for DP and protocol trace
* @u.tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.tx.trace.is_packet_priv:
* @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
* @u.tx.trace.to_fw: Flag to indicate send this packet to FW
* @u.tx.trace.htt2_frm: flag (high-latency path only)
* @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
* + (MGMT_ACTION)] - 4 bits
* @u.tx.trace.dp_trace: flag (Datapath trace)
* @u.tx.trace.is_bcast: flag (Broadcast packet)
* @u.tx.trace.is_mcast: flag (Multicast packet)
* @u.tx.trace.packet_type: flag (Packet type)
* @u.tx.trace.print: enable packet logging
*
* @u.tx.vaddr: virtual address of ~
* @u.tx.paddr: physical/DMA address of ~
*/
struct qdf_nbuf_cb {
/* common */
qdf_paddr_t paddr; /* of skb->data */
/* valid only in one direction */
union {
/* Note: MAX: 40 bytes */
struct {
union {
struct {
void *ext_cb_ptr;
void *fctx;
uint16_t msdu_len : 14,
flag_intra_bss : 1,
ipa_smmu_map : 1;
uint16_t peer_id;
uint8_t protocol_tag;
uint8_t flow_idx_valid: 1,
flow_idx_timeout: 1,
rsvd:6;
uint16_t flow_tag;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
* control block and tx control block.
* Do not change location of this bit.
*/
uint32_t ipa_owned:1,
peer_cached_buf_frm:1,
flush_ind:1,
packet_buf_pool:1,
l3_hdr_pad:3,
/* exception frame flag */
exc_frm:1,
ipa_smmu_map:1,
reo_dest_ind_or_sw_excpt:5,
lmac_id:2,
reserved1:16;
uint32_t tcp_seq_num;
uint32_t tcp_ack_num;
union {
struct {
uint16_t msdu_len;
uint16_t peer_id;
} wifi3;
struct {
uint32_t map_index;
} wifi2;
} dp;
unsigned char *lro_ctx;
} priv_cb_m;
} dev;
uint32_t lro_eligible:1,
tcp_proto:1,
tcp_pure_ack:1,
ipv6_proto:1,
ip_offset:7,
tcp_offset:7,
rx_ctx_id:4,
fcs_err:1,
is_raw_frame:1,
num_elements_in_list:8;
uint32_t tcp_udp_chksum:16,
tcp_win:16;
uint32_t flow_id;
uint8_t flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_retry:1,
flag_da_mcbc:1,
flag_da_valid:1,
flag_sa_valid:1,
flag_is_frag:1;
union {
uint8_t packet_state;
uint8_t dp_trace:1,
packet_track:3,
rsrvd:4;
} trace;
uint16_t vdev_id:8,
tid_val:4,
ftype:4;
} rx;
/* Note: MAX: 40 bytes */
struct {
union {
struct {
void *ext_cb_ptr;
void *fctx;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
* control block and tx control block.
* Do not change location of this bit.
*/
struct {
uint32_t owned:1,
priv:31;
} ipa;
uint32_t data_attr;
uint16_t desc_id;
uint16_t mgmt_desc_id;
struct {
uint8_t bi_map:1,
reserved:7;
} dma_option;
uint8_t flag_notify_comp:1,
rsvd:7;
uint8_t reserved[2];
} priv_cb_m;
} dev;
uint8_t ftype;
uint8_t vdev_id;
uint16_t len;
union {
struct {
uint8_t flag_efrag:1,
flag_nbuf:1,
num:1,
flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_ext_header:1,
is_critical:1;
} bits;
uint8_t u8;
} flags;
struct {
uint8_t packet_state:7,
is_packet_priv:1;
uint8_t packet_track:3,
to_fw:1,
htt2_frm:1,
proto_type:3;
uint8_t dp_trace:1,
is_bcast:1,
is_mcast:1,
packet_type:4,
print:1;
} trace;
unsigned char *vaddr;
qdf_paddr_t paddr;
} tx;
} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
FIELD_SIZEOF(struct sk_buff, cb));
#endif
/*
* access macros to qdf_nbuf_cb
* Note: These macros can be used as L-values as well as R-values.
* When used as R-values, they effectively function as "get" macros
* When used as L-values, they effectively function as "set" macros
*/
#define QDF_NBUF_CB_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
#define QDF_NBUF_CB_RX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_end)
#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_mcbc)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.is_raw_frame)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.tid_val)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.fcs_err)
#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
qdf_nbuf_set_state(skb, PACKET_STATE)
#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
#define QDF_NBUF_CB_TX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */
/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.to_fw)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
#define __qdf_nbuf_reset_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
/*
* end of nbuf->cb access macros
*/
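/*
 * Example of the L-value/R-value duality described above; both
 * usages appear in the RX hunks earlier in this commit:
 *
 *     QDF_NBUF_CB_RX_CTX_ID(skb) = reo_ring_num;   set (L-value)
 *     ring_id = QDF_NBUF_CB_RX_CTX_ID(skb);        get (R-value)
 */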
typedef void (*qdf_nbuf_trace_update_t)(char *);
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
@@ -797,26 +287,9 @@ typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
#define __qdf_nbuf_trace_get_proto_type(skb) \
QDF_NBUF_CB_TX_PROTO_TYPE(skb)
#define __qdf_nbuf_data_attr_get(skb) \
QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
#define __qdf_nbuf_queue_walk_safe(queue, var, tvar) \
skb_queue_walk_safe(queue, var, tvar)
/**
* __qdf_nbuf_num_frags_init() - init extra frags
* @skb: sk buffer
*
* Return: none
*/
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}
/*
* prototypes. Implemented in qdf_nbuf.c
*/
@@ -1786,26 +1259,6 @@ static inline QDF_STATUS __qdf_to_status(signed int error)
}
}
/**
* __qdf_nbuf_len() - return the amount of valid data in the skb
* @skb: Pointer to network buffer
*
* This API returns the amount of valid data in the skb, If there are frags
* then it returns total length.
*
* Return: network buffer length
*/
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
int i, extra_frag_len = 0;
i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
if (i > 0)
extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
return extra_frag_len + skb->len;
}
/**
* __qdf_nbuf_cat() - link two nbufs
* @dst: Buffer to piggyback into
@@ -2074,46 +1527,6 @@ __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
__skb_unlink(skb, list);
}
/**
* __qdf_nbuf_reset() - reset the buffer data and pointer
* @skb: Network buf instance
* @reserve: reserve
* @align: align
*
* Return: none
*/
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
int offset;
skb_push(skb, skb_headroom(skb));
skb_put(skb, skb_tailroom(skb));
memset(skb->data, 0x0, skb->len);
skb_trim(skb, 0);
skb_reserve(skb, NET_SKB_PAD);
memset(skb->cb, 0x0, sizeof(skb->cb));
/*
* The default is for netbuf fragments to be interpreted
* as wordstreams rather than bytestreams.
*/
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
/*
* Align & make sure that the tail & data are adjusted properly
*/
if (align) {
offset = ((unsigned long)skb->data) % align;
if (offset)
skb_reserve(skb, align - offset);
}
skb_reserve(skb, reserve);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
* __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
@@ -3401,78 +2814,6 @@ static inline void __qdf_record_nbuf_nbytes(
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
/**
* __qdf_nbuf_map_nbytes_single() - map nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: QDF_STATUS
*/
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr;
QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr;
QDF_STATUS ret;
/* assume that the OS only provides a single fragment */
QDF_NBUF_CB_PADDR(buf) = paddr =
dma_map_single(osdev->dev, buf->data,
nbytes, __qdf_dma_dir_to_os(dir));
ret = dma_mapping_error(osdev->dev, paddr) ?
QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
if (QDF_IS_STATUS_SUCCESS(ret))
__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
dir, true);
return ret;
}
#endif
/**
* __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: none
*/
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
if (qdf_likely(paddr)) {
__qdf_record_nbuf_nbytes(
__qdf_nbuf_get_end_offset(buf), dir, false);
dma_unmap_single(osdev->dev, paddr, nbytes,
__qdf_dma_dir_to_os(dir));
return;
}
}
#endif
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{


@@ -27,6 +27,503 @@
#ifndef _I_QDF_NBUF_M_H
#define _I_QDF_NBUF_M_H
/**
* struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
* - data passed between layers of the driver.
*
* Notes:
* 1. Hard limited to 48 bytes. Please count your bytes
* 2. The size of this structure has to be easily calculable and
* consistently so: do not use any conditional compile flags
* 3. Split into a common part followed by a tx/rx overlay
* 4. There is only one extra frag, which represents the HTC/HTT header
* 5. "ext_cb_pt" must be the first member in both TX and RX unions
* for the priv_cb_w since it must be at same offset for both
* TX and RX union
* 6. "ipa.owned" bit must be first member in both TX and RX unions
* for the priv_cb_m since it must be at same offset for both
* TX and RX union.
*
* @paddr : physical address retrieved by dma_map of nbuf->data
* @u: union of rx and tx data
* @u.rx: rx data
* @u.rx.dev: union of priv_cb_w and priv_cb_m
*
* @u.rx.dev.priv_cb_w:
* @u.rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
* @u.rx.dev.priv_cb_w.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
* @u.rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_w.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet
* type
* @u.rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
*
* @u.rx.dev.priv_cb_m:
* @u.rx.dev.priv_cb_m.ipa.owned: packet owned by IPA
* @u.rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
* @u.rx.dev.priv_cb_m.flush_ind: flush indication
* @u.rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
* @u.rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
* @u.rx.dev.priv_cb_m.exc_frm: exception frame
* @u.rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
* sw exception bit from ring desc
* @u.rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
* @u.rx.dev.priv_cb_m.reserved1: reserved bits
* @u.rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
* @u.rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
* @u.rx.dev.priv_cb_m.dp: Union of wifi3 and wifi2 structs
* @u.rx.dev.priv_cb_m.dp.wifi3: wifi3 data
* @u.rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_m.dp.wifi2: wifi2 data
* @u.rx.dev.priv_cb_m.dp.wifi2.map_index:
* @u.rx.dev.priv_cb_m.lro_ctx: LRO context
*
* @u.rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
* @u.rx.tcp_proto: L4 protocol is TCP
* @u.rx.tcp_pure_ack: A TCP ACK packet with no payload
* @u.rx.ipv6_proto: L3 protocol is IPV6
* @u.rx.ip_offset: offset to IP header
* @u.rx.tcp_offset: offset to TCP header
* @u.rx.rx_ctx_id: Rx context id
* @u.rx.fcs_err: FCS error
* @u.rx.is_raw_frame: RAW frame
* @u.rx.num_elements_in_list: number of elements in the nbuf list
*
* @u.rx.tcp_udp_chksum: L4 payload checksum
* @u.rx.tcp_win: TCP window size
*
* @u.rx.flow_id: 32bit flow id
*
* @u.rx.flag_chfrag_start: first MSDU in an AMSDU
* @u.rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.rx.flag_chfrag_end: last MSDU in an AMSDU
* @u.rx.flag_retry: flag to indicate MSDU is retried
* @u.rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
* @u.rx.flag_da_valid: flag to indicate DA is valid for RX packet
* @u.rx.flag_sa_valid: flag to indicate SA is valid for RX packet
* @u.rx.flag_is_frag: flag to indicate skb has frag list
*
* @u.rx.trace: combined structure for DP and protocol trace
* @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.rx.trace.dp_trace: flag (Datapath trace)
* @u.rx.trace.packet_track: RX_DATA packet
* @u.rx.trace.rsrvd: reserved bits
*
* @u.rx.vdev_id: vdev_id for RX pkt
* @u.rx.tid_val: tid value
* @u.rx.ftype: mcast2ucast, TSO, SG, MESH
*
* @u.tx: tx data
* @u.tx.dev: union of priv_cb_w and priv_cb_m
*
* @u.tx.dev.priv_cb_w:
* @u.tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
*
* @u.tx.dev.priv_cb_m:
* @u.tx.dev.priv_cb_m.ipa: IPA-specific data
* @u.tx.dev.priv_cb_m.ipa.ipa.owned: packet owned by IPA
* @u.tx.dev.priv_cb_m.ipa.ipa.priv: private data, used by IPA
* @u.tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
* + (1) CE classification enablement bit
* + (2) packet type (802.3 or Ethernet type II)
* + (3) packet offset (usually length of HTC/HTT descr)
* @u.tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
* @u.tx.dev.priv_cb_m.dma_option: DMA options
* @u.tx.dev.priv_cb_m.dma_option.mgmt_desc_id: mgmt descriptor for tx
* completion cb
* @u.tx.dev.priv_cb_m.dma_option.dma_option.bi_map: flag to do bi-direction
* dma map
* @u.tx.dev.priv_cb_m.dma_option.dma_option.reserved: reserved bits for future
* use
* @u.tx.dev.priv_cb_m.flag_notify_comp: reserved
* @u.tx.dev.priv_cb_m.rsvd: reserved
* @u.tx.dev.priv_cb_m.reserved: reserved
*
* @u.tx.ftype: mcast2ucast, TSO, SG, MESH
* @u.tx.vdev_id: vdev (for protocol trace)
* @u.tx.len: length of efrag pointed by the above pointers
*
* @u.tx.flags: union of flag representations
* @u.tx.flags.bits: flags represent as individual bitmasks
* @u.tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
* @u.tx.flags.bits.num: number of extra frags ( 0 or 1)
* @u.tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
* @u.tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
* @u.tx.flags.bits.flag_ext_header: extended flags
* @u.tx.flags.bits.is_critical: flag indicating a critical frame
* @u.tx.flags.u8: flags as a single u8
* @u.tx.trace: combined structure for DP and protocol trace
* @u.tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.tx.trace.is_packet_priv:
* @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
* @u.tx.trace.to_fw: Flag to indicate send this packet to FW
* @u.tx.trace.htt2_frm: flag (high-latency path only)
* @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
* + (MGMT_ACTION)] - 4 bits
* @u.tx.trace.dp_trace: flag (Datapath trace)
* @u.tx.trace.is_bcast: flag (Broadcast packet)
* @u.tx.trace.is_mcast: flag (Multicast packet)
* @u.tx.trace.packet_type: flag (Packet type)
* @u.tx.trace.print: enable packet logging
*
* @u.tx.vaddr: virtual address of ~
* @u.tx.paddr: physical/DMA address of ~
*/
struct qdf_nbuf_cb {
/* common */
qdf_paddr_t paddr; /* of skb->data */
/* valid only in one direction */
union {
/* Note: MAX: 40 bytes */
struct {
union {
struct {
void *ext_cb_ptr;
void *fctx;
uint16_t msdu_len : 14,
flag_intra_bss : 1,
ipa_smmu_map : 1;
uint16_t peer_id;
uint16_t protocol_tag;
uint16_t flow_tag;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
* control block and tx control block.
* Do not change location of this bit.
*/
uint32_t ipa_owned:1,
peer_cached_buf_frm:1,
flush_ind:1,
packet_buf_pool:1,
l3_hdr_pad:3,
/* exception frame flag */
exc_frm:1,
ipa_smmu_map:1,
reo_dest_ind_or_sw_excpt:5,
lmac_id:2,
reserved1:16;
uint32_t tcp_seq_num;
uint32_t tcp_ack_num;
union {
struct {
uint16_t msdu_len;
uint16_t peer_id;
} wifi3;
struct {
uint32_t map_index;
} wifi2;
} dp;
unsigned char *lro_ctx;
} priv_cb_m;
} dev;
uint32_t lro_eligible:1,
tcp_proto:1,
tcp_pure_ack:1,
ipv6_proto:1,
ip_offset:7,
tcp_offset:7,
rx_ctx_id:4,
fcs_err:1,
is_raw_frame:1,
num_elements_in_list:8;
uint32_t tcp_udp_chksum:16,
tcp_win:16;
uint32_t flow_id;
uint8_t flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_retry:1,
flag_da_mcbc:1,
flag_da_valid:1,
flag_sa_valid:1,
flag_is_frag:1;
union {
uint8_t packet_state;
uint8_t dp_trace:1,
packet_track:3,
rsrvd:4;
} trace;
uint16_t vdev_id:8,
tid_val:4,
ftype:4;
} rx;
/* Note: MAX: 40 bytes */
struct {
union {
struct {
void *ext_cb_ptr;
void *fctx;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
* control block and tx control block.
* Do not change location of this bit.
*/
struct {
uint32_t owned:1,
priv:31;
} ipa;
uint32_t data_attr;
uint16_t desc_id;
uint16_t mgmt_desc_id;
struct {
uint8_t bi_map:1,
reserved:7;
} dma_option;
uint8_t flag_notify_comp:1,
rsvd:7;
uint8_t reserved[2];
} priv_cb_m;
} dev;
uint8_t ftype;
uint8_t vdev_id;
uint16_t len;
union {
struct {
uint8_t flag_efrag:1,
flag_nbuf:1,
num:1,
flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_ext_header:1,
is_critical:1;
} bits;
uint8_t u8;
} flags;
struct {
uint8_t packet_state:7,
is_packet_priv:1;
uint8_t packet_track:3,
to_fw:1,
htt2_frm:1,
proto_type:3;
uint8_t dp_trace:1,
is_bcast:1,
is_mcast:1,
packet_type:4,
print:1;
} trace;
unsigned char *vaddr;
qdf_paddr_t paddr;
} tx;
} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
FIELD_SIZEOF(struct sk_buff, cb));
#endif
/*
* access macros to qdf_nbuf_cb
* Note: These macros can be used as L-values as well as R-values.
* When used as R-values, they effectively function as "get" macros
* When used as L-values, they effectively function as "set" macros
*/
#define QDF_NBUF_CB_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
#define QDF_NBUF_CB_RX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_end)
#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_mcbc)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.is_raw_frame)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.tid_val)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.fcs_err)
#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
qdf_nbuf_set_state(skb, PACKET_STATE)
#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
#define QDF_NBUF_CB_TX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */
/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.to_fw)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
#define __qdf_nbuf_reset_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_seq_num)
@@ -108,6 +605,126 @@
#define __qdf_nbuf_ipa_priv_set(skb, priv) \
(QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv))
#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
#define __qdf_nbuf_data_attr_get(skb) \
QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
/**
* __qdf_nbuf_map_nbytes_single() - map nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: QDF_STATUS
*/
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr;
QDF_NBUF_CB_PADDR(buf) = paddr = buf->data;
return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr;
QDF_STATUS ret;
/* assume that the OS only provides a single fragment */
QDF_NBUF_CB_PADDR(buf) = paddr =
dma_map_single(osdev->dev, buf->data,
nbytes, __qdf_dma_dir_to_os(dir));
ret = dma_mapping_error(osdev->dev, paddr) ?
QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
if (QDF_IS_STATUS_SUCCESS(ret))
__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
dir, true);
return ret;
}
#endif
/**
* __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: none
*/
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
if (qdf_likely(paddr)) {
__qdf_record_nbuf_nbytes(
__qdf_nbuf_get_end_offset(buf), dir, false);
dma_unmap_single(osdev->dev, paddr, nbytes,
__qdf_dma_dir_to_os(dir));
return;
}
}
#endif
/**
* __qdf_nbuf_reset() - reset the buffer data and pointer
* @skb: Network buf instance
* @reserve: reserve
* @align: align
*
* Return: none
*/
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
int offset;
skb_push(skb, skb_headroom(skb));
skb_put(skb, skb_tailroom(skb));
memset(skb->data, 0x0, skb->len);
skb_trim(skb, 0);
skb_reserve(skb, NET_SKB_PAD);
memset(skb->cb, 0x0, sizeof(skb->cb));
/*
* The default is for netbuf fragments to be interpreted
* as wordstreams rather than bytestreams.
*/
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
/*
* Align & make sure that the tail & data are adjusted properly
*/
if (align) {
offset = ((unsigned long)skb->data) % align;
if (offset)
skb_reserve(skb, align - offset);
}
skb_reserve(skb, reserve);
}
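/*
 * Usage sketch (values illustrative, not taken from the source):
 * recycle an skb, leaving 64 bytes of headroom and aligning
 * skb->data to a 4-byte boundary.
 */
static inline void __qdf_nbuf_reset_example(struct sk_buff *skb)
{
	__qdf_nbuf_reset(skb, 64, 4);
}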
/**
* qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb
* @skb: skb pointer whose cb is updated with vdev id information
@@ -139,6 +756,38 @@ void __qdf_nbuf_init_replenish_timer(void);
*/
void __qdf_nbuf_deinit_replenish_timer(void);
/**
* __qdf_nbuf_len() - return the amount of valid data in the skb
* @skb: Pointer to network buffer
*
* This API returns the amount of valid data in the skb, If there are frags
* then it returns total length.
*
* Return: network buffer length
*/
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
int i, extra_frag_len = 0;
i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
if (i > 0)
extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
return extra_frag_len + skb->len;
}
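/*
 * Worked example (illustrative numbers): with one extra frag of 26
 * bytes, e.g. the HTC/HTT header, and skb->len == 1400,
 * __qdf_nbuf_len() returns 26 + 1400 = 1426.
 */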
/**
* __qdf_nbuf_num_frags_init() - init extra frags
* @skb: sk buffer
*
* Return: none
*/
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}
/**
* __qdf_nbuf_push_head() - Push data in the front
* @skb: Pointer to network buffer
@@ -234,4 +883,5 @@ __qdf_dsb(void) {}
static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end) {}
#endif /*_I_QDF_NBUF_M_H */


@@ -28,10 +28,524 @@
#ifndef _I_QDF_NBUF_W_H
#define _I_QDF_NBUF_W_H
/* ext_cb accessor macros and internal API's */
/**
* struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
* - data passed between layers of the driver.
*
* Notes:
* 1. Hard limited to 48 bytes. Please count your bytes
* 2. The size of this structure has to be easily calculable and
* consistently so: do not use any conditional compile flags
* 3. Split into a common part followed by a tx/rx overlay
* 4. There is only one extra frag, which represents the HTC/HTT header
* 5. "ext_cb_pt" must be the first member in both TX and RX unions
* for the priv_cb_w since it must be at same offset for both
* TX and RX union
* 6. "ipa.owned" bit must be first member in both TX and RX unions
* for the priv_cb_m since it must be at same offset for both
* TX and RX union.
*
 * @paddr: physical address retrieved by dma_map of nbuf->data
* @u: union of TX and RX member elements
* @u.rx.ext_cb_ptr: extended cb pointer
 * @u.rx.fctx: ctx to handle special pkts defined by ftype
 * @u.rx.ftype: type of the special packet referenced by fctx
* @u.rx.rx_ctx_id: RX ring id
* @u.rx.fcs_err: fcs error in RX packet
* @u.rx.ipa_smmu_map: do IPA smmu map
 * @u.rx.flow_idx_valid: flow index valid flag
 * @u.rx.flow_idx_timeout: flow index timeout flag
* @u.rx.rsvd8: reserved bits
* @u.rx.num_elements_in_list: num of elements (nbufs) in the list
* @u.rx.trace: combined structure for DP and protocol trace
 * @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                           (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
* @u.rx.trace.dp_trace: flag (Datapath trace)
* @u.rx.trace.packet_track: RX_DATA packet
 * @u.rx.trace.rsrvd: reserved bits
*
* @u.rx.protocol_tag: protocol tag set by app for rcvd packet type
 * @u.rx.flow_tag: flow tag set by the application for the received 5-tuple
*
* @u.rx.hw_info: combined structure for HW info fields
* @u.rx.hw_info.desc_tlv_members.msdu_count: num of msdus
* @u.rx.hw_info.desc_tlv_members.fragment_flag: is this fragment mpdu
* @u.rx.hw_info.desc_tlv_members.flag_retry: is mpdu retry flag set
* @u.rx.hw_info.desc_tlv_members.flag_is_ampdu: is ampdu flag set
* @u.rx.hw_info.desc_tlv_members.bar_frame: is this bar frame
* @u.rx.hw_info.desc_tlv_members.pn_fields_contain_valid_info: pn valid
* @u.rx.hw_info.desc_tlv_members.is_raw_frame: is this raw frame
* @u.rx.hw_info.desc_tlv_members.more_fragment_flag: more fragment flag
* @u.rx.hw_info.desc_tlv_members.src_info: PPE VP number
* @u.rx.hw_info.desc_tlv_members.mpdu_qos_control_valid: is qos ctrl valid
* @u.rx.hw_info.desc_tlv_members.tid_val: tid value
*
* @u.rx.hw_info.desc_tlv_members.peer_id: peer id
* @u.rx.hw_info.desc_tlv_members.ml_peer_valid: is ml peer valid
* @u.rx.hw_info.desc_tlv_members.logical_link_id: logical link id
* @u.rx.hw_info.desc_tlv_members.vdev_id: vdev id
* @u.rx.hw_info.desc_tlv_members.lmac_id: lmac id
* @u.rx.hw_info.desc_tlv_members.chip_id: chip id
* @u.rx.hw_info.desc_tlv_members.reserved2: reserved
*
* @u.rx.hw_info.desc_tlv_members.flag_chfrag_start: first fragment of msdu
* @u.rx.hw_info.desc_tlv_members.flag_chfrag_end: last fragment of msdu
* @u.rx.hw_info.desc_tlv_members.flag_chfrag_cont: msdu frag is continued
* @u.rx.hw_info.desc_tlv_members.msdu_len: msdu length
* @u.rx.hw_info.desc_tlv_members.flag_is_frag: msdu is frag
* @u.rx.hw_info.desc_tlv_members.flag_sa_valid: source address is valid
* @u.rx.hw_info.desc_tlv_members.flag_da_valid: dest address is valid
* @u.rx.hw_info.desc_tlv_members.flag_da_mcbc: is mcast/bcast msdu
 * @u.rx.hw_info.desc_tlv_members.l3_hdr_pad_msb: MSB of the L3 header pad length
* @u.rx.hw_info.desc_tlv_members.tcp_udp_chksum_fail: tcp/udp checksum failed
* @u.rx.hw_info.desc_tlv_members.ip_chksum_fail: ip checksum failed
* @u.rx.hw_info.desc_tlv_members.fr_ds: FROM DS bit is set
* @u.rx.hw_info.desc_tlv_members.to_ds: TO DS bit is set
* @u.rx.hw_info.desc_tlv_members.intra_bss: this is intra-bss msdu
* @u.rx.hw_info.desc_tlv_members.rsvd4: reserved
*
* @u.rx.hw_info.desc_tlv_members.release_source_module: release source
* @u.rx.hw_info.desc_tlv_members.bm_action: bm action
* @u.rx.hw_info.desc_tlv_members.buffer_or_desc_type: buffer or desc
* @u.rx.hw_info.desc_tlv_members.return_buffer_manager: rbm value
* @u.rx.hw_info.desc_tlv_members.reserved_2a: reserved
* @u.rx.hw_info.desc_tlv_members.cache_id: cache_id
* @u.rx.hw_info.desc_tlv_members.cookie_conversion_status: cc status
* @u.rx.hw_info.desc_tlv_members.rxdma_push_reason: rxdma push reason
* @u.rx.hw_info.desc_tlv_members.rxdma_error_code: rxdma error code
* @u.rx.hw_info.desc_tlv_members.reo_push_reason: reo push reason
* @u.rx.hw_info.desc_tlv_members.reo_error_code: reo error code
* @u.rx.hw_info.desc_tlv_members.wbm_internal_error: wbm internal error
*
* @u.rx.hw_info.desc_info.mpdu_desc_info[2]: reo destination mpdu desc info
* @u.rx.hw_info.desc_info.msdu_desc_info: reo destination msdu desc info
 * @u.rx.hw_info.desc_info.rx_error_codes: WBM error codes
*
*
* @u.tx.ext_cb_ptr: extended cb pointer
* @u.tx.fctx: ctx to handle special pkts defined by ftype
 * @u.tx.ftype: type of the special packet referenced by fctx
 * @u.tx.vdev_id: vdev id
 * @u.tx.len: length of the extra fragment
 * @u.tx.flags: per-packet TX flags, also addressable as a single byte
 * @u.tx.flags.bits.flag_efrag: extra frag payload is a wordstream
 * @u.tx.flags.bits.flag_nbuf: nbuf payload is a wordstream
 * @u.tx.flags.bits.num: number of extra frags (0 or 1)
 * @u.tx.flags.bits.flag_chfrag_start: first fragment of the chain
 * @u.tx.flags.bits.flag_chfrag_cont: fragment chain continues
 * @u.tx.flags.bits.flag_chfrag_end: last fragment of the chain
 * @u.tx.flags.bits.flag_ext_header: extension header is present
 * @u.tx.flags.bits.is_critical: packet is marked critical
 * @u.tx.flags.u8: all flag bits accessed as one byte
* @u.tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
 *                           (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]}
 * @u.tx.trace.is_packet_priv: packet priv flag
* @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
* @u.tx.trace.to_fw: Flag to indicate send this packet to FW
* @u.tx.trace.htt2_frm: flag (high-latency path only)
 * @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
 *                         (MGMT_ACTION)] - 3 bits
* @u.tx.trace.dp_trace: flag (Datapath trace)
* @u.tx.trace.is_bcast: flag (Broadcast packet)
* @u.tx.trace.is_mcast: flag (Multicast packet)
* @u.tx.trace.packet_type: flag (Packet type)
* @u.tx.trace.print: enable packet logging
*
 * @u.tx.vaddr: virtual address of the extra fragment
 * @u.tx.paddr: physical/DMA address of the extra fragment
*/
struct qdf_nbuf_cb {
/* common */
qdf_paddr_t paddr; /* of skb->data */
/* valid only in one direction */
union {
/* Note: MAX: 40 bytes */
struct {
void *ext_cb_ptr;
void *fctx;
uint16_t ftype:4,
rx_ctx_id:4,
fcs_err:1,
ipa_smmu_map:1,
flow_idx_valid:1,
flow_idx_timeout:1,
rsvd8:4;
uint8_t num_elements_in_list;
union {
uint8_t packet_state;
uint8_t dp_trace:1,
packet_track:3,
rsrvd:4;
} trace;
uint16_t protocol_tag;
uint16_t flow_tag;
union {
struct {
/* do not re-arrange the fields in
* the below 3 uint32_t words as
* they map exactly to the desc info
*/
#ifndef BIG_ENDIAN_HOST
/* 1st word rx_mpdu_desc_info */
uint32_t msdu_count:8,
fragment_flag:1,
flag_retry:1,
flag_is_ampdu:1,
bar_frame:1,
pn_fields_contain_valid_info:1,
is_raw_frame:1,
more_fragment_flag:1,
src_info:12,
mpdu_qos_control_valid:1,
tid_val:4;
#else
uint32_t tid_val:4,
mpdu_qos_control_valid:1,
src_info:12,
more_fragment_flag:1,
is_raw_frame:1,
pn_fields_contain_valid_info:1,
bar_frame:1,
flag_is_ampdu:1,
flag_retry:1,
fragment_flag:1,
msdu_count:8;
#endif
/* 2nd word rx_mpdu_desc_info */
uint32_t peer_id:13,
ml_peer_valid:1,
logical_link_id:2,
vdev_id:8,
lmac_id:2,
chip_id:3,
reserved2:3;
#ifndef BIG_ENDIAN_HOST
/* 1st word of rx_msdu_desc_info */
uint32_t flag_chfrag_start:1,
flag_chfrag_end:1,
flag_chfrag_cont:1,
msdu_len:14,
flag_is_frag:1,
flag_sa_valid:1,
flag_da_valid:1,
flag_da_mcbc:1,
l3_hdr_pad_msb:1,
tcp_udp_chksum_fail:1,
ip_chksum_fail:1,
fr_ds:1,
to_ds:1,
intra_bss:1,
rsvd4:5;
uint32_t release_source_module:3,
bm_action:3,
buffer_or_desc_type:3,
return_buffer_manager:4,
reserved_2a:2,
cache_id:1,
cookie_conversion_status:1,
rxdma_push_reason:2,
rxdma_error_code:5,
reo_push_reason:2,
reo_error_code:5,
wbm_internal_error:1;
#else
uint32_t rsvd4:5,
intra_bss:1,
to_ds:1,
fr_ds:1,
ip_chksum_fail:1,
tcp_udp_chksum_fail:1,
l3_hdr_pad_msb:1,
flag_da_mcbc:1,
flag_da_valid:1,
flag_sa_valid:1,
flag_is_frag:1,
msdu_len:14,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_chfrag_start:1;
uint32_t wbm_internal_error:1,
reo_error_code:5,
reo_push_reason:2,
rxdma_error_code:5,
rxdma_push_reason:2,
cookie_conversion_status:1,
cache_id:1,
reserved_2a:2,
return_buffer_manager:4,
buffer_or_desc_type:3,
bm_action:3,
release_source_module:3;
#endif
} desc_tlv_members;
struct {
uint32_t mpdu_desc_info[2];
uint32_t msdu_desc_info;
uint32_t rx_error_codes;
} desc_info;
} hw_info;
} rx;
/* Note: MAX: 40 bytes */
struct {
void *ext_cb_ptr;
void *fctx;
uint8_t ftype;
uint8_t vdev_id;
uint16_t len;
union {
struct {
uint8_t flag_efrag:1,
flag_nbuf:1,
num:1,
flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_ext_header:1,
is_critical:1;
} bits;
uint8_t u8;
} flags;
struct {
uint8_t packet_state:7,
is_packet_priv:1;
uint8_t packet_track:3,
to_fw:1,
/* used only for hl */
htt2_frm:1,
proto_type:3;
uint8_t dp_trace:1,
is_bcast:1,
is_mcast:1,
packet_type:4,
print:1;
} trace;
unsigned char *vaddr;
qdf_paddr_t paddr;
} tx;
} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
FIELD_SIZEOF(struct sk_buff, cb));
#endif
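/*
 * Editor's note: the budget is exact on a 64-bit DMA-address build:
 * the 8-byte paddr plus the 40-byte tx/rx overlay union fills the
 * 48-byte skb->cb completely, which the assert above checks at
 * compile time.
 */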
/*
* access macros to qdf_nbuf_cb
* Note: These macros can be used as L-values as well as R-values.
* When used as R-values, they effectively function as "get" macros
* When used as L_values, they effectively function as "set" macros
*/
#define QDF_NBUF_CB_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
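/*
 * Editor's sketch (hypothetical helper): as noted above, the macros
 * work both ways, e.g. QDF_NBUF_CB_RX_CTX_ID() as "set" and "get":
 */
static inline void example_rx_ctx_id(struct sk_buff *skb, uint8_t ring)
{
	QDF_NBUF_CB_RX_CTX_ID(skb) = ring;	/* set (L-value) */
	if (QDF_NBUF_CB_RX_CTX_ID(skb) != ring)	/* get (R-value) */
		pr_err("qdf_nbuf_cb readback mismatch\n");
}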
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
#define QDF_NBUF_CB_RX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_chfrag_end)
#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_da_mcbc)
#define QDF_NBUF_CB_RX_L3_PAD_MSB(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.l3_hdr_pad_msb)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.is_raw_frame)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.fcs_err)
#define QDF_NBUF_CB_RX_MSDU_DESC_INFO(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.msdu_desc_info)
#define QDF_NBUF_CB_RX_MPDU_DESC_INFO(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info)
#define QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info[0])
#define QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info[1])
#define QDF_NBUF_CB_RX_PEER_ID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.peer_id)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.vdev_id)
#define QDF_NBUF_CB_RX_PACKET_LMAC_ID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.lmac_id)
#define QDF_NBUF_CB_RX_PKT_LEN(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.msdu_len)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.tid_val)
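/*
 * Editor's sketch (hypothetical helper, illustrative values): the
 * desc_info/desc_tlv_members overlay lets the RX path store the raw
 * descriptor words once and decode individual fields later through
 * the same storage:
 */
static inline void example_rx_decode(struct sk_buff *skb,
				     const uint32_t mpdu_info[2],
				     uint32_t msdu_info)
{
	/* raw 32-bit words copied from the ring descriptor */
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(skb) = mpdu_info[0];
	QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(skb) = mpdu_info[1];
	QDF_NBUF_CB_RX_MSDU_DESC_INFO(skb) = msdu_info;

	/* same bytes, read back through the bitfield view */
	pr_debug("peer %u vdev %u len %u tid %u\n",
		 QDF_NBUF_CB_RX_PEER_ID(skb),
		 QDF_NBUF_CB_RX_VDEV_ID(skb),
		 QDF_NBUF_CB_RX_PKT_LEN(skb),
		 QDF_NBUF_CB_RX_TID_VAL(skb));
}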
#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
qdf_nbuf_set_state(skb, PACKET_STATE)
#define QDF_NBUF_CB_TX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */
/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.to_fw)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
#define __qdf_nbuf_reset_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
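/*
 * Editor's sketch (hypothetical helper, illustrative addresses): the
 * single permitted extra fragment (reserved for the HTC/HTT header,
 * per note 4 of the struct documentation) is attached through the
 * macros above; __qdf_nbuf_get_num_frags() then reports 2.
 */
static inline void example_attach_ext_frag(struct sk_buff *skb,
					   unsigned char *hdr_va,
					   qdf_dma_addr_t hdr_pa,
					   uint16_t hdr_len)
{
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) = hdr_va;
	QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) = hdr_pa;
	QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) = hdr_len;
}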
/* ext_cb accessor macros and internal API's */
#define QDF_NBUF_CB_EXT_CB(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ext_cb_ptr)
#define __qdf_nbuf_set_ext_cb(skb, ref) \
do { \
@@ -42,21 +556,15 @@
QDF_NBUF_CB_EXT_CB((skb))
/* fctx accessor macros and internal API's*/
#define QDF_NBUF_CB_RX_FCTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.fctx)
#define QDF_NBUF_CB_TX_FCTX(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.fctx)
#define QDF_NBUF_CB_RX_INTRA_BSS(skb) \
	(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.hw_info.desc_tlv_members.intra_bss)
#define __qdf_nbuf_set_rx_fctx_type(skb, ctx, type) \
do { \
@@ -83,8 +591,7 @@
QDF_NBUF_CB_TX_FCTX((skb))
#define QDF_NBUF_CB_RX_PROTOCOL_TAG(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.protocol_tag)
#define __qdf_nbuf_set_rx_protocol_tag(skb, val) \
((QDF_NBUF_CB_RX_PROTOCOL_TAG((skb))) = val)
@@ -93,8 +600,7 @@
(QDF_NBUF_CB_RX_PROTOCOL_TAG((skb)))
#define QDF_NBUF_CB_RX_FLOW_TAG(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_tag)
#define __qdf_nbuf_set_rx_flow_tag(skb, val) \
((QDF_NBUF_CB_RX_FLOW_TAG((skb))) = val)
@@ -104,7 +610,7 @@
#define QDF_NBUF_CB_RX_FLOW_IDX_VALID(skb) \
(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flow_idx_valid)
#define __qdf_nbuf_set_rx_flow_idx_valid(skb, val) \
((QDF_NBUF_CB_RX_FLOW_IDX_VALID((skb))) = val)
@@ -114,7 +620,7 @@
#define QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT(skb) \
(((struct qdf_nbuf_cb *) \
	((skb)->cb))->u.rx.flow_idx_timeout)
#define __qdf_nbuf_set_rx_flow_idx_timeout(skb, val) \
((QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT((skb))) = val)
@@ -123,8 +629,153 @@
(QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT((skb)))
#define QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
	(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipa_smmu_map)
#define __qdf_nbuf_data_attr_get(skb) (0)
#define __qdf_nbuf_data_attr_set(skb, data_attr)
/**
* __qdf_nbuf_map_nbytes_single() - map nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: QDF_STATUS
*/
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr;
	QDF_NBUF_CB_PADDR(buf) = paddr = (qdf_dma_addr_t)buf->data;
return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr;
QDF_STATUS ret;
/* assume that the OS only provides a single fragment */
QDF_NBUF_CB_PADDR(buf) = paddr =
dma_map_single(osdev->dev, buf->data,
nbytes, __qdf_dma_dir_to_os(dir));
ret = dma_mapping_error(osdev->dev, paddr) ?
QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
if (QDF_IS_STATUS_SUCCESS(ret))
__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
dir, true);
return ret;
}
#endif
/**
* __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: none
*/
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
qdf_dma_dir_t dir, int nbytes)
{
qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);
if (qdf_likely(paddr)) {
__qdf_record_nbuf_nbytes(
__qdf_nbuf_get_end_offset(buf), dir, false);
dma_unmap_single(osdev->dev, paddr, nbytes,
__qdf_dma_dir_to_os(dir));
return;
}
}
#endif
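/*
 * Editor's sketch (hypothetical helper): the expected pairing of the
 * two functions above. Note that the unmap path is a no-op when the
 * cb holds a zero paddr.
 */
static inline QDF_STATUS example_dma_cycle(qdf_device_t osdev,
					   struct sk_buff *skb)
{
	QDF_STATUS status;

	status = __qdf_nbuf_map_nbytes_single(osdev, skb,
					      QDF_DMA_TO_DEVICE, skb->len);
	if (QDF_IS_STATUS_ERROR(status))
		return status;

	/* ... hand the frame to hardware and wait for completion ... */

	__qdf_nbuf_unmap_nbytes_single(osdev, skb, QDF_DMA_TO_DEVICE,
				       skb->len);
	return QDF_STATUS_SUCCESS;
}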
/**
* __qdf_nbuf_reset() - reset the buffer data and pointer
* @skb: Network buf instance
* @reserve: reserve
* @align: align
*
* Return: none
*/
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
int offset;
skb_push(skb, skb_headroom(skb));
skb_put(skb, skb_tailroom(skb));
memset(skb->data, 0x0, skb->len);
skb_trim(skb, 0);
skb_reserve(skb, NET_SKB_PAD);
memset(skb->cb, 0x0, sizeof(skb->cb));
/*
* The default is for netbuf fragments to be interpreted
* as wordstreams rather than bytestreams.
*/
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
/*
* Align & make sure that the tail & data are adjusted properly
*/
if (align) {
offset = ((unsigned long)skb->data) % align;
if (offset)
skb_reserve(skb, align - offset);
}
skb_reserve(skb, reserve);
}
/**
* __qdf_nbuf_len() - return the amount of valid data in the skb
* @skb: Pointer to network buffer
*
 * This API returns the amount of valid data in the skb; if extra
 * fragments are present, it returns the total length.
*
* Return: network buffer length
*/
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
int i, extra_frag_len = 0;
i = QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb);
if (i > 0)
extra_frag_len = QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);
return extra_frag_len + skb->len;
}
/**
* __qdf_nbuf_num_frags_init() - init extra frags
* @skb: sk buffer
*
* Return: none
*/
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}
/**
* qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb