qcacmn: create qdf_nbuf_cb definition for win and mcc

WIN and MCC shared a common definition of qdf_nbuf_cb.
Considering that there are various fields which are
not common, it was decided to have 2 different definitions
for WIN and MCC. In the case of WIN, the per-pkt cycles will
be significantly reduced in the RX path.

CRs-Fixed: 3257879
Change-Id: I5d3fa7c91592afb905702f2dca49972be89664e3
このコミットが含まれているのは:
Tallapragada Kalyan
2022-11-14 14:49:25 +05:30
committed by Madan Koyyalamudi
コミット 44e4444e61
6個のファイルの変更1553行の追加796行の削除

ファイルの表示

@@ -121,516 +121,6 @@ typedef union {
qdf_dma_addr_t dma_addr;
} qdf_paddr_t;
/**
* struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
* - data passed between layers of the driver.
*
* Notes:
* 1. Hard limited to 48 bytes. Please count your bytes
* 2. The size of this structure has to be easily calculable and
* consistently so: do not use any conditional compile flags
* 3. Split into a common part followed by a tx/rx overlay
* 4. There is only one extra frag, which represents the HTC/HTT header
* 5. "ext_cb_pt" must be the first member in both TX and RX unions
* for the priv_cb_w since it must be at same offset for both
* TX and RX union
* 6. "ipa.owned" bit must be first member in both TX and RX unions
* for the priv_cb_m since it must be at same offset for both
* TX and RX union.
*
 * @paddr : physical address retrieved by dma_map of nbuf->data
* @u: union of rx and tx data
* @u.rx: rx data
* @u.rx.dev: union of priv_cb_w and priv_cb_m
*
* @u.rx.dev.priv_cb_w:
* @u.rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
* @u.rx.dev.priv_cb_w.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
* @u.rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_w.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet
* type
* @u.rx.dev.priv_cb_w.flow_idx_valid: flow entry is found
* @u.rx.dev.priv_cb_w.flow_idx_timeout: flow entry search timed out
 * @u.rx.dev.priv_cb_w.rsvd: reserved bits
* @u.rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
*
* @u.rx.dev.priv_cb_m:
* @u.rx.dev.priv_cb_m.ipa.owned: packet owned by IPA
* @u.rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
* @u.rx.dev.priv_cb_m.flush_ind: flush indication
 * @u.rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
* @u.rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
* @u.rx.dev.priv_cb_m.exc_frm: exception frame
* @u.rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
* sw exception bit from ring desc
* @u.rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
* @u.rx.dev.priv_cb_m.reserved1: reserved bits
* @u.rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
* @u.rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
* @u.rx.dev.priv_cb_m.dp: Union of wifi3 and wifi2 structs
* @u.rx.dev.priv_cb_m.dp.wifi3: wifi3 data
* @u.rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_m.dp.wifi2: wifi2 data
* @u.rx.dev.priv_cb_m.dp.wifi2.map_index:
* @u.rx.dev.priv_cb_m.lro_ctx: LRO context
*
* @u.rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
* @u.rx.tcp_proto: L4 protocol is TCP
* @u.rx.tcp_pure_ack: A TCP ACK packet with no payload
* @u.rx.ipv6_proto: L3 protocol is IPV6
* @u.rx.ip_offset: offset to IP header
* @u.rx.tcp_offset: offset to TCP header
* @u.rx.rx_ctx_id: Rx context id
* @u.rx.fcs_err: FCS error
* @u.rx.is_raw_frame: RAW frame
* @u.rx.num_elements_in_list: number of elements in the nbuf list
*
* @u.rx.tcp_udp_chksum: L4 payload checksum
* @u.rx.tcp_win: TCP window size
*
* @u.rx.flow_id: 32bit flow id
*
* @u.rx.flag_chfrag_start: first MSDU in an AMSDU
* @u.rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.rx.flag_chfrag_end: last MSDU in an AMSDU
* @u.rx.flag_retry: flag to indicate MSDU is retried
 * @u.rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
* @u.rx.flag_da_valid: flag to indicate DA is valid for RX packet
* @u.rx.flag_sa_valid: flag to indicate SA is valid for RX packet
* @u.rx.flag_is_frag: flag to indicate skb has frag list
*
* @u.rx.trace: combined structure for DP and protocol trace
* @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.rx.trace.dp_trace: flag (Datapath trace)
* @u.rx.trace.packet_track: RX_DATA packet
 * @u.rx.trace.rsrvd: reserved bits
*
* @u.rx.vdev_id: vdev_id for RX pkt
* @u.rx.tid_val: tid value
* @u.rx.ftype: mcast2ucast, TSO, SG, MESH
*
* @u.tx: tx data
* @u.tx.dev: union of priv_cb_w and priv_cb_m
*
* @u.tx.dev.priv_cb_w:
* @u.tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
*
* @u.tx.dev.priv_cb_m:
 * @u.tx.dev.priv_cb_m.ipa: IPA-specific data
 * @u.tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @u.tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
* @u.tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
* + (1) CE classification enablement bit
* + (2) packet type (802.3 or Ethernet type II)
* + (3) packet offset (usually length of HTC/HTT descr)
* @u.tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
* @u.tx.dev.priv_cb_m.dma_option: DMA options
* @u.tx.dev.priv_cb_m.dma_option.mgmt_desc_id: mgmt descriptor for tx
* completion cb
* @u.tx.dev.priv_cb_m.dma_option.dma_option.bi_map: flag to do bi-direction
* dma map
* @u.tx.dev.priv_cb_m.dma_option.dma_option.reserved: reserved bits for future
* use
* @u.tx.dev.priv_cb_m.flag_notify_comp: reserved
* @u.tx.dev.priv_cb_m.rsvd: reserved
* @u.tx.dev.priv_cb_m.reserved: reserved
*
* @u.tx.ftype: mcast2ucast, TSO, SG, MESH
* @u.tx.vdev_id: vdev (for protocol trace)
* @u.tx.len: length of efrag pointed by the above pointers
*
* @u.tx.flags: union of flag representations
* @u.tx.flags.bits: flags represent as individual bitmasks
* @u.tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
* @u.tx.flags.bits.num: number of extra frags ( 0 or 1)
* @u.tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
* @u.tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
* @u.tx.flags.bits.flag_ext_header: extended flags
* @u.tx.flags.bits.is_critical: flag indicating a critical frame
* @u.tx.flags.u8: flags as a single u8
* @u.tx.trace: combined structure for DP and protocol trace
* @u.tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.tx.trace.is_packet_priv:
* @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
* @u.tx.trace.to_fw: Flag to indicate send this packet to FW
* @u.tx.trace.htt2_frm: flag (high-latency path only)
* @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
* + (MGMT_ACTION)] - 4 bits
* @u.tx.trace.dp_trace: flag (Datapath trace)
* @u.tx.trace.is_bcast: flag (Broadcast packet)
* @u.tx.trace.is_mcast: flag (Multicast packet)
* @u.tx.trace.packet_type: flag (Packet type)
* @u.tx.trace.print: enable packet logging
*
* @u.tx.vaddr: virtual address of ~
* @u.tx.paddr: physical/DMA address of ~
*/
struct qdf_nbuf_cb {
/* common */
qdf_paddr_t paddr; /* of skb->data */
/* valid only in one direction */
union {
/* Note: MAX: 40 bytes */
struct {
/* per-OEM private area; layout differs between WIN (_w) and MCC (_m) */
union {
struct {
void *ext_cb_ptr;
void *fctx;
/* msdu_len is capped at 14 bits (16K - 1) */
uint16_t msdu_len : 14,
flag_intra_bss : 1,
ipa_smmu_map : 1;
uint16_t peer_id;
uint8_t protocol_tag;
uint8_t flow_idx_valid: 1,
flow_idx_timeout: 1,
rsvd:6;
uint16_t flow_tag;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
 * control block and tx control block.
 * Do not change location of this bit.
 */
uint32_t ipa_owned:1,
peer_cached_buf_frm:1,
flush_ind:1,
packet_buf_pool:1,
l3_hdr_pad:3,
/* exception frame flag */
exc_frm:1,
ipa_smmu_map:1,
reo_dest_ind_or_sw_excpt:5,
lmac_id:2,
reserved1:16;
uint32_t tcp_seq_num;
uint32_t tcp_ack_num;
union {
struct {
uint16_t msdu_len;
uint16_t peer_id;
} wifi3;
struct {
uint32_t map_index;
} wifi2;
} dp;
unsigned char *lro_ctx;
} priv_cb_m;
} dev;
/* fields shared by all RX consumers */
uint32_t lro_eligible:1,
tcp_proto:1,
tcp_pure_ack:1,
ipv6_proto:1,
ip_offset:7,
tcp_offset:7,
rx_ctx_id:4,
fcs_err:1,
is_raw_frame:1,
num_elements_in_list:8;
uint32_t tcp_udp_chksum:16,
tcp_win:16;
uint32_t flow_id;
/* per-MSDU AMSDU/validity flags */
uint8_t flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_retry:1,
flag_da_mcbc:1,
flag_da_valid:1,
flag_sa_valid:1,
flag_is_frag:1;
/* DP/protocol trace state; packet_state overlays the bitfields */
union {
uint8_t packet_state;
uint8_t dp_trace:1,
packet_track:3,
rsrvd:4;
} trace;
uint16_t vdev_id:8,
tid_val:4,
ftype:4;
} rx;
/* Note: MAX: 40 bytes */
struct {
union {
struct {
/* must stay first; shared offset with rx.dev.priv_cb_w */
void *ext_cb_ptr;
void *fctx;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
 * control block and tx control block.
 * Do not change location of this bit.
 */
struct {
uint32_t owned:1,
priv:31;
} ipa;
uint32_t data_attr;
uint16_t desc_id;
uint16_t mgmt_desc_id;
struct {
uint8_t bi_map:1,
reserved:7;
} dma_option;
uint8_t flag_notify_comp:1,
rsvd:7;
uint8_t reserved[2];
} priv_cb_m;
} dev;
uint8_t ftype;
uint8_t vdev_id;
uint16_t len;
/* bits overlays u8 so all flags can be read/written at once */
union {
struct {
uint8_t flag_efrag:1,
flag_nbuf:1,
num:1,
flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_ext_header:1,
is_critical:1;
} bits;
uint8_t u8;
} flags;
struct {
uint8_t packet_state:7,
is_packet_priv:1;
uint8_t packet_track:3,
to_fw:1,
htt2_frm:1,
proto_type:3;
uint8_t dp_trace:1,
is_bcast:1,
is_mcast:1,
packet_type:4,
print:1;
} trace;
unsigned char *vaddr;
qdf_paddr_t paddr;
} tx;
} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */
/* skb->cb is only 48 bytes; fail the build if the control block outgrows it */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
FIELD_SIZEOF(struct sk_buff, cb));
#endif
/*
 * access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 * When used as R-values, they effectively function as "get" macros
 * When used as L_values, they effectively function as "set" macros
 */
#define QDF_NBUF_CB_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
/* Rx accessor macros */
#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
#define QDF_NBUF_CB_RX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_end)
#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_mcbc)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.is_raw_frame)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.tid_val)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.fcs_err)
#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
qdf_nbuf_set_state(skb, PACKET_STATE)
/* Tx accessor macros */
#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
#define QDF_NBUF_CB_TX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */
/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.to_fw)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
#define __qdf_nbuf_reset_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
/*
 * end of nbuf->cb access macros
 */
/* callback type for trace updates; receives a string buffer */
typedef void (*qdf_nbuf_trace_update_t)(char *);
/* callback type for custom nbuf free handling */
typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
@@ -797,26 +287,9 @@ typedef void (*qdf_nbuf_free_t)(__qdf_nbuf_t);
/* get the protocol-trace type recorded in the nbuf cb */
#define __qdf_nbuf_trace_get_proto_type(skb) \
QDF_NBUF_CB_TX_PROTO_TYPE(skb)
/* get/set the CE data attributes stored in the tx cb */
#define __qdf_nbuf_data_attr_get(skb) \
QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
/* deletion-safe iteration over an skb queue */
#define __qdf_nbuf_queue_walk_safe(queue, var, tvar) \
skb_queue_walk_safe(queue, var, tvar)
/**
* __qdf_nbuf_num_frags_init() - init extra frags
* @skb: sk buffer
*
* Return: none
*/
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	/* clear the extra-frag count (flags.bits.num) in the cb */
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}
/*
* prototypes. Implemented in qdf_nbuf.c
*/
@@ -1786,26 +1259,6 @@ static inline QDF_STATUS __qdf_to_status(signed int error)
}
}
/**
* __qdf_nbuf_len() - return the amount of valid data in the skb
* @skb: Pointer to network buffer
*
* This API returns the amount of valid data in the skb, If there are frags
* then it returns total length.
*
* Return: network buffer length
*/
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	size_t total_len = skb->len;

	/* there is at most one extra fragment (the HTC/HTT header);
	 * include its length only when it is present
	 */
	if (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) > 0)
		total_len += QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return total_len;
}
/**
* __qdf_nbuf_cat() - link two nbufs
* @dst: Buffer to piggyback into
@@ -2074,46 +1527,6 @@ __qdf_nbuf_unlink_no_lock(struct sk_buff *skb, struct sk_buff_head *list)
__skb_unlink(skb, list);
}
/**
* __qdf_nbuf_reset() - reset the buffer data and pointer
* @skb: Network buf instance
* @reserve: reserve
* @align: align
*
* Return: none
*/
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
int offset;
/* expand data to cover the whole buffer, zero it, then shrink back */
skb_push(skb, skb_headroom(skb));
skb_put(skb, skb_tailroom(skb));
memset(skb->data, 0x0, skb->len);
skb_trim(skb, 0);
/* restore the default kernel headroom and clear the control block */
skb_reserve(skb, NET_SKB_PAD);
memset(skb->cb, 0x0, sizeof(skb->cb));
/*
 * The default is for netbuf fragments to be interpreted
 * as wordstreams rather than bytestreams.
 */
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;
/*
 * Align & make sure that the tail & data are adjusted properly
 */
if (align) {
offset = ((unsigned long)skb->data) % align;
if (offset)
skb_reserve(skb, align - offset);
}
/* caller-requested headroom is applied after alignment */
skb_reserve(skb, reserve);
}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
/**
* __qdf_nbuf_is_dev_scratch_supported() - dev_scratch support for network
@@ -3401,78 +2814,6 @@ static inline void __qdf_record_nbuf_nbytes(
}
#endif /* CONFIG_WLAN_SYSFS_MEM_STATS */
/**
* __qdf_nbuf_map_nbytes_single() - map nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: QDF_STATUS
*/
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf,
	qdf_dma_dir_t dir, int nbytes)
{
	/*
	 * Simulation host has no real DMA mapping: record the buffer's
	 * virtual address as the "DMA" address. The explicit cast avoids
	 * the implicit pointer-to-integer conversion (a C constraint
	 * violation) of the previous `= buf->data` assignment; the unused
	 * local paddr is dropped as well.
	 */
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)buf->data;
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
	qdf_device_t osdev, struct sk_buff *buf,
	qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;
	QDF_STATUS ret;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	ret = dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
	/* account successful mappings in the nbuf byte statistics */
	if (QDF_IS_STATUS_SUCCESS(ret))
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, true);
	return ret;
}
#endif
/**
* __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: none
*/
#if defined(A_SIMOS_DEVHOST)
/* Simulation host: mapping is a no-op, so unmapping is too. */
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr = QDF_NBUF_CB_PADDR(buf);

	/* nothing to undo when the buffer was never DMA-mapped */
	if (!paddr)
		return;

	/* subtract this mapping from the nbuf byte statistics */
	__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf), dir, false);
	dma_unmap_single(osdev->dev, paddr, nbytes,
			 __qdf_dma_dir_to_os(dir));
}
#endif
static inline struct sk_buff *
__qdf_nbuf_queue_head_dequeue(struct sk_buff_head *skb_queue_head)
{

ファイルの表示

@@ -27,6 +27,503 @@
#ifndef _I_QDF_NBUF_M_H
#define _I_QDF_NBUF_M_H
/**
* struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
* - data passed between layers of the driver.
*
* Notes:
* 1. Hard limited to 48 bytes. Please count your bytes
* 2. The size of this structure has to be easily calculable and
* consistently so: do not use any conditional compile flags
* 3. Split into a common part followed by a tx/rx overlay
* 4. There is only one extra frag, which represents the HTC/HTT header
* 5. "ext_cb_pt" must be the first member in both TX and RX unions
* for the priv_cb_w since it must be at same offset for both
* TX and RX union
* 6. "ipa.owned" bit must be first member in both TX and RX unions
* for the priv_cb_m since it must be at same offset for both
* TX and RX union.
*
 * @paddr : physical address retrieved by dma_map of nbuf->data
* @u: union of rx and tx data
* @u.rx: rx data
* @u.rx.dev: union of priv_cb_w and priv_cb_m
*
* @u.rx.dev.priv_cb_w:
* @u.rx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.rx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
* @u.rx.dev.priv_cb_w.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_w.flag_intra_bss: flag to indicate this is intra bss packet
* @u.rx.dev.priv_cb_w.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_w.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_w.protocol_tag: protocol tag set by app for rcvd packet
* type
* @u.rx.dev.priv_cb_w.flow_tag: flow tag set by application for 5 tuples rcvd
*
* @u.rx.dev.priv_cb_m:
* @u.rx.dev.priv_cb_m.ipa.owned: packet owned by IPA
* @u.rx.dev.priv_cb_m.peer_cached_buf_frm: peer cached buffer
* @u.rx.dev.priv_cb_m.flush_ind: flush indication
 * @u.rx.dev.priv_cb_m.packet_buf_pool: packet buffer pool
* @u.rx.dev.priv_cb_m.l3_hdr_pad: L3 header padding offset
* @u.rx.dev.priv_cb_m.exc_frm: exception frame
* @u.rx.dev.priv_cb_m.ipa_smmu_map: do IPA smmu map
* @u.rx.dev.priv_cb_m.reo_dest_ind_or_sw_excpt: reo destination indication or
* sw exception bit from ring desc
* @u.rx.dev.priv_cb_m.lmac_id: lmac id for RX packet
* @u.rx.dev.priv_cb_m.reserved1: reserved bits
* @u.rx.dev.priv_cb_m.tcp_seq_num: TCP sequence number
* @u.rx.dev.priv_cb_m.tcp_ack_num: TCP ACK number
* @u.rx.dev.priv_cb_m.dp: Union of wifi3 and wifi2 structs
* @u.rx.dev.priv_cb_m.dp.wifi3: wifi3 data
* @u.rx.dev.priv_cb_m.dp.wifi3.msdu_len: length of RX packet
* @u.rx.dev.priv_cb_m.dp.wifi3.peer_id: peer_id for RX packet
* @u.rx.dev.priv_cb_m.dp.wifi2: wifi2 data
* @u.rx.dev.priv_cb_m.dp.wifi2.map_index:
* @u.rx.dev.priv_cb_m.lro_ctx: LRO context
*
* @u.rx.lro_eligible: flag to indicate whether the MSDU is LRO eligible
* @u.rx.tcp_proto: L4 protocol is TCP
* @u.rx.tcp_pure_ack: A TCP ACK packet with no payload
* @u.rx.ipv6_proto: L3 protocol is IPV6
* @u.rx.ip_offset: offset to IP header
* @u.rx.tcp_offset: offset to TCP header
* @u.rx.rx_ctx_id: Rx context id
* @u.rx.fcs_err: FCS error
* @u.rx.is_raw_frame: RAW frame
* @u.rx.num_elements_in_list: number of elements in the nbuf list
*
* @u.rx.tcp_udp_chksum: L4 payload checksum
* @u.rx.tcp_win: TCP window size
*
* @u.rx.flow_id: 32bit flow id
*
* @u.rx.flag_chfrag_start: first MSDU in an AMSDU
* @u.rx.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.rx.flag_chfrag_end: last MSDU in an AMSDU
* @u.rx.flag_retry: flag to indicate MSDU is retried
 * @u.rx.flag_da_mcbc: flag to indicate multicast or broadcast packets
* @u.rx.flag_da_valid: flag to indicate DA is valid for RX packet
* @u.rx.flag_sa_valid: flag to indicate SA is valid for RX packet
* @u.rx.flag_is_frag: flag to indicate skb has frag list
*
* @u.rx.trace: combined structure for DP and protocol trace
* @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.rx.trace.dp_trace: flag (Datapath trace)
* @u.rx.trace.packet_track: RX_DATA packet
 * @u.rx.trace.rsrvd: reserved bits
*
* @u.rx.vdev_id: vdev_id for RX pkt
* @u.rx.tid_val: tid value
* @u.rx.ftype: mcast2ucast, TSO, SG, MESH
*
* @u.tx: tx data
* @u.tx.dev: union of priv_cb_w and priv_cb_m
*
* @u.tx.dev.priv_cb_w:
* @u.tx.dev.priv_cb_w.ext_cb_ptr: extended cb pointer
* @u.tx.dev.priv_cb_w.fctx: ctx to handle special pkts defined by ftype
*
* @u.tx.dev.priv_cb_m:
 * @u.tx.dev.priv_cb_m.ipa: IPA-specific data
 * @u.tx.dev.priv_cb_m.ipa.owned: packet owned by IPA
 * @u.tx.dev.priv_cb_m.ipa.priv: private data, used by IPA
* @u.tx.dev.priv_cb_m.data_attr: value that is programmed in CE descr, includes
* + (1) CE classification enablement bit
* + (2) packet type (802.3 or Ethernet type II)
* + (3) packet offset (usually length of HTC/HTT descr)
* @u.tx.dev.priv_cb_m.desc_id: tx desc id, used to sync between host and fw
* @u.tx.dev.priv_cb_m.dma_option: DMA options
* @u.tx.dev.priv_cb_m.dma_option.mgmt_desc_id: mgmt descriptor for tx
* completion cb
* @u.tx.dev.priv_cb_m.dma_option.dma_option.bi_map: flag to do bi-direction
* dma map
* @u.tx.dev.priv_cb_m.dma_option.dma_option.reserved: reserved bits for future
* use
* @u.tx.dev.priv_cb_m.flag_notify_comp: reserved
* @u.tx.dev.priv_cb_m.rsvd: reserved
* @u.tx.dev.priv_cb_m.reserved: reserved
*
* @u.tx.ftype: mcast2ucast, TSO, SG, MESH
* @u.tx.vdev_id: vdev (for protocol trace)
* @u.tx.len: length of efrag pointed by the above pointers
*
* @u.tx.flags: union of flag representations
* @u.tx.flags.bits: flags represent as individual bitmasks
* @u.tx.flags.bits.flag_efrag: flag, efrag payload to be swapped (wordstream)
* @u.tx.flags.bits.num: number of extra frags ( 0 or 1)
* @u.tx.flags.bits.nbuf: flag, nbuf payload to be swapped (wordstream)
* @u.tx.flags.bits.flag_chfrag_start: first MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_cont: middle or part of MSDU in an AMSDU
* @u.tx.flags.bits.flag_chfrag_end: last MSDU in an AMSDU
* @u.tx.flags.bits.flag_ext_header: extended flags
* @u.tx.flags.bits.is_critical: flag indicating a critical frame
* @u.tx.flags.u8: flags as a single u8
* @u.tx.trace: combined structure for DP and protocol trace
* @u.tx.trace.packet_stat: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.tx.trace.is_packet_priv:
* @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
* @u.tx.trace.to_fw: Flag to indicate send this packet to FW
* @u.tx.trace.htt2_frm: flag (high-latency path only)
* @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
* + (MGMT_ACTION)] - 4 bits
* @u.tx.trace.dp_trace: flag (Datapath trace)
* @u.tx.trace.is_bcast: flag (Broadcast packet)
* @u.tx.trace.is_mcast: flag (Multicast packet)
* @u.tx.trace.packet_type: flag (Packet type)
* @u.tx.trace.print: enable packet logging
*
* @u.tx.vaddr: virtual address of ~
* @u.tx.paddr: physical/DMA address of ~
*/
struct qdf_nbuf_cb {
/* common */
qdf_paddr_t paddr; /* of skb->data */
/* valid only in one direction */
union {
/* Note: MAX: 40 bytes */
struct {
/* per-OEM private area (WIN vs MCC layouts) */
union {
struct {
void *ext_cb_ptr;
void *fctx;
/* msdu_len is capped at 14 bits (16K - 1) */
uint16_t msdu_len : 14,
flag_intra_bss : 1,
ipa_smmu_map : 1;
uint16_t peer_id;
uint16_t protocol_tag;
uint16_t flow_tag;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
 * control block and tx control block.
 * Do not change location of this bit.
 */
uint32_t ipa_owned:1,
peer_cached_buf_frm:1,
flush_ind:1,
packet_buf_pool:1,
l3_hdr_pad:3,
/* exception frame flag */
exc_frm:1,
ipa_smmu_map:1,
reo_dest_ind_or_sw_excpt:5,
lmac_id:2,
reserved1:16;
uint32_t tcp_seq_num;
uint32_t tcp_ack_num;
union {
struct {
uint16_t msdu_len;
uint16_t peer_id;
} wifi3;
struct {
uint32_t map_index;
} wifi2;
} dp;
unsigned char *lro_ctx;
} priv_cb_m;
} dev;
/* fields shared by all RX consumers */
uint32_t lro_eligible:1,
tcp_proto:1,
tcp_pure_ack:1,
ipv6_proto:1,
ip_offset:7,
tcp_offset:7,
rx_ctx_id:4,
fcs_err:1,
is_raw_frame:1,
num_elements_in_list:8;
uint32_t tcp_udp_chksum:16,
tcp_win:16;
uint32_t flow_id;
/* per-MSDU AMSDU/validity flags */
uint8_t flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_retry:1,
flag_da_mcbc:1,
flag_da_valid:1,
flag_sa_valid:1,
flag_is_frag:1;
/* DP/protocol trace state; packet_state overlays the bitfields */
union {
uint8_t packet_state;
uint8_t dp_trace:1,
packet_track:3,
rsrvd:4;
} trace;
uint16_t vdev_id:8,
tid_val:4,
ftype:4;
} rx;
/* Note: MAX: 40 bytes */
struct {
union {
struct {
/* must stay first; shared offset with rx.dev.priv_cb_w */
void *ext_cb_ptr;
void *fctx;
} priv_cb_w;
struct {
/* ipa_owned bit is common between rx
 * control block and tx control block.
 * Do not change location of this bit.
 */
struct {
uint32_t owned:1,
priv:31;
} ipa;
uint32_t data_attr;
uint16_t desc_id;
uint16_t mgmt_desc_id;
struct {
uint8_t bi_map:1,
reserved:7;
} dma_option;
uint8_t flag_notify_comp:1,
rsvd:7;
uint8_t reserved[2];
} priv_cb_m;
} dev;
uint8_t ftype;
uint8_t vdev_id;
uint16_t len;
/* bits overlays u8 so all flags can be read/written at once */
union {
struct {
uint8_t flag_efrag:1,
flag_nbuf:1,
num:1,
flag_chfrag_start:1,
flag_chfrag_cont:1,
flag_chfrag_end:1,
flag_ext_header:1,
is_critical:1;
} bits;
uint8_t u8;
} flags;
struct {
uint8_t packet_state:7,
is_packet_priv:1;
uint8_t packet_track:3,
to_fw:1,
htt2_frm:1,
proto_type:3;
uint8_t dp_trace:1,
is_bcast:1,
is_mcast:1,
packet_type:4,
print:1;
} trace;
unsigned char *vaddr;
qdf_paddr_t paddr;
} tx;
} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */
/* skb->cb is only 48 bytes; fail the build if the control block outgrows it */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
FIELD_SIZEOF(struct sk_buff, cb));
#endif
/*
 * access macros to qdf_nbuf_cb
 * Note: These macros can be used as L-values as well as R-values.
 * When used as R-values, they effectively function as "get" macros
 * When used as L_values, they effectively function as "set" macros
 */
#define QDF_NBUF_CB_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
/* Rx accessor macros */
#define QDF_NBUF_CB_RX_LRO_ELIGIBLE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.lro_eligible)
#define QDF_NBUF_CB_RX_TCP_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_proto)
#define QDF_NBUF_CB_RX_TCP_PURE_ACK(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_pure_ack)
#define QDF_NBUF_CB_RX_IPV6_PROTO(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipv6_proto)
#define QDF_NBUF_CB_RX_IP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ip_offset)
#define QDF_NBUF_CB_RX_TCP_OFFSET(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_offset)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_TCP_CHKSUM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_udp_chksum)
#define QDF_NBUF_CB_RX_TCP_WIN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.tcp_win)
#define QDF_NBUF_CB_RX_FLOW_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_id)
#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
#define QDF_NBUF_CB_RX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.vdev_id)
#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_chfrag_end)
#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_mcbc)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.is_raw_frame)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.tid_val)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.fcs_err)
#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
qdf_nbuf_set_state(skb, PACKET_STATE)
/* Tx accessor macros */
#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
#define QDF_NBUF_CB_TX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */
/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.to_fw)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
#define __qdf_nbuf_reset_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
#define QDF_NBUF_CB_RX_TCP_SEQ_NUM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_m.tcp_seq_num)
@@ -108,6 +605,126 @@
#define __qdf_nbuf_ipa_priv_set(skb, priv) \
(QDF_NBUF_CB_TX_IPA_PRIV(skb) = (priv))
#define QDF_NBUF_CB_TX_DATA_ATTR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_m.data_attr)
#define __qdf_nbuf_data_attr_get(skb) \
QDF_NBUF_CB_TX_DATA_ATTR(skb)
#define __qdf_nbuf_data_attr_set(skb, data_attr) \
(QDF_NBUF_CB_TX_DATA_ATTR(skb) = (data_attr))
/**
* __qdf_nbuf_map_nbytes_single() - map nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: QDF_STATUS
*/
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	/*
	 * Simulation host has no DMA engine: the "DMA address" recorded in
	 * the cb is simply the virtual address of the skb data.  Cast via
	 * uintptr_t to avoid the implicit pointer-to-integer conversion of
	 * the original code; the dead local 'paddr' is dropped.
	 */
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)(uintptr_t)buf->data;
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;
	QDF_STATUS ret;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	/* the mapping must be validated before paddr may be used */
	ret = dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
	/* book-keep mapped bytes only for mappings that actually succeeded */
	if (QDF_IS_STATUS_SUCCESS(ret))
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, true);
	return ret;
}
#endif
/**
* __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: none
*/
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	/* nothing was DMA-mapped on the simulation host */
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t dma_addr = QDF_NBUF_CB_PADDR(buf);

	/* skip buffers that were never mapped (paddr still zero) */
	if (qdf_likely(dma_addr)) {
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, false);
		dma_unmap_single(osdev->dev, dma_addr, nbytes,
				 __qdf_dma_dir_to_os(dir));
	}
}
#endif
/**
* __qdf_nbuf_reset() - reset the buffer data and pointer
* @skb: Network buf instance
* @reserve: reserve
* @align: align
*
* Return: none
*/
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	/* expose the entire buffer, scrub its contents, then empty it */
	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);

	/* the control block is wiped along with the data */
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/* re-align skb->data to the caller-requested boundary, if any */
	if (align) {
		int misalign = ((unsigned long)skb->data) % align;

		if (misalign)
			skb_reserve(skb, align - misalign);
	}

	skb_reserve(skb, reserve);
}
/**
* qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb
* @skb: skb pointer whose cb is updated with vdev id information
@@ -139,6 +756,38 @@ void __qdf_nbuf_init_replenish_timer(void);
*/
void __qdf_nbuf_deinit_replenish_timer(void);
/**
* __qdf_nbuf_len() - return the amount of valid data in the skb
* @skb: Pointer to network buffer
*
* This API returns the amount of valid data in the skb, If there are frags
* then it returns total length.
*
* Return: network buffer length
*/
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	size_t total = skb->len;

	/* account for the single extra (HTC/HTT) fragment, when attached */
	if (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) > 0)
		total += QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return total;
}
/**
* __qdf_nbuf_num_frags_init() - init extra frags
* @skb: sk buffer
*
* Return: none
*/
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	/* start with no extra fragments attached to this nbuf */
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}
/**
* __qdf_nbuf_push_head() - Push data in the front
* @skb: Pointer to network buffer
@@ -234,4 +883,5 @@ __qdf_dsb(void) {}
static inline void
__qdf_nbuf_dma_clean_range(const void *buf_start, const void *buf_end) {}
#endif /*_I_QDF_NBUF_M_H */

ファイルの表示

@@ -28,10 +28,524 @@
#ifndef _I_QDF_NBUF_W_H
#define _I_QDF_NBUF_W_H
/* ext_cb accessor macros and internal API's */
/**
* struct qdf_nbuf_cb - network buffer control block contents (skb->cb)
* - data passed between layers of the driver.
*
* Notes:
* 1. Hard limited to 48 bytes. Please count your bytes
* 2. The size of this structure has to be easily calculable and
* consistently so: do not use any conditional compile flags
* 3. Split into a common part followed by a tx/rx overlay
* 4. There is only one extra frag, which represents the HTC/HTT header
* 5. "ext_cb_pt" must be the first member in both TX and RX unions
* for the priv_cb_w since it must be at same offset for both
* TX and RX union
* 6. "ipa.owned" bit must be first member in both TX and RX unions
* for the priv_cb_m since it must be at same offset for both
* TX and RX union.
*
* @paddr : physical addressed retrieved by dma_map of nbuf->data
* @u: union of TX and RX member elements
* @u.rx.ext_cb_ptr: extended cb pointer
* @u.rx.fctx: ctx to handle special pkts defined by ftype
* @u.rx.rx_ctx_id: RX ring id
* @u.rx.fcs_err: fcs error in RX packet
* @u.rx.ipa_smmu_map: do IPA smmu map
* @u.rx.flow_idx_valid: is flow_idx_valid flag
* @u.rx.flow_idx_timeout: is flow_idx_timeout flag
* @u.rx.rsvd8: reserved bits
* @u.rx.num_elements_in_list: num of elements (nbufs) in the list
* @u.rx.trace: combined structure for DP and protocol trace
* @u.rx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.rx.trace.dp_trace: flag (Datapath trace)
* @u.rx.trace.packet_track: RX_DATA packet
* @u.rx.trace.rsrvd: reserved bits
*
* @u.rx.protocol_tag: protocol tag set by app for rcvd packet type
* @u.rx.flow_tag: flow tag set by application for 5 tuples rcvd
*
* @u.rx.hw_info: combined structure for HW info fields
* @u.rx.hw_info.desc_tlv_members.msdu_count: num of msdus
* @u.rx.hw_info.desc_tlv_members.fragment_flag: is this fragment mpdu
* @u.rx.hw_info.desc_tlv_members.flag_retry: is mpdu retry flag set
* @u.rx.hw_info.desc_tlv_members.flag_is_ampdu: is ampdu flag set
* @u.rx.hw_info.desc_tlv_members.bar_frame: is this bar frame
* @u.rx.hw_info.desc_tlv_members.pn_fields_contain_valid_info: pn valid
* @u.rx.hw_info.desc_tlv_members.is_raw_frame: is this raw frame
* @u.rx.hw_info.desc_tlv_members.more_fragment_flag: more fragment flag
* @u.rx.hw_info.desc_tlv_members.src_info: PPE VP number
* @u.rx.hw_info.desc_tlv_members.mpdu_qos_control_valid: is qos ctrl valid
* @u.rx.hw_info.desc_tlv_members.tid_val: tid value
*
* @u.rx.hw_info.desc_tlv_members.peer_id: peer id
* @u.rx.hw_info.desc_tlv_members.ml_peer_valid: is ml peer valid
* @u.rx.hw_info.desc_tlv_members.logical_link_id: logical link id
* @u.rx.hw_info.desc_tlv_members.vdev_id: vdev id
* @u.rx.hw_info.desc_tlv_members.lmac_id: lmac id
* @u.rx.hw_info.desc_tlv_members.chip_id: chip id
* @u.rx.hw_info.desc_tlv_members.reserved2: reserved
*
* @u.rx.hw_info.desc_tlv_members.flag_chfrag_start: first fragment of msdu
* @u.rx.hw_info.desc_tlv_members.flag_chfrag_end: last fragment of msdu
* @u.rx.hw_info.desc_tlv_members.flag_chfrag_cont: msdu frag is continued
* @u.rx.hw_info.desc_tlv_members.msdu_len: msdu length
* @u.rx.hw_info.desc_tlv_members.flag_is_frag: msdu is frag
* @u.rx.hw_info.desc_tlv_members.flag_sa_valid: source address is valid
* @u.rx.hw_info.desc_tlv_members.flag_da_valid: dest address is valid
* @u.rx.hw_info.desc_tlv_members.flag_da_mcbc: is mcast/bcast msdu
* @u.rx.hw_info.desc_tlv_members.l3_hdr_pad_msb: l3 pad bytes
* @u.rx.hw_info.desc_tlv_members.tcp_udp_chksum_fail: tcp/udp checksum failed
* @u.rx.hw_info.desc_tlv_members.ip_chksum_fail: ip checksum failed
* @u.rx.hw_info.desc_tlv_members.fr_ds: FROM DS bit is set
* @u.rx.hw_info.desc_tlv_members.to_ds: TO DS bit is set
* @u.rx.hw_info.desc_tlv_members.intra_bss: this is intra-bss msdu
* @u.rx.hw_info.desc_tlv_members.rsvd4: reserved
*
* @u.rx.hw_info.desc_tlv_members.release_source_module: release source
* @u.rx.hw_info.desc_tlv_members.bm_action: bm action
* @u.rx.hw_info.desc_tlv_members.buffer_or_desc_type: buffer or desc
* @u.rx.hw_info.desc_tlv_members.return_buffer_manager: rbm value
* @u.rx.hw_info.desc_tlv_members.reserved_2a: reserved
* @u.rx.hw_info.desc_tlv_members.cache_id: cache_id
* @u.rx.hw_info.desc_tlv_members.cookie_conversion_status: cc status
* @u.rx.hw_info.desc_tlv_members.rxdma_push_reason: rxdma push reason
* @u.rx.hw_info.desc_tlv_members.rxdma_error_code: rxdma error code
* @u.rx.hw_info.desc_tlv_members.reo_push_reason: reo push reason
* @u.rx.hw_info.desc_tlv_members.reo_error_code: reo error code
* @u.rx.hw_info.desc_tlv_members.wbm_internal_error: wbm internal error
*
* @u.rx.hw_info.desc_info.mpdu_desc_info[2]: reo destination mpdu desc info
* @u.rx.hw_info.desc_info.msdu_desc_info: reo destination msdu desc info
* @u.rx.hw_info.desc_info.rx_error_codes: wbm error codes
*
*
* @u.tx.ext_cb_ptr: extended cb pointer
* @u.tx.fctx: ctx to handle special pkts defined by ftype
* @u.tx.ftype: ftype
* @u.tx.vdev_id: vdev_id
* @u.tx.len: len
* @u.tx.flags:
* @u.tx.flags.bits.flag_efrag:
* @u.tx.flags.bits.flag_nbuf:
* @u.tx.flags.bits.num:
* @u.tx.flags.bits.flag_chfrag_start:
* @u.tx.flags.bits.flag_chfrag_cont:
* @u.tx.flags.bits.flag_chfrag_end:
* @u.tx.flags.bits.flag_ext_header:
* @u.tx.flags.bits.is_critical:
* @u.tx.flags.u8:
* @u.tx.trace.packet_state: {NBUF_TX_PKT_[(HDD)|(TXRX_ENQUEUE)|(TXRX_DEQUEUE)|
* + (TXRX)|(HTT)|(HTC)|(HIF)|(CE)|(FREE)]
* @u.tx.trace.is_packet_priv:
* @u.tx.trace.packet_track: {NBUF_TX_PKT_[(DATA)|(MGMT)]_TRACK}
* @u.tx.trace.to_fw: Flag to indicate send this packet to FW
* @u.tx.trace.htt2_frm: flag (high-latency path only)
* @u.tx.trace.proto_type: bitmap of NBUF_PKT_TRAC_TYPE[(EAPOL)|(DHCP)|
* + (MGMT_ACTION)] - 4 bits
* @u.tx.trace.dp_trace: flag (Datapath trace)
* @u.tx.trace.is_bcast: flag (Broadcast packet)
* @u.tx.trace.is_mcast: flag (Multicast packet)
* @u.tx.trace.packet_type: flag (Packet type)
* @u.tx.trace.print: enable packet logging
*
* @u.tx.vaddr: virtual address of ~
* @u.tx.paddr: physical/DMA address of ~
*/
struct qdf_nbuf_cb {
	/* common */
	qdf_paddr_t paddr; /* of skb->data */
	/* valid only in one direction */
	union {
		/* Note: MAX: 40 bytes */
		struct {
			void *ext_cb_ptr;
			void *fctx;
			/* SW state for the RX path */
			uint16_t ftype:4,
				 rx_ctx_id:4,
				 fcs_err:1,
				 ipa_smmu_map:1,
				 flow_idx_valid:1,
				 flow_idx_timeout:1,
				 rsvd8:4;
			uint8_t num_elements_in_list;
			/* DP/protocol trace state: packet_state overlays the
			 * individual trace bits
			 */
			union {
				uint8_t packet_state;
				uint8_t dp_trace:1,
					packet_track:3,
					rsrvd:4;
			} trace;
			uint16_t protocol_tag;
			uint16_t flow_tag;
			/* HW descriptor mirror: either as named bitfields
			 * (desc_tlv_members) or as raw words (desc_info)
			 */
			union {
				struct {
					/* do not re-arrange the fields in
					 * the below 3 uint32_t words as
					 * they map exactly to the desc info
					 */
#ifndef BIG_ENDIAN_HOST
					/* 1st word rx_mpdu_desc_info */
					uint32_t msdu_count:8,
						 fragment_flag:1,
						 flag_retry:1,
						 flag_is_ampdu:1,
						 bar_frame:1,
						 pn_fields_contain_valid_info:1,
						 is_raw_frame:1,
						 more_fragment_flag:1,
						 src_info:12,
						 mpdu_qos_control_valid:1,
						 tid_val:4;
#else
					uint32_t tid_val:4,
						 mpdu_qos_control_valid:1,
						 src_info:12,
						 more_fragment_flag:1,
						 is_raw_frame:1,
						 pn_fields_contain_valid_info:1,
						 bar_frame:1,
						 flag_is_ampdu:1,
						 flag_retry:1,
						 fragment_flag:1,
						 msdu_count:8;
#endif
					/* 2nd word rx_mpdu_desc_info */
					uint32_t peer_id:13,
						 ml_peer_valid:1,
						 logical_link_id:2,
						 vdev_id:8,
						 lmac_id:2,
						 chip_id:3,
						 reserved2:3;
#ifndef BIG_ENDIAN_HOST
					/* 1st word of rx_msdu_desc_info */
					uint32_t flag_chfrag_start:1,
						 flag_chfrag_end:1,
						 flag_chfrag_cont:1,
						 msdu_len:14,
						 flag_is_frag:1,
						 flag_sa_valid:1,
						 flag_da_valid:1,
						 flag_da_mcbc:1,
						 l3_hdr_pad_msb:1,
						 tcp_udp_chksum_fail:1,
						 ip_chksum_fail:1,
						 fr_ds:1,
						 to_ds:1,
						 intra_bss:1,
						 rsvd4:5;
					/* wbm release-ring / error info word */
					uint32_t release_source_module:3,
						 bm_action:3,
						 buffer_or_desc_type:3,
						 return_buffer_manager:4,
						 reserved_2a:2,
						 cache_id:1,
						 cookie_conversion_status:1,
						 rxdma_push_reason:2,
						 rxdma_error_code:5,
						 reo_push_reason:2,
						 reo_error_code:5,
						 wbm_internal_error:1;
#else
					uint32_t rsvd4:5,
						 intra_bss:1,
						 to_ds:1,
						 fr_ds:1,
						 ip_chksum_fail:1,
						 tcp_udp_chksum_fail:1,
						 l3_hdr_pad_msb:1,
						 flag_da_mcbc:1,
						 flag_da_valid:1,
						 flag_sa_valid:1,
						 flag_is_frag:1,
						 msdu_len:14,
						 flag_chfrag_cont:1,
						 flag_chfrag_end:1,
						 flag_chfrag_start:1;
					uint32_t wbm_internal_error:1,
						 reo_error_code:5,
						 reo_push_reason:2,
						 rxdma_error_code:5,
						 rxdma_push_reason:2,
						 cookie_conversion_status:1,
						 cache_id:1,
						 reserved_2a:2,
						 return_buffer_manager:4,
						 buffer_or_desc_type:3,
						 bm_action:3,
						 release_source_module:3;
#endif
				} desc_tlv_members;
				/* same four words accessed as raw uint32_t */
				struct {
					uint32_t mpdu_desc_info[2];
					uint32_t msdu_desc_info;
					uint32_t rx_error_codes;
				} desc_info;
			} hw_info;
		} rx;
		/* Note: MAX: 40 bytes */
		struct {
			void *ext_cb_ptr;
			void *fctx;
			uint8_t ftype;
			uint8_t vdev_id;
			uint16_t len;
			/* extra-fragment flags; u8 overlays all the bits */
			union {
				struct {
					uint8_t flag_efrag:1,
						flag_nbuf:1,
						num:1,
						flag_chfrag_start:1,
						flag_chfrag_cont:1,
						flag_chfrag_end:1,
						flag_ext_header:1,
						is_critical:1;
				} bits;
				uint8_t u8;
			} flags;
			/* DP/protocol trace state for the TX path */
			struct {
				uint8_t packet_state:7,
					is_packet_priv:1;
				uint8_t packet_track:3,
					to_fw:1,
					/* used only for hl */
					htt2_frm:1,
					proto_type:3;
				uint8_t dp_trace:1,
					is_bcast:1,
					is_mcast:1,
					packet_type:4,
					print:1;
			} trace;
			/* extra (HTC/HTT) fragment addresses */
			unsigned char *vaddr;
			qdf_paddr_t paddr;
		} tx;
	} u;
}; /* struct qdf_nbuf_cb: MAX 48 bytes */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0))
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
sizeof_field(struct sk_buff, cb));
#else
QDF_COMPILE_TIME_ASSERT(qdf_nbuf_cb_size,
(sizeof(struct qdf_nbuf_cb)) <=
FIELD_SIZEOF(struct sk_buff, cb));
#endif
/*
* access macros to qdf_nbuf_cb
* Note: These macros can be used as L-values as well as R-values.
* When used as R-values, they effectively function as "get" macros
* When used as L_values, they effectively function as "set" macros
*/
#define QDF_NBUF_CB_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->paddr.dma_addr)
#define QDF_NBUF_CB_RX_CTX_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.rx_ctx_id)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_CB_RX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.packet_state)
#define QDF_NBUF_CB_RX_DP_TRACE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.trace.dp_trace)
#define QDF_NBUF_CB_RX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ftype)
#define QDF_NBUF_CB_RX_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_chfrag_start)
#define QDF_NBUF_CB_RX_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_chfrag_cont)
#define QDF_NBUF_CB_RX_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_chfrag_end)
#define QDF_NBUF_CB_RX_DA_MCBC(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_da_mcbc)
#define QDF_NBUF_CB_RX_L3_PAD_MSB(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.l3_hdr_pad_msb)
#define QDF_NBUF_CB_RX_DA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_da_valid)
#define QDF_NBUF_CB_RX_SA_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_sa_valid)
#define QDF_NBUF_CB_RX_RETRY_FLAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_retry)
#define QDF_NBUF_CB_RX_RAW_FRAME(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.is_raw_frame)
#define QDF_NBUF_CB_RX_IS_FRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.flag_is_frag)
#define QDF_NBUF_CB_RX_FCS_ERR(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.fcs_err)
#define QDF_NBUF_CB_RX_MSDU_DESC_INFO(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.msdu_desc_info)
#define QDF_NBUF_CB_RX_MPDU_DESC_INFO(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info)
#define QDF_NBUF_CB_RX_MPDU_DESC_INFO_1(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info[0])
#define QDF_NBUF_CB_RX_MPDU_DESC_INFO_2(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_info.mpdu_desc_info[1])
#define QDF_NBUF_CB_RX_PEER_ID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.peer_id)
#define QDF_NBUF_CB_RX_VDEV_ID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.vdev_id)
#define QDF_NBUF_CB_RX_PACKET_LMAC_ID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.lmac_id)
#define QDF_NBUF_CB_RX_PKT_LEN(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.msdu_len)
#define QDF_NBUF_CB_RX_TID_VAL(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.tid_val)
#define QDF_NBUF_CB_RX_NUM_ELEMENTS_IN_LIST(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.num_elements_in_list)
#define QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, PACKET_STATE) \
qdf_nbuf_set_state(skb, PACKET_STATE)
#define QDF_NBUF_CB_TX_FTYPE(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ftype)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.len)
#define QDF_NBUF_CB_TX_VDEV_CTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vdev_id)
/* Tx Flags Accessor Macros*/
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_efrag)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_nbuf)
#define QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.num)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_START(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_start)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_CONT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_cont)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_CHFRAG_END(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_chfrag_end)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.flags.bits.flag_ext_header)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_FLAGS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.u8)
#define QDF_NBUF_CB_TX_EXTRA_IS_CRITICAL(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.flags.bits.is_critical)
/* End of Tx Flags Accessor Macros */
/* Tx trace accessor macros */
#define QDF_NBUF_CB_TX_PACKET_STATE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_state)
#define QDF_NBUF_CB_TX_IS_PACKET_PRIV(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_packet_priv)
#define QDF_NBUF_CB_TX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.packet_track)
#define QDF_NBUF_CB_TX_PACKET_TO_FW(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.to_fw)
#define QDF_NBUF_CB_RX_PACKET_TRACK(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.trace.packet_track)
#define QDF_NBUF_CB_TX_PROTO_TYPE(skb)\
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.proto_type)
#define QDF_NBUF_CB_TX_DP_TRACE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.dp_trace)
#define QDF_NBUF_CB_DP_TRACE_PRINT(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.print)
#define QDF_NBUF_CB_TX_HL_HTT2_FRM(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.htt2_frm)
#define QDF_NBUF_CB_GET_IS_BCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_bcast)
#define QDF_NBUF_CB_GET_IS_MCAST(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.is_mcast)
#define QDF_NBUF_CB_GET_PACKET_TYPE(skb)\
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.trace.packet_type)
#define QDF_NBUF_CB_SET_BCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_bcast = true)
#define QDF_NBUF_CB_SET_MCAST(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.tx.trace.is_mcast = true)
/* End of Tx trace accessor macros */
#define QDF_NBUF_CB_TX_EXTRA_FRAG_VADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.vaddr)
#define QDF_NBUF_CB_TX_EXTRA_FRAG_PADDR(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.paddr.dma_addr)
/* assume the OS provides a single fragment */
#define __qdf_nbuf_get_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) + 1)
#define __qdf_nbuf_reset_num_frags(skb) \
(QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0)
/* ext_cb accessor macros and internal API's */
#define QDF_NBUF_CB_EXT_CB(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.ext_cb_ptr)
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.ext_cb_ptr)
#define __qdf_nbuf_set_ext_cb(skb, ref) \
do { \
@@ -42,21 +556,15 @@
QDF_NBUF_CB_EXT_CB((skb))
/* fctx accessor macros and internal API's*/
#define QDF_NBUF_CB_RX_FCTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.fctx)
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.fctx)
#define QDF_NBUF_CB_TX_FCTX(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.dev.priv_cb_w.fctx)
#define QDF_NBUF_CB_RX_PEER_ID(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.peer_id)
#define QDF_NBUF_CB_RX_PKT_LEN(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.msdu_len)
(((struct qdf_nbuf_cb *)((skb)->cb))->u.tx.fctx)
#define QDF_NBUF_CB_RX_INTRA_BSS(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w.flag_intra_bss)
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.hw_info.desc_tlv_members.intra_bss)
#define __qdf_nbuf_set_rx_fctx_type(skb, ctx, type) \
do { \
@@ -83,8 +591,7 @@
QDF_NBUF_CB_TX_FCTX((skb))
#define QDF_NBUF_CB_RX_PROTOCOL_TAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.dev.priv_cb_w.protocol_tag)
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.protocol_tag)
#define __qdf_nbuf_set_rx_protocol_tag(skb, val) \
((QDF_NBUF_CB_RX_PROTOCOL_TAG((skb))) = val)
@@ -93,8 +600,7 @@
(QDF_NBUF_CB_RX_PROTOCOL_TAG((skb)))
#define QDF_NBUF_CB_RX_FLOW_TAG(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.dev.priv_cb_w.flow_tag)
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.flow_tag)
#define __qdf_nbuf_set_rx_flow_tag(skb, val) \
((QDF_NBUF_CB_RX_FLOW_TAG((skb))) = val)
@@ -104,7 +610,7 @@
#define QDF_NBUF_CB_RX_FLOW_IDX_VALID(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.dev.priv_cb_w.flow_idx_valid)
((skb)->cb))->u.rx.flow_idx_valid)
#define __qdf_nbuf_set_rx_flow_idx_valid(skb, val) \
((QDF_NBUF_CB_RX_FLOW_IDX_VALID((skb))) = val)
@@ -114,7 +620,7 @@
#define QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT(skb) \
(((struct qdf_nbuf_cb *) \
((skb)->cb))->u.rx.dev.priv_cb_w.flow_idx_timeout)
((skb)->cb))->u.rx.flow_idx_timeout)
#define __qdf_nbuf_set_rx_flow_idx_timeout(skb, val) \
((QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT((skb))) = val)
@@ -123,8 +629,153 @@
(QDF_NBUF_CB_RX_FLOW_IDX_TIMEOUT((skb)))
#define QDF_NBUF_CB_RX_PACKET_IPA_SMMU_MAP(skb) \
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.dev.priv_cb_w. \
ipa_smmu_map)
(((struct qdf_nbuf_cb *)((skb)->cb))->u.rx.ipa_smmu_map)
#define __qdf_nbuf_data_attr_get(skb) (0)
#define __qdf_nbuf_data_attr_set(skb, data_attr)
/**
* __qdf_nbuf_map_nbytes_single() - map nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: QDF_STATUS
*/
#ifdef A_SIMOS_DEVHOST
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	/*
	 * Simulation host has no DMA engine: the "DMA address" recorded in
	 * the cb is simply the virtual address of the skb data.  Cast via
	 * uintptr_t to avoid the implicit pointer-to-integer conversion of
	 * the original code; the dead local 'paddr' is dropped.
	 */
	QDF_NBUF_CB_PADDR(buf) = (qdf_dma_addr_t)(uintptr_t)buf->data;
	return QDF_STATUS_SUCCESS;
}
#else
static inline QDF_STATUS __qdf_nbuf_map_nbytes_single(
		qdf_device_t osdev, struct sk_buff *buf,
		qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t paddr;
	QDF_STATUS ret;

	/* assume that the OS only provides a single fragment */
	QDF_NBUF_CB_PADDR(buf) = paddr =
		dma_map_single(osdev->dev, buf->data,
			       nbytes, __qdf_dma_dir_to_os(dir));
	/* the mapping must be validated before paddr may be used */
	ret = dma_mapping_error(osdev->dev, paddr) ?
		QDF_STATUS_E_FAULT : QDF_STATUS_SUCCESS;
	/* book-keep mapped bytes only for mappings that actually succeeded */
	if (QDF_IS_STATUS_SUCCESS(ret))
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, true);
	return ret;
}
#endif
/**
* __qdf_nbuf_unmap_nbytes_single() - unmap nbytes
* @osdev: os device
* @buf: buffer
* @dir: direction
* @nbytes: number of bytes
*
* Return: none
*/
#if defined(A_SIMOS_DEVHOST)
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	/* nothing was DMA-mapped on the simulation host */
}
#else
static inline void
__qdf_nbuf_unmap_nbytes_single(qdf_device_t osdev, struct sk_buff *buf,
			       qdf_dma_dir_t dir, int nbytes)
{
	qdf_dma_addr_t dma_addr = QDF_NBUF_CB_PADDR(buf);

	/* skip buffers that were never mapped (paddr still zero) */
	if (qdf_likely(dma_addr)) {
		__qdf_record_nbuf_nbytes(__qdf_nbuf_get_end_offset(buf),
					 dir, false);
		dma_unmap_single(osdev->dev, dma_addr, nbytes,
				 __qdf_dma_dir_to_os(dir));
	}
}
#endif
/**
* __qdf_nbuf_reset() - reset the buffer data and pointer
* @skb: Network buf instance
* @reserve: reserve
* @align: align
*
* Return: none
*/
static inline void
__qdf_nbuf_reset(struct sk_buff *skb, int reserve, int align)
{
	/* expose the entire buffer, scrub its contents, then empty it */
	skb_push(skb, skb_headroom(skb));
	skb_put(skb, skb_tailroom(skb));
	memset(skb->data, 0x0, skb->len);
	skb_trim(skb, 0);
	skb_reserve(skb, NET_SKB_PAD);

	/* the control block is wiped along with the data */
	memset(skb->cb, 0x0, sizeof(skb->cb));

	/*
	 * The default is for netbuf fragments to be interpreted
	 * as wordstreams rather than bytestreams.
	 */
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_EFRAG(skb) = 1;
	QDF_NBUF_CB_TX_EXTRA_FRAG_WORDSTR_NBUF(skb) = 1;

	/* re-align skb->data to the caller-requested boundary, if any */
	if (align) {
		int misalign = ((unsigned long)skb->data) % align;

		if (misalign)
			skb_reserve(skb, align - misalign);
	}

	skb_reserve(skb, reserve);
}
/**
* __qdf_nbuf_len() - return the amount of valid data in the skb
* @skb: Pointer to network buffer
*
* This API returns the amount of valid data in the skb, If there are frags
* then it returns total length.
*
* Return: network buffer length
*/
static inline size_t __qdf_nbuf_len(struct sk_buff *skb)
{
	size_t total = skb->len;

	/* account for the single extra (HTC/HTT) fragment, when attached */
	if (QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) > 0)
		total += QDF_NBUF_CB_TX_EXTRA_FRAG_LEN(skb);

	return total;
}
/**
* __qdf_nbuf_num_frags_init() - init extra frags
* @skb: sk buffer
*
* Return: none
*/
static inline
void __qdf_nbuf_num_frags_init(struct sk_buff *skb)
{
	/* start with no extra fragments attached to this nbuf */
	QDF_NBUF_CB_TX_NUM_EXTRA_FRAGS(skb) = 0;
}
/**
* qdf_nbuf_cb_update_vdev_id() - update vdev id in skb cb