qcacld-3.0: Fix Datapath kernel checkpatch warnings in ol_tx.c

Fix Datapath kernel checkpatch warnings in ol_tx.c: convert the
multi-line ol_tx_prepare_ll and OL_TX_ENCAP_WRAPPER macros, which hide
return/goto control flow from their callers, into static inline
functions; reflow block comments to kernel style; and add the blank
lines checkpatch expects after local variable declarations.

Change-Id: I538bf59ab717dfac6cd5d88273dc938b1f76b6d2
CRs-Fixed: 2032874
Author:       Yun Park
Date:         2017-04-05 22:33:51 -07:00
Committed by: snandini
Parent:       4afce45d11
Commit:       3453282718


@@ -70,24 +70,29 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
  * succeeds, that guarantees that the target has room to accept
  * the new tx frame.
  */
-#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
-    do { \
-        struct ol_txrx_pdev_t *pdev = vdev->pdev; \
-        (msdu_info)->htt.info.frame_type = pdev->htt_pkt_type; \
-        tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info); \
-        if (qdf_unlikely(!tx_desc)) { \
-            /* \
-             * If TSO packet, free associated \
-             * remaining TSO segment descriptors \
-             */ \
-            if (qdf_nbuf_is_tso(msdu)) \
-                ol_free_remaining_tso_segs( \
-                        vdev, msdu_info, true); \
-            TXRX_STATS_MSDU_LIST_INCR( \
-                pdev, tx.dropped.host_reject, msdu); \
-            return msdu; /* the list of unaccepted MSDUs */ \
-        } \
-    } while (0)
+static inline qdf_nbuf_t ol_tx_prepare_ll(struct ol_tx_desc_t **tx_desc,
+                                          ol_txrx_vdev_handle vdev,
+                                          qdf_nbuf_t msdu,
+                                          struct ol_txrx_msdu_info_t *msdu_info)
+{
+    struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+    msdu_info->htt.info.frame_type = pdev->htt_pkt_type;
+    *tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);
+    if (qdf_unlikely(!*tx_desc)) {
+        /*
+         * If TSO packet, free associated
+         * remaining TSO segment descriptors
+         */
+        if (qdf_nbuf_is_tso(msdu))
+            ol_free_remaining_tso_segs(vdev, msdu_info, true);
+        TXRX_STATS_MSDU_LIST_INCR(pdev, tx.dropped.host_reject, msdu);
+        return msdu; /* the list of unaccepted MSDUs */
+    }
+    return NULL;
+}
 
 #if defined(FEATURE_TSO)
 void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
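With the hidden `return` removed from the macro, every call site now checks
the result explicitly; the descriptor travels back through the
pointer-to-pointer first argument so the caller's tx_desc is actually
populated. A minimal caller sketch of the new contract:

    struct ol_tx_desc_t *tx_desc = NULL;

    if (ol_tx_prepare_ll(&tx_desc, vdev, msdu, &msdu_info))
        return msdu;    /* rejected: hand the MSDU list back up */
    /* tx_desc is valid here and owned by this send path */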
@@ -252,10 +257,10 @@ qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb)
         QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                   "%s:vdev is null", __func__);
         return skb;
-    } else {
-        pdev = vdev->pdev;
     }
+    pdev = vdev->pdev;
+
     if (qdf_unlikely(!pdev)) {
         QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
                   "%s:pdev is null", __func__);
@@ -323,7 +328,7 @@ static inline void ol_tx_tso_update_stats(struct ol_txrx_pdev_t *pdev,
                                         qdf_nbuf_t msdu,
                                         uint32_t tso_msdu_idx)
 {
-    TXRX_STATS_TSO_HISTOGRAM(pdev, tso_info->num_segs);
+    TXRX_STATS_TSO_HISTOGRAM(pdev, tso_info->num_segs);
     TXRX_STATS_TSO_GSO_SIZE_UPDATE(pdev, tso_msdu_idx,
                                    qdf_nbuf_tcp_tso_size(msdu));
     TXRX_STATS_TSO_TOTAL_LEN_UPDATE(pdev,
@@ -376,7 +381,7 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
      */
     while (msdu) {
         qdf_nbuf_t next;
-        struct ol_tx_desc_t *tx_desc;
+        struct ol_tx_desc_t *tx_desc = NULL;
         int segments = 1;
 
         msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
@@ -416,14 +421,15 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
         segments--;
 
-        ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+        if (ol_tx_prepare_ll(&tx_desc, vdev, msdu, &msdu_info))
+            return msdu;
-        /**
-         * if this is a jumbo nbuf, then increment the number
-         * of nbuf users for each additional segment of the msdu.
-         * This will ensure that the skb is freed only after
-         * receiving tx completion for all segments of an nbuf
-         */
+        /*
+         * If this is a jumbo nbuf, then increment the number
+         * of nbuf users for each additional segment of the msdu
+         * This will ensure that the skb is freed only after
+         * receiving tx completion for all segments of an nbuf.
+         */
         if (segments)
             qdf_nbuf_inc_users(msdu);
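The rewritten comment describes per-segment reference counting for TSO
jumbo nbufs. A condensed sketch of the idea (the loop form is illustrative;
only qdf_nbuf_inc_users() is the real API):

    /* take one extra user reference per additional in-flight segment */
    while (--segments > 0)
        qdf_nbuf_inc_users(msdu);
    /*
     * each segment's tx completion later drops one reference, so the
     * skb is only truly freed after the last segment completes
     */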
@@ -474,11 +480,12 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
      */
     while (msdu) {
         qdf_nbuf_t next;
-        struct ol_tx_desc_t *tx_desc;
+        struct ol_tx_desc_t *tx_desc = NULL;
 
         msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
         msdu_info.peer = NULL;
-        ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+        if (ol_tx_prepare_ll(&tx_desc, vdev, msdu, &msdu_info))
+            return msdu;
 
         TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);
@@ -627,15 +634,15 @@ ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
     }
 
     /*
-     * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
-     * this is not required. We still have to mark the swap bit correctly,
-     * when posting to the ring
+     * Do we want to turn on word_stream bit-map here ? For linux, non-TSO
+     * this is not required. We still have to mark the swap bit correctly,
+     * when posting to the ring
      */
     /* Check to make sure, data download length is correct */
 
     /*
      * TODO : Can we remove this check and always download a fixed length ?
-     * */
+     */
 
     if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
@@ -913,9 +920,11 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
             next = qdf_nbuf_next(msdu);
             if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
                                    ep_id, pkt_download_len))) {
-                /* The packet could not be sent */
-                /* Free the descriptor, return the packet to the
-                 * caller */
+                /*
+                 * The packet could not be sent
+                 * Free the descriptor, return the packet to the
+                 * caller
+                 */
                 ol_tx_desc_free(pdev, tx_desc);
                 return msdu;
             }
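The reflowed comment states the ownership rule for the fast path: when
ce_send_fast() reports failure (returns 0), the tx descriptor is reclaimed
here and the unsent nbuf list travels back to the caller. A caller-side
sketch (the drop policy shown is illustrative):

    qdf_nbuf_t rem = ol_tx_ll_fast(vdev, msdu_list);

    if (rem)    /* unsent remainder is still owned by the caller */
        qdf_nbuf_tx_free(rem, QDF_NBUF_PKT_ERROR);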
@@ -988,6 +997,7 @@ static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
             OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
     while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
         qdf_nbuf_t tx_msdu;
+
         max_to_accept--;
         vdev->ll_pause.txq.depth--;
         tx_msdu = vdev->ll_pause.txq.head;
@@ -1034,6 +1044,7 @@ ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
     while (msdu_list &&
            vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
         qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
+
         QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
                                      QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
         DPTRACE(qdf_dp_trace(msdu_list,
@@ -1099,23 +1110,31 @@ qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
         if (vdev->ll_pause.txq.depth > 0 ||
             vdev->pdev->tx_throttle.current_throttle_level !=
             THROTTLE_LEVEL_0) {
-            /* not paused, but there is a backlog of frms
-               from a prior pause or throttle off phase */
+            /*
+             * not paused, but there is a backlog of frms
+             * from a prior pause or throttle off phase
+             */
             msdu_list = ol_tx_vdev_pause_queue_append(
                 vdev, msdu_list, 0);
-            /* if throttle is disabled or phase is "on",
-               send the frame */
+            /*
+             * if throttle is disabled or phase is "on",
+             * send the frame
+             */
             if (vdev->pdev->tx_throttle.current_throttle_level ==
                 THROTTLE_LEVEL_0 ||
                 vdev->pdev->tx_throttle.current_throttle_phase ==
                 THROTTLE_PHASE_ON) {
-                /* send as many frames as possible
-                   from the vdevs backlog */
+                /*
+                 * send as many frames as possible
+                 * from the vdevs backlog
+                 */
                 ol_tx_vdev_ll_pause_queue_send_base(vdev);
             }
         } else {
-            /* not paused, no throttle and no backlog -
-               send the new frames */
+            /*
+             * not paused, no throttle and no backlog -
+             * send the new frames
+             */
             msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
         }
     }
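The reflowed comments describe the throttle duty cycle: above
THROTTLE_LEVEL_0 the transmitter alternates between an ON phase, where
queued frames may drain, and an OFF phase, where new frames are only
queued. The gating condition could be restated as a predicate; a
hypothetical helper, purely to make the test above readable:

static inline bool ol_tx_throttle_allows_send(struct ol_txrx_pdev_t *pdev)
{
    /* sending is allowed when throttling is off or the phase is ON */
    return pdev->tx_throttle.current_throttle_level == THROTTLE_LEVEL_0 ||
           pdev->tx_throttle.current_throttle_phase == THROTTLE_PHASE_ON;
}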
@@ -1144,11 +1163,13 @@ void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
     /* round robin through the vdev queues for the given pdev */
 
-    /* Potential improvement: download several frames from the same vdev
-       at a time, since it is more likely that those frames could be
-       aggregated together, remember which vdev was serviced last,
-       so the next call this function can resume the round-robin
-       traversing where the current invocation left off */
+    /*
+     * Potential improvement: download several frames from the same vdev
+     * at a time, since it is more likely that those frames could be
+     * aggregated together, remember which vdev was serviced last,
+     * so the next call this function can resume the round-robin
+     * traversing where the current invocation left off
+     */
     do {
         more = 0;
         TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
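The reflowed comment keeps a TODO: remember which vdev was serviced last so
the next invocation resumes the round-robin there instead of restarting at
the list head. A sketch of that improvement (function name and resume
bookkeeping are hypothetical):

static struct ol_txrx_vdev_t *
ol_tx_next_vdev_rr(struct ol_txrx_pdev_t *pdev, struct ol_txrx_vdev_t *last)
{
    struct ol_txrx_vdev_t *vdev;

    /* continue after the previously serviced vdev, wrapping at the tail */
    vdev = last ? TAILQ_NEXT(last, vdev_list_elem) : NULL;
    if (!vdev)
        vdev = TAILQ_FIRST(&pdev->vdev_list);
    return vdev;    /* caller stores this as the new resume point */
}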
@@ -1269,13 +1290,14 @@ ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
      */
     while (msdu) {
         qdf_nbuf_t next;
-        struct ol_tx_desc_t *tx_desc;
+        struct ol_tx_desc_t *tx_desc = NULL;
 
         msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
         msdu_info.peer = NULL;
         msdu_info.tso_info.is_tso = 0;
 
-        ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+        if (ol_tx_prepare_ll(&tx_desc, vdev, msdu, &msdu_info))
+            return msdu;
 
         /*
          * The netbuf may get linked into a different list inside the
@@ -1315,20 +1337,34 @@ ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
 }
 
 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
-#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
-    do { \
-        if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
-            qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
-            ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
-            if (tx_msdu_info.peer) { \
-                /* remove the peer reference added above */ \
-                ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
-            } \
-            goto MSDU_LOOP_BOTTOM; \
-        } \
-    } while (0)
+static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
+                                      ol_txrx_vdev_handle vdev,
+                                      struct ol_tx_desc_t *tx_desc,
+                                      qdf_nbuf_t msdu,
+                                      struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+    if (OL_TX_ENCAP(vdev, tx_desc, msdu, tx_msdu_info) != A_OK) {
+        qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+        ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
+        if (tx_msdu_info->peer) {
+            /* remove the peer reference added above */
+            OL_TXRX_PEER_UNREF_DELETE(tx_msdu_info->peer);
+        }
+        return -EINVAL;
+    }
+    return 0;
+}
 #else
-#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
+static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
+                                      ol_txrx_vdev_handle vdev,
+                                      struct ol_tx_desc_t *tx_desc,
+                                      qdf_nbuf_t msdu,
+                                      struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+    /* no-op */
+    return 0;
+}
 #endif
 
 /* tx filtering is handled within the target FW */
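Both variants keep the same signature, so call sites need no #ifdef on
QCA_SUPPORT_SW_TXRX_ENCAP, and the disabled variant collapses to `return 0`
after inlining. The call shape, as used later in this patch:

    if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu, &tx_msdu_info))
        goto MSDU_LOOP_BOTTOM;    /* descriptor already freed on error */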
@@ -1418,6 +1454,7 @@ struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
                                       struct ol_txrx_msdu_info_t *msdu_info)
 {
     struct ol_tx_desc_t *tx_desc = NULL;
+
     tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
     return tx_desc;
 }
@@ -1444,6 +1481,7 @@ ol_txrx_mgmt_tx_desc_alloc(
     struct ol_txrx_msdu_info_t *tx_msdu_info)
 {
     struct ol_tx_desc_t *tx_desc;
+
     tx_msdu_info->htt.action.tx_comp_req = 1;
     tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
     return tx_desc;
@@ -1471,14 +1509,16 @@ int ol_txrx_mgmt_send_frame(
 {
     struct ol_txrx_pdev_t *pdev = vdev->pdev;
     struct ol_tx_frms_queue_t *txq;
+
     /*
      * 1. Look up the peer and queue the frame in the peer's mgmt queue.
      * 2. Invoke the download scheduler.
      */
     txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
     if (!txq) {
-        /*TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
-          msdu);*/
+        /* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
+         * msdu);
+         */
         qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
         ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
                                      1 /* error */);
@@ -1527,6 +1567,7 @@ ol_txrx_mgmt_tx_desc_alloc(
     struct ol_txrx_msdu_info_t *tx_msdu_info)
 {
     struct ol_tx_desc_t *tx_desc;
+
     /* For LL tx_comp_req is not used so initialized to 0 */
     tx_msdu_info->htt.action.tx_comp_req = 0;
     tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
@@ -1538,7 +1579,8 @@ ol_txrx_mgmt_tx_desc_alloc(
      */
 #if defined(HELIUMPLUS)
-    /* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
-       tx_desc); */
+    /* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
+     * tx_desc);
+     */
 #endif /* defined(HELIUMPLUS) */
     if (tx_desc) {
         /*
@@ -1570,6 +1612,7 @@ int ol_txrx_mgmt_send_frame(
     uint16_t chanfreq)
 {
     struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
     htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
     QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
         QDF_NBUF_TX_PKT_MGMT_TRACK;
@@ -1600,8 +1643,8 @@ ol_tx_hl_base(
     qdf_nbuf_t msdu = msdu_list;
     struct ol_txrx_msdu_info_t tx_msdu_info;
     struct ocb_tx_ctrl_hdr_t tx_ctrl;
     htt_pdev_handle htt_pdev = pdev->htt_pdev;
 
     tx_msdu_info.tso_info.is_tso = 0;
     /*
@@ -1703,8 +1746,10 @@ ol_tx_hl_base(
             }
 
             if (tx_msdu_info.peer) {
-                /*If the state is not associated then drop all
-                 *the data packets received for that peer*/
+                /*
+                 * If the state is not associated then drop all
+                 * the data packets received for that peer
+                 */
                 if (tx_msdu_info.peer->state ==
                     OL_TXRX_PEER_STATE_DISC) {
                     qdf_atomic_inc(
@@ -1751,8 +1796,9 @@ ol_tx_hl_base(
              * encap, it performs encap, and if an error is
              * encountered, jumps to the MSDU_LOOP_BOTTOM label.
              */
-            OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
-                                tx_msdu_info);
+            if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu,
+                                    &tx_msdu_info))
+                goto MSDU_LOOP_BOTTOM;
 
             /* initialize the HW tx descriptor */
             htt_tx_desc_init(
@@ -1790,6 +1836,7 @@ ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 {
     struct ol_txrx_pdev_t *pdev = vdev->pdev;
     int tx_comp_req = pdev->cfg.default_tx_comp_req;
+
     return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
 }
@@ -1827,6 +1874,7 @@ ol_txrx_data_tx_cb_set(struct cdp_vdev *pvdev,
 {
     struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
     struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
     pdev->tx_data_callback.func = callback;
     pdev->tx_data_callback.ctxt = ctxt;
 }
@@ -1858,8 +1906,10 @@ void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
           __func__, __LINE__, tx_desc->htt_frag_desc,
           &tx_desc->htt_frag_desc_paddr);
 
-    /* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
-       is already de-referrable (=> in virtual address space) */
+    /*
+     * it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
+     * is already de-referrable (=> in virtual address space)
+     */
     frag_ptr_i_p = tx_desc->htt_frag_desc;
 
     /* Dump 6 words of TSO flags */
@@ -1877,10 +1927,9 @@ void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
         i++;
         if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
             break;
-        else /* jump to next pointer - skip length */
-            frag_ptr_i_p += 2;
+        /* jump to next pointer - skip length */
+        frag_ptr_i_p += 2;
     }
-    return;
 }
 #endif /* HELIUMPLUS */
@@ -1895,6 +1944,7 @@ ol_txrx_mgmt_send_ext(struct cdp_vdev *pvdev,
     struct ol_tx_desc_t *tx_desc;
     struct ol_txrx_msdu_info_t tx_msdu_info;
     int result = 0;
+
     tx_msdu_info.tso_info.is_tso = 0;
     tx_msdu_info.htt.action.use_6mbps = use_6mbps;
@@ -1963,7 +2013,7 @@ ol_txrx_mgmt_send_ext(struct cdp_vdev *pvdev,
 qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
                           qdf_nbuf_t msdu, uint16_t peer_id)
 {
-    struct ol_tx_desc_t *tx_desc;
+    struct ol_tx_desc_t *tx_desc = NULL;
     struct ol_txrx_msdu_info_t msdu_info;
 
     msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
@@ -1972,7 +2022,9 @@ qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
     msdu_info.htt.action.tx_comp_req = 0;
     msdu_info.tso_info.is_tso = 0;
 
-    ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+    if (ol_tx_prepare_ll(&tx_desc, vdev, msdu, &msdu_info))
+        return msdu;
+
     HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
 
     htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);