qcacld-3.0: Fix Datapath kernel checkpatch warnings in ol_tx.c

Fix Datapath kernel checkpatch warnings in ol_tx.c

Change-Id: I538bf59ab717dfac6cd5d88273dc938b1f76b6d2
CRs-Fixed: 2032874
Author: Yun Park
Date: 2017-04-05 22:33:51 -07:00
Committed by: snandini
Parent: 4afce45d11
Commit: 3453282718
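Most of the checkpatch warnings fixed here fall into a few groups: multi-statement macros that hide control flow (ol_tx_prepare_ll and OL_TX_ENCAP_WRAPPER both executed a bare return or goto from inside a do { } while (0) body), block comments not using the kernel's leading-asterisk style, a stray return at the end of a void function, and missing blank lines after local declarations. The macro fixes are the substantive part: each macro becomes a static inline helper that reports failure through its return value, and every caller now checks that value explicitly. Below is a minimal, self-contained sketch of that conversion; the names (struct item, alloc_desc(), prepare(), send_one()) are hypothetical stand-ins and are not taken from ol_tx.c:

#include <errno.h>
#include <stddef.h>

struct item;					/* hypothetical payload type */
struct desc { struct item *owner; };		/* hypothetical descriptor */

struct desc *alloc_desc(struct item *item);	/* hypothetical allocator, may return NULL */

/*
 * Before: a statement-like macro with a 'return' hidden inside it.
 * checkpatch warns about macros containing multiple statements and
 * control flow that affects the caller.
 */
#define PREPARE(desc, item)			\
	do {					\
		(desc) = alloc_desc(item);	\
		if (!(desc))			\
			return (item);		\
	} while (0)

/* After: a static inline helper that reports failure via its return value. */
static inline int prepare(struct desc **desc, struct item *item)
{
	*desc = alloc_desc(item);
	if (!*desc)
		return -ENOMEM;
	return 0;
}

/* Caller: the early-exit path is now explicit at the call site. */
static struct item *send_one(struct item *item)
{
	struct desc *tx_desc = NULL;

	if (prepare(&tx_desc, item))
		return item;	/* hand the unaccepted item back */
	/* ... program tx_desc and hand it to the target ... */
	return NULL;
}

In the diff below, ol_tx_prepare_ll() keeps its old contract by returning the unaccepted msdu (NULL on success), so callers simply do "if (ol_tx_prepare_ll(...)) return msdu;", while ol_tx_encap_wrapper() returns -EINVAL and its caller jumps to MSDU_LOOP_BOTTOM.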


@@ -70,24 +70,29 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
  * succeeds, that guarantees that the target has room to accept
  * the new tx frame.
  */
-#define ol_tx_prepare_ll(tx_desc, vdev, msdu, msdu_info) \
-	do {								\
-		struct ol_txrx_pdev_t *pdev = vdev->pdev;		\
-		(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;	\
-		tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);	\
-		if (qdf_unlikely(!tx_desc)) {				\
-			/*						\
-			 * If TSO packet, free associated		\
-			 * remaining TSO segment descriptors		\
-			 */						\
-			if (qdf_nbuf_is_tso(msdu))			\
-				ol_free_remaining_tso_segs(		\
-					vdev, msdu_info, true);		\
-			TXRX_STATS_MSDU_LIST_INCR(			\
-				pdev, tx.dropped.host_reject, msdu);	\
-			return msdu; /* the list of unaccepted MSDUs */	\
-		}							\
-	} while (0)
+static inline qdf_nbuf_t ol_tx_prepare_ll(struct ol_tx_desc_t *tx_desc,
+					  ol_txrx_vdev_handle vdev,
+					  qdf_nbuf_t msdu,
+					  struct ol_txrx_msdu_info_t *msdu_info)
+{
+	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
+	(msdu_info)->htt.info.frame_type = pdev->htt_pkt_type;
+	tx_desc = ol_tx_desc_ll(pdev, vdev, msdu, msdu_info);
+	if (qdf_unlikely(!tx_desc)) {
+		/*
+		 * If TSO packet, free associated
+		 * remaining TSO segment descriptors
+		 */
+		if (qdf_nbuf_is_tso(msdu))
+			ol_free_remaining_tso_segs(
+					vdev, msdu_info, true);
+		TXRX_STATS_MSDU_LIST_INCR(
+			pdev, tx.dropped.host_reject, msdu);
+		return msdu; /* the list of unaccepted MSDUs */
+	}
+	return NULL;
+}
 
 #if defined(FEATURE_TSO)
 void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
@@ -252,10 +257,10 @@ qdf_nbuf_t ol_tx_data(void *data_vdev, qdf_nbuf_t skb)
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
 			"%s:vdev is null", __func__);
 		return skb;
-	} else {
-		pdev = vdev->pdev;
 	}
+	pdev = vdev->pdev;
 
 	if (qdf_unlikely(!pdev)) {
 		QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
 			"%s:pdev is null", __func__);
@@ -376,7 +381,7 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 	 */
 	while (msdu) {
 		qdf_nbuf_t next;
-		struct ol_tx_desc_t *tx_desc;
+		struct ol_tx_desc_t *tx_desc = NULL;
 		int segments = 1;
 
 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
@@ -416,13 +421,14 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 		segments--;
 
-		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
-		/**
-		 * if this is a jumbo nbuf, then increment the number
-		 * of nbuf users for each additional segment of the msdu.
+		if (ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info))
+			return msdu;
+		/*
+		 * If this is a jumbo nbuf, then increment the number
+		 * of nbuf users for each additional segment of the msdu
 		 * This will ensure that the skb is freed only after
-		 * receiving tx completion for all segments of an nbuf
+		 * receiving tx completion for all segments of an nbuf.
 		 */
 		if (segments)
 			qdf_nbuf_inc_users(msdu);
@@ -474,11 +480,12 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 	 */
 	while (msdu) {
 		qdf_nbuf_t next;
-		struct ol_tx_desc_t *tx_desc;
+		struct ol_tx_desc_t *tx_desc = NULL;
 
 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
 		msdu_info.peer = NULL;
-		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+		if (ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info))
+			return msdu;
 
 		TXRX_STATS_MSDU_INCR(vdev->pdev, tx.from_stack, msdu);
@@ -635,7 +642,7 @@ ol_tx_prepare_ll_fast(struct ol_txrx_pdev_t *pdev,
 	/*
 	 * TODO : Can we remove this check and always download a fixed length ?
-	 * */
+	 */
 	if (QDF_NBUF_CB_TX_EXTRA_FRAG_FLAGS_EXT_HEADER(msdu))
@@ -913,9 +920,11 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 			next = qdf_nbuf_next(msdu);
 			if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
 					       ep_id, pkt_download_len))) {
-				/* The packet could not be sent */
-				/* Free the descriptor, return the packet to the
-				 * caller */
+				/*
+				 * The packet could not be sent
+				 * Free the descriptor, return the packet to the
+				 * caller
+				 */
 				ol_tx_desc_free(pdev, tx_desc);
 				return msdu;
 			}
@@ -988,6 +997,7 @@ static void ol_tx_vdev_ll_pause_queue_send_base(struct ol_txrx_vdev_t *vdev)
 		OL_TX_VDEV_PAUSE_QUEUE_SEND_MARGIN;
 	while (max_to_accept > 0 && vdev->ll_pause.txq.depth) {
 		qdf_nbuf_t tx_msdu;
+
 		max_to_accept--;
 		vdev->ll_pause.txq.depth--;
 		tx_msdu = vdev->ll_pause.txq.head;
@@ -1034,6 +1044,7 @@ ol_tx_vdev_pause_queue_append(struct ol_txrx_vdev_t *vdev,
 	while (msdu_list &&
 	       vdev->ll_pause.txq.depth < vdev->ll_pause.max_q_depth) {
 		qdf_nbuf_t next = qdf_nbuf_next(msdu_list);
+
 		QDF_NBUF_UPDATE_TX_PKT_COUNT(msdu_list,
 					     QDF_NBUF_TX_PKT_TXRX_ENQUEUE);
 		DPTRACE(qdf_dp_trace(msdu_list,
@@ -1099,23 +1110,31 @@ qdf_nbuf_t ol_tx_ll_queue(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 		if (vdev->ll_pause.txq.depth > 0 ||
 		    vdev->pdev->tx_throttle.current_throttle_level !=
 		    THROTTLE_LEVEL_0) {
-			/* not paused, but there is a backlog of frms
-			   from a prior pause or throttle off phase */
+			/*
+			 * not paused, but there is a backlog of frms
+			 * from a prior pause or throttle off phase
+			 */
 			msdu_list = ol_tx_vdev_pause_queue_append(
 				vdev, msdu_list, 0);
-			/* if throttle is disabled or phase is "on",
-			   send the frame */
+			/*
+			 * if throttle is disabled or phase is "on",
+			 * send the frame
+			 */
 			if (vdev->pdev->tx_throttle.current_throttle_level ==
 			    THROTTLE_LEVEL_0 ||
 			    vdev->pdev->tx_throttle.current_throttle_phase ==
 			    THROTTLE_PHASE_ON) {
-				/* send as many frames as possible
-				   from the vdevs backlog */
+				/*
+				 * send as many frames as possible
+				 * from the vdevs backlog
+				 */
 				ol_tx_vdev_ll_pause_queue_send_base(vdev);
 			}
 		} else {
-			/* not paused, no throttle and no backlog -
-			   send the new frames */
+			/*
+			 * not paused, no throttle and no backlog -
+			 * send the new frames
+			 */
 			msdu_list = ol_tx_ll_wrapper(vdev, msdu_list);
 		}
 	}
@@ -1144,11 +1163,13 @@ void ol_tx_pdev_ll_pause_queue_send_all(struct ol_txrx_pdev_t *pdev)
 	/* round robin through the vdev queues for the given pdev */
-	/* Potential improvement: download several frames from the same vdev
-	   at a time, since it is more likely that those frames could be
-	   aggregated together, remember which vdev was serviced last,
-	   so the next call this function can resume the round-robin
-	   traversing where the current invocation left off */
+	/*
+	 * Potential improvement: download several frames from the same vdev
+	 * at a time, since it is more likely that those frames could be
+	 * aggregated together, remember which vdev was serviced last,
+	 * so the next call this function can resume the round-robin
+	 * traversing where the current invocation left off
+	 */
 	do {
 		more = 0;
 		TAILQ_FOREACH(vdev, &pdev->vdev_list, vdev_list_elem) {
@@ -1269,13 +1290,14 @@ ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
 	 */
 	while (msdu) {
 		qdf_nbuf_t next;
-		struct ol_tx_desc_t *tx_desc;
+		struct ol_tx_desc_t *tx_desc = NULL;
 
 		msdu_info.htt.info.ext_tid = qdf_nbuf_get_tid(msdu);
 		msdu_info.peer = NULL;
 		msdu_info.tso_info.is_tso = 0;
-		ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+		if (ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info))
+			return msdu;
 
 		/*
 		 * The netbuf may get linked into a different list inside the
@@ -1315,20 +1337,34 @@ ol_tx_non_std_ll(struct ol_txrx_vdev_t *vdev,
 }
 
 #ifdef QCA_SUPPORT_SW_TXRX_ENCAP
-#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) \
-	do { \
-		if (OL_TX_ENCAP(vdev, tx_desc, msdu, &tx_msdu_info) != A_OK) { \
-			qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt); \
-			ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1); \
-			if (tx_msdu_info.peer) { \
-				/* remove the peer reference added above */ \
-				ol_txrx_peer_unref_delete(tx_msdu_info.peer); \
-			} \
-			goto MSDU_LOOP_BOTTOM; \
-		} \
-	} while (0)
+static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
+				      ol_txrx_vdev_handle vdev,
+				      struct ol_tx_desc_t *tx_desc,
+				      qdf_nbuf_t msdu,
+				      struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	if (OL_TX_ENCAP(vdev, tx_desc, msdu, tx_msdu_info) != A_OK) {
+		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
+		ol_tx_desc_frame_free_nonstd(pdev, tx_desc, 1);
+		if (tx_msdu_info->peer) {
+			/* remove the peer reference added above */
+			OL_TXRX_PEER_UNREF_DELETE(tx_msdu_info->peer);
+		}
+		return -EINVAL;
+	}
+	return 0;
+}
 #else
-#define OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu, tx_msdu_info) /* no-op */
+static inline int ol_tx_encap_wrapper(struct ol_txrx_pdev_t *pdev,
+				      ol_txrx_vdev_handle vdev,
+				      struct ol_tx_desc_t *tx_desc,
+				      qdf_nbuf_t msdu,
+				      struct ol_txrx_msdu_info_t *tx_msdu_info)
+{
+	/* no-op */
+	return 0;
+}
 #endif
 
 /* tx filtering is handled within the target FW */
@@ -1418,6 +1454,7 @@ struct ol_tx_desc_t *ol_tx_hl_desc_alloc(struct ol_txrx_pdev_t *pdev,
 	struct ol_txrx_msdu_info_t *msdu_info)
 {
 	struct ol_tx_desc_t *tx_desc = NULL;
+
 	tx_desc = ol_tx_desc_hl(pdev, vdev, msdu, msdu_info);
 	return tx_desc;
 }
@@ -1444,6 +1481,7 @@ ol_txrx_mgmt_tx_desc_alloc(
 	struct ol_txrx_msdu_info_t *tx_msdu_info)
 {
 	struct ol_tx_desc_t *tx_desc;
+
 	tx_msdu_info->htt.action.tx_comp_req = 1;
 	tx_desc = ol_tx_desc_hl(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
 	return tx_desc;
@@ -1471,14 +1509,16 @@ int ol_txrx_mgmt_send_frame(
 {
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	struct ol_tx_frms_queue_t *txq;
+
 	/*
 	 * 1. Look up the peer and queue the frame in the peer's mgmt queue.
 	 * 2. Invoke the download scheduler.
 	 */
 	txq = ol_tx_classify_mgmt(vdev, tx_desc, tx_mgmt_frm, tx_msdu_info);
 	if (!txq) {
-		/*TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
-		  msdu);*/
+		/* TXRX_STATS_MSDU_LIST_INCR(vdev->pdev, tx.dropped.no_txq,
+		 * msdu);
+		 */
 		qdf_atomic_inc(&pdev->tx_queue.rsrc_cnt);
 		ol_tx_desc_frame_free_nonstd(vdev->pdev, tx_desc,
 					     1 /* error */);
@@ -1527,6 +1567,7 @@ ol_txrx_mgmt_tx_desc_alloc(
 	struct ol_txrx_msdu_info_t *tx_msdu_info)
 {
 	struct ol_tx_desc_t *tx_desc;
+
 	/* For LL tx_comp_req is not used so initialized to 0 */
 	tx_msdu_info->htt.action.tx_comp_req = 0;
 	tx_desc = ol_tx_desc_ll(pdev, vdev, tx_mgmt_frm, tx_msdu_info);
@@ -1538,7 +1579,8 @@ ol_txrx_mgmt_tx_desc_alloc(
 	 */
 #if defined(HELIUMPLUS)
 	/* ol_txrx_dump_frag_desc("ol_txrx_mgmt_send(): after ol_tx_desc_ll",
-	   tx_desc); */
+	 * tx_desc);
+	 */
 #endif /* defined(HELIUMPLUS) */
 	if (tx_desc) {
 		/*
@@ -1570,6 +1612,7 @@ int ol_txrx_mgmt_send_frame(
 	uint16_t chanfreq)
 {
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
 	htt_tx_desc_set_chanfreq(tx_desc->htt_tx_desc, chanfreq);
 	QDF_NBUF_CB_TX_PACKET_TRACK(tx_desc->netbuf) =
 		QDF_NBUF_TX_PKT_MGMT_TRACK;
@@ -1600,8 +1643,8 @@ ol_tx_hl_base(
 	qdf_nbuf_t msdu = msdu_list;
 	struct ol_txrx_msdu_info_t tx_msdu_info;
 	struct ocb_tx_ctrl_hdr_t tx_ctrl;
-
 	htt_pdev_handle htt_pdev = pdev->htt_pdev;
+
 	tx_msdu_info.tso_info.is_tso = 0;
 
 	/*
@@ -1703,8 +1746,10 @@ ol_tx_hl_base(
 		}
 
 		if (tx_msdu_info.peer) {
-			/*If the state is not associated then drop all
-			 *the data packets received for that peer*/
+			/*
+			 * If the state is not associated then drop all
+			 * the data packets received for that peer
+			 */
 			if (tx_msdu_info.peer->state ==
 						OL_TXRX_PEER_STATE_DISC) {
 				qdf_atomic_inc(
@@ -1751,8 +1796,9 @@ ol_tx_hl_base(
 			 * encap, it performs encap, and if an error is
 			 * encountered, jumps to the MSDU_LOOP_BOTTOM label.
 			 */
-			OL_TX_ENCAP_WRAPPER(pdev, vdev, tx_desc, msdu,
-					    tx_msdu_info);
+			if (ol_tx_encap_wrapper(pdev, vdev, tx_desc, msdu,
+						&tx_msdu_info))
+				goto MSDU_LOOP_BOTTOM;
 
 			/* initialize the HW tx descriptor */
 			htt_tx_desc_init(
@@ -1790,6 +1836,7 @@ ol_tx_hl(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
 {
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
 	int tx_comp_req = pdev->cfg.default_tx_comp_req;
+
 	return ol_tx_hl_base(vdev, OL_TX_SPEC_STD, msdu_list, tx_comp_req);
 }
@@ -1827,6 +1874,7 @@ ol_txrx_data_tx_cb_set(struct cdp_vdev *pvdev,
 {
 	struct ol_txrx_vdev_t *vdev = (struct ol_txrx_vdev_t *)pvdev;
 	struct ol_txrx_pdev_t *pdev = vdev->pdev;
+
 	pdev->tx_data_callback.func = callback;
 	pdev->tx_data_callback.ctxt = ctxt;
 }
@@ -1858,8 +1906,10 @@ void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
 		  __func__, __LINE__, tx_desc->htt_frag_desc,
 		  &tx_desc->htt_frag_desc_paddr);
 
-	/* it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
-	   is already de-referrable (=> in virtual address space) */
+	/*
+	 * it looks from htt_tx_desc_frag() that tx_desc->htt_frag_desc
+	 * is already de-referrable (=> in virtual address space)
+	 */
 	frag_ptr_i_p = tx_desc->htt_frag_desc;
 
 	/* Dump 6 words of TSO flags */
@@ -1877,10 +1927,9 @@ void ol_txrx_dump_frag_desc(char *msg, struct ol_tx_desc_t *tx_desc)
 		i++;
 		if (i > 5) /* max 6 times: frag_ptr0 to frag_ptr5 */
 			break;
-		else /* jump to next pointer - skip length */
+		/* jump to next pointer - skip length */
 		frag_ptr_i_p += 2;
 	}
-	return;
 }
 
 #endif /* HELIUMPLUS */
@@ -1895,6 +1944,7 @@ ol_txrx_mgmt_send_ext(struct cdp_vdev *pvdev,
 	struct ol_tx_desc_t *tx_desc;
 	struct ol_txrx_msdu_info_t tx_msdu_info;
 	int result = 0;
+
 	tx_msdu_info.tso_info.is_tso = 0;
 	tx_msdu_info.htt.action.use_6mbps = use_6mbps;
@@ -1963,7 +2013,7 @@ ol_txrx_mgmt_send_ext(struct cdp_vdev *pvdev,
 qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
 			  qdf_nbuf_t msdu, uint16_t peer_id)
 {
-	struct ol_tx_desc_t *tx_desc;
+	struct ol_tx_desc_t *tx_desc = NULL;
 	struct ol_txrx_msdu_info_t msdu_info;
 
 	msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
@@ -1972,7 +2022,9 @@ qdf_nbuf_t ol_tx_reinject(struct ol_txrx_vdev_t *vdev,
 	msdu_info.htt.action.tx_comp_req = 0;
 	msdu_info.tso_info.is_tso = 0;
 
-	ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info);
+	if (ol_tx_prepare_ll(tx_desc, vdev, msdu, &msdu_info))
+		return msdu;
+
 	HTT_TX_DESC_POSTPONED_SET(*((uint32_t *) (tx_desc->htt_tx_desc)), true);
 	htt_tx_desc_set_peer_id(tx_desc->htt_tx_desc, peer_id);