qcacmn: add dp_tx_send_exception

dp_tx_send_exception is an exception-path transmit routine that sends
frames through the target FW by setting DP_TX_DESC_FLAG_TO_FW on the
Tx descriptor

Change-Id: Id6abfcd2bf060c4fe3aaca91634f3be501530a7e
CRs-Fixed: 2176814
Author: Prathyusha Guduri
Date: 2018-01-19 14:17:14 +05:30
Committed by: snandini
Parent: 6f7ddcadb4
Commit: be41d97175
4 changed files with 180 additions and 17 deletions


@@ -319,6 +319,23 @@ enum cdp_sec_type {
cdp_num_sec_types
};
+/**
+ * struct cdp_tx_exception_metadata - Exception path parameters
+ * @peer_id: Peer id of the peer
+ * @tid: Transmit Identifier
+ * @tx_encap_type: Transmit encap type (e.g. Raw, Native Wi-Fi, Ethernet)
+ * @sec_type: sec_type to be passed to HAL
+ *
+ * This structure holds the parameters needed on the Tx exception path
+ *
+ */
+struct cdp_tx_exception_metadata {
+    uint16_t peer_id;
+    uint8_t tid;
+    uint16_t tx_encap_type;
+    enum cdp_sec_type sec_type;
+};
typedef struct cdp_soc_t *ol_txrx_soc_handle;
/**
@@ -375,6 +392,18 @@ typedef void
*/
typedef qdf_nbuf_t (*ol_txrx_tx_fp)(void *data_vdev,
qdf_nbuf_t msdu_list);
+/**
+ * ol_txrx_tx_exc_fp - top-level transmit function on the exception path
+ * @data_vdev - handle to the virtual device object
+ * @msdu_list - list of network buffers
+ * @tx_exc_metadata - structure that holds the exception-path parameters
+ */
+typedef qdf_nbuf_t (*ol_txrx_tx_exc_fp)(void *data_vdev,
+                                        qdf_nbuf_t msdu_list,
+                                        struct cdp_tx_exception_metadata
+                                                *tx_exc_metadata);
/**
* ol_txrx_tx_flow_control_fp - tx flow control notification
* function from txrx to OS shim
@@ -518,6 +547,7 @@ struct ol_txrx_ops {
/* tx function pointers - specified by txrx, stored by OS shim */
struct {
ol_txrx_tx_fp tx;
+    ol_txrx_tx_exc_fp tx_exception;
ol_txrx_tx_free_ext_fp tx_free_ext;
} tx;
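
A short sketch of how an OS shim might fill cdp_tx_exception_metadata and call
through the new tx_exception pointer. This is illustrative only: shim_vdev and
the resolved peer_id are assumptions, and the field values are validated later
by dp_check_exc_metadata() in dp_tx.c.

    /* Illustrative shim-side call, assuming txrx_ops was populated during
     * vdev registration. HTT_INVALID_TID leaves the default DSCP-TID
     * mapping in force; a TID within DP_MAX_TIDS would override it. */
    struct cdp_tx_exception_metadata meta = {
        .peer_id = peer_id,                         /* resolved by the shim */
        .tid = HTT_INVALID_TID,
        .tx_encap_type = htt_cmn_pkt_type_ethernet,
        .sec_type = cdp_sec_type_none,              /* no HW encryption */
    };

    nbuf = txrx_ops->tx.tx_exception(shim_vdev, nbuf, &meta);
    /* NULL return: frame consumed; non-NULL: caller still owns the buffer */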


@@ -2987,6 +2987,8 @@ static void dp_vdev_register_wifi3(struct cdp_vdev *vdev_handle,
/* TODO: Enable the following once Tx code is integrated */
txrx_ops->tx.tx = dp_tx_send;
+    txrx_ops->tx.tx_exception = dp_tx_send_exception;
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_INFO_LOW,
"DP Vdev Register success");
}
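
For reference, the shim obtains these pointers during vdev registration: it
hands an ol_txrx_ops instance down, and dp_vdev_register_wifi3() writes the DP
entry points into it. A rough sketch; the wrapper name cdp_vdev_register() and
its argument list are assumptions for this era of the CDP API:

    /* Illustrative registration handshake; the DP layer fills the
     * shim-owned ops table in place. */
    struct ol_txrx_ops txrx_ops = { 0 };

    cdp_vdev_register(soc, dp_vdev, shim_ctx, &txrx_ops);

    /* After this returns:
     *   txrx_ops.tx.tx           == dp_tx_send
     *   txrx_ops.tx.tx_exception == dp_tx_send_exception
     */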


@@ -518,6 +518,8 @@ struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
* @vdev: DP vdev handle
* @nbuf: skb
* @desc_pool_id: Descriptor pool ID
+ * @meta_data: Metadata to be sent to the FW
+ * @tx_exc_metadata: Handle that holds exception path metadata
*
* Allocate and prepare Tx descriptor with msdu information.
*
* Return: Pointer to Tx Descriptor on success,
@@ -525,8 +527,8 @@ struct dp_tx_ext_desc_elem_s *dp_tx_prepare_ext_desc(struct dp_vdev *vdev,
*/
static
struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
-        qdf_nbuf_t nbuf, uint8_t desc_pool_id,
-        uint32_t *meta_data)
+        qdf_nbuf_t nbuf, uint8_t desc_pool_id, uint32_t *meta_data,
+        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
uint8_t align_pad;
uint8_t is_exception = 0;
@@ -549,7 +551,8 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
/* Initialize the SW tx descriptor */
tx_desc->nbuf = nbuf;
tx_desc->frm_type = dp_tx_frm_std;
-    tx_desc->tx_encap_type = vdev->tx_encap_type;
+    tx_desc->tx_encap_type = (tx_exc_metadata ?
+            tx_exc_metadata->tx_encap_type : vdev->tx_encap_type);
tx_desc->vdev = vdev;
tx_desc->pdev = pdev;
tx_desc->msdu_ext_desc = NULL;
@@ -623,7 +626,7 @@ struct dp_tx_desc_s *dp_tx_prepare_desc_single(struct dp_vdev *vdev,
}
#if !TQM_BYPASS_WAR
-    if (is_exception)
+    if (is_exception || tx_exc_metadata)
#endif
{
/* Temporary WAR due to TQM VP issues */
@@ -790,6 +793,7 @@ error:
* @tid: TID from HLOS for overriding default DSCP-TID mapping
* @fw_metadata: Metadata to send to Target Firmware along with frame
* @ring_id: Ring ID of H/W ring to which we enqueue the packet
+ * @tx_exc_metadata: Handle that holds exception path metadata
*
* Gets the next free TCL HW DMA descriptor and sets up required parameters
* from software Tx descriptor
@@ -798,7 +802,9 @@ error:
*/
static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
struct dp_tx_desc_s *tx_desc, uint8_t tid,
-            uint16_t fw_metadata, uint8_t ring_id)
+            uint16_t fw_metadata, uint8_t ring_id,
+            struct cdp_tx_exception_metadata
+                    *tx_exc_metadata)
{
uint8_t type;
uint16_t length;
@@ -806,6 +812,9 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
qdf_dma_addr_t dma_addr;
uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES];
+    enum cdp_sec_type sec_type = (tx_exc_metadata ?
+            tx_exc_metadata->sec_type : vdev->sec_type);
/* Return Buffer Manager ID */
uint8_t bm_id = ring_id;
void *hal_srng = soc->tcl_data_ring[ring_id].hal_srng;
@@ -832,7 +841,7 @@ static QDF_STATUS dp_tx_hw_enqueue(struct dp_soc *soc, struct dp_vdev *vdev,
hal_tx_desc_set_dscp_tid_table_id(hal_tx_desc_cached,
vdev->dscp_tid_map_id);
hal_tx_desc_set_encrypt_type(hal_tx_desc_cached,
-            sec_type_map[vdev->sec_type]);
+            sec_type_map[sec_type]);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s length:%d , type = %d, dma_addr %llx, offset %d desc id %u",
@@ -1162,15 +1171,18 @@ static void dp_non_std_tx_comp_free_buff(struct dp_tx_desc_s *tx_desc,
* @vdev: DP vdev handle
* @nbuf: skb
* @tid: TID from HLOS for overriding default DSCP-TID mapping
* @meta_data: Metadata to be sent to the FW
* @tx_q: Tx queue to be used for this Tx frame
* @peer_id: peer_id of the peer in case of NAWDS frames
+ * @tx_exc_metadata: Handle that holds exception path metadata
*
* Return: NULL on success,
* nbuf when it fails to send
*/
static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
uint8_t tid, struct dp_tx_queue *tx_q,
-        uint32_t *meta_data, uint16_t peer_id)
+        uint32_t *meta_data, uint16_t peer_id,
+        struct cdp_tx_exception_metadata *tx_exc_metadata)
{
struct dp_pdev *pdev = vdev->pdev;
struct dp_soc *soc = pdev->soc;
@@ -1181,7 +1193,8 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
HTT_TX_TCL_METADATA_VALID_HTT_SET(htt_tcl_metadata, 0);
/* Setup Tx descriptor for an MSDU, and MSDU extension descriptor */
-    tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id, meta_data);
+    tx_desc = dp_tx_prepare_desc_single(vdev, nbuf, tx_q->desc_pool_id,
+            meta_data, tx_exc_metadata);
if (!tx_desc) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
"%s Tx_desc prepare Fail vdev %pK queue %d\n",
@@ -1221,7 +1234,7 @@ static qdf_nbuf_t dp_tx_send_msdu_single(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
/* Enqueue the Tx MSDU descriptor to HW for transmit */
status = dp_tx_hw_enqueue(soc, vdev, tx_desc, tid,
-            htt_tcl_metadata, tx_q->ring_id);
+            htt_tcl_metadata, tx_q->ring_id, tx_exc_metadata);
if (status != QDF_STATUS_SUCCESS) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1328,7 +1341,7 @@ qdf_nbuf_t dp_tx_send_msdu_multiple(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
* Enqueue the Tx MSDU descriptor to HW for transmit
*/
status = dp_tx_hw_enqueue(soc, vdev, tx_desc, msdu_info->tid,
-            vdev->htt_tcl_metadata, tx_q->ring_id);
+            vdev->htt_tcl_metadata, tx_q->ring_id, NULL);
if (status != QDF_STATUS_SUCCESS) {
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
@@ -1610,7 +1623,7 @@ static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
peer_id = peer->peer_ids[0];
nbuf_copy = dp_tx_send_msdu_single(vdev, nbuf_copy, tid,
-            tx_q, meta_data, peer_id);
+            tx_q, meta_data, peer_id, NULL);
if (nbuf_copy != NULL) {
qdf_nbuf_free(nbuf_copy);
continue;
@@ -1626,6 +1639,121 @@ static qdf_nbuf_t dp_tx_prepare_nawds(struct dp_vdev *vdev, qdf_nbuf_t nbuf,
}
#endif
+/**
+ * dp_check_exc_metadata() - Checks if parameters are valid
+ * @tx_exc: holds all exception path parameters
+ *
+ * Return: true when all the parameters are valid, else false
+ *
+ */
+static bool dp_check_exc_metadata(struct cdp_tx_exception_metadata *tx_exc)
+{
+    if ((tx_exc->tid > DP_MAX_TIDS && tx_exc->tid != HTT_INVALID_TID) ||
+        tx_exc->tx_encap_type > htt_cmn_pkt_num_types ||
+        tx_exc->sec_type > cdp_num_sec_types) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * dp_tx_send_exception() - Transmit a frame on a given VAP in exception path
+ * @vap_dev: DP vdev handle
+ * @nbuf: skb
+ * @tx_exc_metadata: Handle that holds exception path metadata
+ *
+ * Entry point for Core Tx layer (DP_TX) invoked from
+ * hard_start_xmit in OSIF/HDD to transmit frames through the FW
+ *
+ * Return: NULL on success,
+ *         nbuf when it fails to send
+ */
+qdf_nbuf_t dp_tx_send_exception(void *vap_dev, qdf_nbuf_t nbuf,
+        struct cdp_tx_exception_metadata *tx_exc_metadata)
+{
+    struct ether_header *eh = NULL;
+    struct dp_vdev *vdev = (struct dp_vdev *) vap_dev;
+    struct dp_tx_queue tx_queue;
+
+    eh = (struct ether_header *)qdf_nbuf_data(nbuf);
+
+    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+            "%s , skb %pM",
+            __func__, nbuf->data);
+
+    DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, qdf_nbuf_len(nbuf));
+
+    if (qdf_unlikely(!dp_check_exc_metadata(tx_exc_metadata))) {
+        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+                "Invalid parameters in exception path");
+        goto fail;
+    }
+
+    /* Basic sanity checks for unsupported packets */
+
+    /* MESH mode */
+    if (qdf_unlikely(vdev->mesh_vdev)) {
+        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+                "Mesh mode is not supported in exception path");
+        goto fail;
+    }
+
+    /* TSO or SG */
+    if (qdf_unlikely(qdf_nbuf_is_tso(nbuf)) ||
+        qdf_unlikely(qdf_nbuf_is_nonlinear(nbuf))) {
+        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+                "TSO and SG are not supported in exception path");
+        goto fail;
+    }
+
+    /* RAW */
+    if (qdf_unlikely(tx_exc_metadata->tx_encap_type == htt_cmn_pkt_type_raw)) {
+        QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+                "Raw frame is not supported in exception path");
+        goto fail;
+    }
+
+    /* Mcast enhancement */
+    if (qdf_unlikely(vdev->mcast_enhancement_en > 0)) {
+        if (DP_FRAME_IS_MULTICAST((eh)->ether_dhost)) {
+            QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_ERROR,
+                    "Ignoring mcast_enhancement_en; sending the mcast packet to the FW\n");
+        }
+    }
+
+    /*
+     * Get HW Queue to use for this frame.
+     * TCL supports up to 4 DMA rings, out of which 3 rings are
+     * dedicated for data and 1 for command.
+     * "queue_id" maps to one hardware ring.
+     * With each ring, we also associate a unique Tx descriptor pool
+     * to minimize lock contention for these resources.
+     */
+    dp_tx_get_queue(vdev, nbuf, &tx_queue);
+
+    /* Reset the control block */
+    qdf_nbuf_reset_ctxt(nbuf);
+
+    /* Single linear frame */
+    /*
+     * If nbuf is a simple linear frame, use send_single function to
+     * prepare a direct-buffer type TCL descriptor and enqueue it to the
+     * TCL SRNG. There is no need to set up an MSDU extension descriptor.
+     */
+    nbuf = dp_tx_send_msdu_single(vdev, nbuf, tx_exc_metadata->tid,
+            &tx_queue, NULL, tx_exc_metadata->peer_id, tx_exc_metadata);
+
+    return nbuf;
+
+fail:
+    QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
+            "pkt send failed");
+    return nbuf;
+}
/**
* dp_tx_send() - Transmit a frame on a given VAP
* @vap_dev: DP vdev handle
@@ -1653,9 +1781,9 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
eh = (struct ether_header *)qdf_nbuf_data(nbuf);
QDF_TRACE(QDF_MODULE_ID_DP, QDF_TRACE_LEVEL_DEBUG,
"%s , skb %0x:%0x:%0x:%0x:%0x:%0x\n",
__func__, nbuf->data[0], nbuf->data[1], nbuf->data[2],
nbuf->data[3], nbuf->data[4], nbuf->data[5]);
"%s , skb %pM",
__func__, nbuf->data);
/*
* Set Default Host TID value to invalid TID
* (TID override disabled)
@@ -1774,7 +1902,7 @@ qdf_nbuf_t dp_tx_send(void *vap_dev, qdf_nbuf_t nbuf)
* SRNG. There is no need to setup a MSDU extension descriptor.
*/
nbuf = dp_tx_send_msdu_single(vdev, nbuf, msdu_info.tid,
&msdu_info.tx_queue, msdu_info.meta_data, peer_id);
&msdu_info.tx_queue, msdu_info.meta_data, peer_id, NULL);
return nbuf;
@@ -1914,7 +2042,8 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
msdu_info.tid,
&msdu_info.tx_queue,
msdu_info.meta_data,
-                    peer_id);
+                    peer_id,
+                    NULL);
if (nbuf_copy) {
QDF_TRACE(QDF_MODULE_ID_DP,
@@ -1940,7 +2069,7 @@ void dp_tx_reinject_handler(struct dp_tx_desc_s *tx_desc, uint8_t *status)
nbuf = dp_tx_send_msdu_single(vdev,
nbuf, msdu_info.tid,
&msdu_info.tx_queue,
-                    msdu_info.meta_data, peer_id);
+                    msdu_info.meta_data, peer_id, NULL);
if (nbuf) {
QDF_TRACE(QDF_MODULE_ID_DP,
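
Taken together, the contract of the new entry point is: a NULL return means
the frame was consumed, and a non-NULL return hands the buffer back to the
caller. A caller-side sketch of the free-on-failure pattern this implies; the
helper name is an illustrative assumption, not part of this change:

    /* Hedged sketch: send one frame on the exception path and honor
     * dp_tx_send_exception()'s return contract. */
    static void exc_tx_one(void *dp_vdev, qdf_nbuf_t nbuf,
                           struct cdp_tx_exception_metadata *meta)
    {
        qdf_nbuf_t ret = dp_tx_send_exception(dp_vdev, nbuf, meta);

        if (ret) {
            /* Rejected (invalid metadata, mesh vdev, TSO/SG or raw
             * frame) or enqueue failed; the buffer was not consumed. */
            qdf_nbuf_free(ret);
        }
    }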


@@ -143,6 +143,8 @@ QDF_STATUS dp_tx_pdev_detach(struct dp_pdev *pdev);
QDF_STATUS dp_tx_pdev_attach(struct dp_pdev *pdev);
qdf_nbuf_t dp_tx_send(void *data_vdev, qdf_nbuf_t nbuf);
+qdf_nbuf_t dp_tx_send_exception(void *data_vdev, qdf_nbuf_t nbuf,
+        struct cdp_tx_exception_metadata *tx_exc);
#ifdef CONVERGED_TDLS_ENABLE
qdf_nbuf_t dp_tx_non_std(struct cdp_vdev *vdev_handle,