@@ -800,6 +800,7 @@ dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
         uint8_t bm_id = dp_tx_get_rbm_id_be(soc, ring_id);
         hal_ring_handle_t hal_ring_hdl = NULL;
         QDF_STATUS status = QDF_STATUS_E_RESOURCES;
+        uint8_t num_desc_bytes = HAL_TX_DESC_LEN_BYTES;
 
         be_vdev = dp_get_be_vdev_from_dp_vdev(vdev);
 
@@ -860,7 +861,7 @@ dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
                 hal_tx_desc_set_l4_checksum_en(hal_tx_desc_cached, 1);
         }
 
-        hal_tx_desc_set_bank_id(hal_tx_desc_cached, be_vdev->bank_id);
+        hal_tx_desc_set_bank_id(hal_tx_desc_cached, vdev->bank_id);
 
         dp_tx_vdev_id_set_hal_tx_desc(hal_tx_desc_cached, vdev, msdu_info);
 
@@ -894,7 +895,7 @@ dp_tx_hw_enqueue_be(struct dp_soc *soc, struct dp_vdev *vdev,
                 dp_vdev_peer_stats_update_protocol_cnt_tx(vdev, tx_desc->nbuf);
 
         /* Sync cached descriptor with HW */
-        hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc);
+        hal_tx_desc_sync(hal_tx_desc_cached, hal_tx_desc, num_desc_bytes);
 
         coalesce = dp_tx_attempt_coalescing(soc, vdev, tx_desc, tid,
                                             msdu_info, ring_id);
 
@@ -1089,6 +1090,7 @@ void dp_tx_update_bank_profile(struct dp_soc_be *be_soc,
 {
         dp_tx_put_bank_profile(be_soc, be_vdev);
         be_vdev->bank_id = dp_tx_get_bank_profile(be_soc, be_vdev);
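+        /* Mirror the bank id into the common vdev so the per-packet Tx
+         * path can read vdev->bank_id without the be_vdev container.
+         */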
+        be_vdev->vdev.bank_id = be_vdev->bank_id;
 }
 
 QDF_STATUS dp_tx_desc_pool_init_be(struct dp_soc *soc,
@@ -1256,3 +1258,160 @@ QDF_STATUS dp_tx_compute_tx_delay_be(struct dp_soc *soc,
 {
         return dp_mlo_compute_hw_delay_us(soc, vdev, ts, delay_us);
 }
+
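+/* Fast-path "map": the frame data is only cache-cleaned (the dsb is
+ * deferred until after the HW descriptor is written below) and its
+ * physical address is taken directly, skipping the regular nbuf DMA map.
+ */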
+static inline
+qdf_dma_addr_t dp_tx_nbuf_map_be(struct dp_vdev *vdev,
+                                 struct dp_tx_desc_s *tx_desc,
+                                 qdf_nbuf_t nbuf)
+{
+        qdf_nbuf_dma_clean_range_no_dsb((void *)nbuf->data,
+                                        (void *)(nbuf->data + 256));
+
+        return (qdf_dma_addr_t)qdf_mem_virt_to_phys(nbuf->data);
+}
+
+static inline
+void dp_tx_nbuf_unmap_be(struct dp_soc *soc,
+                         struct dp_tx_desc_s *desc)
+{
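+        /* Nothing to undo: the fast-path map above only cleans the CPU
+         * cache and never creates a DMA mapping.
+         */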
+}
+
+/**
+ * dp_tx_fast_send_be() - Transmit a frame on a given VAP
+ * @soc_hdl: DP soc handle
+ * @vdev_id: id of DP vdev handle
+ * @nbuf: skb
+ *
+ * Entry point for Core Tx layer (DP_TX) invoked from
+ * hard_start_xmit in OSIF/HDD or from dp_rx_process for intra-vap
+ * forwarding cases
+ *
+ * Return: NULL on success,
+ *         nbuf when it fails to send
+ */
+qdf_nbuf_t dp_tx_fast_send_be(struct cdp_soc_t *soc_hdl, uint8_t vdev_id,
+                              qdf_nbuf_t nbuf)
+{
+        struct dp_soc *soc = cdp_soc_t_to_dp_soc(soc_hdl);
+        struct dp_vdev *vdev = NULL;
+        struct dp_pdev *pdev = NULL;
+        struct dp_tx_desc_s *tx_desc;
+        uint16_t desc_pool_id;
+        uint16_t pkt_len;
+        qdf_dma_addr_t paddr;
+        QDF_STATUS status = QDF_STATUS_E_RESOURCES;
+        uint8_t cached_desc[HAL_TX_DESC_LEN_BYTES] = { 0 };
+        hal_ring_handle_t hal_ring_hdl = NULL;
+        uint32_t *hal_tx_desc_cached;
+        void *hal_tx_desc;
+
+        if (qdf_unlikely(vdev_id >= MAX_VDEV_CNT))
+                return nbuf;
+
+        vdev = soc->vdev_id_map[vdev_id];
+        if (qdf_unlikely(!vdev))
+                return nbuf;
+
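+        /* Tx descriptor pool (and, below, the TCL ring) is keyed by the
+         * skb queue mapping.
+         */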
+        desc_pool_id = qdf_nbuf_get_queue_mapping(nbuf) & DP_TX_QUEUE_MASK;
+
+        pkt_len = qdf_nbuf_headlen(nbuf);
+        DP_STATS_INC_PKT(vdev, tx_i.rcvd, 1, pkt_len);
+        DP_STATS_INC(vdev, tx_i.rcvd_in_fast_xmit_flow, 1);
+        DP_STATS_INC(vdev, tx_i.rcvd_per_core[desc_pool_id], 1);
+
+        pdev = vdev->pdev;
+        if (dp_tx_limit_check(vdev))
+                return nbuf;
+
+        tx_desc = dp_tx_desc_alloc(soc, desc_pool_id);
+
+        if (qdf_unlikely(!tx_desc)) {
+                DP_STATS_INC(vdev, tx_i.dropped.desc_na.num, 1);
+                DP_STATS_INC(vdev, tx_i.dropped.desc_na_exc_alloc_fail.num, 1);
+                return nbuf;
+        }
+
+        dp_tx_outstanding_inc(pdev);
+
+        /* Initialize the SW tx descriptor */
+        tx_desc->nbuf = nbuf;
+        tx_desc->shinfo_addr = skb_end_pointer(nbuf);
+        tx_desc->frm_type = dp_tx_frm_std;
+        tx_desc->tx_encap_type = vdev->tx_encap_type;
+        tx_desc->vdev_id = vdev_id;
+        tx_desc->pdev = pdev;
+        tx_desc->pkt_offset = 0;
+        tx_desc->length = pkt_len;
+        tx_desc->flags |= DP_TX_DESC_FLAG_SIMPLE;
+
+        paddr = dp_tx_nbuf_map_be(vdev, tx_desc, nbuf);
+        if (!paddr) {
+                /* Handle failure */
+                dp_err("qdf_nbuf_map failed");
+                DP_STATS_INC(vdev, tx_i.dropped.dma_error, 1);
+                goto release_desc;
+        }
+
+        tx_desc->dma_addr = paddr;
+
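+        /* Build the TCL data command words directly on the stack rather
+         * than through the per-field hal_tx_desc_set_*() helpers used by
+         * dp_tx_hw_enqueue_be().
+         */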
+        hal_tx_desc_cached = (void *)cached_desc;
+        hal_tx_desc_cached[0] = (uint32_t)tx_desc->dma_addr;
+        hal_tx_desc_cached[1] = tx_desc->id <<
+                TCL_DATA_CMD_BUF_ADDR_INFO_SW_BUFFER_COOKIE_LSB;
+
+        /* bank_id */
+        hal_tx_desc_cached[2] = vdev->bank_id << TCL_DATA_CMD_BANK_ID_LSB;
+        hal_tx_desc_cached[3] = vdev->htt_tcl_metadata <<
+                TCL_DATA_CMD_TCL_CMD_NUMBER_LSB;
+
+        hal_tx_desc_cached[4] = tx_desc->length;
+        /* l3 and l4 checksum enable */
+        hal_tx_desc_cached[4] |= DP_TX_L3_L4_CSUM_ENABLE <<
+                TCL_DATA_CMD_IPV4_CHECKSUM_EN_LSB;
+
+        hal_tx_desc_cached[5] = vdev->lmac_id << TCL_DATA_CMD_PMAC_ID_LSB;
+        hal_tx_desc_cached[5] |= vdev->vdev_id << TCL_DATA_CMD_VDEV_ID_LSB;
+
+        hal_ring_hdl = dp_tx_get_hal_ring_hdl(soc, desc_pool_id);
+
+        if (qdf_unlikely(dp_tx_hal_ring_access_start(soc, hal_ring_hdl))) {
+                dp_err("HAL RING Access Failed -- %pK", hal_ring_hdl);
+                DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
+                DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
+                goto ring_access_fail2;
+        }
+
+        hal_tx_desc = hal_srng_src_get_next(soc->hal_soc, hal_ring_hdl);
+        if (qdf_unlikely(!hal_tx_desc)) {
+                dp_verbose_debug("TCL ring full ring_id:%d", desc_pool_id);
+                DP_STATS_INC(soc, tx.tcl_ring_full[desc_pool_id], 1);
+                DP_STATS_INC(vdev, tx_i.dropped.enqueue_fail, 1);
+                goto ring_access_fail;
+        }
+
+        tx_desc->flags |= DP_TX_DESC_FLAG_QUEUED_TX;
+
+        /* Sync cached descriptor with HW */
+        qdf_mem_copy(hal_tx_desc, hal_tx_desc_cached, DP_TX_FAST_DESC_SIZE);
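+        /* One dsb publishes both the descriptor words and the earlier
+         * no-dsb cache clean of the frame data.
+         */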
+        qdf_dsb();
+
+        DP_STATS_INC_PKT(vdev, tx_i.processed, 1, tx_desc->length);
+        DP_STATS_INC(soc, tx.tcl_enq[desc_pool_id], 1);
+        status = QDF_STATUS_SUCCESS;
+
+ring_access_fail:
+        dp_tx_ring_access_end_wrapper(soc, hal_ring_hdl, 0);
+
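+        /* Jumped to when SRNG access could not be started; skips the
+         * access_end wrapper above.
+         */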
+ring_access_fail2:
+        if (status != QDF_STATUS_SUCCESS) {
+                dp_tx_nbuf_unmap_be(soc, tx_desc);
+                goto release_desc;
+        }
+
+        return NULL;
+
+release_desc:
+        dp_tx_desc_release(tx_desc, desc_pool_id);
+
+        return nbuf;
+}
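+
+/* Illustrative caller sketch: the nbuf is consumed on success and handed
+ * back on failure, so a caller would typically do
+ *
+ *        nbuf = dp_tx_fast_send_be(soc_hdl, vdev_id, nbuf);
+ *        if (nbuf)
+ *                qdf_nbuf_free(nbuf);
+ */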